vserver 1.9.3
[linux-2.6.git] / drivers / s390 / cio / device_fsm.c
1 /*
2  * drivers/s390/cio/device_fsm.c
3  * finite state machine for device handling
4  *
5  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6  *                       IBM Corporation
7  *    Author(s): Cornelia Huck(cohuck@de.ibm.com)
8  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10
11 #include <linux/module.h>
12 #include <linux/config.h>
13 #include <linux/init.h>
14
15 #include <asm/ccwdev.h>
16 #include <asm/qdio.h>
17
18 #include "cio.h"
19 #include "cio_debug.h"
20 #include "css.h"
21 #include "device.h"
22 #include "chsc.h"
23 #include "ioasm.h"
24 #include "qdio.h"
25
26 int
27 device_is_disconnected(struct subchannel *sch)
28 {
29         struct ccw_device *cdev;
30
31         if (!sch->dev.driver_data)
32                 return 0;
33         cdev = sch->dev.driver_data;
34         return (cdev->private->state == DEV_STATE_DISCONNECTED ||
35                 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
36 }
37
38 void
39 device_set_disconnected(struct subchannel *sch)
40 {
41         struct ccw_device *cdev;
42
43         if (!sch->dev.driver_data)
44                 return;
45         cdev = sch->dev.driver_data;
46         ccw_device_set_timeout(cdev, 0);
47         cdev->private->state = DEV_STATE_DISCONNECTED;
48 }
49
50 void
51 device_set_waiting(struct subchannel *sch)
52 {
53         struct ccw_device *cdev;
54
55         if (!sch->dev.driver_data)
56                 return;
57         cdev = sch->dev.driver_data;
58         ccw_device_set_timeout(cdev, 10*HZ);
59         cdev->private->state = DEV_STATE_WAIT4IO;
60 }
61
62 /*
63  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
64  */
65 static void
66 ccw_device_timeout(unsigned long data)
67 {
68         struct ccw_device *cdev;
69
70         cdev = (struct ccw_device *) data;
71         spin_lock_irq(cdev->ccwlock);
72         dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
73         spin_unlock_irq(cdev->ccwlock);
74 }
75
76 /*
77  * Set timeout
78  */
79 void
80 ccw_device_set_timeout(struct ccw_device *cdev, int expires)
81 {
82         if (expires == 0) {
83                 del_timer(&cdev->private->timer);
84                 return;
85         }
86         if (timer_pending(&cdev->private->timer)) {
87                 if (mod_timer(&cdev->private->timer, jiffies + expires))
88                         return;
89         }
90         cdev->private->timer.function = ccw_device_timeout;
91         cdev->private->timer.data = (unsigned long) cdev;
92         cdev->private->timer.expires = jiffies + expires;
93         add_timer(&cdev->private->timer);
94 }
95
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, two tries
 * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	/* Refresh the subchannel information block from the hardware. */
	ret = stsch(sch->irq, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		return -ENODEV; 
	if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
		/* Not operational or no activity -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		ret = cio_cancel(sch);
		if (ret != -EINVAL)
			return ret;
		/* cancel io unsuccessful. From now on it is asynchronous. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	/* Once clear is pending, only more clears make sense. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			/* Success means an interrupt is still expected. */
			return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	/* All retries exhausted; the i/o cannot be stopped. */
	panic("Can't stop i/o on subchannel.\n");
}
144
/*
 * Sense id completed for a device that was previously known.
 * Verify it is still the same device; if not, schedule an
 * unregister/re-register cycle on the ccw_device workqueue.
 * Otherwise mark the device for an operational notification,
 * which ccw_device_done() will deliver later.
 */
static void
ccw_device_handle_oper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	cdev->private->flags.recog_done = 1;
	/*
	 * Check if cu type and device type still match. If
	 * not, it is certainly another device and we have to
	 * de- and re-register. Also check here for non-matching devno.
	 */
	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
	    cdev->id.dev_model != cdev->private->senseid.dev_model ||
	    cdev->private->devno != sch->schib.pmcw.dev) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unreg_rereg, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return;
	}
	/* Same device: let ccw_device_done() run the oper notification. */
	cdev->private->flags.donotify = 1;
}
169
170 /*
171  * The machine won't give us any notification by machine check if a chpid has
172  * been varied online on the SE so we have to find out by magic (i. e. driving
173  * the channel subsystem to device selection and updating our path masks).
174  */
175 static inline void
176 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
177 {
178         int mask, i;
179
180         for (i = 0; i<8; i++) {
181                 mask = 0x80 >> i;
182                 if (!(sch->lpm & mask))
183                         continue;
184                 if (old_lpm & mask)
185                         continue;
186                 chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
187         }
188 }
189
/*
 * Stop device recognition. @state is the recognition result
 * (DEV_STATE_OFFLINE on success, DEV_STATE_BOXED or
 * DEV_STATE_NOT_OPER on failure).
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm;

	sch = to_subchannel(cdev->dev.parent);

	/* Recognition is over: stop the watchdog and the subchannel. */
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->irq, &sch->schib);
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			/* Still unreachable; fall back to disconnected. */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : unknown device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
			/* Device reappeared after a disconnect. */
			ccw_device_handle_oper(cdev);
			notify = 1;
		}
		/* fill out sense information */
		cdev->id = (struct ccw_device_id) {
			.cu_type   = cdev->private->senseid.cu_type,
			.cu_model  = cdev->private->senseid.cu_model,
			.dev_type  = cdev->private->senseid.dev_type,
			.dev_model = cdev->private->senseid.dev_model,
		};
		if (notify) {
			/* Get device online again. */
			cdev->private->state = DEV_STATE_OFFLINE;
			ccw_device_online(cdev);
			wake_up(&cdev->private->wait_q);
			return;
		}
		/* Issue device info message. */
		CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
			  "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
			  "%04X/%02X\n", cdev->private->devno,
			  cdev->id.cu_type, cdev->id.cu_model,
			  cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}
270
271 /*
272  * Function called from device_id.c after sense id has completed.
273  */
274 void
275 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
276 {
277         switch (err) {
278         case 0:
279                 ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
280                 break;
281         case -ETIME:            /* Sense id stopped by timeout. */
282                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
283                 break;
284         default:
285                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
286                 break;
287         }
288 }
289
290 static void
291 ccw_device_oper_notify(void *data)
292 {
293         struct ccw_device *cdev;
294         struct subchannel *sch;
295         int ret;
296
297         cdev = (struct ccw_device *)data;
298         sch = to_subchannel(cdev->dev.parent);
299         ret = (sch->driver && sch->driver->notify) ?
300                 sch->driver->notify(&sch->dev, CIO_OPER) : 0;
301         if (!ret)
302                 /* Driver doesn't want device back. */
303                 ccw_device_do_unreg_rereg((void *)cdev);
304         else
305                 wake_up(&cdev->private->wait_q);
306 }
307
/*
 * Finished with online/offline processing. Set the final @state,
 * clear out accumulated status, deliver any deferred operational
 * notification and wake up waiters.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	/* Only an online device keeps its subchannel enabled. */
	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;


	if (state == DEV_STATE_BOXED)
		CIO_DEBUG(KERN_WARNING, 2,
			  "Boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);

	/* Deliver the notification requested by ccw_device_handle_oper(). */
	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
			     (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);

	/* Drop the reference taken in ccw_device_online(). */
	if (css_init_done && state != DEV_STATE_ONLINE)
		put_device (&cdev->dev);
}
343
/*
 * Function called from device_pgid.c after sense path ground has completed.
 * On success, continue the online sequence with path group verification;
 * on failure, finish with an appropriate final state.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	switch (err) {
	case 0:
		/* Start Path Group verification. */
		sch->vpm = 0;	/* Start with no path groups set. */
		cdev->private->state = DEV_STATE_VERIFY;
		ccw_device_verify_start(cdev);
		break;
	case -ETIME:		/* Sense path group id stopped by timeout. */
	case -EUSERS:		/* device is reserved for someone else. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		break;
	default:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
373
/*
 * Start device recognition. Returns 0 when the sense id sequence
 * was started, -EINVAL when the device is in a state from which
 * recognition may not be started, or the cio_enable_subchannel()
 * error when the subchannel could not be enabled.
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	/* Recognition only makes sense for not-oper or boxed devices. */
	if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return ret;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after an
	 * timeout (or if sense pgid during path verification detects the device
	 * is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	ccw_device_sense_id_start(cdev);
	return 0;
}
408
409 /*
410  * Handle timeout in device recognition.
411  */
412 static void
413 ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
414 {
415         int ret;
416
417         ret = ccw_device_cancel_halt_clear(cdev);
418         switch (ret) {
419         case 0:
420                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
421                 break;
422         case -ENODEV:
423                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
424                 break;
425         default:
426                 ccw_device_set_timeout(cdev, 3*HZ);
427         }
428 }
429
430
/*
 * Workqueue callback: the device has no usable paths left. Ask the
 * subchannel driver whether it wants to keep the device anyway; if
 * not, disable the subchannel and schedule its unregistration,
 * otherwise park the device in the disconnected state.
 */
static void
ccw_device_nopath_notify(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	cdev = (struct ccw_device *)data;
	sch = to_subchannel(cdev->dev.parent);
	/* Extra sanity. */
	if (sch->lpm)
		return;
	ret = (sch->driver && sch->driver->notify) ?
		sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
	if (!ret) {
		/*
		 * NOTE(review): the reference taken on &sch->dev here has
		 * no visible matching put_device() in this function —
		 * presumably it is dropped by the unregister path; verify.
		 */
		if (get_device(&sch->dev)) {
			/* Driver doesn't want to keep device. */
			cio_disable_subchannel(sch);
			if (get_device(&cdev->dev)) {
				PREPARE_WORK(&cdev->private->kick_work,
					     ccw_device_call_sch_unregister,
					     (void *)cdev);
				queue_work(ccw_device_work,
					   &cdev->private->kick_work);
			}
		}
	} else {
		/* Driver keeps the device; treat it as disconnected. */
		cio_disable_subchannel(sch);
		ccw_device_set_timeout(cdev, 0);
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
	}
}
464
/*
 * Path verification (from device_pgid.c) has completed; move the
 * device to its final state according to the result in @err.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	cdev->private->flags.doverify = 0;
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		/* fallthrough */
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* Verification failed entirely; report no-path and give up. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
486
/*
 * Get device online. Returns 0 when the online sequence was
 * started, -EINVAL for an unsuitable device state, -ENODEV when the
 * device reference could not be obtained, or the
 * cio_enable_subchannel() error.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	/* Hold a device reference for the duration of the online attempt;
	 * ccw_device_done() drops it again if we don't end up online. */
	if (css_init_done && !get_device(&cdev->dev))
		return -ENODEV;
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state online immediately. */
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}
520
521 void
522 ccw_device_disband_done(struct ccw_device *cdev, int err)
523 {
524         switch (err) {
525         case 0:
526                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
527                 break;
528         case -ETIME:
529                 ccw_device_done(cdev, DEV_STATE_BOXED);
530                 break;
531         default:
532                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
533                 break;
534         }
535 }
536
537 /*
538  * Shutdown device.
539  */
540 int
541 ccw_device_offline(struct ccw_device *cdev)
542 {
543         struct subchannel *sch;
544
545         sch = to_subchannel(cdev->dev.parent);
546         if (cdev->private->state != DEV_STATE_ONLINE) {
547                 if (sch->schib.scsw.actl != 0)
548                         return -EBUSY;
549                 return -EINVAL;
550         }
551         if (sch->schib.scsw.actl != 0)
552                 return -EBUSY;
553         /* Are we doing path grouping? */
554         if (!cdev->private->options.pgroup) {
555                 /* No, set state offline immediately. */
556                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
557                 return 0;
558         }
559         /* Start Set Path Group commands. */
560         cdev->private->state = DEV_STATE_DISBAND_PGID;
561         ccw_device_disband_start(cdev);
562         return 0;
563 }
564
565 /*
566  * Handle timeout in device online/offline process.
567  */
568 static void
569 ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
570 {
571         int ret;
572
573         ret = ccw_device_cancel_halt_clear(cdev);
574         switch (ret) {
575         case 0:
576                 ccw_device_done(cdev, DEV_STATE_BOXED);
577                 break;
578         case -ENODEV:
579                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
580                 break;
581         default:
582                 ccw_device_set_timeout(cdev, 3*HZ);
583         }
584 }
585
/*
 * Handle not oper event in device recognition: simply finish
 * recognition with the not-operational result.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
594
/*
 * Handle not operational event while offline: mark the device
 * not-oper, unregister the subchannel and clear its interruption
 * parameter, then wake up waiters.
 */
static void
ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	cdev->private->state = DEV_STATE_NOT_OPER;
	sch = to_subchannel(cdev->dev.parent);
	device_unregister(&sch->dev);
	/* Stop interrupt delivery for this subchannel. */
	sch->schib.pmcw.intparm = 0;
	cio_modify(sch);
	wake_up(&cdev->private->wait_q);
}
610
611 /*
612  * Handle not operational event while online.
613  */
614 static void
615 ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
616 {
617         struct subchannel *sch;
618
619         sch = to_subchannel(cdev->dev.parent);
620         if (sch->driver->notify &&
621             sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
622                         ccw_device_set_timeout(cdev, 0);
623                         cdev->private->state = DEV_STATE_DISCONNECTED;
624                         wake_up(&cdev->private->wait_q);
625                         return;
626         }
627         cdev->private->state = DEV_STATE_NOT_OPER;
628         cio_disable_subchannel(sch);
629         if (sch->schib.scsw.actl != 0) {
630                 // FIXME: not-oper indication to device driver ?
631                 ccw_device_call_handler(cdev);
632         }
633         device_unregister(&sch->dev);
634         sch->schib.pmcw.intparm = 0;
635         cio_modify(sch);
636         wake_up(&cdev->private->wait_q);
637 }
638
/*
 * Handle path verification event. Starts path verification
 * immediately if the device is idle, otherwise flags it to be run
 * after the final i/o status has been delivered.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	/* Without path grouping there is nothing to verify. */
	if (!cdev->private->options.pgroup)
		return;
	if (cdev->private->state == DEV_STATE_W4SENSE) {
		/* Basic sense in flight; verify once it has completed. */
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (sch->schib.scsw.actl != 0 ||
	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verfication now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}
668
/*
 * Got an interrupt for a normal io (state online). Dispatches
 * unsolicited interrupts to the driver, starts basic sense for unit
 * checks without concurrent sense data, and otherwise accumulates
 * status and calls the driver's interrupt handler.
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* The hardware stores the irb in the lowcore. */
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if ((irb->scsw.stctl ==
			(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
	    && (!irb->scsw.cc)) {
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(irb, &cdev->private->irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		/* Unsolicited interrupts get intparm 0. */
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
710
/*
 * Got an timeout in online state. Try to terminate the running
 * i/o; if halt/clear is still pending, move to the timeout-kill
 * state and retry later, otherwise report the outcome.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* halt/clear in flight; re-check in three seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			/* No path left; run the no-path notification. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	} else if (cdev->handler)
		/* I/o was killed; tell the driver it timed out. */
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
741
/*
 * Got an interrupt for a basic sense. Accumulates the sense data,
 * restarts basic sense if more is needed, and otherwise returns to
 * the online state and calls the driver's handler.
 */
void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* The hardware stores the irb in the lowcore. */
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (irb->scsw.stctl ==
			(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (irb->scsw.cc == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			printk("Huh? %s(%s): unsolicited interrupt...\n",
			       __FUNCTION__, cdev->dev.bus_id);
			if (cdev->handler)
				cdev->handler (cdev, 0, irb);
		}
		return;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
	cdev->private->state = DEV_STATE_ONLINE;
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
778
/*
 * Interrupt for a cio-initiated clear during path verification:
 * accumulate the status and retry the delayed verification.
 */
static void
ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* The hardware stores the irb in the lowcore. */
	irb = (struct irb *) __LC_IRB;
	/* Accumulate status. We don't do basic sense. */
	ccw_device_accumulate_irb(cdev, irb);
	/* Try to start delayed device verification. */
	ccw_device_online_verify(cdev, 0);
	/* Note: Don't call handler for cio initiated clear! */
}
791
/*
 * Interrupt in the timeout-kill state: the cancel/halt/clear has
 * completed. Report the timed-out i/o to the driver, then either
 * run the no-path notification or the delayed path verification.
 */
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		/* No path left; run the no-path notification. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
812
/*
 * Timeout in the timeout-kill state: keep retrying the
 * cancel/halt/clear until the i/o dies or the device is gone.
 */
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* halt/clear still pending; re-check in three seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			/* No path left; run the no-path notification. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	//FIXME: Can we get here?
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
842
/*
 * Interrupt while waiting for final i/o status (state wait4io):
 * accumulate status, start basic sense if needed, otherwise clear
 * the wait timeout once the subchannel is idle and call the
 * driver's handler; finally handle no-path / delayed verification.
 */
static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	struct subchannel *sch;

	/* The hardware stores the irb in the lowcore. */
	irb = (struct irb *) __LC_IRB;
	/*
	 * Accumulate status and find out if a basic sense is needed.
	 * This is fine since we have already adapted the lpm.
	 */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}

	/* Iff device is idle, reset timeout. */
	sch = to_subchannel(cdev->dev.parent);
	if (!stsch(sch->irq, &sch->schib))
		if (sch->schib.scsw.actl == 0)
			ccw_device_set_timeout(cdev, 0);
	/* Call the handler. */
	ccw_device_call_handler(cdev);
	if (!sch->lpm) {
		/* No path left; run the no-path notification. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
876
/*
 * Timeout expired while waiting for I/O completion in state
 * DEV_STATE_WAIT4IO: try to terminate the I/O.
 */
static void
ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Stop the timer before attempting termination. */
	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Termination pending; retry from DEV_STATE_TIMEOUT_KILL. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		if (!sch->lpm) {
			/* No path left: notify upper layer via workqueue. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	/* I/O terminated; report the timeout to the driver. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
912
913 static void
914 ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
915 {
916         /* When the I/O has terminated, we have to start verification. */
917         if (cdev->private->options.pgroup)
918                 cdev->private->flags.doverify = 1;
919 }
920
/*
 * Completion handler for the steal-lock operation: an interrupt or a
 * timeout in state DEV_STATE_BOXED ends the wait.
 */
static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	switch (dev_event) {
	case DEV_EVENT_INTERRUPT:
		irb = (struct irb *) __LC_IRB;
		/* Check for unsolicited interrupt. */
		if ((irb->scsw.stctl ==
		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
		    (!irb->scsw.cc))
			/* FIXME: we should restart stlck here, but this
			 * is extremely unlikely ... */
			goto out_wakeup;

		ccw_device_accumulate_irb(cdev, irb);
		/* We don't care about basic sense etc. */
		break;
	default: /* timeout */
		break;
	}
out_wakeup:
	/* Wake whoever is waiting for the steal lock to finish. */
	wake_up(&cdev->private->wait_q);
}
946
947 static void
948 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
949 {
950         struct subchannel *sch;
951
952         sch = to_subchannel(cdev->dev.parent);
953         if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
954                 /* Couldn't enable the subchannel for i/o. Sick device. */
955                 return;
956
957         /* After 60s the device recognition is considered to have failed. */
958         ccw_device_set_timeout(cdev, 60*HZ);
959
960         cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
961         ccw_device_sense_id_start(cdev);
962 }
963
/*
 * Re-probe a disconnected device: refresh the schib, rebuild the path
 * mask and restart device recognition.  Called with no locks held;
 * takes the subchannel lock itself.
 */
void
device_trigger_reprobe(struct subchannel *sch)
{
	struct ccw_device *cdev;
	unsigned long flags;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	spin_lock_irqsave(&sch->lock, flags);
	/* Only act on devices that are fully disconnected. */
	if (cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irqrestore(&sch->lock, flags);
		return;
	}
	/* Update some values. */
	if (stsch(sch->irq, &sch->schib)) {
		spin_unlock_irqrestore(&sch->lock, flags);
		return;
	}
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	/* Re-set some bits in the pmcw that were lost. */
	/* NOTE(review): isc 3 appears to be the interruption subclass the
	 * cio layer uses throughout — confirm against cio.h. */
	sch->schib.pmcw.isc = 3;
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	/* More than one bit set in lpm => multipath mode. */
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	/* We should also update ssd info, but this has to wait. */
	ccw_device_start_id(cdev, 0);
	spin_unlock_irqrestore(&sch->lock, flags);
}
1002
1003 static void
1004 ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
1005 {
1006         struct subchannel *sch;
1007
1008         sch = to_subchannel(cdev->dev.parent);
1009         /*
1010          * An interrupt in state offline means a previous disable was not
1011          * successful. Try again.
1012          */
1013         cio_disable_subchannel(sch);
1014 }
1015
/*
 * Any event in state DEV_STATE_CMFCHANGE: retry the pending channel
 * measurement schib update, then go back online and re-deliver the
 * event that interrupted us to the normal online handlers.
 */
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
1023
1024
1025 static void
1026 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1027 {
1028         ccw_device_set_timeout(cdev, 0);
1029         if (dev_event == DEV_EVENT_NOTOPER)
1030                 cdev->private->state = DEV_STATE_NOT_OPER;
1031         else
1032                 cdev->private->state = DEV_STATE_OFFLINE;
1033         wake_up(&cdev->private->wait_q);
1034 }
1035
1036 static void
1037 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1038 {
1039         int ret;
1040
1041         ret = ccw_device_cancel_halt_clear(cdev);
1042         switch (ret) {
1043         case 0:
1044                 cdev->private->state = DEV_STATE_OFFLINE;
1045                 wake_up(&cdev->private->wait_q);
1046                 break;
1047         case -ENODEV:
1048                 cdev->private->state = DEV_STATE_NOT_OPER;
1049                 wake_up(&cdev->private->wait_q);
1050                 break;
1051         default:
1052                 ccw_device_set_timeout(cdev, HZ/10);
1053         }
1054 }
1055
/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.  Both parameters are intentionally unused.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}
1064
/*
 * Bug operation action.  Used for state/event combinations that must
 * never occur; logs the offending pair and halts the kernel via BUG().
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
	printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
	       cdev->private->state, dev_event);
	BUG();
}
1075
/*
 * device statemachine
 *
 * Jump table indexed as dev_jumptable[state][event]; dev_fsm_event()
 * dispatches through it.  Impossible combinations map to
 * ccw_device_bug, harmless ones to ccw_device_nop.
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_CLEAR_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_clear_verify,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
	},
	[DEV_STATE_WAIT4IO] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_wait4io_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_wait4io_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_wait4io_verify,
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
};
1179
1180 /*
1181  * io_subchannel_irq is called for "real" interrupts or for status
1182  * pending conditions on msch.
1183  */
1184 void
1185 io_subchannel_irq (struct device *pdev)
1186 {
1187         struct ccw_device *cdev;
1188
1189         cdev = to_subchannel(pdev)->dev.driver_data;
1190
1191         CIO_TRACE_EVENT (3, "IRQ");
1192         CIO_TRACE_EVENT (3, pdev->bus_id);
1193
1194         dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1195 }
1196
1197 EXPORT_SYMBOL_GPL(ccw_device_set_timeout);