patch-2_6_7-vs1_9_1_12
[linux-2.6.git] / drivers / s390 / cio / device_fsm.c
1 /*
2  * drivers/s390/cio/device_fsm.c
3  * finite state machine for device handling
4  *
5  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6  *                       IBM Corporation
7  *    Author(s): Cornelia Huck(cohuck@de.ibm.com)
8  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10
11 #include <linux/module.h>
12 #include <linux/config.h>
13 #include <linux/init.h>
14
15 #include <asm/ccwdev.h>
16 #include <asm/qdio.h>
17
18 #include "cio.h"
19 #include "cio_debug.h"
20 #include "css.h"
21 #include "device.h"
22 #include "chsc.h"
23 #include "ioasm.h"
24 #include "qdio.h"
25
26 int
27 device_is_disconnected(struct subchannel *sch)
28 {
29         struct ccw_device *cdev;
30
31         if (!sch->dev.driver_data)
32                 return 0;
33         cdev = sch->dev.driver_data;
34         return (cdev->private->state == DEV_STATE_DISCONNECTED ||
35                 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
36 }
37
38 void
39 device_set_disconnected(struct subchannel *sch)
40 {
41         struct ccw_device *cdev;
42
43         if (!sch->dev.driver_data)
44                 return;
45         cdev = sch->dev.driver_data;
46         ccw_device_set_timeout(cdev, 0);
47         cdev->private->state = DEV_STATE_DISCONNECTED;
48 }
49
50 void
51 device_set_waiting(struct subchannel *sch)
52 {
53         struct ccw_device *cdev;
54
55         if (!sch->dev.driver_data)
56                 return;
57         cdev = sch->dev.driver_data;
58         ccw_device_set_timeout(cdev, 10*HZ);
59         cdev->private->state = DEV_STATE_WAIT4IO;
60 }
61
62 /*
63  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
64  */
65 static void
66 ccw_device_timeout(unsigned long data)
67 {
68         struct ccw_device *cdev;
69
70         cdev = (struct ccw_device *) data;
71         spin_lock_irq(cdev->ccwlock);
72         dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
73         spin_unlock_irq(cdev->ccwlock);
74 }
75
76 /*
77  * Set timeout
78  */
79 void
80 ccw_device_set_timeout(struct ccw_device *cdev, int expires)
81 {
82         if (expires == 0) {
83                 del_timer(&cdev->private->timer);
84                 return;
85         }
86         if (timer_pending(&cdev->private->timer)) {
87                 if (mod_timer(&cdev->private->timer, jiffies + expires))
88                         return;
89         }
90         cdev->private->timer.function = ccw_device_timeout;
91         cdev->private->timer.data = (unsigned long) cdev;
92         cdev->private->timer.expires = jiffies + expires;
93         add_timer(&cdev->private->timer);
94 }
95
96 /*
97  * Cancel running i/o. This is called repeatedly since halt/clear are
98  * asynchronous operations. We do one try with cio_cancel, two tries
99  * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
100  * Returns 0 if device now idle, -ENODEV for device not operational and
101  * -EBUSY if an interrupt is expected (either from halt/clear or from a
102  * status pending).
103  */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	/* Refresh the subchannel information block first. */
	ret = stsch(sch->irq, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
		/* Not operational or no activity -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		ret = cio_cancel(sch);
		if (ret != -EINVAL)
			return ret;
		/* cancel io unsuccessful. From now on it is asynchronous. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			/* 0 means halt started; report -EBUSY to await the irq. */
			return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	/* All cancel/halt/clear attempts exhausted - nothing left to try. */
	panic("Can't stop i/o on subchannel.\n");
}
144
/*
 * A formerly disconnected device is operational again. Re-validate its
 * identity against the fresh sense-id data; if it matches, bring it back
 * online, otherwise de- and re-register it as a new device.
 */
static void
ccw_device_handle_oper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	cdev->private->flags.recog_done = 1;
	/*
	 * Check if cu type and device type still match. If
	 * not, it is certainly another device and we have to
	 * de- and re-register. Also check here for non-matching devno.
	 */
	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
	    cdev->id.dev_model != cdev->private->senseid.dev_model ||
	    cdev->private->devno != sch->schib.pmcw.dev) {
		/* Hand the unreg/rereg off to the ccw work queue. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unreg_rereg, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return;
	}
	/* Request driver notification once online processing completes. */
	cdev->private->flags.donotify = 1;
	/* Get device online again. */
	ccw_device_online(cdev);
}
171
172 /*
173  * The machine won't give us any notification by machine check if a chpid has
174  * been varied online on the SE so we have to find out by magic (i. e. driving
175  * the channel subsystem to device selection and updating our path masks).
176  */
177 static inline void
178 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
179 {
180         int mask, i;
181
182         for (i = 0; i<8; i++) {
183                 mask = 0x80 >> i;
184                 if (!(sch->lpm & mask))
185                         continue;
186                 if (old_lpm & mask)
187                         continue;
188                 chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
189         }
190 }
191
192 /*
193  * Stop device recognition.
194  */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm;

	sch = to_subchannel(cdev->dev.parent);

	/* Recognition is over: stop the timer and disable the subchannel. */
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->irq, &sch->schib);
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			/* Still gone: stay disconnected, don't re-register. */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : unknown device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
			/* Device came back; run the oper-notify path below. */
			notify = 1;
		else  /* fill out sense information */
			cdev->id = (struct ccw_device_id) {
				.cu_type   = cdev->private->senseid.cu_type,
				.cu_model  = cdev->private->senseid.cu_model,
				.dev_type  = cdev->private->senseid.dev_type,
				.dev_model = cdev->private->senseid.dev_model,
			};
		/* Issue device info message. */
		CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
			  "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
			  "%04X/%02X\n", cdev->private->devno,
			  cdev->id.cu_type, cdev->id.cu_model,
			  cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);
		break;
	}
	cdev->private->state = state;
	if (notify && state == DEV_STATE_OFFLINE)
		ccw_device_handle_oper(cdev);
	else
		io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}
266
267 /*
268  * Function called from device_id.c after sense id has completed.
269  */
270 void
271 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
272 {
273         switch (err) {
274         case 0:
275                 ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
276                 break;
277         case -ETIME:            /* Sense id stopped by timeout. */
278                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
279                 break;
280         default:
281                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
282                 break;
283         }
284 }
285
286 static void
287 ccw_device_oper_notify(void *data)
288 {
289         struct ccw_device *cdev;
290         struct subchannel *sch;
291         int ret;
292
293         cdev = (struct ccw_device *)data;
294         sch = to_subchannel(cdev->dev.parent);
295         ret = (sch->driver && sch->driver->notify) ?
296                 sch->driver->notify(&sch->dev, CIO_OPER) : 0;
297         if (!ret)
298                 /* Driver doesn't want device back. */
299                 ccw_device_do_unreg_rereg((void *)cdev);
300         else
301                 wake_up(&cdev->private->wait_q);
302 }
303
304 /*
305  * Finished with online/offline processing.
306  */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	/* Any final state other than online releases the subchannel. */
	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;


	if (state == DEV_STATE_BOXED)
		CIO_DEBUG(KERN_WARNING, 2,
			  "Boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);

	/* Deliver the deferred oper notification, if one was requested. */
	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
			     (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);

	/* Drop the reference taken by ccw_device_online(). */
	if (css_init_done && state != DEV_STATE_ONLINE)
		put_device (&cdev->dev);
}
339
340 /*
341  * Function called from device_pgid.c after sense path ground has completed.
342  */
343 void
344 ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
345 {
346         struct subchannel *sch;
347
348         sch = to_subchannel(cdev->dev.parent);
349         switch (err) {
350         case 0:
351                 /* Start Path Group verification. */
352                 sch->vpm = 0;   /* Start with no path groups set. */
353                 cdev->private->state = DEV_STATE_VERIFY;
354                 ccw_device_verify_start(cdev);
355                 break;
356         case -ETIME:            /* Sense path group id stopped by timeout. */
357         case -EUSERS:           /* device is reserved for someone else. */
358                 ccw_device_done(cdev, DEV_STATE_BOXED);
359                 break;
360         case -EOPNOTSUPP: /* path grouping not supported, just set online. */
361                 cdev->private->options.pgroup = 0;
362                 ccw_device_done(cdev, DEV_STATE_ONLINE);
363                 break;
364         default:
365                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
366                 break;
367         }
368 }
369
370 /*
371  * Start device recognition.
372  */
373 int
374 ccw_device_recognition(struct ccw_device *cdev)
375 {
376         struct subchannel *sch;
377         int ret;
378
379         if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
380             (cdev->private->state != DEV_STATE_BOXED))
381                 return -EINVAL;
382         sch = to_subchannel(cdev->dev.parent);
383         ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
384         if (ret != 0)
385                 /* Couldn't enable the subchannel for i/o. Sick device. */
386                 return ret;
387
388         /* After 60s the device recognition is considered to have failed. */
389         ccw_device_set_timeout(cdev, 60*HZ);
390
391         /*
392          * We used to start here with a sense pgid to find out whether a device
393          * is locked by someone else. Unfortunately, the sense pgid command
394          * code has other meanings on devices predating the path grouping
395          * algorithm, so we start with sense id and box the device after an
396          * timeout (or if sense pgid during path verification detects the device
397          * is locked, as may happen on newer devices).
398          */
399         cdev->private->flags.recog_done = 0;
400         cdev->private->state = DEV_STATE_SENSE_ID;
401         ccw_device_sense_id_start(cdev);
402         return 0;
403 }
404
405 /*
406  * Handle timeout in device recognition.
407  */
408 static void
409 ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
410 {
411         int ret;
412
413         ret = ccw_device_cancel_halt_clear(cdev);
414         switch (ret) {
415         case 0:
416                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
417                 break;
418         case -ENODEV:
419                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
420                 break;
421         default:
422                 ccw_device_set_timeout(cdev, 3*HZ);
423         }
424 }
425
426
/*
 * Work queue callback: all paths to the device are gone. Ask the
 * subchannel driver (CIO_NO_PATH) whether to keep the device; either
 * schedule its unregistration or park it in the disconnected state.
 */
static void
ccw_device_nopath_notify(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	cdev = (struct ccw_device *)data;
	sch = to_subchannel(cdev->dev.parent);
	/* Extra sanity. */
	if (sch->lpm)
		return;
	ret = (sch->driver && sch->driver->notify) ?
		sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
	if (!ret) {
		if (get_device(&sch->dev)) {
			/* Driver doesn't want to keep device. */
			cio_disable_subchannel(sch);
			if (get_device(&cdev->dev)) {
				/* Unregister via the ccw work queue. */
				PREPARE_WORK(&cdev->private->kick_work,
					     ccw_device_call_sch_unregister,
					     (void *)cdev);
				queue_work(ccw_device_work,
					   &cdev->private->kick_work);
			}
		}
	} else {
		/* Driver keeps the device: mark it disconnected. */
		cio_disable_subchannel(sch);
		ccw_device_set_timeout(cdev, 0);
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
	}
}
460
/*
 * Path verification finished; complete the online transition or fail
 * the device depending on the verification result.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	cdev->private->flags.doverify = 0;
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		/* fallthrough */
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* No usable path left: notify and give the device up. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
482
483 /*
484  * Get device online.
485  */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	/* Only offline or boxed devices may be set online. */
	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	/* Hold a reference for the online phase; dropped in ccw_device_done(). */
	if (css_init_done && !get_device(&cdev->dev))
		return -ENODEV;
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state online immediately. */
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}
516
517 void
518 ccw_device_disband_done(struct ccw_device *cdev, int err)
519 {
520         switch (err) {
521         case 0:
522                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
523                 break;
524         case -ETIME:
525                 ccw_device_done(cdev, DEV_STATE_BOXED);
526                 break;
527         default:
528                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
529                 break;
530         }
531 }
532
533 /*
534  * Shutdown device.
535  */
536 int
537 ccw_device_offline(struct ccw_device *cdev)
538 {
539         struct subchannel *sch;
540
541         sch = to_subchannel(cdev->dev.parent);
542         if (cdev->private->state != DEV_STATE_ONLINE) {
543                 if (sch->schib.scsw.actl != 0)
544                         return -EBUSY;
545                 return -EINVAL;
546         }
547         if (sch->schib.scsw.actl != 0)
548                 return -EBUSY;
549         /* Are we doing path grouping? */
550         if (!cdev->private->options.pgroup) {
551                 /* No, set state offline immediately. */
552                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
553                 return 0;
554         }
555         /* Start Set Path Group commands. */
556         cdev->private->state = DEV_STATE_DISBAND_PGID;
557         ccw_device_disband_start(cdev);
558         return 0;
559 }
560
561 /*
562  * Handle timeout in device online/offline process.
563  */
564 static void
565 ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
566 {
567         int ret;
568
569         ret = ccw_device_cancel_halt_clear(cdev);
570         switch (ret) {
571         case 0:
572                 ccw_device_done(cdev, DEV_STATE_BOXED);
573                 break;
574         case -ENODEV:
575                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
576                 break;
577         default:
578                 ccw_device_set_timeout(cdev, 3*HZ);
579         }
580 }
581
582 /*
583  * Handle not oper event in device recognition.
584  */
/* Device became not operational during recognition: finish recognition. */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
590
591 /*
592  * Handle not operational event while offline.
593  */
594 static void
595 ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
596 {
597         struct subchannel *sch;
598
599         cdev->private->state = DEV_STATE_NOT_OPER;
600         sch = to_subchannel(cdev->dev.parent);
601         device_unregister(&sch->dev);
602         sch->schib.pmcw.intparm = 0;
603         cio_modify(sch);
604         wake_up(&cdev->private->wait_q);
605 }
606
607 /*
608  * Handle not operational event while online.
609  */
static void
ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Give the driver a chance to keep the device as disconnected. */
	if (sch->driver->notify &&
	    sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
			ccw_device_set_timeout(cdev, 0);
			cdev->private->state = DEV_STATE_DISCONNECTED;
			wake_up(&cdev->private->wait_q);
			return;
	}
	/* Driver declined: tear the device down. */
	cdev->private->state = DEV_STATE_NOT_OPER;
	cio_disable_subchannel(sch);
	if (sch->schib.scsw.actl != 0) {
		// FIXME: not-oper indication to device driver ?
		ccw_device_call_handler(cdev);
	}
	device_unregister(&sch->dev);
	sch->schib.pmcw.intparm = 0;
	cio_modify(sch);
	wake_up(&cdev->private->wait_q);
}
634
635 /*
636  * Handle path verification event.
637  */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	/* Nothing to verify when path grouping is disabled. */
	if (!cdev->private->options.pgroup)
		return;
	if (cdev->private->state == DEV_STATE_W4SENSE) {
		/* Basic sense in progress: defer the verification. */
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (sch->schib.scsw.actl != 0 ||
	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verfication now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}
664
665 /*
666  * Got an interrupt for a normal io (state online).
667  */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* The irb is stored at a fixed lowcore location. */
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (irb->scsw.stctl ==
			(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		/* Unsolicited: hand straight to the driver, no accounting. */
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
694
695 /*
696  * Got an timeout in online state.
697  */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	/* Try to stop the outstanding i/o. */
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* halt/clear in flight: wait for its irq in TIMEOUT_KILL. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			/* No path left: notify the driver. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	} else if (cdev->handler)
		/* I/O stopped: tell the driver its request timed out. */
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
725
726 /*
727  * Got an interrupt for a basic sense.
728  */
void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* The irb is stored at a fixed lowcore location. */
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (irb->scsw.stctl ==
			(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		if (irb->scsw.cc == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		return;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
	/* Sense data complete: go back to online. */
	cdev->private->state = DEV_STATE_ONLINE;
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
758
759 static void
760 ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
761 {
762         struct irb *irb;
763
764         irb = (struct irb *) __LC_IRB;
765         /* Check for unsolicited interrupt. */
766         if (irb->scsw.stctl ==
767                         (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
768                 if (cdev->handler)
769                         cdev->handler (cdev, 0, irb);
770                 return;
771         }
772         /* Accumulate status. We don't do basic sense. */
773         ccw_device_accumulate_irb(cdev, irb);
774         /* Try to start delayed device verification. */
775         ccw_device_online_verify(cdev, 0);
776         /* Note: Don't call handler for cio initiated clear! */
777 }
778
/*
 * Interrupt arrived in TIMEOUT_KILL: the cancel/halt/clear took
 * effect, so report the timeout to the driver and resume normal
 * operation (or notify if no path is left).
 */
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
799
/*
 * Timeout in TIMEOUT_KILL: keep retrying cancel/halt/clear until the
 * i/o is stopped or the device turns out to be gone.
 */
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Still busy: try again in 3 seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	//FIXME: Can we get here?
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
829
/*
 * Interrupt while waiting for i/o to finish (WAIT4IO): accumulate
 * status, run basic sense if required, and deliver the result to the
 * driver, resetting the timeout once the subchannel is idle.
 */
static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	struct subchannel *sch;

	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (irb->scsw.stctl ==
			(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		if (irb->scsw.cc == 1)
			goto call_handler;
		return;
	}
	/*
	 * Accumulate status and find out if a basic sense is needed.
	 * This is fine since we have already adapted the lpm.
	 */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
call_handler:
	/* Iff device is idle, reset timeout. */
	sch = to_subchannel(cdev->dev.parent);
	if (!stsch(sch->irq, &sch->schib))
		if (sch->schib.scsw.actl == 0)
			ccw_device_set_timeout(cdev, 0);
	/* Call the handler. */
	ccw_device_call_handler(cdev);
	if (!sch->lpm) {
		/* No path left: notify the driver. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
872
873 static void
874 ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
875 {
876         int ret;
877         struct subchannel *sch;
878
879         sch = to_subchannel(cdev->dev.parent);
880         ccw_device_set_timeout(cdev, 0);
881         ret = ccw_device_cancel_halt_clear(cdev);
882         if (ret == -EBUSY) {
883                 ccw_device_set_timeout(cdev, 3*HZ);
884                 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
885                 return;
886         }
887         if (ret == -ENODEV) {
888                 if (!sch->lpm) {
889                         PREPARE_WORK(&cdev->private->kick_work,
890                                      ccw_device_nopath_notify, (void *)cdev);
891                         queue_work(ccw_device_notify_work,
892                                    &cdev->private->kick_work);
893                 } else
894                         dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
895                 return;
896         }
897         if (cdev->handler)
898                 cdev->handler(cdev, cdev->private->intparm,
899                               ERR_PTR(-ETIMEDOUT));
900         if (!sch->lpm) {
901                 PREPARE_WORK(&cdev->private->kick_work,
902                              ccw_device_nopath_notify, (void *)cdev);
903                 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
904         } else if (cdev->private->flags.doverify)
905                 /* Start delayed path verification. */
906                 ccw_device_online_verify(cdev, 0);
907 }
908
909 static void
910 ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
911 {
912         /* When the I/O has terminated, we have to start verification. */
913         if (cdev->private->options.pgroup)
914                 cdev->private->flags.doverify = 1;
915 }
916
917 static void
918 ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
919 {
920         struct irb *irb;
921
922         switch (dev_event) {
923         case DEV_EVENT_INTERRUPT:
924                 irb = (struct irb *) __LC_IRB;
925                 /* Check for unsolicited interrupt. */
926                 if (irb->scsw.stctl ==
927                     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
928                         /* FIXME: we should restart stlck here, but this
929                          * is extremely unlikely ... */
930                         goto out_wakeup;
931
932                 ccw_device_accumulate_irb(cdev, irb);
933                 /* We don't care about basic sense etc. */
934                 break;
935         default: /* timeout */
936                 break;
937         }
938 out_wakeup:
939         wake_up(&cdev->private->wait_q);
940 }
941
942 static void
943 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
944 {
945         struct subchannel *sch;
946
947         sch = to_subchannel(cdev->dev.parent);
948         if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
949                 /* Couldn't enable the subchannel for i/o. Sick device. */
950                 return;
951
952         /* After 60s the device recognition is considered to have failed. */
953         ccw_device_set_timeout(cdev, 60*HZ);
954
955         cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
956         ccw_device_sense_id_start(cdev);
957 }
958
959 void
960 device_trigger_reprobe(struct subchannel *sch)
961 {
962         struct ccw_device *cdev;
963         unsigned long flags;
964
965         if (!sch->dev.driver_data)
966                 return;
967         cdev = sch->dev.driver_data;
968         spin_lock_irqsave(&sch->lock, flags);
969         if (cdev->private->state != DEV_STATE_DISCONNECTED) {
970                 spin_unlock_irqrestore(&sch->lock, flags);
971                 return;
972         }
973         /* Update some values. */
974         if (stsch(sch->irq, &sch->schib)) {
975                 spin_unlock_irqrestore(&sch->lock, flags);
976                 return;
977         }
978         /*
979          * The pim, pam, pom values may not be accurate, but they are the best
980          * we have before performing device selection :/
981          */
982         sch->lpm = sch->schib.pmcw.pim &
983                 sch->schib.pmcw.pam &
984                 sch->schib.pmcw.pom &
985                 sch->opm;
986         /* Re-set some bits in the pmcw that were lost. */
987         sch->schib.pmcw.isc = 3;
988         sch->schib.pmcw.csense = 1;
989         sch->schib.pmcw.ena = 0;
990         if ((sch->lpm & (sch->lpm - 1)) != 0)
991                 sch->schib.pmcw.mp = 1;
992         sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
993         ccw_device_start_id(cdev, 0);
994         spin_unlock_irqrestore(&sch->lock, flags);
995 }
996
997 static void
998 ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
999 {
1000         struct subchannel *sch;
1001
1002         sch = to_subchannel(cdev->dev.parent);
1003         /*
1004          * An interrupt in state offline means a previous disable was not
1005          * successful. Try again.
1006          */
1007         cio_disable_subchannel(sch);
1008 }
1009
1010 static void
1011 ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
1012 {
1013         retry_set_schib(cdev);
1014         cdev->private->state = DEV_STATE_ONLINE;
1015         dev_fsm_event(cdev, dev_event);
1016 }
1017
1018
1019 static void
1020 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1021 {
1022         ccw_device_set_timeout(cdev, 0);
1023         if (dev_event == DEV_EVENT_NOTOPER)
1024                 cdev->private->state = DEV_STATE_NOT_OPER;
1025         else
1026                 cdev->private->state = DEV_STATE_OFFLINE;
1027         wake_up(&cdev->private->wait_q);
1028 }
1029
1030 static void
1031 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1032 {
1033         int ret;
1034
1035         ret = ccw_device_cancel_halt_clear(cdev);
1036         switch (ret) {
1037         case 0:
1038                 cdev->private->state = DEV_STATE_OFFLINE;
1039                 wake_up(&cdev->private->wait_q);
1040                 break;
1041         case -ENODEV:
1042                 cdev->private->state = DEV_STATE_NOT_OPER;
1043                 wake_up(&cdev->private->wait_q);
1044                 break;
1045         default:
1046                 ccw_device_set_timeout(cdev, HZ/10);
1047         }
1048 }
1049
1050 /*
1051  * No operation action. This is used e.g. to ignore a timeout event in
1052  * state offline.
1053  */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
        /* Intentionally empty: the event is ignored in this state. */
}
1058
1059 /*
1060  * Bug operation action. 
1061  */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
        /* This (state, event) pair must never be delivered; reaching it
         * is a programming error in dev_jumptable, so crash loudly. */
        printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
               cdev->private->state, dev_event);
        BUG();
}
1069
1070 /*
1071  * device statemachine
1072  */
/*
 * Maps (device state, device event) to the handler that dev_fsm_event()
 * invokes. ccw_device_nop entries deliberately ignore the event;
 * ccw_device_bug entries mark pairs that must never occur.
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
        [DEV_STATE_NOT_OPER] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_nop,
                [DEV_EVENT_INTERRUPT]   = ccw_device_bug,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_SENSE_PGID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_sense_pgid_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_SENSE_ID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_OFFLINE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_offline_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_offline_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_VERIFY] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_verify_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_ONLINE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_online_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
        },
        [DEV_STATE_W4SENSE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_w4sense,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
        },
        [DEV_STATE_DISBAND_PGID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_disband_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_BOXED] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_offline_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_stlck_done,
                [DEV_EVENT_TIMEOUT]     = ccw_device_stlck_done,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        /* states to wait for i/o completion before doing something */
        [DEV_STATE_CLEAR_VERIFY] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_clear_verify,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_TIMEOUT_KILL] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_killing_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_killing_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop, //FIXME
        },
        [DEV_STATE_WAIT4IO] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_wait4io_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_wait4io_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_wait4io_verify,
        },
        [DEV_STATE_QUIESCE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_quiesce_done,
                [DEV_EVENT_INTERRUPT]   = ccw_device_quiesce_done,
                [DEV_EVENT_TIMEOUT]     = ccw_device_quiesce_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        /* special states for devices gone not operational */
        [DEV_STATE_DISCONNECTED] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_nop,
                [DEV_EVENT_INTERRUPT]   = ccw_device_start_id,
                [DEV_EVENT_TIMEOUT]     = ccw_device_bug,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_DISCONNECTED_SENSE_ID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_CMFCHANGE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_change_cmfstate,
                [DEV_EVENT_INTERRUPT]   = ccw_device_change_cmfstate,
                [DEV_EVENT_TIMEOUT]     = ccw_device_change_cmfstate,
                [DEV_EVENT_VERIFY]      = ccw_device_change_cmfstate,
        },
};
1173
1174 /*
1175  * io_subchannel_irq is called for "real" interrupts or for status
1176  * pending conditions on msch.
1177  */
1178 void
1179 io_subchannel_irq (struct device *pdev)
1180 {
1181         struct ccw_device *cdev;
1182
1183         cdev = to_subchannel(pdev)->dev.driver_data;
1184
1185         CIO_TRACE_EVENT (3, "IRQ");
1186         CIO_TRACE_EVENT (3, pdev->bus_id);
1187
1188         dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1189 }
1190
1191 EXPORT_SYMBOL_GPL(ccw_device_set_timeout);