1 /*
2  * drivers/s390/cio/device_fsm.c
3  * finite state machine for device handling
4  *
5  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6  *                       IBM Corporation
7  *    Author(s): Cornelia Huck (cohuck@de.ibm.com)
8  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10
11 #include <linux/module.h>
12 #include <linux/config.h>
13 #include <linux/init.h>
14
15 #include <asm/ccwdev.h>
16 #include <asm/qdio.h>
17
18 #include "cio.h"
19 #include "cio_debug.h"
20 #include "css.h"
21 #include "device.h"
22 #include "chsc.h"
23 #include "ioasm.h"
24 #include "qdio.h"
25
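/*
 * Check whether the device bound to this subchannel is in one of the
 * disconnected states. Returns 0 if no device is bound.
 */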
26 int
27 device_is_disconnected(struct subchannel *sch)
28 {
29         struct ccw_device *cdev;
30
31         if (!sch->dev.driver_data)
32                 return 0;
33         cdev = sch->dev.driver_data;
34         return (cdev->private->state == DEV_STATE_DISCONNECTED ||
35                 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
36 }
37
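/*
 * Stop a possibly pending timer and put the device into the
 * disconnected state.
 */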
38 void
39 device_set_disconnected(struct subchannel *sch)
40 {
41         struct ccw_device *cdev;
42
43         if (!sch->dev.driver_data)
44                 return;
45         cdev = sch->dev.driver_data;
46         ccw_device_set_timeout(cdev, 0);
47         cdev->private->state = DEV_STATE_DISCONNECTED;
48 }
49
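/*
 * Put the device into the wait-for-i/o state with a 10 second timeout.
 */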
50 void
51 device_set_waiting(struct subchannel *sch)
52 {
53         struct ccw_device *cdev;
54
55         if (!sch->dev.driver_data)
56                 return;
57         cdev = sch->dev.driver_data;
58         ccw_device_set_timeout(cdev, 10*HZ);
59         cdev->private->state = DEV_STATE_WAIT4IO;
60 }
61
62 /*
63  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
64  */
65 static void
66 ccw_device_timeout(unsigned long data)
67 {
68         struct ccw_device *cdev;
69
70         cdev = (struct ccw_device *) data;
71         spin_lock_irq(cdev->ccwlock);
72         dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
73         spin_unlock_irq(cdev->ccwlock);
74 }
75
76 /*
77  * Set timeout
78  */
79 void
80 ccw_device_set_timeout(struct ccw_device *cdev, int expires)
81 {
82         if (expires == 0) {
83                 del_timer(&cdev->private->timer);
84                 return;
85         }
86         if (timer_pending(&cdev->private->timer)) {
87                 if (mod_timer(&cdev->private->timer, jiffies + expires))
88                         return;
89         }
90         cdev->private->timer.function = ccw_device_timeout;
91         cdev->private->timer.data = (unsigned long) cdev;
92         cdev->private->timer.expires = jiffies + expires;
93         add_timer(&cdev->private->timer);
94 }
95
96 /*
97  * Cancel running i/o. This is called repeatedly since halt/clear are
98  * asynchronous operations. We do one try with cio_cancel, three tries
99  * with cio_halt and 255 tries with cio_clear. If everything fails, panic.
100  * Returns 0 if device now idle, -ENODEV for device not operational and
101  * -EBUSY if an interrupt is expected (either from halt/clear or from a
102  * status pending).
103  */
104 int
105 ccw_device_cancel_halt_clear(struct ccw_device *cdev)
106 {
107         struct subchannel *sch;
108         int ret;
109
110         sch = to_subchannel(cdev->dev.parent);
111         ret = stsch(sch->irq, &sch->schib);
112         if (ret || !sch->schib.pmcw.dnv)
113                 return -ENODEV; 
114         if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
115                 /* Not operational or no activity -> done. */
116                 return 0;
117         /* Stage 1: cancel io. */
118         if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
119             !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
120                 ret = cio_cancel(sch);
121                 if (ret != -EINVAL)
122                         return ret;
123                 /* cancel io unsuccessful. From now on it is asynchronous. */
124                 cdev->private->iretry = 3;      /* 3 halt retries. */
125         }
126         if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
127                 /* Stage 2: halt io. */
128                 if (cdev->private->iretry) {
129                         cdev->private->iretry--;
130                         ret = cio_halt(sch);
131                         return (ret == 0) ? -EBUSY : ret;
132                 }
133                 /* halt io unsuccessful. */
134                 cdev->private->iretry = 255;    /* 255 clear retries. */
135         }
136         /* Stage 3: clear io. */
137         if (cdev->private->iretry) {
138                 cdev->private->iretry--;
139                 ret = cio_clear (sch);
140                 return (ret == 0) ? -EBUSY : ret;
141         }
142         panic("Can't stop i/o on subchannel.\n");
143 }
144
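/*
 * A disconnected device is operational again. If the sensed identifiers
 * or the device number no longer match, schedule de- and re-registration;
 * otherwise remember to notify the driver once online/offline processing
 * is done.
 */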
145 static void
146 ccw_device_handle_oper(struct ccw_device *cdev)
147 {
148         struct subchannel *sch;
149
150         sch = to_subchannel(cdev->dev.parent);
151         cdev->private->flags.recog_done = 1;
152         /*
153          * Check if cu type and device type still match. If
154          * not, it is certainly another device and we have to
155          * de- and re-register. Also check here for non-matching devno.
156          */
157         if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
158             cdev->id.cu_model != cdev->private->senseid.cu_model ||
159             cdev->id.dev_type != cdev->private->senseid.dev_type ||
160             cdev->id.dev_model != cdev->private->senseid.dev_model ||
161             cdev->private->devno != sch->schib.pmcw.dev) {
162                 PREPARE_WORK(&cdev->private->kick_work,
163                              ccw_device_do_unreg_rereg, (void *)cdev);
164                 queue_work(ccw_device_work, &cdev->private->kick_work);
165                 return;
166         }
167         cdev->private->flags.donotify = 1;
168 }
169
170 /*
171  * The machine won't give us any notification by machine check if a chpid has
172  * been varied online on the SE, so we have to find out by magic (i.e. driving
173  * the channel subsystem to device selection and updating our path masks).
174  */
175 static inline void
176 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
177 {
178         int mask, i;
179
180         for (i = 0; i<8; i++) {
181                 mask = 0x80 >> i;
182                 if (!(sch->lpm & mask))
183                         continue;
184                 if (old_lpm & mask)
185                         continue;
186                 chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
187         }
188 }
189
190 /*
191  * Stop device recognition.
192  */
193 static void
194 ccw_device_recog_done(struct ccw_device *cdev, int state)
195 {
196         struct subchannel *sch;
197         int notify, old_lpm;
198
199         sch = to_subchannel(cdev->dev.parent);
200
201         ccw_device_set_timeout(cdev, 0);
202         cio_disable_subchannel(sch);
203         /*
204          * Now that we tried recognition, we have performed device selection
205          * through ssch() and the path information is up to date.
206          */
207         old_lpm = sch->lpm;
208         stsch(sch->irq, &sch->schib);
209         sch->lpm = sch->schib.pmcw.pim &
210                 sch->schib.pmcw.pam &
211                 sch->schib.pmcw.pom &
212                 sch->opm;
213         if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
214                 /* Force reprobe on all chpids. */
215                 old_lpm = 0;
216         if (sch->lpm != old_lpm)
217                 __recover_lost_chpids(sch, old_lpm);
218         if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
219                 if (state == DEV_STATE_NOT_OPER) {
220                         cdev->private->flags.recog_done = 1;
221                         cdev->private->state = DEV_STATE_DISCONNECTED;
222                         return;
223                 }
224                 /* Boxed devices don't need extra treatment. */
225         }
226         notify = 0;
227         switch (state) {
228         case DEV_STATE_NOT_OPER:
229                 CIO_DEBUG(KERN_WARNING, 2,
230                           "SenseID : unknown device %04x on subchannel %04x\n",
231                           cdev->private->devno, sch->irq);
232                 break;
233         case DEV_STATE_OFFLINE:
234                 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
235                         ccw_device_handle_oper(cdev);
236                         notify = 1;
237                 }
238                 /* fill out sense information */
239                 cdev->id = (struct ccw_device_id) {
240                         .cu_type   = cdev->private->senseid.cu_type,
241                         .cu_model  = cdev->private->senseid.cu_model,
242                         .dev_type  = cdev->private->senseid.dev_type,
243                         .dev_model = cdev->private->senseid.dev_model,
244                 };
245                 if (notify) {
246                         /* Get device online again. */
247                         ccw_device_online(cdev);
248                         wake_up(&cdev->private->wait_q);
249                         return;
250                 }
251                 /* Issue device info message. */
252                 CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
253                           "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
254                           "%04X/%02X\n", cdev->private->devno,
255                           cdev->id.cu_type, cdev->id.cu_model,
256                           cdev->id.dev_type, cdev->id.dev_model);
257                 break;
258         case DEV_STATE_BOXED:
259                 CIO_DEBUG(KERN_WARNING, 2,
260                           "SenseID : boxed device %04x on subchannel %04x\n",
261                           cdev->private->devno, sch->irq);
262                 break;
263         }
264         cdev->private->state = state;
265         io_subchannel_recog_done(cdev);
266         if (state != DEV_STATE_NOT_OPER)
267                 wake_up(&cdev->private->wait_q);
268 }
269
270 /*
271  * Function called from device_id.c after sense id has completed.
272  */
273 void
274 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
275 {
276         switch (err) {
277         case 0:
278                 ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
279                 break;
280         case -ETIME:            /* Sense id stopped by timeout. */
281                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
282                 break;
283         default:
284                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
285                 break;
286         }
287 }
288
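/*
 * Worker function: tell the subchannel driver that a device has become
 * operational again. If the driver does not want it back, unregister and
 * re-register the device.
 */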
289 static void
290 ccw_device_oper_notify(void *data)
291 {
292         struct ccw_device *cdev;
293         struct subchannel *sch;
294         int ret;
295
296         cdev = (struct ccw_device *)data;
297         sch = to_subchannel(cdev->dev.parent);
298         ret = (sch->driver && sch->driver->notify) ?
299                 sch->driver->notify(&sch->dev, CIO_OPER) : 0;
300         if (!ret)
301                 /* Driver doesn't want device back. */
302                 ccw_device_do_unreg_rereg((void *)cdev);
303         else
304                 wake_up(&cdev->private->wait_q);
305 }
306
307 /*
308  * Finished with online/offline processing.
309  */
310 static void
311 ccw_device_done(struct ccw_device *cdev, int state)
312 {
313         struct subchannel *sch;
314
315         sch = to_subchannel(cdev->dev.parent);
316
317         if (state != DEV_STATE_ONLINE)
318                 cio_disable_subchannel(sch);
319
320         /* Reset device status. */
321         memset(&cdev->private->irb, 0, sizeof(struct irb));
322
323         cdev->private->state = state;
324
325
326         if (state == DEV_STATE_BOXED)
327                 CIO_DEBUG(KERN_WARNING, 2,
328                           "Boxed device %04x on subchannel %04x\n",
329                           cdev->private->devno, sch->irq);
330
331         if (cdev->private->flags.donotify) {
332                 cdev->private->flags.donotify = 0;
333                 PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
334                              (void *)cdev);
335                 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
336         }
337         wake_up(&cdev->private->wait_q);
338
339         if (css_init_done && state != DEV_STATE_ONLINE)
340                 put_device (&cdev->dev);
341 }
342
343 /*
344  * Function called from device_pgid.c after sense path group id has completed.
345  */
346 void
347 ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
348 {
349         struct subchannel *sch;
350
351         sch = to_subchannel(cdev->dev.parent);
352         switch (err) {
353         case 0:
354                 /* Start Path Group verification. */
355                 sch->vpm = 0;   /* Start with no path groups set. */
356                 cdev->private->state = DEV_STATE_VERIFY;
357                 ccw_device_verify_start(cdev);
358                 break;
359         case -ETIME:            /* Sense path group id stopped by timeout. */
360         case -EUSERS:           /* device is reserved for someone else. */
361                 ccw_device_done(cdev, DEV_STATE_BOXED);
362                 break;
363         case -EOPNOTSUPP: /* path grouping not supported, just set online. */
364                 cdev->private->options.pgroup = 0;
365                 ccw_device_done(cdev, DEV_STATE_ONLINE);
366                 break;
367         default:
368                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
369                 break;
370         }
371 }
372
373 /*
374  * Start device recognition.
375  */
376 int
377 ccw_device_recognition(struct ccw_device *cdev)
378 {
379         struct subchannel *sch;
380         int ret;
381
382         if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
383             (cdev->private->state != DEV_STATE_BOXED))
384                 return -EINVAL;
385         sch = to_subchannel(cdev->dev.parent);
386         ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
387         if (ret != 0)
388                 /* Couldn't enable the subchannel for i/o. Sick device. */
389                 return ret;
390
391         /* After 60s the device recognition is considered to have failed. */
392         ccw_device_set_timeout(cdev, 60*HZ);
393
394         /*
395          * We used to start here with a sense pgid to find out whether a device
396          * is locked by someone else. Unfortunately, the sense pgid command
397          * code has other meanings on devices predating the path grouping
398          * algorithm, so we start with sense id and box the device after an
399          * timeout (or if sense pgid during path verification detects the device
400          * is locked, as may happen on newer devices).
401          */
402         cdev->private->flags.recog_done = 0;
403         cdev->private->state = DEV_STATE_SENSE_ID;
404         ccw_device_sense_id_start(cdev);
405         return 0;
406 }
407
408 /*
409  * Handle timeout in device recognition.
410  */
411 static void
412 ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
413 {
414         int ret;
415
416         ret = ccw_device_cancel_halt_clear(cdev);
417         switch (ret) {
418         case 0:
419                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
420                 break;
421         case -ENODEV:
422                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
423                 break;
424         default:
425                 ccw_device_set_timeout(cdev, 3*HZ);
426         }
427 }
428
429
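/*
 * Worker function, called when no path to the device is left. Ask the
 * subchannel driver whether it wants to keep the device; if it does not,
 * disable the subchannel and schedule its unregistration, otherwise put
 * the device into the disconnected state.
 */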
430 static void
431 ccw_device_nopath_notify(void *data)
432 {
433         struct ccw_device *cdev;
434         struct subchannel *sch;
435         int ret;
436
437         cdev = (struct ccw_device *)data;
438         sch = to_subchannel(cdev->dev.parent);
439         /* Extra sanity. */
440         if (sch->lpm)
441                 return;
442         ret = (sch->driver && sch->driver->notify) ?
443                 sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
444         if (!ret) {
445                 if (get_device(&sch->dev)) {
446                         /* Driver doesn't want to keep device. */
447                         cio_disable_subchannel(sch);
448                         if (get_device(&cdev->dev)) {
449                                 PREPARE_WORK(&cdev->private->kick_work,
450                                              ccw_device_call_sch_unregister,
451                                              (void *)cdev);
452                                 queue_work(ccw_device_work,
453                                            &cdev->private->kick_work);
454                         }
455                 }
456         } else {
457                 cio_disable_subchannel(sch);
458                 ccw_device_set_timeout(cdev, 0);
459                 cdev->private->state = DEV_STATE_DISCONNECTED;
460                 wake_up(&cdev->private->wait_q);
461         }
462 }
463
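/*
 * Called when path verification has finished. Set the device online,
 * boxed or not operational according to the result.
 */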
464 void
465 ccw_device_verify_done(struct ccw_device *cdev, int err)
466 {
467         cdev->private->flags.doverify = 0;
468         switch (err) {
469         case -EOPNOTSUPP: /* path grouping not supported, just set online. */
470                 cdev->private->options.pgroup = 0;
471         case 0:
472                 ccw_device_done(cdev, DEV_STATE_ONLINE);
473                 break;
474         case -ETIME:
475                 ccw_device_done(cdev, DEV_STATE_BOXED);
476                 break;
477         default:
478                 PREPARE_WORK(&cdev->private->kick_work,
479                              ccw_device_nopath_notify, (void *)cdev);
480                 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
481                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
482                 break;
483         }
484 }
485
486 /*
487  * Get device online.
488  */
489 int
490 ccw_device_online(struct ccw_device *cdev)
491 {
492         struct subchannel *sch;
493         int ret;
494
495         if ((cdev->private->state != DEV_STATE_OFFLINE) &&
496             (cdev->private->state != DEV_STATE_BOXED))
497                 return -EINVAL;
498         sch = to_subchannel(cdev->dev.parent);
499         if (css_init_done && !get_device(&cdev->dev))
500                 return -ENODEV;
501         ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
502         if (ret != 0) {
503                 /* Couldn't enable the subchannel for i/o. Sick device. */
504                 if (ret == -ENODEV)
505                         dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
506                 return ret;
507         }
508         /* Do we want to do path grouping? */
509         if (!cdev->private->options.pgroup) {
510                 /* No, set state online immediately. */
511                 ccw_device_done(cdev, DEV_STATE_ONLINE);
512                 return 0;
513         }
514         /* Do a SensePGID first. */
515         cdev->private->state = DEV_STATE_SENSE_PGID;
516         ccw_device_sense_pgid_start(cdev);
517         return 0;
518 }
519
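/*
 * Called when disbanding the path group has finished; complete the
 * offline processing according to the result.
 */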
520 void
521 ccw_device_disband_done(struct ccw_device *cdev, int err)
522 {
523         switch (err) {
524         case 0:
525                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
526                 break;
527         case -ETIME:
528                 ccw_device_done(cdev, DEV_STATE_BOXED);
529                 break;
530         default:
531                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
532                 break;
533         }
534 }
535
536 /*
537  * Shutdown device.
538  */
539 int
540 ccw_device_offline(struct ccw_device *cdev)
541 {
542         struct subchannel *sch;
543
544         sch = to_subchannel(cdev->dev.parent);
545         if (cdev->private->state != DEV_STATE_ONLINE) {
546                 if (sch->schib.scsw.actl != 0)
547                         return -EBUSY;
548                 return -EINVAL;
549         }
550         if (sch->schib.scsw.actl != 0)
551                 return -EBUSY;
552         /* Are we doing path grouping? */
553         if (!cdev->private->options.pgroup) {
554                 /* No, set state offline immediately. */
555                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
556                 return 0;
557         }
558         /* Start Set Path Group commands. */
559         cdev->private->state = DEV_STATE_DISBAND_PGID;
560         ccw_device_disband_start(cdev);
561         return 0;
562 }
563
564 /*
565  * Handle timeout in device online/offline process.
566  */
567 static void
568 ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
569 {
570         int ret;
571
572         ret = ccw_device_cancel_halt_clear(cdev);
573         switch (ret) {
574         case 0:
575                 ccw_device_done(cdev, DEV_STATE_BOXED);
576                 break;
577         case -ENODEV:
578                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
579                 break;
580         default:
581                 ccw_device_set_timeout(cdev, 3*HZ);
582         }
583 }
584
585 /*
586  * Handle not oper event in device recognition.
587  */
588 static void
589 ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
590 {
591         ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
592 }
593
594 /*
595  * Handle not operational event while offline.
596  */
597 static void
598 ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
599 {
600         struct subchannel *sch;
601
602         cdev->private->state = DEV_STATE_NOT_OPER;
603         sch = to_subchannel(cdev->dev.parent);
604         device_unregister(&sch->dev);
605         sch->schib.pmcw.intparm = 0;
606         cio_modify(sch);
607         wake_up(&cdev->private->wait_q);
608 }
609
610 /*
611  * Handle not operational event while online.
612  */
613 static void
614 ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
615 {
616         struct subchannel *sch;
617
618         sch = to_subchannel(cdev->dev.parent);
619         if (sch->driver->notify &&
620             sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
621                         ccw_device_set_timeout(cdev, 0);
622                         cdev->private->state = DEV_STATE_DISCONNECTED;
623                         wake_up(&cdev->private->wait_q);
624                         return;
625         }
626         cdev->private->state = DEV_STATE_NOT_OPER;
627         cio_disable_subchannel(sch);
628         if (sch->schib.scsw.actl != 0) {
629                 // FIXME: not-oper indication to device driver ?
630                 ccw_device_call_handler(cdev);
631         }
632         device_unregister(&sch->dev);
633         sch->schib.pmcw.intparm = 0;
634         cio_modify(sch);
635         wake_up(&cdev->private->wait_q);
636 }
637
638 /*
639  * Handle path verification event.
640  */
641 static void
642 ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
643 {
644         struct subchannel *sch;
645
646         if (!cdev->private->options.pgroup)
647                 return;
648         if (cdev->private->state == DEV_STATE_W4SENSE) {
649                 cdev->private->flags.doverify = 1;
650                 return;
651         }
652         sch = to_subchannel(cdev->dev.parent);
653         if (sch->schib.scsw.actl != 0 ||
654             (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
655                 /*
656                  * No final status yet or final status not yet delivered
657  * to the device driver. Can't do path verification now,
658  * delay until the final status has been delivered.
659                  */
660                 cdev->private->flags.doverify = 1;
661                 return;
662         }
663         /* Device is idle, we can do the path verification. */
664         cdev->private->state = DEV_STATE_VERIFY;
665         ccw_device_verify_start(cdev);
666 }
667
668 /*
669  * Got an interrupt for a normal io (state online).
670  */
671 static void
672 ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
673 {
674         struct irb *irb;
675
676         irb = (struct irb *) __LC_IRB;
677         /* Check for unsolicited interrupt. */
678         if ((irb->scsw.stctl ==
679                         (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
680             && (!irb->scsw.cc)) {
681                 if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
682                     !irb->esw.esw0.erw.cons) {
683                         /* Unit check but no sense data. Need basic sense. */
684                         if (ccw_device_do_sense(cdev, irb) != 0)
685                                 goto call_handler_unsol;
686                         memcpy(irb, &cdev->private->irb, sizeof(struct irb));
687                         cdev->private->state = DEV_STATE_W4SENSE;
688                         cdev->private->intparm = 0;
689                         return;
690                 }
691 call_handler_unsol:
692                 if (cdev->handler)
693                         cdev->handler (cdev, 0, irb);
694                 return;
695         }
696         /* Accumulate status and find out if a basic sense is needed. */
697         ccw_device_accumulate_irb(cdev, irb);
698         if (cdev->private->flags.dosense) {
699                 if (ccw_device_do_sense(cdev, irb) == 0) {
700                         cdev->private->state = DEV_STATE_W4SENSE;
701                 }
702                 return;
703         }
704         /* Call the handler. */
705         if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
706                 /* Start delayed path verification. */
707                 ccw_device_online_verify(cdev, 0);
708 }
709
710 /*
711  * Got a timeout in online state.
712  */
713 static void
714 ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
715 {
716         int ret;
717
718         ccw_device_set_timeout(cdev, 0);
719         ret = ccw_device_cancel_halt_clear(cdev);
720         if (ret == -EBUSY) {
721                 ccw_device_set_timeout(cdev, 3*HZ);
722                 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
723                 return;
724         }
725         if (ret == -ENODEV) {
726                 struct subchannel *sch;
727
728                 sch = to_subchannel(cdev->dev.parent);
729                 if (!sch->lpm) {
730                         PREPARE_WORK(&cdev->private->kick_work,
731                                      ccw_device_nopath_notify, (void *)cdev);
732                         queue_work(ccw_device_notify_work,
733                                    &cdev->private->kick_work);
734                 } else
735                         dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
736         } else if (cdev->handler)
737                 cdev->handler(cdev, cdev->private->intparm,
738                               ERR_PTR(-ETIMEDOUT));
739 }
740
741 /*
742  * Got an interrupt for a basic sense.
743  */
744 void
745 ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
746 {
747         struct irb *irb;
748
749         irb = (struct irb *) __LC_IRB;
750         /* Check for unsolicited interrupt. */
751         if (irb->scsw.stctl ==
752                         (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
753                 if (irb->scsw.cc == 1)
754                         /* Basic sense hasn't started. Try again. */
755                         ccw_device_do_sense(cdev, irb);
756                 else {
757                         printk("Huh? %s(%s): unsolicited interrupt...\n",
758                                __FUNCTION__, cdev->dev.bus_id);
759                         if (cdev->handler)
760                                 cdev->handler (cdev, 0, irb);
761                 }
762                 return;
763         }
764         /* Add basic sense info to irb. */
765         ccw_device_accumulate_basic_sense(cdev, irb);
766         if (cdev->private->flags.dosense) {
767                 /* Another basic sense is needed. */
768                 ccw_device_do_sense(cdev, irb);
769                 return;
770         }
771         cdev->private->state = DEV_STATE_ONLINE;
772         /* Call the handler. */
773         if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
774                 /* Start delayed path verification. */
775                 ccw_device_online_verify(cdev, 0);
776 }
777
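/*
 * Final interrupt for a cio initiated clear: accumulate the status and
 * try to start the delayed path verification.
 */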
778 static void
779 ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
780 {
781         struct irb *irb;
782
783         irb = (struct irb *) __LC_IRB;
784         /* Accumulate status. We don't do basic sense. */
785         ccw_device_accumulate_irb(cdev, irb);
786         /* Try to start delayed device verification. */
787         ccw_device_online_verify(cdev, 0);
788         /* Note: Don't call handler for cio initiated clear! */
789 }
790
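/*
 * The i/o we tried to kill has finally terminated. Report -ETIMEDOUT to
 * the driver and check whether path related work is pending.
 */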
791 static void
792 ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
793 {
794         struct subchannel *sch;
795
796         sch = to_subchannel(cdev->dev.parent);
797         ccw_device_set_timeout(cdev, 0);
798         /* OK, i/o is dead now. Call interrupt handler. */
799         cdev->private->state = DEV_STATE_ONLINE;
800         if (cdev->handler)
801                 cdev->handler(cdev, cdev->private->intparm,
802                               ERR_PTR(-ETIMEDOUT));
803         if (!sch->lpm) {
804                 PREPARE_WORK(&cdev->private->kick_work,
805                              ccw_device_nopath_notify, (void *)cdev);
806                 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
807         } else if (cdev->private->flags.doverify)
808                 /* Start delayed path verification. */
809                 ccw_device_online_verify(cdev, 0);
810 }
811
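/*
 * Timeout while killing i/o: keep trying cancel/halt/clear until the
 * subchannel is quiet or turns out to be not operational.
 */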
812 static void
813 ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
814 {
815         int ret;
816
817         ret = ccw_device_cancel_halt_clear(cdev);
818         if (ret == -EBUSY) {
819                 ccw_device_set_timeout(cdev, 3*HZ);
820                 return;
821         }
822         if (ret == -ENODEV) {
823                 struct subchannel *sch;
824
825                 sch = to_subchannel(cdev->dev.parent);
826                 if (!sch->lpm) {
827                         PREPARE_WORK(&cdev->private->kick_work,
828                                      ccw_device_nopath_notify, (void *)cdev);
829                         queue_work(ccw_device_notify_work,
830                                    &cdev->private->kick_work);
831                 } else
832                         dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
833                 return;
834         }
835         //FIXME: Can we get here?
836         cdev->private->state = DEV_STATE_ONLINE;
837         if (cdev->handler)
838                 cdev->handler(cdev, cdev->private->intparm,
839                               ERR_PTR(-ETIMEDOUT));
840 }
841
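/*
 * Got an interrupt while waiting for the final status of a running i/o.
 * Accumulate the status, deliver it to the driver and check whether a
 * no-path notification or path verification is needed.
 */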
842 static void
843 ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
844 {
845         struct irb *irb;
846         struct subchannel *sch;
847
848         irb = (struct irb *) __LC_IRB;
849         /*
850          * Accumulate status and find out if a basic sense is needed.
851          * This is fine since we have already adapted the lpm.
852          */
853         ccw_device_accumulate_irb(cdev, irb);
854         if (cdev->private->flags.dosense) {
855                 if (ccw_device_do_sense(cdev, irb) == 0) {
856                         cdev->private->state = DEV_STATE_W4SENSE;
857                 }
858                 return;
859         }
860
861         /* Iff device is idle, reset timeout. */
862         sch = to_subchannel(cdev->dev.parent);
863         if (!stsch(sch->irq, &sch->schib))
864                 if (sch->schib.scsw.actl == 0)
865                         ccw_device_set_timeout(cdev, 0);
866         /* Call the handler. */
867         ccw_device_call_handler(cdev);
868         if (!sch->lpm) {
869                 PREPARE_WORK(&cdev->private->kick_work,
870                              ccw_device_nopath_notify, (void *)cdev);
871                 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
872         } else if (cdev->private->flags.doverify)
873                 ccw_device_online_verify(cdev, 0);
874 }
875
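/*
 * The i/o we were waiting for did not terminate in time. Try to
 * cancel/halt/clear it and report -ETIMEDOUT to the driver.
 */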
876 static void
877 ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
878 {
879         int ret;
880         struct subchannel *sch;
881
882         sch = to_subchannel(cdev->dev.parent);
883         ccw_device_set_timeout(cdev, 0);
884         ret = ccw_device_cancel_halt_clear(cdev);
885         if (ret == -EBUSY) {
886                 ccw_device_set_timeout(cdev, 3*HZ);
887                 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
888                 return;
889         }
890         if (ret == -ENODEV) {
891                 if (!sch->lpm) {
892                         PREPARE_WORK(&cdev->private->kick_work,
893                                      ccw_device_nopath_notify, (void *)cdev);
894                         queue_work(ccw_device_notify_work,
895                                    &cdev->private->kick_work);
896                 } else
897                         dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
898                 return;
899         }
900         if (cdev->handler)
901                 cdev->handler(cdev, cdev->private->intparm,
902                               ERR_PTR(-ETIMEDOUT));
903         if (!sch->lpm) {
904                 PREPARE_WORK(&cdev->private->kick_work,
905                              ccw_device_nopath_notify, (void *)cdev);
906                 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
907         } else if (cdev->private->flags.doverify)
908                 /* Start delayed path verification. */
909                 ccw_device_online_verify(cdev, 0);
910 }
911
912 static void
913 ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
914 {
915         /* When the I/O has terminated, we have to start verification. */
916         if (cdev->private->options.pgroup)
917                 cdev->private->flags.doverify = 1;
918 }
919
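/*
 * Interrupt or timeout while a steal lock operation is pending: note the
 * status, if any, and wake up the waiting process.
 */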
920 static void
921 ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
922 {
923         struct irb *irb;
924
925         switch (dev_event) {
926         case DEV_EVENT_INTERRUPT:
927                 irb = (struct irb *) __LC_IRB;
928                 /* Check for unsolicited interrupt. */
929                 if ((irb->scsw.stctl ==
930                      (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
931                     (!irb->scsw.cc))
932                         /* FIXME: we should restart stlck here, but this
933                          * is extremely unlikely ... */
934                         goto out_wakeup;
935
936                 ccw_device_accumulate_irb(cdev, irb);
937                 /* We don't care about basic sense etc. */
938                 break;
939         default: /* timeout */
940                 break;
941         }
942 out_wakeup:
943         wake_up(&cdev->private->wait_q);
944 }
945
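/*
 * Restart device recognition (sense id) for a disconnected device.
 */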
946 static void
947 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
948 {
949         struct subchannel *sch;
950
951         sch = to_subchannel(cdev->dev.parent);
952         if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
953                 /* Couldn't enable the subchannel for i/o. Sick device. */
954                 return;
955
956         /* After 60s the device recognition is considered to have failed. */
957         ccw_device_set_timeout(cdev, 60*HZ);
958
959         cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
960         ccw_device_sense_id_start(cdev);
961 }
962
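/*
 * Re-probe a disconnected device: refresh the path masks and pmcw
 * settings from the subchannel and restart device recognition.
 */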
963 void
964 device_trigger_reprobe(struct subchannel *sch)
965 {
966         struct ccw_device *cdev;
967         unsigned long flags;
968
969         if (!sch->dev.driver_data)
970                 return;
971         cdev = sch->dev.driver_data;
972         spin_lock_irqsave(&sch->lock, flags);
973         if (cdev->private->state != DEV_STATE_DISCONNECTED) {
974                 spin_unlock_irqrestore(&sch->lock, flags);
975                 return;
976         }
977         /* Update some values. */
978         if (stsch(sch->irq, &sch->schib)) {
979                 spin_unlock_irqrestore(&sch->lock, flags);
980                 return;
981         }
982         /*
983          * The pim, pam, pom values may not be accurate, but they are the best
984          * we have before performing device selection :/
985          */
986         sch->lpm = sch->schib.pmcw.pim &
987                 sch->schib.pmcw.pam &
988                 sch->schib.pmcw.pom &
989                 sch->opm;
990         /* Re-set some bits in the pmcw that were lost. */
991         sch->schib.pmcw.isc = 3;
992         sch->schib.pmcw.csense = 1;
993         sch->schib.pmcw.ena = 0;
994         if ((sch->lpm & (sch->lpm - 1)) != 0)
995                 sch->schib.pmcw.mp = 1;
996         sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
997         ccw_device_start_id(cdev, 0);
998         spin_unlock_irqrestore(&sch->lock, flags);
999 }
1000
1001 static void
1002 ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
1003 {
1004         struct subchannel *sch;
1005
1006         sch = to_subchannel(cdev->dev.parent);
1007         /*
1008          * An interrupt in state offline means a previous disable was not
1009          * successful. Try again.
1010          */
1011         cio_disable_subchannel(sch);
1012 }
1013
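/*
 * Retry the pending channel measurement schib update, then go back to
 * the online state and handle the triggering event there.
 */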
1014 static void
1015 ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
1016 {
1017         retry_set_schib(cdev);
1018         cdev->private->state = DEV_STATE_ONLINE;
1019         dev_fsm_event(cdev, dev_event);
1020 }
1021
1022
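/*
 * Quiescing the device has finished: stop the timer, leave the quiesce
 * state and wake up the waiting process.
 */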
1023 static void
1024 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1025 {
1026         ccw_device_set_timeout(cdev, 0);
1027         if (dev_event == DEV_EVENT_NOTOPER)
1028                 cdev->private->state = DEV_STATE_NOT_OPER;
1029         else
1030                 cdev->private->state = DEV_STATE_OFFLINE;
1031         wake_up(&cdev->private->wait_q);
1032 }
1033
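/*
 * Timeout while quiescing: keep trying cancel/halt/clear until the
 * subchannel is idle or found to be not operational.
 */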
1034 static void
1035 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1036 {
1037         int ret;
1038
1039         ret = ccw_device_cancel_halt_clear(cdev);
1040         switch (ret) {
1041         case 0:
1042                 cdev->private->state = DEV_STATE_OFFLINE;
1043                 wake_up(&cdev->private->wait_q);
1044                 break;
1045         case -ENODEV:
1046                 cdev->private->state = DEV_STATE_NOT_OPER;
1047                 wake_up(&cdev->private->wait_q);
1048                 break;
1049         default:
1050                 ccw_device_set_timeout(cdev, HZ/10);
1051         }
1052 }
1053
1054 /*
1055  * No operation action. This is used e.g. to ignore a timeout event in
1056  * state offline.
1057  */
1058 static void
1059 ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1060 {
1061 }
1062
1063 /*
1064  * Bug action: called for a state/event combination that should never occur.
1065  */
1066 static void
1067 ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
1068 {
1069         printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
1070                cdev->private->state, dev_event);
1071         BUG();
1072 }
1073
1074 /*
1075  * device state machine
1076  */
1077 fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1078         [DEV_STATE_NOT_OPER] = {
1079                 [DEV_EVENT_NOTOPER]     = ccw_device_nop,
1080                 [DEV_EVENT_INTERRUPT]   = ccw_device_bug,
1081                 [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
1082                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1083         },
1084         [DEV_STATE_SENSE_PGID] = {
1085                 [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1086                 [DEV_EVENT_INTERRUPT]   = ccw_device_sense_pgid_irq,
1087                 [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
1088                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1089         },
1090         [DEV_STATE_SENSE_ID] = {
1091                 [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
1092                 [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
1093                 [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
1094                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1095         },
1096         [DEV_STATE_OFFLINE] = {
1097                 [DEV_EVENT_NOTOPER]     = ccw_device_offline_notoper,
1098                 [DEV_EVENT_INTERRUPT]   = ccw_device_offline_irq,
1099                 [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
1100                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1101         },
1102         [DEV_STATE_VERIFY] = {
1103                 [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1104                 [DEV_EVENT_INTERRUPT]   = ccw_device_verify_irq,
1105                 [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
1106                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1107         },
1108         [DEV_STATE_ONLINE] = {
1109                 [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1110                 [DEV_EVENT_INTERRUPT]   = ccw_device_irq,
1111                 [DEV_EVENT_TIMEOUT]     = ccw_device_online_timeout,
1112                 [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
1113         },
1114         [DEV_STATE_W4SENSE] = {
1115                 [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1116                 [DEV_EVENT_INTERRUPT]   = ccw_device_w4sense,
1117                 [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
1118                 [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
1119         },
1120         [DEV_STATE_DISBAND_PGID] = {
1121                 [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1122                 [DEV_EVENT_INTERRUPT]   = ccw_device_disband_irq,
1123                 [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
1124                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1125         },
1126         [DEV_STATE_BOXED] = {
1127                 [DEV_EVENT_NOTOPER]     = ccw_device_offline_notoper,
1128                 [DEV_EVENT_INTERRUPT]   = ccw_device_stlck_done,
1129                 [DEV_EVENT_TIMEOUT]     = ccw_device_stlck_done,
1130                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1131         },
1132         /* states to wait for i/o completion before doing something */
1133         [DEV_STATE_CLEAR_VERIFY] = {
1134                 [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1135                 [DEV_EVENT_INTERRUPT]   = ccw_device_clear_verify,
1136                 [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
1137                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1138         },
1139         [DEV_STATE_TIMEOUT_KILL] = {
1140                 [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1141                 [DEV_EVENT_INTERRUPT]   = ccw_device_killing_irq,
1142                 [DEV_EVENT_TIMEOUT]     = ccw_device_killing_timeout,
1143                 [DEV_EVENT_VERIFY]      = ccw_device_nop, //FIXME
1144         },
1145         [DEV_STATE_WAIT4IO] = {
1146                 [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1147                 [DEV_EVENT_INTERRUPT]   = ccw_device_wait4io_irq,
1148                 [DEV_EVENT_TIMEOUT]     = ccw_device_wait4io_timeout,
1149                 [DEV_EVENT_VERIFY]      = ccw_device_wait4io_verify,
1150         },
1151         [DEV_STATE_QUIESCE] = {
1152                 [DEV_EVENT_NOTOPER]     = ccw_device_quiesce_done,
1153                 [DEV_EVENT_INTERRUPT]   = ccw_device_quiesce_done,
1154                 [DEV_EVENT_TIMEOUT]     = ccw_device_quiesce_timeout,
1155                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1156         },
1157         /* special states for devices gone not operational */
1158         [DEV_STATE_DISCONNECTED] = {
1159                 [DEV_EVENT_NOTOPER]     = ccw_device_nop,
1160                 [DEV_EVENT_INTERRUPT]   = ccw_device_start_id,
1161                 [DEV_EVENT_TIMEOUT]     = ccw_device_bug,
1162                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1163         },
1164         [DEV_STATE_DISCONNECTED_SENSE_ID] = {
1165                 [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
1166                 [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
1167                 [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
1168                 [DEV_EVENT_VERIFY]      = ccw_device_nop,
1169         },
1170         [DEV_STATE_CMFCHANGE] = {
1171                 [DEV_EVENT_NOTOPER]     = ccw_device_change_cmfstate,
1172                 [DEV_EVENT_INTERRUPT]   = ccw_device_change_cmfstate,
1173                 [DEV_EVENT_TIMEOUT]     = ccw_device_change_cmfstate,
1174                 [DEV_EVENT_VERIFY]      = ccw_device_change_cmfstate,
1175         },
1176 };
1177
1178 /*
1179  * io_subchannel_irq is called for "real" interrupts or for status
1180  * pending conditions on msch.
1181  */
1182 void
1183 io_subchannel_irq (struct device *pdev)
1184 {
1185         struct ccw_device *cdev;
1186
1187         cdev = to_subchannel(pdev)->dev.driver_data;
1188
1189         CIO_TRACE_EVENT (3, "IRQ");
1190         CIO_TRACE_EVENT (3, pdev->bus_id);
1191
1192         dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1193 }
1194
1195 EXPORT_SYMBOL_GPL(ccw_device_set_timeout);