vserver 1.9.5.x5
[linux-2.6.git] / drivers / s390 / cio / device_fsm.c
1 /*
2  * drivers/s390/cio/device_fsm.c
3  * finite state machine for device handling
4  *
5  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6  *                       IBM Corporation
7  *    Author(s): Cornelia Huck(cohuck@de.ibm.com)
8  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10
11 #include <linux/module.h>
12 #include <linux/config.h>
13 #include <linux/init.h>
14
15 #include <asm/ccwdev.h>
16 #include <asm/qdio.h>
17
18 #include "cio.h"
19 #include "cio_debug.h"
20 #include "css.h"
21 #include "device.h"
22 #include "chsc.h"
23 #include "ioasm.h"
24 #include "qdio.h"
25
26 int
27 device_is_disconnected(struct subchannel *sch)
28 {
29         struct ccw_device *cdev;
30
31         if (!sch->dev.driver_data)
32                 return 0;
33         cdev = sch->dev.driver_data;
34         return (cdev->private->state == DEV_STATE_DISCONNECTED ||
35                 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
36 }
37
38 void
39 device_set_disconnected(struct subchannel *sch)
40 {
41         struct ccw_device *cdev;
42
43         if (!sch->dev.driver_data)
44                 return;
45         cdev = sch->dev.driver_data;
46         ccw_device_set_timeout(cdev, 0);
47         cdev->private->state = DEV_STATE_DISCONNECTED;
48 }
49
50 void
51 device_set_waiting(struct subchannel *sch)
52 {
53         struct ccw_device *cdev;
54
55         if (!sch->dev.driver_data)
56                 return;
57         cdev = sch->dev.driver_data;
58         ccw_device_set_timeout(cdev, 10*HZ);
59         cdev->private->state = DEV_STATE_WAIT4IO;
60 }
61
/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 * Runs as a timer callback; the fsm event is delivered under the
 * device's ccw lock with interrupts disabled.
 */
static void
ccw_device_timeout(unsigned long data)
{
	struct ccw_device *cdev;

	/* The device was stashed in timer.data by ccw_device_set_timeout(). */
	cdev = (struct ccw_device *) data;
	spin_lock_irq(cdev->ccwlock);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}
75
/*
 * Set timeout
 *
 * Arm (or re-arm) the per-device timer to fire after 'expires' jiffies.
 * An 'expires' of 0 cancels any pending timer instead.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		/*
		 * mod_timer() returns non-zero if the timer was still
		 * pending and has been re-armed; then we are done. If it
		 * expired in between, fall through and set it up anew.
		 */
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
95
96 /* Kill any pending timers after machine check. */
97 void
98 device_kill_pending_timer(struct subchannel *sch)
99 {
100         struct ccw_device *cdev;
101
102         if (!sch->dev.driver_data)
103                 return;
104         cdev = sch->dev.driver_data;
105         ccw_device_set_timeout(cdev, 0);
106 }
107
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, three tries
 * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	/* Re-read the subchannel status block to get the current state. */
	ret = stsch(sch->irq, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
		/* Not operational or no activity -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		ret = cio_cancel(sch);
		if (ret != -EINVAL)
			return ret;
		/* cancel io unsuccessful. From now on it is asynchronous. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			/* 0 means halt started: interrupt expected. */
			return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		/* 0 means clear started: interrupt expected. */
		return (ret == 0) ? -EBUSY : ret;
	}
	panic("Can't stop i/o on subchannel.\n");
}
156
/*
 * A device that went away has reappeared. Check whether it is still the
 * same device; if not, schedule de-/re-registration from process context
 * and return 0. If it is the same device, flag that a CIO_OPER
 * notification is due (flags.donotify) and return 1.
 */
static int
ccw_device_handle_oper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	cdev->private->flags.recog_done = 1;
	/*
	 * Check if cu type and device type still match. If
	 * not, it is certainly another device and we have to
	 * de- and re-register. Also check here for non-matching devno.
	 */
	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
	    cdev->id.dev_model != cdev->private->senseid.dev_model ||
	    cdev->private->devno != sch->schib.pmcw.dev) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unreg_rereg, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return 0;
	}
	cdev->private->flags.donotify = 1;
	return 1;
}
182
183 /*
184  * The machine won't give us any notification by machine check if a chpid has
185  * been varied online on the SE so we have to find out by magic (i. e. driving
186  * the channel subsystem to device selection and updating our path masks).
187  */
188 static inline void
189 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
190 {
191         int mask, i;
192
193         for (i = 0; i<8; i++) {
194                 mask = 0x80 >> i;
195                 if (!(sch->lpm & mask))
196                         continue;
197                 if (old_lpm & mask)
198                         continue;
199                 chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
200         }
201 }
202
/*
 * Stop device recognition.
 *
 * Finish a sense-id cycle: stop the timer, disable the subchannel,
 * refresh the path masks and move the device into 'state'
 * (OFFLINE / BOXED / NOT_OPER). Handles the special case of a
 * recognition triggered for a disconnected device.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm, same_dev;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->irq, &sch->schib);
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			/* Still gone: stay disconnected. */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	same_dev = 0; /* Keep the compiler quiet... */
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : unknown device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
			/* Reappeared device: check identity, defer notify. */
			same_dev = ccw_device_handle_oper(cdev);
			notify = 1;
		}
		/* fill out sense information */
		cdev->id = (struct ccw_device_id) {
			.cu_type   = cdev->private->senseid.cu_type,
			.cu_model  = cdev->private->senseid.cu_model,
			.dev_type  = cdev->private->senseid.dev_type,
			.dev_model = cdev->private->senseid.dev_model,
		};
		if (notify) {
			cdev->private->state = DEV_STATE_OFFLINE;
			if (same_dev) {
				/* Get device online again. */
				ccw_device_online(cdev);
				wake_up(&cdev->private->wait_q);
			}
			return;
		}
		/* Issue device info message. */
		CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
			  "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
			  "%04X/%02X\n", cdev->private->devno,
			  cdev->id.cu_type, cdev->id.cu_model,
			  cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}
286
287 /*
288  * Function called from device_id.c after sense id has completed.
289  */
290 void
291 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
292 {
293         switch (err) {
294         case 0:
295                 ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
296                 break;
297         case -ETIME:            /* Sense id stopped by timeout. */
298                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
299                 break;
300         default:
301                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
302                 break;
303         }
304 }
305
306 static void
307 ccw_device_oper_notify(void *data)
308 {
309         struct ccw_device *cdev;
310         struct subchannel *sch;
311         int ret;
312
313         cdev = (struct ccw_device *)data;
314         sch = to_subchannel(cdev->dev.parent);
315         ret = (sch->driver && sch->driver->notify) ?
316                 sch->driver->notify(&sch->dev, CIO_OPER) : 0;
317         if (!ret)
318                 /* Driver doesn't want device back. */
319                 ccw_device_do_unreg_rereg((void *)cdev);
320         else
321                 wake_up(&cdev->private->wait_q);
322 }
323
/*
 * Finished with online/offline processing.
 *
 * Move the device into its final 'state', disable the subchannel unless
 * the device ended up online, and kick off the deferred CIO_OPER
 * notification if one was requested (flags.donotify).
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;


	if (state == DEV_STATE_BOXED)
		CIO_DEBUG(KERN_WARNING, 2,
			  "Boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);

	if (cdev->private->flags.donotify) {
		/* Notification runs from the workqueue, not in this context. */
		cdev->private->flags.donotify = 0;
		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
			     (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);

	/* Drop the reference taken in ccw_device_online(). */
	if (css_init_done && state != DEV_STATE_ONLINE)
		put_device (&cdev->dev);
}
359
/*
 * Function called from device_pgid.c after sense path group id has
 * completed. On success, continue with path verification; otherwise
 * finish the online processing with an appropriate final state.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	switch (err) {
	case 0:
		/* Start Path Group verification. */
		sch->vpm = 0;	/* Start with no path groups set. */
		cdev->private->state = DEV_STATE_VERIFY;
		ccw_device_verify_start(cdev);
		break;
	case -ETIME:		/* Sense path group id stopped by timeout. */
	case -EUSERS:		/* device is reserved for someone else. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		break;
	default:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
389
/*
 * Start device recognition.
 * Returns 0 on success, -EINVAL if called in a wrong state, or the
 * error from cio_enable_subchannel(). Completion is reported via
 * ccw_device_sense_id_done() -> ccw_device_recog_done().
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return ret;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after an
	 * timeout (or if sense pgid during path verification detects the device
	 * is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	ccw_device_sense_id_start(cdev);
	return 0;
}
424
425 /*
426  * Handle timeout in device recognition.
427  */
428 static void
429 ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
430 {
431         int ret;
432
433         ret = ccw_device_cancel_halt_clear(cdev);
434         switch (ret) {
435         case 0:
436                 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
437                 break;
438         case -ENODEV:
439                 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
440                 break;
441         default:
442                 ccw_device_set_timeout(cdev, 3*HZ);
443         }
444 }
445
446
/*
 * Work function: no path is left to the device (sch->lpm == 0).
 * Ask the subchannel driver whether it wants to keep the device anyway.
 * If not, schedule unregistration of the subchannel; if yes, go into
 * disconnected state and wait for the paths to come back.
 */
static void
ccw_device_nopath_notify(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	cdev = (struct ccw_device *)data;
	sch = to_subchannel(cdev->dev.parent);
	/* Extra sanity. */
	if (sch->lpm)
		return;
	ret = (sch->driver && sch->driver->notify) ?
		sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
	if (!ret) {
		if (get_device(&sch->dev)) {
			/* Driver doesn't want to keep device. */
			cio_disable_subchannel(sch);
			if (get_device(&cdev->dev)) {
				/* NOTE(review): both references taken here are
				 * presumably released by
				 * ccw_device_call_sch_unregister — confirm. */
				PREPARE_WORK(&cdev->private->kick_work,
					     ccw_device_call_sch_unregister,
					     (void *)cdev);
				queue_work(ccw_device_work,
					   &cdev->private->kick_work);
			} else
				/* cdev reference failed: give the subchannel
				 * reference back. */
				put_device(&sch->dev);
		}
	} else {
		/* Driver keeps the device: disconnect and wait for paths. */
		cio_disable_subchannel(sch);
		ccw_device_set_timeout(cdev, 0);
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
	}
}
481
/*
 * Completion callback for path verification (see ccw_device_verify_start).
 * Finish the online processing with a final state derived from 'err'.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	cdev->private->flags.doverify = 0;
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		/* fall through */
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* Verification failed: notify driver, go not operational. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
503
/*
 * Get device online.
 * Returns 0 on success (completion reported asynchronously through
 * ccw_device_done()), -EINVAL if called in a wrong state, -ENODEV if
 * the device reference could not be obtained, or the error from
 * cio_enable_subchannel().
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	/* Hold a device reference while going online; it is dropped in
	 * ccw_device_done() if the device does not end up online. */
	if (css_init_done && !get_device(&cdev->dev))
		return -ENODEV;
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state online immediately. */
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}
537
538 void
539 ccw_device_disband_done(struct ccw_device *cdev, int err)
540 {
541         switch (err) {
542         case 0:
543                 ccw_device_done(cdev, DEV_STATE_OFFLINE);
544                 break;
545         case -ETIME:
546                 ccw_device_done(cdev, DEV_STATE_BOXED);
547                 break;
548         default:
549                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
550                 break;
551         }
552 }
553
/*
 * Shutdown device.
 * Returns 0 on success, -EINVAL if the device is not online and -EBUSY
 * while i/o is still in progress on the subchannel.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cdev->private->state != DEV_STATE_ONLINE) {
		/* Report busy rather than wrong state while i/o runs, so
		 * the caller can retry. */
		if (sch->schib.scsw.actl != 0)
			return -EBUSY;
		return -EINVAL;
	}
	if (sch->schib.scsw.actl != 0)
		return -EBUSY;
	/* Are we doing path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}
581
582 /*
583  * Handle timeout in device online/offline process.
584  */
585 static void
586 ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
587 {
588         int ret;
589
590         ret = ccw_device_cancel_halt_clear(cdev);
591         switch (ret) {
592         case 0:
593                 ccw_device_done(cdev, DEV_STATE_BOXED);
594                 break;
595         case -ENODEV:
596                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
597                 break;
598         default:
599                 ccw_device_set_timeout(cdev, 3*HZ);
600         }
601 }
602
/*
 * Handle not oper event in device recognition.
 * Recognition cannot succeed on a device that is not operational.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
611
/*
 * Handle not operational event while offline.
 * The device is gone: unregister the subchannel and reset its
 * interruption parameter in the pmcw (written back via cio_modify).
 */
static void
ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	cdev->private->state = DEV_STATE_NOT_OPER;
	sch = to_subchannel(cdev->dev.parent);
	device_unregister(&sch->dev);
	sch->schib.pmcw.intparm = 0;
	cio_modify(sch);
	wake_up(&cdev->private->wait_q);
}
627
628 /*
629  * Handle not operational event while online.
630  */
631 static void
632 ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
633 {
634         struct subchannel *sch;
635
636         sch = to_subchannel(cdev->dev.parent);
637         if (sch->driver->notify &&
638             sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
639                         ccw_device_set_timeout(cdev, 0);
640                         cdev->private->state = DEV_STATE_DISCONNECTED;
641                         wake_up(&cdev->private->wait_q);
642                         return;
643         }
644         cdev->private->state = DEV_STATE_NOT_OPER;
645         cio_disable_subchannel(sch);
646         if (sch->schib.scsw.actl != 0) {
647                 // FIXME: not-oper indication to device driver ?
648                 ccw_device_call_handler(cdev);
649         }
650         device_unregister(&sch->dev);
651         sch->schib.pmcw.intparm = 0;
652         cio_modify(sch);
653         wake_up(&cdev->private->wait_q);
654 }
655
/*
 * Handle path verification event.
 * Start path verification if the device is idle; otherwise remember
 * that verification is wanted (flags.doverify) so it can run once the
 * final interrupt status has been delivered.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	/* Without path grouping there is nothing to verify. */
	if (!cdev->private->options.pgroup)
		return;
	if (cdev->private->state == DEV_STATE_W4SENSE) {
		/* Basic sense still running: postpone verification. */
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (sch->schib.scsw.actl != 0 ||
	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}
685
/*
 * Got an interrupt for a normal io (state online).
 * The irb to inspect is provided by the machine in the lowcore
 * at __LC_IRB.
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if ((irb->scsw.stctl ==
			(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
	    && (!irb->scsw.cc)) {
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			/* Overwrite the lowcore irb with the device's stored
			 * irb (dst is the lowcore irb, src the stored one). */
			memcpy(irb, &cdev->private->irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		/* Unsolicited status is passed to the driver with intparm 0. */
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
727
/*
 * Got an timeout in online state.
 * Try to terminate the current i/o; while halt/clear is still pending,
 * switch to DEV_STATE_TIMEOUT_KILL and retry every 3 seconds.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Interrupt from halt/clear still expected. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			/* No path left: notify from process context. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	} else if (cdev->handler)
		/* I/O terminated: report the timeout to the driver. */
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
758
/*
 * Got an interrupt for a basic sense.
 * Accumulate sense data; when no further sense is needed, return to
 * the online state and deliver the final status to the driver.
 */
void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (irb->scsw.stctl ==
			(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (irb->scsw.cc == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			printk("Huh? %s(%s): unsolicited interrupt...\n",
			       __FUNCTION__, cdev->dev.bus_id);
			if (cdev->handler)
				cdev->handler (cdev, 0, irb);
		}
		return;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
	/* Sense complete: back to online. */
	cdev->private->state = DEV_STATE_ONLINE;
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
795
/*
 * Interrupt arrived for a cio-initiated clear: just record the status
 * and retry the delayed path verification; the device driver is not
 * informed about this interrupt.
 */
static void
ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Accumulate status. We don't do basic sense. */
	ccw_device_accumulate_irb(cdev, irb);
	/* Try to start delayed device verification. */
	ccw_device_online_verify(cdev, 0);
	/* Note: Don't call handler for cio initiated clear! */
}
808
/*
 * Interrupt arrived while we were killing i/o after a timeout: the i/o
 * has been terminated. Report the timeout to the device driver and go
 * back to normal operation (or notify if no path is left).
 */
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		/* No usable path: ask driver whether to keep the device. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
829
/*
 * Timeout expired while we were killing i/o: retry cancel/halt/clear
 * and keep re-arming the timer while an interrupt is still expected.
 */
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Halt/clear still in progress: check back in 3 seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			/* No path left: notify from process context. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	//FIXME: Can we get here?
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
859
/*
 * An interrupt arrived while we were waiting for outstanding i/o to
 * terminate (state wait4io). Accumulate the status, start basic sense
 * if needed, otherwise notify the driver and resume path handling.
 */
static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	struct subchannel *sch;

	/* The interrupt response block is found at a fixed lowcore address. */
	irb = (struct irb *) __LC_IRB;
	/*
	 * Accumulate status and find out if a basic sense is needed.
	 * This is fine since we have already adapted the lpm.
	 */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			/* Basic sense started; wait for its interrupt. */
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}

	/* Iff device is idle, reset timeout. */
	sch = to_subchannel(cdev->dev.parent);
	if (!stsch(sch->irq, &sch->schib))
		if (sch->schib.scsw.actl == 0)
			ccw_device_set_timeout(cdev, 0);
	/* Call the handler. */
	ccw_device_call_handler(cdev);
	if (!sch->lpm) {
		/* No usable path is left; hand off to the notify workqueue. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
893
/*
 * The wait for outstanding i/o (state wait4io) timed out. Try to
 * terminate the i/o with cancel/halt/clear and report -ETIMEDOUT to
 * the device driver.
 */
static void
ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Termination in progress; retry in 3s from timeout_kill. */
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		if (!sch->lpm) {
			/* No path left; defer to the notify workqueue. */
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	/* I/o is dead; tell the driver it timed out. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
929
930 static void
931 ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
932 {
933         /* When the I/O has terminated, we have to start verification. */
934         if (cdev->private->options.pgroup)
935                 cdev->private->flags.doverify = 1;
936 }
937
938 static void
939 ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
940 {
941         struct irb *irb;
942
943         switch (dev_event) {
944         case DEV_EVENT_INTERRUPT:
945                 irb = (struct irb *) __LC_IRB;
946                 /* Check for unsolicited interrupt. */
947                 if ((irb->scsw.stctl ==
948                      (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
949                     (!irb->scsw.cc))
950                         /* FIXME: we should restart stlck here, but this
951                          * is extremely unlikely ... */
952                         goto out_wakeup;
953
954                 ccw_device_accumulate_irb(cdev, irb);
955                 /* We don't care about basic sense etc. */
956                 break;
957         default: /* timeout */
958                 break;
959         }
960 out_wakeup:
961         wake_up(&cdev->private->wait_q);
962 }
963
964 static void
965 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
966 {
967         struct subchannel *sch;
968
969         sch = to_subchannel(cdev->dev.parent);
970         if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
971                 /* Couldn't enable the subchannel for i/o. Sick device. */
972                 return;
973
974         /* After 60s the device recognition is considered to have failed. */
975         ccw_device_set_timeout(cdev, 60*HZ);
976
977         cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
978         ccw_device_sense_id_start(cdev);
979 }
980
/*
 * Restart device recognition for a disconnected device whose subchannel
 * may have become operational again. No-op unless the device is in the
 * disconnected state. Called with interrupts enabled; takes sch->lock.
 */
void
device_trigger_reprobe(struct subchannel *sch)
{
	struct ccw_device *cdev;
	unsigned long flags;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	spin_lock_irqsave(&sch->lock, flags);
	if (cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irqrestore(&sch->lock, flags);
		return;
	}
	/* Update some values. */
	if (stsch(sch->irq, &sch->schib)) {
		spin_unlock_irqrestore(&sch->lock, flags);
		return;
	}
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	/* Re-set some bits in the pmcw that were lost. */
	sch->schib.pmcw.isc = 3;
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	/* More than one bit set in lpm means more than one usable path. */
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	/* We should also update ssd info, but this has to wait. */
	ccw_device_start_id(cdev, 0);
	spin_unlock_irqrestore(&sch->lock, flags);
}
1019
1020 static void
1021 ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
1022 {
1023         struct subchannel *sch;
1024
1025         sch = to_subchannel(cdev->dev.parent);
1026         /*
1027          * An interrupt in state offline means a previous disable was not
1028          * successful. Try again.
1029          */
1030         cio_disable_subchannel(sch);
1031 }
1032
/*
 * Retry the pending schib update (cmf state change), then return to the
 * online state and redeliver the event that got us here.
 */
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
1040
1041
1042 static void
1043 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1044 {
1045         ccw_device_set_timeout(cdev, 0);
1046         if (dev_event == DEV_EVENT_NOTOPER)
1047                 cdev->private->state = DEV_STATE_NOT_OPER;
1048         else
1049                 cdev->private->state = DEV_STATE_OFFLINE;
1050         wake_up(&cdev->private->wait_q);
1051 }
1052
1053 static void
1054 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1055 {
1056         int ret;
1057
1058         ret = ccw_device_cancel_halt_clear(cdev);
1059         switch (ret) {
1060         case 0:
1061                 cdev->private->state = DEV_STATE_OFFLINE;
1062                 wake_up(&cdev->private->wait_q);
1063                 break;
1064         case -ENODEV:
1065                 cdev->private->state = DEV_STATE_NOT_OPER;
1066                 wake_up(&cdev->private->wait_q);
1067                 break;
1068         default:
1069                 ccw_device_set_timeout(cdev, HZ/10);
1070         }
1071 }
1072
/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline. Both arguments are deliberately unused.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}
1081
/*
 * Bug operation action. Marks state/event combinations in the
 * jumptable below that must never be reached; crashes loudly if one is.
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
	printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
	       cdev->private->state, dev_event);
	BUG();
}
1092
/*
 * device statemachine
 *
 * Indexed by [current device state][incoming event]; each slot is the
 * action function invoked for that combination. ccw_device_nop ignores
 * the event, ccw_device_bug flags a combination that must never occur.
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_CLEAR_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_clear_verify,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
	},
	[DEV_STATE_WAIT4IO] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_wait4io_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_wait4io_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_wait4io_verify,
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
};
1196
1197 /*
1198  * io_subchannel_irq is called for "real" interrupts or for status
1199  * pending conditions on msch.
1200  */
1201 void
1202 io_subchannel_irq (struct device *pdev)
1203 {
1204         struct ccw_device *cdev;
1205
1206         cdev = to_subchannel(pdev)->dev.driver_data;
1207
1208         CIO_TRACE_EVENT (3, "IRQ");
1209         CIO_TRACE_EVENT (3, pdev->bus_id);
1210         if (cdev)
1211                 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1212 }
1213
/* ccw_device_set_timeout() is also needed by code outside this file. */
EXPORT_SYMBOL_GPL(ccw_device_set_timeout);