/*
 *  drivers/s390/cio/device_fsm.c
 *  finite state machine for device handling
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                       IBM Corporation
 *    Author(s): Cornelia Huck(cohuck@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "qdio.h"
27 device_is_disconnected(struct subchannel *sch)
29 struct ccw_device *cdev;
31 if (!sch->dev.driver_data)
33 cdev = sch->dev.driver_data;
34 return (cdev->private->state == DEV_STATE_DISCONNECTED ||
35 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
39 device_set_disconnected(struct subchannel *sch)
41 struct ccw_device *cdev;
43 if (!sch->dev.driver_data)
45 cdev = sch->dev.driver_data;
46 ccw_device_set_timeout(cdev, 0);
47 cdev->private->state = DEV_STATE_DISCONNECTED;
51 device_set_waiting(struct subchannel *sch)
53 struct ccw_device *cdev;
55 if (!sch->dev.driver_data)
57 cdev = sch->dev.driver_data;
58 ccw_device_set_timeout(cdev, 10*HZ);
59 cdev->private->state = DEV_STATE_WAIT4IO;
63 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
66 ccw_device_timeout(unsigned long data)
68 struct ccw_device *cdev;
70 cdev = (struct ccw_device *) data;
71 spin_lock_irq(cdev->ccwlock);
72 dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
73 spin_unlock_irq(cdev->ccwlock);
80 ccw_device_set_timeout(struct ccw_device *cdev, int expires)
83 del_timer(&cdev->private->timer);
86 if (timer_pending(&cdev->private->timer)) {
87 if (mod_timer(&cdev->private->timer, jiffies + expires))
90 cdev->private->timer.function = ccw_device_timeout;
91 cdev->private->timer.data = (unsigned long) cdev;
92 cdev->private->timer.expires = jiffies + expires;
93 add_timer(&cdev->private->timer);
96 /* Kill any pending timers after machine check. */
98 device_kill_pending_timer(struct subchannel *sch)
100 struct ccw_device *cdev;
102 if (!sch->dev.driver_data)
104 cdev = sch->dev.driver_data;
105 ccw_device_set_timeout(cdev, 0);
109 * Cancel running i/o. This is called repeatedly since halt/clear are
110 * asynchronous operations. We do one try with cio_cancel, two tries
111 * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
112 * Returns 0 if device now idle, -ENODEV for device not operational and
113 * -EBUSY if an interrupt is expected (either from halt/clear or from a
117 ccw_device_cancel_halt_clear(struct ccw_device *cdev)
119 struct subchannel *sch;
122 sch = to_subchannel(cdev->dev.parent);
123 ret = stsch(sch->irq, &sch->schib);
124 if (ret || !sch->schib.pmcw.dnv)
126 if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
127 /* Not operational or no activity -> done. */
129 /* Stage 1: cancel io. */
130 if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
131 !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
132 ret = cio_cancel(sch);
135 /* cancel io unsuccessful. From now on it is asynchronous. */
136 cdev->private->iretry = 3; /* 3 halt retries. */
138 if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
139 /* Stage 2: halt io. */
140 if (cdev->private->iretry) {
141 cdev->private->iretry--;
143 return (ret == 0) ? -EBUSY : ret;
145 /* halt io unsuccessful. */
146 cdev->private->iretry = 255; /* 255 clear retries. */
148 /* Stage 3: clear io. */
149 if (cdev->private->iretry) {
150 cdev->private->iretry--;
151 ret = cio_clear (sch);
152 return (ret == 0) ? -EBUSY : ret;
154 panic("Can't stop i/o on subchannel.\n");
158 ccw_device_handle_oper(struct ccw_device *cdev)
160 struct subchannel *sch;
162 sch = to_subchannel(cdev->dev.parent);
163 cdev->private->flags.recog_done = 1;
165 * Check if cu type and device type still match. If
166 * not, it is certainly another device and we have to
167 * de- and re-register. Also check here for non-matching devno.
169 if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
170 cdev->id.cu_model != cdev->private->senseid.cu_model ||
171 cdev->id.dev_type != cdev->private->senseid.dev_type ||
172 cdev->id.dev_model != cdev->private->senseid.dev_model ||
173 cdev->private->devno != sch->schib.pmcw.dev) {
174 PREPARE_WORK(&cdev->private->kick_work,
175 ccw_device_do_unreg_rereg, (void *)cdev);
176 queue_work(ccw_device_work, &cdev->private->kick_work);
179 cdev->private->flags.donotify = 1;
184 * The machine won't give us any notification by machine check if a chpid has
185 * been varied online on the SE so we have to find out by magic (i. e. driving
186 * the channel subsystem to device selection and updating our path masks).
189 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
193 for (i = 0; i<8; i++) {
195 if (!(sch->lpm & mask))
199 chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
204 * Stop device recognition.
207 ccw_device_recog_done(struct ccw_device *cdev, int state)
209 struct subchannel *sch;
210 int notify, old_lpm, same_dev;
212 sch = to_subchannel(cdev->dev.parent);
214 ccw_device_set_timeout(cdev, 0);
215 cio_disable_subchannel(sch);
217 * Now that we tried recognition, we have performed device selection
218 * through ssch() and the path information is up to date.
221 stsch(sch->irq, &sch->schib);
222 sch->lpm = sch->schib.pmcw.pim &
223 sch->schib.pmcw.pam &
224 sch->schib.pmcw.pom &
226 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
227 /* Force reprobe on all chpids. */
229 if (sch->lpm != old_lpm)
230 __recover_lost_chpids(sch, old_lpm);
231 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
232 if (state == DEV_STATE_NOT_OPER) {
233 cdev->private->flags.recog_done = 1;
234 cdev->private->state = DEV_STATE_DISCONNECTED;
237 /* Boxed devices don't need extra treatment. */
240 same_dev = 0; /* Keep the compiler quiet... */
242 case DEV_STATE_NOT_OPER:
243 CIO_DEBUG(KERN_WARNING, 2,
244 "SenseID : unknown device %04x on subchannel %04x\n",
245 cdev->private->devno, sch->irq);
247 case DEV_STATE_OFFLINE:
248 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
249 same_dev = ccw_device_handle_oper(cdev);
252 /* fill out sense information */
253 cdev->id = (struct ccw_device_id) {
254 .cu_type = cdev->private->senseid.cu_type,
255 .cu_model = cdev->private->senseid.cu_model,
256 .dev_type = cdev->private->senseid.dev_type,
257 .dev_model = cdev->private->senseid.dev_model,
260 cdev->private->state = DEV_STATE_OFFLINE;
262 /* Get device online again. */
263 ccw_device_online(cdev);
264 wake_up(&cdev->private->wait_q);
268 /* Issue device info message. */
269 CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
270 "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
271 "%04X/%02X\n", cdev->private->devno,
272 cdev->id.cu_type, cdev->id.cu_model,
273 cdev->id.dev_type, cdev->id.dev_model);
275 case DEV_STATE_BOXED:
276 CIO_DEBUG(KERN_WARNING, 2,
277 "SenseID : boxed device %04x on subchannel %04x\n",
278 cdev->private->devno, sch->irq);
281 cdev->private->state = state;
282 io_subchannel_recog_done(cdev);
283 if (state != DEV_STATE_NOT_OPER)
284 wake_up(&cdev->private->wait_q);
288 * Function called from device_id.c after sense id has completed.
291 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
295 ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
297 case -ETIME: /* Sense id stopped by timeout. */
298 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
301 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
307 ccw_device_oper_notify(void *data)
309 struct ccw_device *cdev;
310 struct subchannel *sch;
313 cdev = (struct ccw_device *)data;
314 sch = to_subchannel(cdev->dev.parent);
315 ret = (sch->driver && sch->driver->notify) ?
316 sch->driver->notify(&sch->dev, CIO_OPER) : 0;
318 /* Driver doesn't want device back. */
319 ccw_device_do_unreg_rereg((void *)cdev);
321 wake_up(&cdev->private->wait_q);
325 * Finished with online/offline processing.
328 ccw_device_done(struct ccw_device *cdev, int state)
330 struct subchannel *sch;
332 sch = to_subchannel(cdev->dev.parent);
334 if (state != DEV_STATE_ONLINE)
335 cio_disable_subchannel(sch);
337 /* Reset device status. */
338 memset(&cdev->private->irb, 0, sizeof(struct irb));
340 cdev->private->state = state;
343 if (state == DEV_STATE_BOXED)
344 CIO_DEBUG(KERN_WARNING, 2,
345 "Boxed device %04x on subchannel %04x\n",
346 cdev->private->devno, sch->irq);
348 if (cdev->private->flags.donotify) {
349 cdev->private->flags.donotify = 0;
350 PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
352 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
354 wake_up(&cdev->private->wait_q);
356 if (css_init_done && state != DEV_STATE_ONLINE)
357 put_device (&cdev->dev);
361 * Function called from device_pgid.c after sense path ground has completed.
364 ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
366 struct subchannel *sch;
368 sch = to_subchannel(cdev->dev.parent);
371 /* Start Path Group verification. */
372 sch->vpm = 0; /* Start with no path groups set. */
373 cdev->private->state = DEV_STATE_VERIFY;
374 ccw_device_verify_start(cdev);
376 case -ETIME: /* Sense path group id stopped by timeout. */
377 case -EUSERS: /* device is reserved for someone else. */
378 ccw_device_done(cdev, DEV_STATE_BOXED);
380 case -EOPNOTSUPP: /* path grouping not supported, just set online. */
381 cdev->private->options.pgroup = 0;
382 ccw_device_done(cdev, DEV_STATE_ONLINE);
385 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
391 * Start device recognition.
394 ccw_device_recognition(struct ccw_device *cdev)
396 struct subchannel *sch;
399 if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
400 (cdev->private->state != DEV_STATE_BOXED))
402 sch = to_subchannel(cdev->dev.parent);
403 ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
405 /* Couldn't enable the subchannel for i/o. Sick device. */
408 /* After 60s the device recognition is considered to have failed. */
409 ccw_device_set_timeout(cdev, 60*HZ);
412 * We used to start here with a sense pgid to find out whether a device
413 * is locked by someone else. Unfortunately, the sense pgid command
414 * code has other meanings on devices predating the path grouping
415 * algorithm, so we start with sense id and box the device after an
416 * timeout (or if sense pgid during path verification detects the device
417 * is locked, as may happen on newer devices).
419 cdev->private->flags.recog_done = 0;
420 cdev->private->state = DEV_STATE_SENSE_ID;
421 ccw_device_sense_id_start(cdev);
426 * Handle timeout in device recognition.
429 ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
433 ret = ccw_device_cancel_halt_clear(cdev);
436 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
439 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
442 ccw_device_set_timeout(cdev, 3*HZ);
448 ccw_device_nopath_notify(void *data)
450 struct ccw_device *cdev;
451 struct subchannel *sch;
454 cdev = (struct ccw_device *)data;
455 sch = to_subchannel(cdev->dev.parent);
459 ret = (sch->driver && sch->driver->notify) ?
460 sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
462 if (get_device(&sch->dev)) {
463 /* Driver doesn't want to keep device. */
464 cio_disable_subchannel(sch);
465 if (get_device(&cdev->dev)) {
466 PREPARE_WORK(&cdev->private->kick_work,
467 ccw_device_call_sch_unregister,
469 queue_work(ccw_device_work,
470 &cdev->private->kick_work);
472 put_device(&sch->dev);
475 cio_disable_subchannel(sch);
476 ccw_device_set_timeout(cdev, 0);
477 cdev->private->state = DEV_STATE_DISCONNECTED;
478 wake_up(&cdev->private->wait_q);
483 ccw_device_verify_done(struct ccw_device *cdev, int err)
485 cdev->private->flags.doverify = 0;
487 case -EOPNOTSUPP: /* path grouping not supported, just set online. */
488 cdev->private->options.pgroup = 0;
490 ccw_device_done(cdev, DEV_STATE_ONLINE);
493 ccw_device_done(cdev, DEV_STATE_BOXED);
496 PREPARE_WORK(&cdev->private->kick_work,
497 ccw_device_nopath_notify, (void *)cdev);
498 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
499 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
508 ccw_device_online(struct ccw_device *cdev)
510 struct subchannel *sch;
513 if ((cdev->private->state != DEV_STATE_OFFLINE) &&
514 (cdev->private->state != DEV_STATE_BOXED))
516 sch = to_subchannel(cdev->dev.parent);
517 if (css_init_done && !get_device(&cdev->dev))
519 ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
521 /* Couldn't enable the subchannel for i/o. Sick device. */
523 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
526 /* Do we want to do path grouping? */
527 if (!cdev->private->options.pgroup) {
528 /* No, set state online immediately. */
529 ccw_device_done(cdev, DEV_STATE_ONLINE);
532 /* Do a SensePGID first. */
533 cdev->private->state = DEV_STATE_SENSE_PGID;
534 ccw_device_sense_pgid_start(cdev);
539 ccw_device_disband_done(struct ccw_device *cdev, int err)
543 ccw_device_done(cdev, DEV_STATE_OFFLINE);
546 ccw_device_done(cdev, DEV_STATE_BOXED);
549 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
558 ccw_device_offline(struct ccw_device *cdev)
560 struct subchannel *sch;
562 sch = to_subchannel(cdev->dev.parent);
563 if (cdev->private->state != DEV_STATE_ONLINE) {
564 if (sch->schib.scsw.actl != 0)
568 if (sch->schib.scsw.actl != 0)
570 /* Are we doing path grouping? */
571 if (!cdev->private->options.pgroup) {
572 /* No, set state offline immediately. */
573 ccw_device_done(cdev, DEV_STATE_OFFLINE);
576 /* Start Set Path Group commands. */
577 cdev->private->state = DEV_STATE_DISBAND_PGID;
578 ccw_device_disband_start(cdev);
583 * Handle timeout in device online/offline process.
586 ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
590 ret = ccw_device_cancel_halt_clear(cdev);
593 ccw_device_done(cdev, DEV_STATE_BOXED);
596 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
599 ccw_device_set_timeout(cdev, 3*HZ);
604 * Handle not oper event in device recognition.
607 ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
609 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
613 * Handle not operational event while offline.
616 ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
618 struct subchannel *sch;
620 cdev->private->state = DEV_STATE_NOT_OPER;
621 sch = to_subchannel(cdev->dev.parent);
622 device_unregister(&sch->dev);
623 sch->schib.pmcw.intparm = 0;
625 wake_up(&cdev->private->wait_q);
629 * Handle not operational event while online.
632 ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
634 struct subchannel *sch;
636 sch = to_subchannel(cdev->dev.parent);
637 if (sch->driver->notify &&
638 sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
639 ccw_device_set_timeout(cdev, 0);
640 cdev->private->state = DEV_STATE_DISCONNECTED;
641 wake_up(&cdev->private->wait_q);
644 cdev->private->state = DEV_STATE_NOT_OPER;
645 cio_disable_subchannel(sch);
646 if (sch->schib.scsw.actl != 0) {
647 // FIXME: not-oper indication to device driver ?
648 ccw_device_call_handler(cdev);
650 device_unregister(&sch->dev);
651 sch->schib.pmcw.intparm = 0;
653 wake_up(&cdev->private->wait_q);
657 * Handle path verification event.
660 ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
662 struct subchannel *sch;
664 if (!cdev->private->options.pgroup)
666 if (cdev->private->state == DEV_STATE_W4SENSE) {
667 cdev->private->flags.doverify = 1;
670 sch = to_subchannel(cdev->dev.parent);
671 if (sch->schib.scsw.actl != 0 ||
672 (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
674 * No final status yet or final status not yet delivered
675 * to the device driver. Can't do path verfication now,
676 * delay until final status was delivered.
678 cdev->private->flags.doverify = 1;
681 /* Device is idle, we can do the path verification. */
682 cdev->private->state = DEV_STATE_VERIFY;
683 ccw_device_verify_start(cdev);
687 * Got an interrupt for a normal io (state online).
690 ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
694 irb = (struct irb *) __LC_IRB;
695 /* Check for unsolicited interrupt. */
696 if ((irb->scsw.stctl ==
697 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
698 && (!irb->scsw.cc)) {
699 if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
700 !irb->esw.esw0.erw.cons) {
701 /* Unit check but no sense data. Need basic sense. */
702 if (ccw_device_do_sense(cdev, irb) != 0)
703 goto call_handler_unsol;
704 memcpy(irb, &cdev->private->irb, sizeof(struct irb));
705 cdev->private->state = DEV_STATE_W4SENSE;
706 cdev->private->intparm = 0;
711 cdev->handler (cdev, 0, irb);
714 /* Accumulate status and find out if a basic sense is needed. */
715 ccw_device_accumulate_irb(cdev, irb);
716 if (cdev->private->flags.dosense) {
717 if (ccw_device_do_sense(cdev, irb) == 0) {
718 cdev->private->state = DEV_STATE_W4SENSE;
722 /* Call the handler. */
723 if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
724 /* Start delayed path verification. */
725 ccw_device_online_verify(cdev, 0);
729 * Got an timeout in online state.
732 ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
736 ccw_device_set_timeout(cdev, 0);
737 ret = ccw_device_cancel_halt_clear(cdev);
739 ccw_device_set_timeout(cdev, 3*HZ);
740 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
743 if (ret == -ENODEV) {
744 struct subchannel *sch;
746 sch = to_subchannel(cdev->dev.parent);
748 PREPARE_WORK(&cdev->private->kick_work,
749 ccw_device_nopath_notify, (void *)cdev);
750 queue_work(ccw_device_notify_work,
751 &cdev->private->kick_work);
753 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
754 } else if (cdev->handler)
755 cdev->handler(cdev, cdev->private->intparm,
756 ERR_PTR(-ETIMEDOUT));
760 * Got an interrupt for a basic sense.
763 ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
767 irb = (struct irb *) __LC_IRB;
768 /* Check for unsolicited interrupt. */
769 if (irb->scsw.stctl ==
770 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
771 if (irb->scsw.cc == 1)
772 /* Basic sense hasn't started. Try again. */
773 ccw_device_do_sense(cdev, irb);
775 printk("Huh? %s(%s): unsolicited interrupt...\n",
776 __FUNCTION__, cdev->dev.bus_id);
778 cdev->handler (cdev, 0, irb);
782 /* Add basic sense info to irb. */
783 ccw_device_accumulate_basic_sense(cdev, irb);
784 if (cdev->private->flags.dosense) {
785 /* Another basic sense is needed. */
786 ccw_device_do_sense(cdev, irb);
789 cdev->private->state = DEV_STATE_ONLINE;
790 /* Call the handler. */
791 if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
792 /* Start delayed path verification. */
793 ccw_device_online_verify(cdev, 0);
797 ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
801 irb = (struct irb *) __LC_IRB;
802 /* Accumulate status. We don't do basic sense. */
803 ccw_device_accumulate_irb(cdev, irb);
804 /* Try to start delayed device verification. */
805 ccw_device_online_verify(cdev, 0);
806 /* Note: Don't call handler for cio initiated clear! */
810 ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
812 struct subchannel *sch;
814 sch = to_subchannel(cdev->dev.parent);
815 ccw_device_set_timeout(cdev, 0);
816 /* OK, i/o is dead now. Call interrupt handler. */
817 cdev->private->state = DEV_STATE_ONLINE;
819 cdev->handler(cdev, cdev->private->intparm,
820 ERR_PTR(-ETIMEDOUT));
822 PREPARE_WORK(&cdev->private->kick_work,
823 ccw_device_nopath_notify, (void *)cdev);
824 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
825 } else if (cdev->private->flags.doverify)
826 /* Start delayed path verification. */
827 ccw_device_online_verify(cdev, 0);
831 ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
835 ret = ccw_device_cancel_halt_clear(cdev);
837 ccw_device_set_timeout(cdev, 3*HZ);
840 if (ret == -ENODEV) {
841 struct subchannel *sch;
843 sch = to_subchannel(cdev->dev.parent);
845 PREPARE_WORK(&cdev->private->kick_work,
846 ccw_device_nopath_notify, (void *)cdev);
847 queue_work(ccw_device_notify_work,
848 &cdev->private->kick_work);
850 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
853 //FIXME: Can we get here?
854 cdev->private->state = DEV_STATE_ONLINE;
856 cdev->handler(cdev, cdev->private->intparm,
857 ERR_PTR(-ETIMEDOUT));
861 ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
864 struct subchannel *sch;
866 irb = (struct irb *) __LC_IRB;
868 * Accumulate status and find out if a basic sense is needed.
869 * This is fine since we have already adapted the lpm.
871 ccw_device_accumulate_irb(cdev, irb);
872 if (cdev->private->flags.dosense) {
873 if (ccw_device_do_sense(cdev, irb) == 0) {
874 cdev->private->state = DEV_STATE_W4SENSE;
879 /* Iff device is idle, reset timeout. */
880 sch = to_subchannel(cdev->dev.parent);
881 if (!stsch(sch->irq, &sch->schib))
882 if (sch->schib.scsw.actl == 0)
883 ccw_device_set_timeout(cdev, 0);
884 /* Call the handler. */
885 ccw_device_call_handler(cdev);
887 PREPARE_WORK(&cdev->private->kick_work,
888 ccw_device_nopath_notify, (void *)cdev);
889 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
890 } else if (cdev->private->flags.doverify)
891 ccw_device_online_verify(cdev, 0);
895 ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
898 struct subchannel *sch;
900 sch = to_subchannel(cdev->dev.parent);
901 ccw_device_set_timeout(cdev, 0);
902 ret = ccw_device_cancel_halt_clear(cdev);
904 ccw_device_set_timeout(cdev, 3*HZ);
905 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
908 if (ret == -ENODEV) {
910 PREPARE_WORK(&cdev->private->kick_work,
911 ccw_device_nopath_notify, (void *)cdev);
912 queue_work(ccw_device_notify_work,
913 &cdev->private->kick_work);
915 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
919 cdev->handler(cdev, cdev->private->intparm,
920 ERR_PTR(-ETIMEDOUT));
922 PREPARE_WORK(&cdev->private->kick_work,
923 ccw_device_nopath_notify, (void *)cdev);
924 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
925 } else if (cdev->private->flags.doverify)
926 /* Start delayed path verification. */
927 ccw_device_online_verify(cdev, 0);
931 ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
933 /* When the I/O has terminated, we have to start verification. */
934 if (cdev->private->options.pgroup)
935 cdev->private->flags.doverify = 1;
939 ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
944 case DEV_EVENT_INTERRUPT:
945 irb = (struct irb *) __LC_IRB;
946 /* Check for unsolicited interrupt. */
947 if ((irb->scsw.stctl ==
948 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
950 /* FIXME: we should restart stlck here, but this
951 * is extremely unlikely ... */
954 ccw_device_accumulate_irb(cdev, irb);
955 /* We don't care about basic sense etc. */
957 default: /* timeout */
961 wake_up(&cdev->private->wait_q);
965 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
967 struct subchannel *sch;
969 sch = to_subchannel(cdev->dev.parent);
970 if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
971 /* Couldn't enable the subchannel for i/o. Sick device. */
974 /* After 60s the device recognition is considered to have failed. */
975 ccw_device_set_timeout(cdev, 60*HZ);
977 cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
978 ccw_device_sense_id_start(cdev);
982 device_trigger_reprobe(struct subchannel *sch)
984 struct ccw_device *cdev;
987 if (!sch->dev.driver_data)
989 cdev = sch->dev.driver_data;
990 spin_lock_irqsave(&sch->lock, flags);
991 if (cdev->private->state != DEV_STATE_DISCONNECTED) {
992 spin_unlock_irqrestore(&sch->lock, flags);
995 /* Update some values. */
996 if (stsch(sch->irq, &sch->schib)) {
997 spin_unlock_irqrestore(&sch->lock, flags);
1001 * The pim, pam, pom values may not be accurate, but they are the best
1002 * we have before performing device selection :/
1004 sch->lpm = sch->schib.pmcw.pim &
1005 sch->schib.pmcw.pam &
1006 sch->schib.pmcw.pom &
1008 /* Re-set some bits in the pmcw that were lost. */
1009 sch->schib.pmcw.isc = 3;
1010 sch->schib.pmcw.csense = 1;
1011 sch->schib.pmcw.ena = 0;
1012 if ((sch->lpm & (sch->lpm - 1)) != 0)
1013 sch->schib.pmcw.mp = 1;
1014 sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
1015 /* We should also udate ssd info, but this has to wait. */
1016 ccw_device_start_id(cdev, 0);
1017 spin_unlock_irqrestore(&sch->lock, flags);
1021 ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
1023 struct subchannel *sch;
1025 sch = to_subchannel(cdev->dev.parent);
1027 * An interrupt in state offline means a previous disable was not
1028 * successful. Try again.
1030 cio_disable_subchannel(sch);
1034 ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
1036 retry_set_schib(cdev);
1037 cdev->private->state = DEV_STATE_ONLINE;
1038 dev_fsm_event(cdev, dev_event);
1043 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1045 ccw_device_set_timeout(cdev, 0);
1046 if (dev_event == DEV_EVENT_NOTOPER)
1047 cdev->private->state = DEV_STATE_NOT_OPER;
1049 cdev->private->state = DEV_STATE_OFFLINE;
1050 wake_up(&cdev->private->wait_q);
1054 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1058 ret = ccw_device_cancel_halt_clear(cdev);
1061 cdev->private->state = DEV_STATE_OFFLINE;
1062 wake_up(&cdev->private->wait_q);
1065 cdev->private->state = DEV_STATE_NOT_OPER;
1066 wake_up(&cdev->private->wait_q);
1069 ccw_device_set_timeout(cdev, HZ/10);
1074 * No operation action. This is used e.g. to ignore a timeout event in
1078 ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1083 * Bug operation action.
1086 ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
1088 printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
1089 cdev->private->state, dev_event);
1094 * device statemachine
1096 fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1097 [DEV_STATE_NOT_OPER] = {
1098 [DEV_EVENT_NOTOPER] = ccw_device_nop,
1099 [DEV_EVENT_INTERRUPT] = ccw_device_bug,
1100 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1101 [DEV_EVENT_VERIFY] = ccw_device_nop,
1103 [DEV_STATE_SENSE_PGID] = {
1104 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1105 [DEV_EVENT_INTERRUPT] = ccw_device_sense_pgid_irq,
1106 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
1107 [DEV_EVENT_VERIFY] = ccw_device_nop,
1109 [DEV_STATE_SENSE_ID] = {
1110 [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper,
1111 [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq,
1112 [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout,
1113 [DEV_EVENT_VERIFY] = ccw_device_nop,
1115 [DEV_STATE_OFFLINE] = {
1116 [DEV_EVENT_NOTOPER] = ccw_device_offline_notoper,
1117 [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
1118 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1119 [DEV_EVENT_VERIFY] = ccw_device_nop,
1121 [DEV_STATE_VERIFY] = {
1122 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1123 [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq,
1124 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
1125 [DEV_EVENT_VERIFY] = ccw_device_nop,
1127 [DEV_STATE_ONLINE] = {
1128 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1129 [DEV_EVENT_INTERRUPT] = ccw_device_irq,
1130 [DEV_EVENT_TIMEOUT] = ccw_device_online_timeout,
1131 [DEV_EVENT_VERIFY] = ccw_device_online_verify,
1133 [DEV_STATE_W4SENSE] = {
1134 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1135 [DEV_EVENT_INTERRUPT] = ccw_device_w4sense,
1136 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1137 [DEV_EVENT_VERIFY] = ccw_device_online_verify,
1139 [DEV_STATE_DISBAND_PGID] = {
1140 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1141 [DEV_EVENT_INTERRUPT] = ccw_device_disband_irq,
1142 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
1143 [DEV_EVENT_VERIFY] = ccw_device_nop,
1145 [DEV_STATE_BOXED] = {
1146 [DEV_EVENT_NOTOPER] = ccw_device_offline_notoper,
1147 [DEV_EVENT_INTERRUPT] = ccw_device_stlck_done,
1148 [DEV_EVENT_TIMEOUT] = ccw_device_stlck_done,
1149 [DEV_EVENT_VERIFY] = ccw_device_nop,
1151 /* states to wait for i/o completion before doing something */
1152 [DEV_STATE_CLEAR_VERIFY] = {
1153 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1154 [DEV_EVENT_INTERRUPT] = ccw_device_clear_verify,
1155 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1156 [DEV_EVENT_VERIFY] = ccw_device_nop,
1158 [DEV_STATE_TIMEOUT_KILL] = {
1159 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1160 [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
1161 [DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout,
1162 [DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME
1164 [DEV_STATE_WAIT4IO] = {
1165 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1166 [DEV_EVENT_INTERRUPT] = ccw_device_wait4io_irq,
1167 [DEV_EVENT_TIMEOUT] = ccw_device_wait4io_timeout,
1168 [DEV_EVENT_VERIFY] = ccw_device_wait4io_verify,
1170 [DEV_STATE_QUIESCE] = {
1171 [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
1172 [DEV_EVENT_INTERRUPT] = ccw_device_quiesce_done,
1173 [DEV_EVENT_TIMEOUT] = ccw_device_quiesce_timeout,
1174 [DEV_EVENT_VERIFY] = ccw_device_nop,
1176 /* special states for devices gone not operational */
1177 [DEV_STATE_DISCONNECTED] = {
1178 [DEV_EVENT_NOTOPER] = ccw_device_nop,
1179 [DEV_EVENT_INTERRUPT] = ccw_device_start_id,
1180 [DEV_EVENT_TIMEOUT] = ccw_device_bug,
1181 [DEV_EVENT_VERIFY] = ccw_device_nop,
1183 [DEV_STATE_DISCONNECTED_SENSE_ID] = {
1184 [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper,
1185 [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq,
1186 [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout,
1187 [DEV_EVENT_VERIFY] = ccw_device_nop,
1189 [DEV_STATE_CMFCHANGE] = {
1190 [DEV_EVENT_NOTOPER] = ccw_device_change_cmfstate,
1191 [DEV_EVENT_INTERRUPT] = ccw_device_change_cmfstate,
1192 [DEV_EVENT_TIMEOUT] = ccw_device_change_cmfstate,
1193 [DEV_EVENT_VERIFY] = ccw_device_change_cmfstate,
1198 * io_subchannel_irq is called for "real" interrupts or for status
1199 * pending conditions on msch.
1202 io_subchannel_irq (struct device *pdev)
1204 struct ccw_device *cdev;
1206 cdev = to_subchannel(pdev)->dev.driver_data;
1208 CIO_TRACE_EVENT (3, "IRQ");
1209 CIO_TRACE_EVENT (3, pdev->bus_id);
1211 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1214 EXPORT_SYMBOL_GPL(ccw_device_set_timeout);