X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fs390%2Fcio%2Fdevice_fsm.c;h=eed14572fc3b01e3445ee30a46e30bae6a15a006;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=3d5c0a58be3fe686664e7a586d8445c11d883a54;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 3d5c0a58b..eed14572f 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -4,16 +4,17 @@
  *
  * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
  *                       IBM Corporation
- *    Author(s): Cornelia Huck(cohuck@de.ibm.com)
+ *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
  */
 
 #include 
-#include 
 #include 
+#include 
+#include 
 
 #include 
-#include 
+#include 
 
 #include "cio.h"
 #include "cio_debug.h"
@@ -21,7 +22,17 @@
 #include "device.h"
 #include "chsc.h"
 #include "ioasm.h"
-#include "qdio.h"
+
+int
+device_is_online(struct subchannel *sch)
+{
+    struct ccw_device *cdev;
+
+    if (!sch->dev.driver_data)
+        return 0;
+    cdev = sch->dev.driver_data;
+    return (cdev->private->state == DEV_STATE_ONLINE);
+}
 
 int
 device_is_disconnected(struct subchannel *sch)
@@ -44,19 +55,29 @@ device_set_disconnected(struct subchannel *sch)
         return;
     cdev = sch->dev.driver_data;
     ccw_device_set_timeout(cdev, 0);
+    cdev->private->flags.fake_irb = 0;
     cdev->private->state = DEV_STATE_DISCONNECTED;
 }
 
-void
-device_set_waiting(struct subchannel *sch)
+void device_set_intretry(struct subchannel *sch)
 {
     struct ccw_device *cdev;
 
-    if (!sch->dev.driver_data)
+    cdev = sch->dev.driver_data;
+    if (!cdev)
         return;
+    cdev->private->flags.intretry = 1;
+}
+
+int device_trigger_verify(struct subchannel *sch)
+{
+    struct ccw_device *cdev;
+
     cdev = sch->dev.driver_data;
-    ccw_device_set_timeout(cdev, 10*HZ);
-    cdev->private->state = DEV_STATE_WAIT4IO;
+    if (!cdev || !cdev->online)
+        return -EINVAL;
+    dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+    return 0;
 }
 
 /*
@@ -93,6 +114,18 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires)
     add_timer(&cdev->private->timer);
 }
 
+/* Kill any pending timers after machine check. */
+void
+device_kill_pending_timer(struct subchannel *sch)
+{
+    struct ccw_device *cdev;
+
+    if (!sch->dev.driver_data)
+        return;
+    cdev = sch->dev.driver_data;
+    ccw_device_set_timeout(cdev, 0);
+}
+
 /*
  * Cancel running i/o. This is called repeatedly since halt/clear are
  * asynchronous operations. We do one try with cio_cancel, two tries
@@ -108,7 +141,7 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
     int ret;
 
     sch = to_subchannel(cdev->dev.parent);
-    ret = stsch(sch->irq, &sch->schib);
+    ret = stsch(sch->schid, &sch->schib);
     if (ret || !sch->schib.pmcw.dnv)
         return -ENODEV;
     if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
@@ -128,7 +161,8 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
     if (cdev->private->iretry) {
         cdev->private->iretry--;
         ret = cio_halt(sch);
-        return (ret == 0) ? -EBUSY : ret;
+        if (ret != -EBUSY)
+            return (ret == 0) ? -EBUSY : ret;
     }
     /* halt io unsuccessful. */
     cdev->private->iretry = 255;    /* 255 clear retries. */
@@ -142,7 +176,7 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
     panic("Can't stop i/o on subchannel.\n");
 }
 
-static void
+static int
 ccw_device_handle_oper(struct ccw_device *cdev)
 {
     struct subchannel *sch;
@@ -159,13 +193,12 @@ ccw_device_handle_oper(struct ccw_device *cdev)
     cdev->id.dev_type != cdev->private->senseid.dev_type ||
     cdev->id.dev_model != cdev->private->senseid.dev_model) {
         PREPARE_WORK(&cdev->private->kick_work,
-                     ccw_device_do_unreg_rereg, (void *)&cdev->dev);
+                     ccw_device_do_unreg_rereg);
         queue_work(ccw_device_work, &cdev->private->kick_work);
-        return;
+        return 0;
     }
     cdev->private->flags.donotify = 1;
-    /* Get device online again. */
-    ccw_device_online(cdev);
+    return 1;
 }
 
 /*
@@ -195,7 +228,7 @@
 static void
 ccw_device_recog_done(struct ccw_device *cdev, int state)
 {
     struct subchannel *sch;
-    int notify, old_lpm;
+    int notify, old_lpm, same_dev;
 
     sch = to_subchannel(cdev->dev.parent);
 
@@ -206,11 +239,11 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
      * through ssch() and the path information is up to date.
      */
     old_lpm = sch->lpm;
-    stsch(sch->irq, &sch->schib);
-    sch->lpm = sch->schib.pmcw.pim &
-        sch->schib.pmcw.pam &
-        sch->schib.pmcw.pom &
-        sch->opm;
+    stsch(sch->schid, &sch->schib);
+    sch->lpm = sch->schib.pmcw.pam & sch->opm;
+    /* Check since device may again have become not operational. */
+    if (!sch->schib.pmcw.dnv)
+        state = DEV_STATE_NOT_OPER;
     if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
         /* Force reprobe on all chpids. */
         old_lpm = 0;
@@ -225,40 +258,52 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
         /* Boxed devices don't need extra treatment. */
     }
     notify = 0;
+    same_dev = 0; /* Keep the compiler quiet... */
     switch (state) {
     case DEV_STATE_NOT_OPER:
         CIO_DEBUG(KERN_WARNING, 2,
-                  "SenseID : unknown device %04x on subchannel %04x\n",
-                  cdev->private->devno, sch->irq);
+                  "SenseID : unknown device %04x on subchannel "
+                  "0.%x.%04x\n", cdev->private->dev_id.devno,
+                  sch->schid.ssid, sch->schid.sch_no);
         break;
     case DEV_STATE_OFFLINE:
-        if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
+        if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
+            same_dev = ccw_device_handle_oper(cdev);
             notify = 1;
-        else /* fill out sense information */
-            cdev->id = (struct ccw_device_id) {
-                .cu_type = cdev->private->senseid.cu_type,
-                .cu_model = cdev->private->senseid.cu_model,
-                .dev_type = cdev->private->senseid.dev_type,
-                .dev_model = cdev->private->senseid.dev_model,
-            };
+        }
+        /* fill out sense information */
+        memset(&cdev->id, 0, sizeof(cdev->id));
+        cdev->id.cu_type = cdev->private->senseid.cu_type;
+        cdev->id.cu_model = cdev->private->senseid.cu_model;
+        cdev->id.dev_type = cdev->private->senseid.dev_type;
+        cdev->id.dev_model = cdev->private->senseid.dev_model;
+        if (notify) {
+            cdev->private->state = DEV_STATE_OFFLINE;
+            if (same_dev) {
+                /* Get device online again. */
+                ccw_device_online(cdev);
+                wake_up(&cdev->private->wait_q);
+            }
+            return;
+        }
         /* Issue device info message. */
-        CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
+        CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
                   "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
-                  "%04X/%02X\n", cdev->private->devno,
+                  "%04X/%02X\n",
+                  cdev->private->dev_id.ssid,
+                  cdev->private->dev_id.devno,
                   cdev->id.cu_type, cdev->id.cu_model,
                   cdev->id.dev_type, cdev->id.dev_model);
         break;
     case DEV_STATE_BOXED:
         CIO_DEBUG(KERN_WARNING, 2,
-                  "SenseID : boxed device %04x on subchannel %04x\n",
-                  cdev->private->devno, sch->irq);
+                  "SenseID : boxed device %04x on subchannel "
+                  "0.%x.%04x\n", cdev->private->dev_id.devno,
+                  sch->schid.ssid, sch->schid.sch_no);
         break;
     }
     cdev->private->state = state;
-    if (notify && state == DEV_STATE_OFFLINE)
-        ccw_device_handle_oper(cdev);
-    else
-        io_subchannel_recog_done(cdev);
+    io_subchannel_recog_done(cdev);
     if (state != DEV_STATE_NOT_OPER)
         wake_up(&cdev->private->wait_q);
 }
@@ -283,21 +328,26 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
 }
 
 static void
-ccw_device_oper_notify(void *data)
+ccw_device_oper_notify(struct work_struct *work)
 {
+    struct ccw_device_private *priv;
     struct ccw_device *cdev;
     struct subchannel *sch;
     int ret;
 
-    cdev = (struct ccw_device *)data;
+    priv = container_of(work, struct ccw_device_private, kick_work);
+    cdev = priv->cdev;
     sch = to_subchannel(cdev->dev.parent);
     ret = (sch->driver && sch->driver->notify) ?
         sch->driver->notify(&sch->dev, CIO_OPER) : 0;
     if (!ret)
         /* Driver doesn't want device back. */
-        ccw_device_do_unreg_rereg((void *)&cdev->dev);
-    else
+        ccw_device_do_unreg_rereg(work);
+    else {
+        /* Reenable channel measurements, if needed. */
+        cmf_reenable(cdev);
         wake_up(&cdev->private->wait_q);
+    }
 }
 
 /*
@@ -310,6 +360,8 @@ ccw_device_done(struct ccw_device *cdev, int state)
 
     sch = to_subchannel(cdev->dev.parent);
 
+    ccw_device_set_timeout(cdev, 0);
+
     if (state != DEV_STATE_ONLINE)
         cio_disable_subchannel(sch);
 
@@ -322,12 +374,11 @@ ccw_device_done(struct ccw_device *cdev, int state)
     if (state == DEV_STATE_BOXED)
         CIO_DEBUG(KERN_WARNING, 2,
                   "Boxed device %04x on subchannel %04x\n",
-                  cdev->private->devno, sch->irq);
+                  cdev->private->dev_id.devno, sch->schid.sch_no);
 
     if (cdev->private->flags.donotify) {
         cdev->private->flags.donotify = 0;
-        PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
-                     (void *)cdev);
+        PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
         queue_work(ccw_device_notify_work, &cdev->private->kick_work);
     }
     wake_up(&cdev->private->wait_q);
@@ -336,6 +387,57 @@ ccw_device_done(struct ccw_device *cdev, int state)
         put_device (&cdev->dev);
 }
 
+static inline int cmp_pgid(struct pgid *p1, struct pgid *p2)
+{
+    char *c1;
+    char *c2;
+
+    c1 = (char *)p1;
+    c2 = (char *)p2;
+
+    return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
+}
+
+static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
+{
+    int i;
+    int last;
+
+    last = 0;
+    for (i = 0; i < 8; i++) {
+        if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
+            /* No PGID yet */
+            continue;
+        if (cdev->private->pgid[last].inf.ps.state1 ==
+            SNID_STATE1_RESET) {
+            /* First non-zero PGID */
+            last = i;
+            continue;
+        }
+        if (cmp_pgid(&cdev->private->pgid[i],
+                     &cdev->private->pgid[last]) == 0)
+            /* Non-conflicting PGIDs */
+            continue;
+
+        /* PGID mismatch, can't pathgroup. */
+        CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
+                      "0.%x.%04x, can't pathgroup\n",
+                      cdev->private->dev_id.ssid,
+                      cdev->private->dev_id.devno);
+        cdev->private->options.pgroup = 0;
+        return;
+    }
+    if (cdev->private->pgid[last].inf.ps.state1 ==
+        SNID_STATE1_RESET)
+        /* No previous pgid found */
+        memcpy(&cdev->private->pgid[0], &css[0]->global_pgid,
+               sizeof(struct pgid));
+    else
+        /* Use existing pgid */
+        memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
+               sizeof(struct pgid));
+}
+
 /*
  * Function called from device_pgid.c after sense path ground has completed.
  */
@@ -346,24 +448,26 @@ ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
 
     sch = to_subchannel(cdev->dev.parent);
     switch (err) {
-    case 0:
-        /* Start Path Group verification. */
-        sch->vpm = 0; /* Start with no path groups set. */
-        cdev->private->state = DEV_STATE_VERIFY;
-        ccw_device_verify_start(cdev);
+    case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
+        cdev->private->options.pgroup = 0;
+        break;
+    case 0: /* success */
+    case -EACCES: /* partial success, some paths not operational */
+        /* Check if all pgids are equal or 0. */
+        __ccw_device_get_common_pgid(cdev);
         break;
     case -ETIME:    /* Sense path group id stopped by timeout. */
     case -EUSERS:   /* device is reserved for someone else. */
         ccw_device_done(cdev, DEV_STATE_BOXED);
-        break;
-    case -EOPNOTSUPP: /* path grouping not supported, just set online. */
-        cdev->private->options.pgroup = 0;
-        ccw_device_done(cdev, DEV_STATE_ONLINE);
-        break;
+        return;
     default:
         ccw_device_done(cdev, DEV_STATE_NOT_OPER);
-        break;
+        return;
     }
+    /* Start Path Group verification. */
+    cdev->private->state = DEV_STATE_VERIFY;
+    cdev->private->flags.doverify = 0;
+    ccw_device_verify_start(cdev);
 }
 
 /*
@@ -424,13 +528,15 @@ ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 
 static void
-ccw_device_nopath_notify(void *data)
+ccw_device_nopath_notify(struct work_struct *work)
 {
+    struct ccw_device_private *priv;
     struct ccw_device *cdev;
     struct subchannel *sch;
     int ret;
 
-    cdev = (struct ccw_device *)data;
+    priv = container_of(work, struct ccw_device_private, kick_work);
+    cdev = priv->cdev;
     sch = to_subchannel(cdev->dev.parent);
     /* Extra sanity. */
     if (sch->lpm)
@@ -443,48 +549,66 @@ ccw_device_nopath_notify(void *data)
             cio_disable_subchannel(sch);
             if (get_device(&cdev->dev)) {
                 PREPARE_WORK(&cdev->private->kick_work,
-                             ccw_device_call_sch_unregister,
-                             (void *)cdev);
+                             ccw_device_call_sch_unregister);
                 queue_work(ccw_device_work, &cdev->private->kick_work);
-            }
+            } else
+                put_device(&sch->dev);
         }
     } else {
         cio_disable_subchannel(sch);
         ccw_device_set_timeout(cdev, 0);
+        cdev->private->flags.fake_irb = 0;
         cdev->private->state = DEV_STATE_DISCONNECTED;
         wake_up(&cdev->private->wait_q);
     }
 }
 
 void
-device_call_nopath_notify(struct subchannel *sch)
+ccw_device_verify_done(struct ccw_device *cdev, int err)
 {
-    struct ccw_device *cdev;
+    struct subchannel *sch;
 
-    if (!sch->dev.driver_data)
+    sch = to_subchannel(cdev->dev.parent);
+    /* Update schib - pom may have changed. */
+    stsch(sch->schid, &sch->schib);
+    /* Update lpm with verified path mask. */
+    sch->lpm = sch->vpm;
+    /* Repeat path verification? */
+    if (cdev->private->flags.doverify) {
+        cdev->private->flags.doverify = 0;
+        ccw_device_verify_start(cdev);
         return;
-    cdev = sch->dev.driver_data;
-    PREPARE_WORK(&cdev->private->kick_work,
-                 ccw_device_nopath_notify, (void *)cdev);
-    queue_work(ccw_device_notify_work, &cdev->private->kick_work);
-}
-
-
-void
-ccw_device_verify_done(struct ccw_device *cdev, int err)
-{
-    cdev->private->flags.doverify = 0;
+    }
     switch (err) {
+    case -EOPNOTSUPP: /* path grouping not supported, just set online. */
+        cdev->private->options.pgroup = 0;
     case 0:
         ccw_device_done(cdev, DEV_STATE_ONLINE);
+        /* Deliver fake irb to device driver, if needed. */
+        if (cdev->private->flags.fake_irb) {
+            memset(&cdev->private->irb, 0, sizeof(struct irb));
+            cdev->private->irb.scsw.cc = 1;
+            cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC;
+            cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND;
+            cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND;
+            cdev->private->flags.fake_irb = 0;
+            if (cdev->handler)
+                cdev->handler(cdev, cdev->private->intparm,
+                              &cdev->private->irb);
+            memset(&cdev->private->irb, 0, sizeof(struct irb));
+        }
         break;
     case -ETIME:
+        /* Reset oper notify indication after verify error. */
+        cdev->private->flags.donotify = 0;
         ccw_device_done(cdev, DEV_STATE_BOXED);
         break;
     default:
+        /* Reset oper notify indication after verify error. */
+        cdev->private->flags.donotify = 0;
         PREPARE_WORK(&cdev->private->kick_work,
-                     ccw_device_nopath_notify, (void *)cdev);
+                     ccw_device_nopath_notify);
         queue_work(ccw_device_notify_work, &cdev->private->kick_work);
         ccw_device_done(cdev, DEV_STATE_NOT_OPER);
         break;
@@ -515,8 +639,10 @@ ccw_device_online(struct ccw_device *cdev)
     }
     /* Do we want to do path grouping? */
     if (!cdev->private->options.pgroup) {
-        /* No, set state online immediately. */
-        ccw_device_done(cdev, DEV_STATE_ONLINE);
+        /* Start initial path verification. */
+        cdev->private->state = DEV_STATE_VERIFY;
+        cdev->private->flags.doverify = 0;
+        ccw_device_verify_start(cdev);
         return 0;
     }
     /* Do a SensePGID first. */
@@ -549,7 +675,13 @@ ccw_device_offline(struct ccw_device *cdev)
 {
     struct subchannel *sch;
 
+    if (ccw_device_is_orphan(cdev)) {
+        ccw_device_done(cdev, DEV_STATE_OFFLINE);
+        return 0;
+    }
     sch = to_subchannel(cdev->dev.parent);
+    if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
+        return -ENODEV;
     if (cdev->private->state != DEV_STATE_ONLINE) {
         if (sch->schib.scsw.actl != 0)
             return -EBUSY;
@@ -609,9 +741,11 @@ ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
 
     cdev->private->state = DEV_STATE_NOT_OPER;
     sch = to_subchannel(cdev->dev.parent);
-    device_unregister(&sch->dev);
-    sch->schib.pmcw.intparm = 0;
-    cio_modify(sch);
+    if (get_device(&cdev->dev)) {
+        PREPARE_WORK(&cdev->private->kick_work,
+                     ccw_device_call_sch_unregister);
+        queue_work(ccw_device_work, &cdev->private->kick_work);
+    }
     wake_up(&cdev->private->wait_q);
 }
 
@@ -627,6 +761,7 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
     if (sch->driver->notify &&
         sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
         ccw_device_set_timeout(cdev, 0);
+        cdev->private->flags.fake_irb = 0;
         cdev->private->state = DEV_STATE_DISCONNECTED;
         wake_up(&cdev->private->wait_q);
         return;
@@ -637,9 +772,11 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
         // FIXME: not-oper indication to device driver ?
         ccw_device_call_handler(cdev);
     }
-    device_unregister(&sch->dev);
-    sch->schib.pmcw.intparm = 0;
-    cio_modify(sch);
+    if (get_device(&cdev->dev)) {
+        PREPARE_WORK(&cdev->private->kick_work,
+                     ccw_device_call_sch_unregister);
+        queue_work(ccw_device_work, &cdev->private->kick_work);
+    }
     wake_up(&cdev->private->wait_q);
 }
 
@@ -651,14 +788,19 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
 {
     struct subchannel *sch;
 
-    if (!cdev->private->options.pgroup)
-        return;
     if (cdev->private->state == DEV_STATE_W4SENSE) {
         cdev->private->flags.doverify = 1;
         return;
     }
     sch = to_subchannel(cdev->dev.parent);
+    /*
+     * Since we might not just be coming from an interrupt from the
+     * subchannel we have to update the schib.
+     */
+    stsch(sch->schid, &sch->schib);
+
     if (sch->schib.scsw.actl != 0 ||
+        (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) ||
         (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
         /*
          * No final status yet or final status not yet delivered
@@ -670,6 +812,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
     }
     /* Device is idle, we can do the path verification. */
     cdev->private->state = DEV_STATE_VERIFY;
+    cdev->private->flags.doverify = 0;
     ccw_device_verify_start(cdev);
 }
 
@@ -683,8 +826,20 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
 
     irb = (struct irb *) __LC_IRB;
     /* Check for unsolicited interrupt. */
-    if (irb->scsw.stctl ==
-        (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
+    if ((irb->scsw.stctl ==
+         (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
+        && (!irb->scsw.cc)) {
+        if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
+            !irb->esw.esw0.erw.cons) {
+            /* Unit check but no sense data. Need basic sense. */
+            if (ccw_device_do_sense(cdev, irb) != 0)
+                goto call_handler_unsol;
+            memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+            cdev->private->state = DEV_STATE_W4SENSE;
+            cdev->private->intparm = 0;
+            return;
+        }
+call_handler_unsol:
         if (cdev->handler)
             cdev->handler (cdev, 0, irb);
         return;
@@ -724,7 +879,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
     sch = to_subchannel(cdev->dev.parent);
     if (!sch->lpm) {
         PREPARE_WORK(&cdev->private->kick_work,
-                     ccw_device_nopath_notify, (void *)cdev);
+                     ccw_device_nopath_notify);
         queue_work(ccw_device_notify_work, &cdev->private->kick_work);
     } else
@@ -746,13 +901,35 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
     /* Check for unsolicited interrupt. */
     if (irb->scsw.stctl ==
         (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
-        if (cdev->handler)
-            cdev->handler (cdev, 0, irb);
         if (irb->scsw.cc == 1)
             /* Basic sense hasn't started. Try again. */
             ccw_device_do_sense(cdev, irb);
+        else {
+            printk(KERN_INFO "Huh? %s(%s): unsolicited "
+                   "interrupt...\n",
+                   __FUNCTION__, cdev->dev.bus_id);
+            if (cdev->handler)
+                cdev->handler (cdev, 0, irb);
+        }
         return;
     }
+    /*
+     * Check if a halt or clear has been issued in the meanwhile. If yes,
+     * only deliver the halt/clear interrupt to the device driver as if it
+     * had killed the original request.
+     */
+    if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
+        /* Retry Basic Sense if requested. */
+        if (cdev->private->flags.intretry) {
+            cdev->private->flags.intretry = 0;
+            ccw_device_do_sense(cdev, irb);
+            return;
+        }
+        cdev->private->flags.dosense = 0;
+        memset(&cdev->private->irb, 0, sizeof(struct irb));
+        ccw_device_accumulate_irb(cdev, irb);
+        goto call_handler;
+    }
     /* Add basic sense info to irb. */
     ccw_device_accumulate_basic_sense(cdev, irb);
     if (cdev->private->flags.dosense) {
         ccw_device_do_sense(cdev, irb);
         return;
     }
+call_handler:
     cdev->private->state = DEV_STATE_ONLINE;
     /* Call the handler. */
     if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
@@ -773,15 +951,10 @@ ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
     struct irb *irb;
 
     irb = (struct irb *) __LC_IRB;
-    /* Check for unsolicited interrupt. */
-    if (irb->scsw.stctl ==
-        (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
-        if (cdev->handler)
-            cdev->handler (cdev, 0, irb);
-        return;
-    }
     /* Accumulate status. We don't do basic sense. */
     ccw_device_accumulate_irb(cdev, irb);
+    /* Remember to clear irb to avoid residuals. */
+    memset(&cdev->private->irb, 0, sizeof(struct irb));
     /* Try to start delayed device verification. */
     ccw_device_online_verify(cdev, 0);
     /* Note: Don't call handler for cio initiated clear! */
@@ -798,10 +971,10 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
     cdev->private->state = DEV_STATE_ONLINE;
     if (cdev->handler)
         cdev->handler(cdev, cdev->private->intparm,
-                      ERR_PTR(-ETIMEDOUT));
+                      ERR_PTR(-EIO));
     if (!sch->lpm) {
         PREPARE_WORK(&cdev->private->kick_work,
-                     ccw_device_nopath_notify, (void *)cdev);
+                     ccw_device_nopath_notify);
         queue_work(ccw_device_notify_work, &cdev->private->kick_work);
     } else if (cdev->private->flags.doverify)
         /* Start delayed path verification. */
@@ -824,7 +997,7 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
     sch = to_subchannel(cdev->dev.parent);
     if (!sch->lpm) {
         PREPARE_WORK(&cdev->private->kick_work,
-                     ccw_device_nopath_notify, (void *)cdev);
+                     ccw_device_nopath_notify);
         queue_work(ccw_device_notify_work, &cdev->private->kick_work);
     } else
@@ -835,60 +1008,15 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
     cdev->private->state = DEV_STATE_ONLINE;
     if (cdev->handler)
         cdev->handler(cdev, cdev->private->intparm,
-                      ERR_PTR(-ETIMEDOUT));
-}
-
-static void
-ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
-{
-    struct irb *irb;
-    struct subchannel *sch;
-
-    irb = (struct irb *) __LC_IRB;
-    /* Check for unsolicited interrupt. */
-    if (irb->scsw.stctl ==
-        (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
-        if (cdev->handler)
-            cdev->handler (cdev, 0, irb);
-        if (irb->scsw.cc == 1)
-            goto call_handler;
-        return;
-    }
-    /*
-     * Accumulate status and find out if a basic sense is needed.
-     * This is fine since we have already adapted the lpm.
-     */
-    ccw_device_accumulate_irb(cdev, irb);
-    if (cdev->private->flags.dosense) {
-        if (ccw_device_do_sense(cdev, irb) == 0) {
-            cdev->private->state = DEV_STATE_W4SENSE;
-        }
-        return;
-    }
-call_handler:
-    /* Iff device is idle, reset timeout. */
-    sch = to_subchannel(cdev->dev.parent);
-    if (!stsch(sch->irq, &sch->schib))
-        if (sch->schib.scsw.actl == 0)
-            ccw_device_set_timeout(cdev, 0);
-    /* Call the handler. */
-    ccw_device_call_handler(cdev);
-    if (!sch->lpm) {
-        PREPARE_WORK(&cdev->private->kick_work,
-                     ccw_device_nopath_notify, (void *)cdev);
-        queue_work(ccw_device_notify_work, &cdev->private->kick_work);
-    } else if (cdev->private->flags.doverify)
-        ccw_device_online_verify(cdev, 0);
+                      ERR_PTR(-EIO));
 }
 
-static void
-ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+void device_kill_io(struct subchannel *sch)
 {
     int ret;
-    struct subchannel *sch;
+    struct ccw_device *cdev;
 
-    sch = to_subchannel(cdev->dev.parent);
-    ccw_device_set_timeout(cdev, 0);
+    cdev = sch->dev.driver_data;
     ret = ccw_device_cancel_halt_clear(cdev);
     if (ret == -EBUSY) {
         ccw_device_set_timeout(cdev, 3*HZ);
@@ -898,7 +1026,7 @@ ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
     if (ret == -ENODEV) {
         if (!sch->lpm) {
             PREPARE_WORK(&cdev->private->kick_work,
-                         ccw_device_nopath_notify, (void *)cdev);
+                         ccw_device_nopath_notify);
             queue_work(ccw_device_notify_work, &cdev->private->kick_work);
         } else
@@ -907,22 +1035,21 @@ ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
     }
     if (cdev->handler)
         cdev->handler(cdev, cdev->private->intparm,
-                      ERR_PTR(-ETIMEDOUT));
+                      ERR_PTR(-EIO));
     if (!sch->lpm) {
         PREPARE_WORK(&cdev->private->kick_work,
-                     ccw_device_nopath_notify, (void *)cdev);
+                     ccw_device_nopath_notify);
         queue_work(ccw_device_notify_work, &cdev->private->kick_work);
-    } else if (cdev->private->flags.doverify)
+    } else
         /* Start delayed path verification. */
         ccw_device_online_verify(cdev, 0);
 }
 
 static void
-ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
+ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
 {
-    /* When the I/O has terminated, we have to start verification. */
-    if (cdev->private->options.pgroup)
-        cdev->private->flags.doverify = 1;
+    /* Start verification after current task finished. */
+    cdev->private->flags.doverify = 1;
 }
 
 static void
@@ -934,8 +1061,9 @@ ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
     case DEV_EVENT_INTERRUPT:
         irb = (struct irb *) __LC_IRB;
         /* Check for unsolicited interrupt. */
-        if (irb->scsw.stctl ==
-            (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
+        if ((irb->scsw.stctl ==
+             (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
+            (!irb->scsw.cc))
             /* FIXME: we should restart stlck here, but this
              * is extremely unlikely ... */
             goto out_wakeup;
@@ -971,29 +1099,23 @@
 void
 device_trigger_reprobe(struct subchannel *sch)
 {
     struct ccw_device *cdev;
-    unsigned long flags;
 
     if (!sch->dev.driver_data)
         return;
     cdev = sch->dev.driver_data;
-    spin_lock_irqsave(&sch->lock, flags);
-    if (cdev->private->state != DEV_STATE_DISCONNECTED) {
-        spin_unlock_irqrestore(&sch->lock, flags);
+    if (cdev->private->state != DEV_STATE_DISCONNECTED)
         return;
-    }
+
     /* Update some values. */
-    if (stsch(sch->irq, &sch->schib)) {
-        spin_unlock_irqrestore(&sch->lock, flags);
+    if (stsch(sch->schid, &sch->schib))
+        return;
+    if (!sch->schib.pmcw.dnv)
         return;
-    }
     /*
     * The pim, pam, pom values may not be accurate, but they are the best
     * we have before performing device selection :/
     */
-    sch->lpm = sch->schib.pmcw.pim &
-        sch->schib.pmcw.pam &
-        sch->schib.pmcw.pom &
-        sch->opm;
+    sch->lpm = sch->schib.pmcw.pam & sch->opm;
     /* Re-set some bits in the pmcw that were lost. */
     sch->schib.pmcw.isc = 3;
     sch->schib.pmcw.csense = 1;
@@ -1001,8 +1123,14 @@ device_trigger_reprobe(struct subchannel *sch)
     if ((sch->lpm & (sch->lpm - 1)) != 0)
         sch->schib.pmcw.mp = 1;
     sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
-    ccw_device_start_id(cdev, 0);
-    spin_unlock_irqrestore(&sch->lock, flags);
+    /* We should also udate ssd info, but this has to wait. */
+    /* Check if this is another device which appeared on the same sch. */
+    if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+        PREPARE_WORK(&cdev->private->kick_work,
+                     ccw_device_move_to_orphanage);
+        queue_work(ccw_device_work, &cdev->private->kick_work);
+    } else
+        ccw_device_start_id(cdev, 0);
 }
 
 static void
@@ -1026,6 +1154,13 @@ ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
     dev_fsm_event(cdev, dev_event);
 }
 
+static void ccw_device_update_cmfblock(struct ccw_device *cdev,
+                                       enum dev_event dev_event)
+{
+    cmf_retry_copy_block(cdev);
+    cdev->private->state = DEV_STATE_ONLINE;
+    dev_fsm_event(cdev, dev_event);
+}
 
 static void
 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
@@ -1082,103 +1217,103 @@ ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
  * device statemachine
  */
 fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
-    [DEV_STATE_NOT_OPER] {
-        [DEV_EVENT_NOTOPER]     ccw_device_nop,
-        [DEV_EVENT_INTERRUPT]   ccw_device_bug,
-        [DEV_EVENT_TIMEOUT]     ccw_device_nop,
-        [DEV_EVENT_VERIFY]      ccw_device_nop,
+    [DEV_STATE_NOT_OPER] = {
+        [DEV_EVENT_NOTOPER]     = ccw_device_nop,
+        [DEV_EVENT_INTERRUPT]   = ccw_device_bug,
+        [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
+        [DEV_EVENT_VERIFY]      = ccw_device_nop,
     },
-    [DEV_STATE_SENSE_PGID] {
-        [DEV_EVENT_NOTOPER]     ccw_device_online_notoper,
-        [DEV_EVENT_INTERRUPT]   ccw_device_sense_pgid_irq,
-        [DEV_EVENT_TIMEOUT]     ccw_device_onoff_timeout,
-        [DEV_EVENT_VERIFY]      ccw_device_nop,
+    [DEV_STATE_SENSE_PGID] = {
+        [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
+        [DEV_EVENT_INTERRUPT]   = ccw_device_sense_pgid_irq,
+        [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
+        [DEV_EVENT_VERIFY]      = ccw_device_nop,
    },
-    [DEV_STATE_SENSE_ID] {
-        [DEV_EVENT_NOTOPER]     ccw_device_recog_notoper,
-        [DEV_EVENT_INTERRUPT]   ccw_device_sense_id_irq,
-        [DEV_EVENT_TIMEOUT]     ccw_device_recog_timeout,
-        [DEV_EVENT_VERIFY]      ccw_device_nop,
+    [DEV_STATE_SENSE_ID] = {
+        [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
+        [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
+        [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
+        [DEV_EVENT_VERIFY]      = ccw_device_nop,
    },
-    [DEV_STATE_OFFLINE] {
-        [DEV_EVENT_NOTOPER]     ccw_device_offline_notoper,
-        [DEV_EVENT_INTERRUPT]   ccw_device_offline_irq,
-        [DEV_EVENT_TIMEOUT]     ccw_device_nop,
-        [DEV_EVENT_VERIFY]      ccw_device_nop,
+    [DEV_STATE_OFFLINE] = {
+        [DEV_EVENT_NOTOPER]     = ccw_device_offline_notoper,
+        [DEV_EVENT_INTERRUPT]   = ccw_device_offline_irq,
+        [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
+        [DEV_EVENT_VERIFY]      = ccw_device_nop,
    },
-    [DEV_STATE_VERIFY] {
-        [DEV_EVENT_NOTOPER]     ccw_device_online_notoper,
-        [DEV_EVENT_INTERRUPT]   ccw_device_verify_irq,
-        [DEV_EVENT_TIMEOUT]     ccw_device_onoff_timeout,
-        [DEV_EVENT_VERIFY]      ccw_device_nop,
+    [DEV_STATE_VERIFY] = {
+        [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
+        [DEV_EVENT_INTERRUPT]   = ccw_device_verify_irq,
+        [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
+        [DEV_EVENT_VERIFY]      = ccw_device_delay_verify,
    },
-    [DEV_STATE_ONLINE] {
-        [DEV_EVENT_NOTOPER]     ccw_device_online_notoper,
-        [DEV_EVENT_INTERRUPT]   ccw_device_irq,
-        [DEV_EVENT_TIMEOUT]     ccw_device_online_timeout,
-        [DEV_EVENT_VERIFY]      ccw_device_online_verify,
+    [DEV_STATE_ONLINE] = {
+        [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
+        [DEV_EVENT_INTERRUPT]   = ccw_device_irq,
+        [DEV_EVENT_TIMEOUT]     = ccw_device_online_timeout,
+        [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
    },
-    [DEV_STATE_W4SENSE] {
-        [DEV_EVENT_NOTOPER]     ccw_device_online_notoper,
-        [DEV_EVENT_INTERRUPT]   ccw_device_w4sense,
-        [DEV_EVENT_TIMEOUT]     ccw_device_nop,
-        [DEV_EVENT_VERIFY]      ccw_device_online_verify,
+    [DEV_STATE_W4SENSE] = {
+        [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
+        [DEV_EVENT_INTERRUPT]   = ccw_device_w4sense,
+        [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
+        [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
    },
-    [DEV_STATE_DISBAND_PGID] {
-        [DEV_EVENT_NOTOPER]     ccw_device_online_notoper,
-        [DEV_EVENT_INTERRUPT]   ccw_device_disband_irq,
-        [DEV_EVENT_TIMEOUT]     ccw_device_onoff_timeout,
-        [DEV_EVENT_VERIFY]      ccw_device_nop,
+    [DEV_STATE_DISBAND_PGID] = {
+        [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
+        [DEV_EVENT_INTERRUPT]   = ccw_device_disband_irq,
+        [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
+        [DEV_EVENT_VERIFY]      = ccw_device_nop,
    },
-    [DEV_STATE_BOXED] {
-        [DEV_EVENT_NOTOPER]     ccw_device_offline_notoper,
-        [DEV_EVENT_INTERRUPT]   ccw_device_stlck_done,
-        [DEV_EVENT_TIMEOUT]     ccw_device_stlck_done,
-        [DEV_EVENT_VERIFY]      ccw_device_nop,
+    [DEV_STATE_BOXED] = {
+        [DEV_EVENT_NOTOPER]     = ccw_device_offline_notoper,
+        [DEV_EVENT_INTERRUPT]   = ccw_device_stlck_done,
+        [DEV_EVENT_TIMEOUT]     = ccw_device_stlck_done,
+        [DEV_EVENT_VERIFY]      = ccw_device_nop,
    },
    /* states to wait for i/o completion before doing something */
-    [DEV_STATE_CLEAR_VERIFY] {
-        [DEV_EVENT_NOTOPER]     ccw_device_online_notoper,
-        [DEV_EVENT_INTERRUPT]   ccw_device_clear_verify,
-        [DEV_EVENT_TIMEOUT]     ccw_device_nop,
-        [DEV_EVENT_VERIFY]      ccw_device_nop,
+    [DEV_STATE_CLEAR_VERIFY] = {
+        [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
+        [DEV_EVENT_INTERRUPT]   = ccw_device_clear_verify,
+        [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
+        [DEV_EVENT_VERIFY]      = ccw_device_nop,
    },
-    [DEV_STATE_TIMEOUT_KILL] {
-        [DEV_EVENT_NOTOPER]     ccw_device_online_notoper,
-        [DEV_EVENT_INTERRUPT]   ccw_device_killing_irq,
-        [DEV_EVENT_TIMEOUT]     ccw_device_killing_timeout,
-        [DEV_EVENT_VERIFY]      ccw_device_nop, //FIXME
+    [DEV_STATE_TIMEOUT_KILL] = {
+        [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
+        [DEV_EVENT_INTERRUPT]   = ccw_device_killing_irq,
+        [DEV_EVENT_TIMEOUT]     = ccw_device_killing_timeout,
+        [DEV_EVENT_VERIFY]      = ccw_device_nop, //FIXME
    },
-    [DEV_STATE_WAIT4IO] {
-        [DEV_EVENT_NOTOPER]     ccw_device_online_notoper,
-        [DEV_EVENT_INTERRUPT]   ccw_device_wait4io_irq,
-        [DEV_EVENT_TIMEOUT]     ccw_device_wait4io_timeout,
-        [DEV_EVENT_VERIFY]      ccw_device_wait4io_verify,
-    },
-    [DEV_STATE_QUIESCE] {
-        [DEV_EVENT_NOTOPER]     ccw_device_quiesce_done,
-        [DEV_EVENT_INTERRUPT]   ccw_device_quiesce_done,
-        [DEV_EVENT_TIMEOUT]     ccw_device_quiesce_timeout,
-        [DEV_EVENT_VERIFY]      ccw_device_nop,
+    [DEV_STATE_QUIESCE] = {
+        [DEV_EVENT_NOTOPER]     = ccw_device_quiesce_done,
+        [DEV_EVENT_INTERRUPT]   = ccw_device_quiesce_done,
+        [DEV_EVENT_TIMEOUT]     = ccw_device_quiesce_timeout,
+        [DEV_EVENT_VERIFY]      = ccw_device_nop,
    },
    /* special states for devices gone not operational */
-    [DEV_STATE_DISCONNECTED] {
-        [DEV_EVENT_NOTOPER]     ccw_device_nop,
-        [DEV_EVENT_INTERRUPT]   ccw_device_start_id,
-        [DEV_EVENT_TIMEOUT]     ccw_device_bug,
-        [DEV_EVENT_VERIFY]      ccw_device_nop,
+    [DEV_STATE_DISCONNECTED] = {
+        [DEV_EVENT_NOTOPER]     = ccw_device_nop,
+        [DEV_EVENT_INTERRUPT]   = ccw_device_start_id,
+        [DEV_EVENT_TIMEOUT]     = ccw_device_bug,
+        [DEV_EVENT_VERIFY]      = ccw_device_start_id,
+    },
+    [DEV_STATE_DISCONNECTED_SENSE_ID] = {
+        [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
+        [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
+        [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
+        [DEV_EVENT_VERIFY]      = ccw_device_nop,
    },
-    [DEV_STATE_DISCONNECTED_SENSE_ID] {
-        [DEV_EVENT_NOTOPER]     ccw_device_recog_notoper,
-        [DEV_EVENT_INTERRUPT]   ccw_device_sense_id_irq,
-        [DEV_EVENT_TIMEOUT]     ccw_device_recog_timeout,
-        [DEV_EVENT_VERIFY]      ccw_device_nop,
+    [DEV_STATE_CMFCHANGE] = {
+        [DEV_EVENT_NOTOPER]     = ccw_device_change_cmfstate,
+        [DEV_EVENT_INTERRUPT]   = ccw_device_change_cmfstate,
+        [DEV_EVENT_TIMEOUT]     = ccw_device_change_cmfstate,
+        [DEV_EVENT_VERIFY]      = ccw_device_change_cmfstate,
    },
-    [DEV_STATE_CMFCHANGE] {
-        [DEV_EVENT_NOTOPER]     ccw_device_change_cmfstate,
-        [DEV_EVENT_INTERRUPT]   ccw_device_change_cmfstate,
-        [DEV_EVENT_TIMEOUT]     ccw_device_change_cmfstate,
-        [DEV_EVENT_VERIFY]      ccw_device_change_cmfstate,
+    [DEV_STATE_CMFUPDATE] = {
+        [DEV_EVENT_NOTOPER]     = ccw_device_update_cmfblock,
+        [DEV_EVENT_INTERRUPT]   = ccw_device_update_cmfblock,
+        [DEV_EVENT_TIMEOUT]     = ccw_device_update_cmfblock,
+        [DEV_EVENT_VERIFY]      = ccw_device_update_cmfblock,
    },
 };
 
@@ -1195,8 +1330,8 @@ io_subchannel_irq (struct device *pdev)
 
     CIO_TRACE_EVENT (3, "IRQ");
     CIO_TRACE_EVENT (3, pdev->bus_id);
-
-    dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
+    if (cdev)
+        dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
 }
 
 EXPORT_SYMBOL_GPL(ccw_device_set_timeout);