X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fs390%2Fchar%2Ftape_core.c;h=c6c2e918b990d3645bb5e384f9de4b4aae5804c4;hb=refs%2Fheads%2Fvserver;hp=4ea438c749c9c6f4764ea1056c1baae1d1c52446;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git

diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 4ea438c74..c6c2e918b 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -11,7 +11,6 @@
  *		 Stefan Bader <shbader@de.ibm.com>
  */
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/init.h>      // for kernel parameters
 #include <linux/kmod.h>      // for requesting modules
@@ -29,7 +28,7 @@
 #define PRINTK_HEADER "TAPE_CORE: "
 
 static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
-static void tape_delayed_next_request(void * data);
+static void tape_delayed_next_request(struct work_struct *);
 
 /*
  * One list to contain all tape devices of all disciplines, so
@@ -210,18 +209,14 @@ tape_state_set(struct tape_device *device, enum tape_state newstate)
 		return;
 	}
 	DBF_EVENT(4, "ts. dev: %x\n", device->first_minor);
-	if (device->tape_state < TO_SIZE && device->tape_state >= 0)
-		str = tape_state_verbose[device->tape_state];
-	else
-		str = "UNKNOWN TS";
-	DBF_EVENT(4, "old ts: %s\n", str);
-	if (device->tape_state < TO_SIZE && device->tape_state >=0 )
+	DBF_EVENT(4, "old ts:\t\n");
+	if (device->tape_state < TS_SIZE && device->tape_state >=0 )
 		str = tape_state_verbose[device->tape_state];
 	else
 		str = "UNKNOWN TS";
 	DBF_EVENT(4, "%s\n", str);
 	DBF_EVENT(4, "new ts:\t\n");
-	if (newstate < TO_SIZE && newstate >= 0)
+	if (newstate < TS_SIZE && newstate >= 0)
 		str = tape_state_verbose[newstate];
 	else
 		str = "UNKNOWN TS";
@@ -277,7 +272,7 @@ __tape_cancel_io(struct tape_device *device, struct tape_request *request)
 		return 0;
 	case -EBUSY:
 		request->status = TAPE_REQUEST_CANCEL;
-		schedule_work(&device->tape_dnr);
+		schedule_delayed_work(&device->tape_dnr, 0);
 		return 0;
 	case -ENODEV:
 		DBF_EXCEPTION(2, "device gone, retry\n");
@@ -453,16 +448,14 @@ tape_alloc_device(void)
 {
 	struct tape_device *device;
 
-	device = (struct tape_device *)
-		kmalloc(sizeof(struct tape_device), GFP_KERNEL);
+	device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
 	if (device == NULL) {
 		DBF_EXCEPTION(2, "ti:no mem\n");
 		PRINT_INFO ("can't allocate memory for "
 			    "tape info structure\n");
 		return ERR_PTR(-ENOMEM);
 	}
-	memset(device, 0, sizeof(struct tape_device));
-	device->modeset_byte = (char *) kmalloc(1, GFP_KERNEL | GFP_DMA);
+	device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
 	if (device->modeset_byte == NULL) {
 		DBF_EXCEPTION(2, "ti:no mem\n");
 		PRINT_INFO("can't allocate memory for modeset byte\n");
@@ -477,7 +470,7 @@ tape_alloc_device(void)
 	*device->modeset_byte = 0;
 	device->first_minor = -1;
 	atomic_set(&device->ref_count, 1);
-	INIT_WORK(&device->tape_dnr, tape_delayed_next_request, device);
+	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);
 
 	return device;
 }
@@ -550,20 +543,24 @@ int
 tape_generic_probe(struct ccw_device *cdev)
 {
 	struct tape_device *device;
+	int ret;
 
 	device = tape_alloc_device();
 	if (IS_ERR(device))
 		return -ENODEV;
-	PRINT_INFO("tape device %s found\n", cdev->dev.bus_id);
+	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
+	ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
+	if (ret) {
+		tape_put_device(device);
+		PRINT_ERR("probe failed for tape device %s\n", cdev->dev.bus_id);
+		return ret;
+	}
 	cdev->dev.driver_data = device;
+	cdev->handler = __tape_do_irq;
 	device->cdev = cdev;
 	device->cdev_id = busid_to_int(cdev->dev.bus_id);
-	cdev->handler = __tape_do_irq;
-
-	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
-	sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
-
-	return 0;
+	PRINT_INFO("tape device %s found\n", cdev->dev.bus_id);
+	return ret;
 }
 
 static inline void
@@ -659,34 +656,30 @@ tape_alloc_request(int cplength, int datasize)
 
 	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);
 
-	request = (struct tape_request *) kmalloc(sizeof(struct tape_request),
-		GFP_KERNEL);
+	request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
 	if (request == NULL) {
 		DBF_EXCEPTION(1, "cqra nomem\n");
 		return ERR_PTR(-ENOMEM);
 	}
-	memset(request, 0, sizeof(struct tape_request));
 	/* allocate channel program */
 	if (cplength > 0) {
-		request->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
+		request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
 					  GFP_ATOMIC | GFP_DMA);
 		if (request->cpaddr == NULL) {
 			DBF_EXCEPTION(1, "cqra nomem\n");
 			kfree(request);
 			return ERR_PTR(-ENOMEM);
 		}
-		memset(request->cpaddr, 0, cplength*sizeof(struct ccw1));
 	}
 	/* alloc small kernel buffer */
 	if (datasize > 0) {
-		request->cpdata = kmalloc(datasize, GFP_KERNEL | GFP_DMA);
+		request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
 		if (request->cpdata == NULL) {
 			DBF_EXCEPTION(1, "cqra nomem\n");
 			kfree(request->cpaddr);
 			kfree(request);
 			return ERR_PTR(-ENOMEM);
 		}
-		memset(request->cpdata, 0, datasize);
 	}
 	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
 		request->cpdata);
@@ -731,7 +724,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request)
 	} else if (rc == -EBUSY) {
 		/* The common I/O subsystem is currently busy. Retry later. */
 		request->status = TAPE_REQUEST_QUEUED;
-		schedule_work(&device->tape_dnr);
+		schedule_delayed_work(&device->tape_dnr, 0);
 		rc = 0;
 	} else {
 		/* Start failed. Remove request and indicate failure. */
@@ -761,6 +754,13 @@ __tape_start_next_request(struct tape_device *device)
 		 */
 		if (request->status == TAPE_REQUEST_IN_IO)
 			return;
+		/*
+		 * Request has already been stopped. We have to wait until
+		 * the request is removed from the queue in the interrupt
+		 * handling.
+		 */
+		if (request->status == TAPE_REQUEST_DONE)
+			return;
 
 		/*
 		 * We wanted to cancel the request but the common I/O layer
@@ -790,11 +790,11 @@
 }
 
 static void
-tape_delayed_next_request(void *data)
+tape_delayed_next_request(struct work_struct *work)
 {
-	struct tape_device *	device;
+	struct tape_device *device =
+		container_of(work, struct tape_device, tape_dnr.work);
 
-	device = (struct tape_device *) data;
 	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	__tape_start_next_request(device);
@@ -1015,7 +1015,7 @@ tape_do_io_interruptible(struct tape_device *device,
 			wq,
 			(request->callback == NULL)
 		);
-	} while (rc != -ERESTARTSYS);
+	} while (rc == -ERESTARTSYS);
 
 	DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
 	rc = -ERESTARTSYS;
@@ -1023,6 +1023,20 @@ tape_do_io_interruptible(struct tape_device *device,
 	return rc;
 }
 
+/*
+ * Stop running ccw.
+ */
+int
+tape_cancel_io(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = __tape_cancel_io(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
 /*
  * Tape interrupt routine, called from the ccw_device layer
  */
@@ -1064,15 +1078,16 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 	/*
 	 * If the condition code is not zero and the start function bit is
 	 * still set, this is an deferred error and the last start I/O did
-	 * not succeed. Restart the request now.
+	 * not succeed. At this point the condition that caused the deferred
+	 * error might still apply. So we just schedule the request to be
+	 * started later.
 	 */
-	if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
-		PRINT_WARN("(%s): deferred cc=%i. restaring\n",
-			cdev->dev.bus_id,
-			irb->scsw.cc);
-		rc = __tape_start_io(device, request);
-		if (rc)
-			__tape_end_request(device, request, rc);
+	if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
+	    (request->status == TAPE_REQUEST_IN_IO)) {
+		DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n",
+			device->cdev_id, irb->scsw.cc, irb->scsw.fctl);
+		request->status = TAPE_REQUEST_QUEUED;
+		schedule_delayed_work(&device->tape_dnr, HZ);
 		return;
 	}
 
@@ -1286,4 +1301,5 @@ EXPORT_SYMBOL(tape_dump_sense_dbf);
 EXPORT_SYMBOL(tape_do_io);
 EXPORT_SYMBOL(tape_do_io_async);
 EXPORT_SYMBOL(tape_do_io_interruptible);
+EXPORT_SYMBOL(tape_cancel_io);
 EXPORT_SYMBOL(tape_mtop);
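Usage note (not part of the patch): the diff newly exports tape_cancel_io(), which wraps __tape_cancel_io() under the ccw device lock so that a tape discipline can abort a request it started, for example one issued with tape_do_io_async(). The sketch below is a minimal, hypothetical caller; the function name example_abort_request and its error handling are assumptions for illustration only, not code from this tree.

	#include "tape.h"	/* struct tape_device, struct tape_request, DBF_EVENT */

	/*
	 * Hypothetical example: abort an asynchronous request, e.g. from a
	 * discipline's timeout handling.  tape_cancel_io() acquires the ccw
	 * device lock itself, so it must be called without that lock held.
	 */
	static void example_abort_request(struct tape_device *device,
					  struct tape_request *request)
	{
		int rc;

		rc = tape_cancel_io(device, request);
		if (rc)
			/* Cancel could not be initiated; the request will still
			 * be completed through the normal interrupt path. */
			DBF_EVENT(3, "example: cancel failed, rc=%d\n", rc);
	}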