2 * drivers/s390/cio/device_ops.c
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
8 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
9 * Cornelia Huck (cohuck@de.ibm.com)
11 #include <linux/config.h>
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/errno.h>
15 #include <linux/slab.h>
16 #include <linux/list.h>
17 #include <linux/device.h>
19 #include <asm/ccwdev.h>
20 #include <asm/idals.h>
24 #include "cio_debug.h"
/*
 * Set per-device operation options from the CCWDEV_* flag bits.
 * CCWDEV_EARLY_NOTIFICATION and CCWDEV_REPORT_ALL are mutually
 * exclusive; presumably the function bails out with an error when
 * both are requested (the return path is not visible in this chunk).
 */
ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
	/*
	 * The flag usage is mutually exclusive ...
	 */
	if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	    (flags & CCWDEV_REPORT_ALL))
	/* Record each requested option as a bit in the private flags. */
	cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
/*
 * Terminate the current I/O on @cdev.  Only allowed while the device
 * is online or waiting for basic sense data; @intparm is stored so it
 * can be presented to the driver with the resulting interrupt.
 * NOTE(review): the actual cio call performing the clear is not
 * visible in this chunk.
 */
ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
	struct subchannel *sch;

	/* A non-operational device cannot be cleared. */
	if (cdev->private->state == DEV_STATE_NOT_OPER)
	/* Valid only while online or waiting for basic sense. */
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
	sch = to_subchannel(cdev->dev.parent);
	/* Remember intparm for the driver's interrupt handler. */
	cdev->private->intparm = intparm;
/*
 * Start a channel program on @cdev.
 * @cpa:     address of the first CCW of the channel program
 * @intparm: interruption parameter handed back to the driver's handler
 * @lpm:     logical path mask restricting which paths may be used
 * @flags:   options forwarded to cio_set_options()
 * Rejected while the device is not online, while primary status is
 * pending without secondary status (I/O still completing), or while
 * path verification is required.
 */
ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
		 unsigned long intparm, __u8 lpm, unsigned long flags)
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cdev->private->state == DEV_STATE_NOT_OPER)
	/* Busy: primary w/o secondary status means the old I/O still ends. */
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
	     !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
	    cdev->private->flags.doverify)
	ret = cio_set_options (sch, flags);
	ret = cio_start (sch, cpa, lpm);
	/* Remember intparm for the driver's interrupt handler. */
	cdev->private->intparm = intparm;
/*
 * Same as ccw_device_start(), but additionally arms a device timeout
 * of @expires before starting the I/O.  NOTE(review): the timeout is
 * presumably disarmed again only when the start fails — the guarding
 * condition around the second set_timeout call is not visible here.
 */
ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
			 unsigned long intparm, __u8 lpm, unsigned long flags,
	ccw_device_set_timeout(cdev, expires);
	ret = ccw_device_start(cdev, cpa, intparm, lpm, flags);
	/* Cancel the timeout (a zero expiry disarms it). */
	ccw_device_set_timeout(cdev, 0);
/*
 * Halt the current I/O on @cdev.  Only allowed while the device is
 * online or waiting for basic sense data; @intparm is stored for the
 * resulting interrupt.  NOTE(review): the actual cio halt call is
 * not visible in this chunk.
 */
ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
	struct subchannel *sch;

	/* A non-operational device cannot be halted. */
	if (cdev->private->state == DEV_STATE_NOT_OPER)
	/* Valid only while online or waiting for basic sense. */
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
	sch = to_subchannel(cdev->dev.parent);
	/* Remember intparm for the driver's interrupt handler. */
	cdev->private->intparm = intparm;
/*
 * Resume a suspended channel program on @cdev via cio_resume().
 * Only valid while the device is online and the subchannel activity
 * control actually shows it as suspended.
 */
ccw_device_resume(struct ccw_device *cdev)
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cdev->private->state == DEV_STATE_NOT_OPER)
	/* Nothing to resume unless online and actually suspended. */
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
	return cio_resume(sch);
/*
 * Pass interrupt to device driver.
 * Decides from the accumulated status control bits and the device
 * options whether the driver's interrupt handler should see this
 * interrupt; if so, calls it and clears the stored irb afterwards.
 */
ccw_device_call_handler(struct ccw_device *cdev)
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * we allow for the device action handler if .
	 *  - we received ending status
	 *  - the action handler requested to see all interrupts
	 *  - we received an intermediate status
	 *  - fast notification was requested (primary status)
	 *  - unsolicited interrupts
	 */
	stctl = cdev->private->irb.scsw.stctl;
	/* Ending status: secondary status, alert, or plain status pending. */
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	/* Skip the driver callback when none of the above conditions hold. */
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))

	/*
	 * Now we are ready to call the device driver interrupt handler.
	 */
	cdev->handler(cdev, cdev->private->intparm,
		      &cdev->private->irb);

	/*
	 * Clear the old and now useless interrupt response block.
	 */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
/*
 * Search for CIW command in extended sense data.
 * Returns a pointer into the cached SenseID CIW array for the first
 * entry whose command type matches @ct; presumably returns NULL when
 * there is no extended SenseID data or no match (the return
 * statements for those paths are not visible in this chunk).
 */
ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
	/* No extended SenseID data available for this device. */
	if (cdev->private->flags.esid == 0)
	for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
		if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
			return cdev->private->senseid.ciw + ciw_cnt;
/*
 * Return a path mask for @cdev's subchannel.  NOTE(review): which
 * schib/subchannel field is returned is not visible in this chunk —
 * confirm against the full source.
 */
ccw_device_get_path_mask(struct ccw_device *cdev)
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
/*
 * Temporary interrupt handler installed while the cio layer runs its
 * own synchronous requests (read device characteristics, read
 * configuration data, steal lock).  Encodes the outcome in
 * cdev->private->intparm:
 *   -EIO    non-retryable error
 *   -EAGAIN retry later (path verification pending, or retryable
 *           unit check)
 *   0       clean channel end / device end, success
 * and wakes the waiter blocked in __ccw_device_retry_loop().
 */
ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
	/* unsolicited interrupt */

	/* Abuse intparm for error reporting. */
		cdev->private->intparm = -EIO;
	else if ((irb->scsw.dstat !=
		  (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
		 (irb->scsw.cstat != 0)) {
		/*
		 * We didn't get channel end / device end. Check if path
		 * verification has been started; we can retry after it has
		 * finished. We also retry unit checks except for command reject
		 * or intervention required.
		 */
		if (cdev->private->flags.doverify ||
		    cdev->private->state == DEV_STATE_VERIFY)
			cdev->private->intparm = -EAGAIN;
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		     (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
			cdev->private->intparm = -EAGAIN;
			cdev->private->intparm = -EIO;
		cdev->private->intparm = 0;
	wake_up(&cdev->private->wait_q);
/*
 * Issue @ccw on @cdev's subchannel and wait for completion, retrying
 * while the start is rejected with -EBUSY/-EACCES or while the
 * completion result (stored in intparm by ccw_device_wake_up()) is
 * -EAGAIN.  @magic tags intparm while the request is in flight so the
 * wait_event condition can distinguish "still running" from a result.
 * Called with sch->lock held; the lock is dropped and re-acquired
 * around the sleeps.
 */
__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic)
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
		ret = cio_start (sch, ccw, 0);
		if ((ret == -EBUSY) || (ret == -EACCES)) {
			/* Try again later. */
			spin_unlock_irq(&sch->lock);
			spin_lock_irq(&sch->lock);
			/* Non-retryable error. */
		/* Wait for end of request. */
		cdev->private->intparm = magic;
		spin_unlock_irq(&sch->lock);
		/* Sleep until ccw_device_wake_up() stores a result. */
		wait_event(cdev->private->wait_q,
			   (cdev->private->intparm == -EIO) ||
			   (cdev->private->intparm == -EAGAIN) ||
			   (cdev->private->intparm == 0));
		spin_lock_irq(&sch->lock);
		/* Check at least for channel end / device end */
		if (cdev->private->intparm == -EIO) {
			/* Non-retryable error. */
		if (cdev->private->intparm == 0)
		/* Try again later. */
		spin_unlock_irq(&sch->lock);
		spin_lock_irq(&sch->lock);
/*
 * read_dev_chars() - read device characteristics
 * @param cdev target ccw device
 * @param buffer pointer to buffer for rdc data
 * @param length size of rdc data
 * @returns 0 for success, negative error value on failure
 *
 * Builds a single RDC CCW, temporarily replaces the driver's
 * interrupt handler with ccw_device_wake_up() and runs the request
 * synchronously through __ccw_device_retry_loop().
 *
 * called for online device, lock not held
 */
read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
	void (*handler)(struct ccw_device *, unsigned long, struct irb *);
	struct subchannel *sch;
	struct ccw1 *rdc_ccw;

	/* Reject obviously invalid user parameters. */
	if (!buffer || !length)
	sch = to_subchannel(cdev->dev.parent);
	CIO_TRACE_EVENT (4, "rddevch");
	CIO_TRACE_EVENT (4, sch->dev.bus_id);
	/* CCW must be addressable by the channel subsystem -> GFP_DMA. */
	rdc_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	memset(rdc_ccw, 0, sizeof(struct ccw1));
	rdc_ccw->cmd_code = CCW_CMD_RDC;
	rdc_ccw->count = length;
	rdc_ccw->flags = CCW_FLAG_SLI;	/* suppress incorrect-length ind. */
	ret = set_normalized_cda (rdc_ccw, (*buffer));
	spin_lock_irq(&sch->lock);
	/* Save interrupt handler. */
	handler = cdev->handler;
	/* Temporarily install own handler. */
	cdev->handler = ccw_device_wake_up;
	if (cdev->private->state != DEV_STATE_ONLINE)
	/* Busy: primary w/o secondary status, or verification required. */
	else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
		  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
		 cdev->private->flags.doverify)
		/* 0x00D9C4C3 == ebcdic "RDC" */
		ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3);
	/* Restore interrupt handler. */
	cdev->handler = handler;
	spin_unlock_irq(&sch->lock);
	clear_normalized_cda (rdc_ccw);
/*
 * Read Configuration data
 * Looks up the RCD command in the device's extended SenseID data,
 * allocates a DMA-capable buffer of the size the device reports
 * (ciw->count), runs the RCD channel program synchronously and, on
 * success, hands buffer and length back to the caller (who then
 * owns the buffer).
 */
read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
	void (*handler)(struct ccw_device *, unsigned long, struct irb *);
	struct subchannel *sch;
	struct ccw1 *rcd_ccw;

	/* Reject obviously invalid user parameters. */
	if (!buffer || !length)
	sch = to_subchannel(cdev->dev.parent);
	CIO_TRACE_EVENT (4, "rdconf");
	CIO_TRACE_EVENT (4, sch->dev.bus_id);
	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
	/* Device does not advertise an RCD command. */
	if (!ciw || ciw->cmd == 0)
	rcd_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	memset(rcd_ccw, 0, sizeof(struct ccw1));
	/* Data buffer sized as the device requests; must be DMA-capable. */
	rcd_buf = kmalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	memset (rcd_buf, 0, ciw->count);
	rcd_ccw->cmd_code = ciw->cmd;
	rcd_ccw->cda = (__u32) __pa (rcd_buf);
	rcd_ccw->count = ciw->count;
	rcd_ccw->flags = CCW_FLAG_SLI;

	spin_lock_irq(&sch->lock);
	/* Save interrupt handler. */
	handler = cdev->handler;
	/* Temporarily install own handler. */
	cdev->handler = ccw_device_wake_up;
	if (cdev->private->state != DEV_STATE_ONLINE)
	/* Busy: primary w/o secondary status, or verification required. */
	else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
		  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
		 cdev->private->flags.doverify)
		/* 0x00D9C3C4 == ebcdic "RCD" */
		ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4);
	/* Restore interrupt handler. */
	cdev->handler = handler;
	spin_unlock_irq(&sch->lock);
	/*
	 * on success we update the user input parms
	 */
	*length = ciw->count;
/*
 * Try to break the lock on a boxed device.
 * Chains an unconditional reserve (STLCK) with a release CCW so the
 * reserve held by another system is broken without keeping it.
 * Refused when a driver is bound that did not set the 'force' option.
 */
ccw_device_stlck(struct ccw_device *cdev)
	struct subchannel *sch;

	/* Only allowed if the driver explicitly permits forcing. */
	if (cdev->drv && !cdev->private->options.force)
	sch = to_subchannel(cdev->dev.parent);
	CIO_TRACE_EVENT(2, "stl lock");
	CIO_TRACE_EVENT(2, cdev->dev.bus_id);
	/* Data buffers for the two chained CCWs; must be DMA-capable. */
	buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
	buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
	spin_lock_irqsave(&sch->lock, flags);
	ret = cio_enable_subchannel(sch, 3);
	/*
	 * Setup ccw. We chain an unconditional reserve and a release so we
	 * only break the lock.
	 */
	cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
	cdev->private->iccws[0].cda = (__u32) __pa(buf);
	cdev->private->iccws[0].count = 32;
	cdev->private->iccws[0].flags = CCW_FLAG_CC;	/* chain to release */
	cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
	cdev->private->iccws[1].cda = (__u32) __pa(buf2);
	cdev->private->iccws[1].count = 32;
	cdev->private->iccws[1].flags = 0;
	ret = cio_start(sch, cdev->private->iccws, 0);
		cio_disable_subchannel(sch); //FIXME: return code?
	/* Mark start pending, then sleep until the actl bits clear. */
	cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
	spin_unlock_irqrestore(&sch->lock, flags);
	wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
	spin_lock_irqsave(&sch->lock, flags);
	cio_disable_subchannel(sch); //FIXME: return code?
	/* Anything but clean channel end / device end is a failure. */
	if ((cdev->private->irb.scsw.dstat !=
	     (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
	    (cdev->private->irb.scsw.cstat != 0))
	/* Clear the now stale internal irb. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	spin_unlock_irqrestore(&sch->lock, flags);
// FIXME: these have to go:

/* Legacy accessor: raw subchannel number of @cdev. */
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
	return cdev->private->irq;
/* Legacy accessor: device number (devno) of @cdev. */
_ccw_device_get_device_number(struct ccw_device *cdev)
	return cdev->private->devno;
/* Module metadata and the exported ccw device API entry points. */
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL(read_conf_data);
EXPORT_SYMBOL(read_dev_chars);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL(_ccw_device_get_device_number);