/*
 *  drivers/s390/cio/device_ops.c
 *
 *   $Revision: 1.34 $
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                       IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Cornelia Huck (cohuck@de.ibm.com)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>

#include <asm/ccwdev.h>
#include <asm/idals.h>
#include <asm/qdio.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "qdio.h"

int
ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
        /*
         * The two notification flags are mutually exclusive
         * (a usage sketch follows this function).
         */
        if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
            (flags & CCWDEV_REPORT_ALL))
                return -EINVAL;
        cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
        cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
        cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
        cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
        return 0;
}
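
/*
 * A minimal usage sketch (illustrative; the caller is assumed to be a
 * driver's set_online callback): request fast notification on primary
 * status plus path grouping.  CCWDEV_EARLY_NOTIFICATION and
 * CCWDEV_REPORT_ALL must not be combined, as checked above.
 *
 *        ret = ccw_device_set_options(cdev, CCWDEV_EARLY_NOTIFICATION |
 *                                           CCWDEV_DO_PATHGROUP);
 *        if (ret)
 *                return ret;
 */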

int
ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE &&
            cdev->private->state != DEV_STATE_W4SENSE)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        ret = cio_clear(sch);
        if (ret == 0)
                cdev->private->intparm = intparm;
        return ret;
}

int
ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
                 unsigned long intparm, __u8 lpm, unsigned long flags)
{
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE ||
            ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
             !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
            cdev->private->flags.doverify)
                return -EBUSY;
        ret = cio_set_options (sch, flags);
        if (ret)
                return ret;
        ret = cio_start (sch, cpa, lpm);
        if (ret == 0)
                cdev->private->intparm = intparm;
        return ret;
}
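
/*
 * A sketch of how a driver might start I/O (illustrative; "req" with its
 * embedded ccw, "rc" and "irqflags" are hypothetical locals).  The
 * request address is passed as intparm so the interrupt handler can
 * identify the request, 0x03 is the no-operation channel command, lpm 0
 * leaves the path choice to the common I/O layer, and the ccw device
 * lock is assumed to be required around the call.
 *
 *        struct ccw1 *ccw = &req->ccw;
 *
 *        ccw->cmd_code = 0x03;
 *        ccw->flags = CCW_FLAG_SLI;
 *        ccw->count = 0;
 *        ccw->cda = 0;
 *        spin_lock_irqsave(get_ccwdev_lock(cdev), irqflags);
 *        rc = ccw_device_start(cdev, ccw, (unsigned long) req, 0, 0);
 *        spin_unlock_irqrestore(get_ccwdev_lock(cdev), irqflags);
 *        if (rc == -EBUSY)
 *                ... device busy or path verification pending; retry later ...
 */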

int
ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
                         unsigned long intparm, __u8 lpm, unsigned long flags,
                         int expires)
{
        int ret;

        if (!cdev)
                return -ENODEV;
        ccw_device_set_timeout(cdev, expires);
        ret = ccw_device_start(cdev, cpa, intparm, lpm, flags);
        if (ret != 0)
                ccw_device_set_timeout(cdev, 0);
        return ret;
}

int
ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE &&
            cdev->private->state != DEV_STATE_W4SENSE)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        ret = cio_halt(sch);
        if (ret == 0)
                cdev->private->intparm = intparm;
        return ret;
}

int
ccw_device_resume(struct ccw_device *cdev)
{
        struct subchannel *sch;

        if (!cdev)
                return -ENODEV;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE ||
            !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
                return -EINVAL;
        return cio_resume(sch);
}

/*
 * Pass interrupt to device driver.
 */
int
ccw_device_call_handler(struct ccw_device *cdev)
{
        struct subchannel *sch;
        unsigned int stctl;
        int ending_status;

        sch = to_subchannel(cdev->dev.parent);

        /*
         * We call the device driver's interrupt handler if one of the
         * following applies:
         *  - we received ending status
         *  - the driver requested to see all interrupts
         *  - we received an intermediate status
         *  - fast notification was requested and primary status is present
         *  - the interrupt is unsolicited
         * (a sketch of such a driver handler follows this function)
         */
        stctl = cdev->private->irb.scsw.stctl;
        ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
                (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
                (stctl == SCSW_STCTL_STATUS_PEND);
        if (!ending_status &&
            !cdev->private->options.repall &&
            !(stctl & SCSW_STCTL_INTER_STATUS) &&
            !(cdev->private->options.fast &&
              (stctl & SCSW_STCTL_PRIM_STATUS)))
                return 0;

        /*
         * Now we are ready to call the device driver interrupt handler.
         */
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              &cdev->private->irb);

        /*
         * Clear the old and now useless interrupt response block.
         */
        memset(&cdev->private->irb, 0, sizeof(struct irb));

        return 1;
}
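
/*
 * The driver side of the above, as a sketch (my_handler and my_request
 * are hypothetical): the handler installed in cdev->handler receives the
 * intparm given to ccw_device_start() plus the accumulated irb.  It may
 * also be handed an error pointer instead of an irb, and intparm is 0
 * for unsolicited interrupts (compare ccw_device_wake_up() below).
 * Channel end + device end with a zero channel status means normal
 * completion.
 *
 *        static void
 *        my_handler(struct ccw_device *cdev, unsigned long intparm,
 *                   struct irb *irb)
 *        {
 *                struct my_request *req = (struct my_request *) intparm;
 *
 *                if (!intparm)
 *                        return;
 *                if (IS_ERR(irb)) {
 *                        ... handle the error condition ...
 *                        return;
 *                }
 *                if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
 *                    irb->scsw.cstat == 0)
 *                        ... req completed successfully ...
 *        }
 */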

/*
 * Search for CIW command in extended sense data.
 */
struct ciw *
ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
        int ciw_cnt;

        if (cdev->private->flags.esid == 0)
                return NULL;
        for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
                if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
                        return cdev->private->senseid.ciw + ciw_cnt;
        return NULL;
}
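
/*
 * Usage sketch: look up the device's command code for reading
 * configuration data, as read_conf_data() does further down.
 *
 *        ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
 *        if (!ciw || ciw->cmd == 0)
 *                return -EOPNOTSUPP;
 *        ... ciw->cmd is the command code, ciw->count the buffer size ...
 */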

__u8
ccw_device_get_path_mask(struct ccw_device *cdev)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return 0;
        else
                return sch->vpm;
}

static void
ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
{
        if (!ip)
                /* unsolicited interrupt */
                return;

        /* Abuse intparm for error reporting. */
        if (IS_ERR(irb))
                cdev->private->intparm = -EIO;
        else if ((irb->scsw.dstat !=
                  (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
                 (irb->scsw.cstat != 0)) {
                /*
                 * We didn't get channel end / device end. Check if path
                 * verification has been started; we can retry after it has
                 * finished. We also retry unit checks except for command reject
                 * or intervention required.
                 */
                if (cdev->private->flags.doverify ||
                    cdev->private->state == DEV_STATE_VERIFY)
                        cdev->private->intparm = -EAGAIN;
                else if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
                         !(irb->ecw[0] &
                           (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
                        cdev->private->intparm = -EAGAIN;
                else
                        cdev->private->intparm = -EIO;
        } else
                cdev->private->intparm = 0;
        wake_up(&cdev->private->wait_q);
}

static inline int
__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic)
{
        int ret;
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        do {
                ret = cio_start (sch, ccw, 0);
                if ((ret == -EBUSY) || (ret == -EACCES)) {
                        /* Try again later. */
                        spin_unlock_irq(&sch->lock);
                        schedule_timeout(1);
                        spin_lock_irq(&sch->lock);
                        continue;
                }
                if (ret != 0)
                        /* Non-retryable error. */
                        break;
                /* Wait for end of request. */
                cdev->private->intparm = magic;
                spin_unlock_irq(&sch->lock);
                wait_event(cdev->private->wait_q,
                           (cdev->private->intparm == -EIO) ||
                           (cdev->private->intparm == -EAGAIN) ||
                           (cdev->private->intparm == 0));
                spin_lock_irq(&sch->lock);
                /* Check at least for channel end / device end */
                if (cdev->private->intparm == -EIO) {
                        /* Non-retryable error. */
                        ret = -EIO;
                        break;
                }
                if (cdev->private->intparm == 0)
                        /* Success. */
                        break;
                /* Try again later. */
                spin_unlock_irq(&sch->lock);
                schedule_timeout(1);
                spin_lock_irq(&sch->lock);
        } while (1);

        return ret;
}

/**
 * read_dev_chars() - read device characteristics
 * @cdev: target ccw device
 * @buffer: pointer to buffer for rdc data
 * @length: size of rdc data
 *
 * Returns 0 for success, negative error value on failure.
 *
 * Context:
 *   called for online device, lock not held
 **/
int
read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
{
        void (*handler)(struct ccw_device *, unsigned long, struct irb *);
        struct subchannel *sch;
        int ret;
        struct ccw1 *rdc_ccw;

        if (!cdev)
                return -ENODEV;
        if (!buffer || !length)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT (4, "rddevch");
        CIO_TRACE_EVENT (4, sch->dev.bus_id);

        rdc_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
        if (!rdc_ccw)
                return -ENOMEM;
        memset(rdc_ccw, 0, sizeof(struct ccw1));
        rdc_ccw->cmd_code = CCW_CMD_RDC;
        rdc_ccw->count = length;
        rdc_ccw->flags = CCW_FLAG_SLI;
        ret = set_normalized_cda (rdc_ccw, (*buffer));
        if (ret != 0) {
                kfree(rdc_ccw);
                return ret;
        }

        spin_lock_irq(&sch->lock);
        /* Save interrupt handler. */
        handler = cdev->handler;
        /* Temporarily install own handler. */
        cdev->handler = ccw_device_wake_up;
        if (cdev->private->state != DEV_STATE_ONLINE)
                ret = -ENODEV;
        else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
                  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
                 cdev->private->flags.doverify)
                ret = -EBUSY;
        else
                /* 0x00D9C4C3 == ebcdic "RDC" */
                ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3);

        /* Restore interrupt handler. */
        cdev->handler = handler;
        spin_unlock_irq(&sch->lock);

        clear_normalized_cda (rdc_ccw);
        kfree(rdc_ccw);

        return ret;
}
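
/*
 * Usage sketch (illustrative; the buffer size of 64 bytes is a made-up,
 * device dependent value): the caller supplies a DMA-capable buffer and
 * its size, and on success the device characteristics have been read
 * into it.
 *
 *        void *buf = kmalloc(64, GFP_KERNEL | GFP_DMA);
 *
 *        if (!buf)
 *                return -ENOMEM;
 *        ret = read_dev_chars(cdev, &buf, 64);
 *        if (ret == 0)
 *                ... evaluate 64 bytes of RDC data at buf ...
 *        kfree(buf);
 */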

/*
 *  Read Configuration data
 */
int
read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
{
        void (*handler)(struct ccw_device *, unsigned long, struct irb *);
        struct subchannel *sch;
        struct ciw *ciw;
        char *rcd_buf;
        int ret;
        struct ccw1 *rcd_ccw;

        if (!cdev)
                return -ENODEV;
        if (!buffer || !length)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT (4, "rdconf");
        CIO_TRACE_EVENT (4, sch->dev.bus_id);

        /*
         * scan for RCD command in extended SenseID data
         */
        ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
        if (!ciw || ciw->cmd == 0)
                return -EOPNOTSUPP;

        rcd_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
        if (!rcd_ccw)
                return -ENOMEM;
        memset(rcd_ccw, 0, sizeof(struct ccw1));
        rcd_buf = kmalloc(ciw->count, GFP_KERNEL | GFP_DMA);
        if (!rcd_buf) {
                kfree(rcd_ccw);
                return -ENOMEM;
        }
        memset (rcd_buf, 0, ciw->count);
        rcd_ccw->cmd_code = ciw->cmd;
        rcd_ccw->cda = (__u32) __pa (rcd_buf);
        rcd_ccw->count = ciw->count;
        rcd_ccw->flags = CCW_FLAG_SLI;

        spin_lock_irq(&sch->lock);
        /* Save interrupt handler. */
        handler = cdev->handler;
        /* Temporarily install own handler. */
        cdev->handler = ccw_device_wake_up;
        if (cdev->private->state != DEV_STATE_ONLINE)
                ret = -ENODEV;
        else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
                  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
                 cdev->private->flags.doverify)
                ret = -EBUSY;
        else
                /* 0x00D9C3C4 == ebcdic "RCD" */
                ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4);

        /* Restore interrupt handler. */
        cdev->handler = handler;
        spin_unlock_irq(&sch->lock);

        /*
         * On success, hand the buffer and its length back to the caller.
         */
        if (ret) {
                kfree (rcd_buf);
                *buffer = NULL;
                *length = 0;
        } else {
                *length = ciw->count;
                *buffer = rcd_buf;
        }
        kfree(rcd_ccw);

        return ret;
}
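
/*
 * Usage sketch: unlike read_dev_chars(), the buffer is allocated here
 * and handed to the caller, who owns it afterwards and has to kfree()
 * it.
 *
 *        void *rcd;
 *        int len;
 *
 *        ret = read_conf_data(cdev, &rcd, &len);
 *        if (ret == 0) {
 *                ... evaluate len bytes of configuration data at rcd ...
 *                kfree(rcd);
 *        }
 */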

/*
 * Try to break the lock on a boxed device.
 */
int
ccw_device_stlck(struct ccw_device *cdev)
{
        void *buf, *buf2;
        unsigned long flags;
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;

        if (cdev->drv && !cdev->private->options.force)
                return -EINVAL;

        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT(2, "stl lock");
        CIO_TRACE_EVENT(2, cdev->dev.bus_id);

        buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
        if (!buf2) {
                kfree(buf);
                return -ENOMEM;
        }
        spin_lock_irqsave(&sch->lock, flags);
        ret = cio_enable_subchannel(sch, 3);
        if (ret)
                goto out_unlock;
        /*
         * Setup ccw. We chain an unconditional reserve and a release so we
         * only break the lock.
         */
        cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
        cdev->private->iccws[0].cda = (__u32) __pa(buf);
        cdev->private->iccws[0].count = 32;
        cdev->private->iccws[0].flags = CCW_FLAG_CC;
        cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
        cdev->private->iccws[1].cda = (__u32) __pa(buf2);
        cdev->private->iccws[1].count = 32;
        cdev->private->iccws[1].flags = 0;
        ret = cio_start(sch, cdev->private->iccws, 0);
        if (ret) {
                cio_disable_subchannel(sch); //FIXME: return code?
                goto out_unlock;
        }
        cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
        spin_unlock_irqrestore(&sch->lock, flags);
        wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
        spin_lock_irqsave(&sch->lock, flags);
        cio_disable_subchannel(sch); //FIXME: return code?
        if ((cdev->private->irb.scsw.dstat !=
             (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
            (cdev->private->irb.scsw.cstat != 0))
                ret = -EIO;
        /* Clear irb. */
        memset(&cdev->private->irb, 0, sizeof(struct irb));
out_unlock:
        kfree(buf);
        kfree(buf2);
        spin_unlock_irqrestore(&sch->lock, flags);
        return ret;
}

// FIXME: these have to go:

int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
        return cdev->private->irq;
}

int
_ccw_device_get_device_number(struct ccw_device *cdev)
{
        return cdev->private->devno;
}


MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL(read_conf_data);
EXPORT_SYMBOL(read_dev_chars);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL(_ccw_device_get_device_number);