vserver 1.9.5.x5
[linux-2.6.git] / drivers / s390 / cio / device_ops.c
1 /*
2  *  drivers/s390/cio/device_ops.c
3  *
4  *   $Revision: 1.53 $
5  *
6  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7  *                       IBM Corporation
8  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
9  *               Cornelia Huck (cohuck@de.ibm.com)
10  */
11 #include <linux/config.h>
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/errno.h>
15 #include <linux/slab.h>
16 #include <linux/list.h>
17 #include <linux/device.h>
18 #include <linux/delay.h>
19
20 #include <asm/ccwdev.h>
21 #include <asm/idals.h>
22 #include <asm/qdio.h>
23
24 #include "cio.h"
25 #include "cio_debug.h"
26 #include "css.h"
27 #include "device.h"
28 #include "qdio.h"
29
30 int
31 ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
32 {
33        /*
34         * The flag usage is mutal exclusive ...
35         */
36         if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
37             (flags & CCWDEV_REPORT_ALL))
38                 return -EINVAL;
39         cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
40         cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
41         cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
42         cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
43         return 0;
44 }
45
46 int
47 ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
48 {
49         struct subchannel *sch;
50         int ret;
51
52         if (!cdev)
53                 return -ENODEV;
54         if (cdev->private->state == DEV_STATE_NOT_OPER)
55                 return -ENODEV;
56         if (cdev->private->state != DEV_STATE_ONLINE &&
57             cdev->private->state != DEV_STATE_WAIT4IO &&
58             cdev->private->state != DEV_STATE_W4SENSE)
59                 return -EINVAL;
60         sch = to_subchannel(cdev->dev.parent);
61         if (!sch)
62                 return -ENODEV;
63         ret = cio_clear(sch);
64         if (ret == 0)
65                 cdev->private->intparm = intparm;
66         return ret;
67 }
68
69 int
70 ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
71                      unsigned long intparm, __u8 lpm, __u8 key,
72                      unsigned long flags)
73 {
74         struct subchannel *sch;
75         int ret;
76
77         if (!cdev)
78                 return -ENODEV;
79         sch = to_subchannel(cdev->dev.parent);
80         if (!sch)
81                 return -ENODEV;
82         if (cdev->private->state == DEV_STATE_NOT_OPER)
83                 return -ENODEV;
84         if (cdev->private->state != DEV_STATE_ONLINE ||
85             ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
86              !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
87             cdev->private->flags.doverify)
88                 return -EBUSY;
89         ret = cio_set_options (sch, flags);
90         if (ret)
91                 return ret;
92         ret = cio_start_key (sch, cpa, lpm, key);
93         if (ret == 0)
94                 cdev->private->intparm = intparm;
95         return ret;
96 }
97
98
99 int
100 ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
101                              unsigned long intparm, __u8 lpm, __u8 key,
102                              unsigned long flags, int expires)
103 {
104         int ret;
105
106         if (!cdev)
107                 return -ENODEV;
108         ccw_device_set_timeout(cdev, expires);
109         ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
110         if (ret != 0)
111                 ccw_device_set_timeout(cdev, 0);
112         return ret;
113 }
114
/*
 * Start a channel program on @cdev using the default storage key.
 * Thin convenience wrapper around ccw_device_start_key().
 */
int
ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
                 unsigned long intparm, __u8 lpm, unsigned long flags)
{
        return ccw_device_start_key(cdev, cpa, intparm, lpm,
                                    default_storage_key, flags);
}
122
/*
 * Start a channel program with a timeout, using the default storage key.
 * Thin convenience wrapper around ccw_device_start_timeout_key().
 */
int
ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
                         unsigned long intparm, __u8 lpm, unsigned long flags,
                         int expires)
{
        return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
                                            default_storage_key, flags,
                                            expires);
}
132
133
134 int
135 ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
136 {
137         struct subchannel *sch;
138         int ret;
139
140         if (!cdev)
141                 return -ENODEV;
142         if (cdev->private->state == DEV_STATE_NOT_OPER)
143                 return -ENODEV;
144         if (cdev->private->state != DEV_STATE_ONLINE &&
145             cdev->private->state != DEV_STATE_WAIT4IO &&
146             cdev->private->state != DEV_STATE_W4SENSE)
147                 return -EINVAL;
148         sch = to_subchannel(cdev->dev.parent);
149         if (!sch)
150                 return -ENODEV;
151         ret = cio_halt(sch);
152         if (ret == 0)
153                 cdev->private->intparm = intparm;
154         return ret;
155 }
156
157 int
158 ccw_device_resume(struct ccw_device *cdev)
159 {
160         struct subchannel *sch;
161
162         if (!cdev)
163                 return -ENODEV;
164         sch = to_subchannel(cdev->dev.parent);
165         if (!sch)
166                 return -ENODEV;
167         if (cdev->private->state == DEV_STATE_NOT_OPER)
168                 return -ENODEV;
169         if (cdev->private->state != DEV_STATE_ONLINE ||
170             !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
171                 return -EINVAL;
172         return cio_resume(sch);
173 }
174
/*
 * Pass interrupt to device driver.
 *
 * Returns 1 if the interrupt was considered relevant for the driver
 * (the handler, if installed, was called and the irb was cleared),
 * 0 if the interrupt was swallowed here.
 */
int
ccw_device_call_handler(struct ccw_device *cdev)
{
        struct subchannel *sch;
        unsigned int stctl;
        int ending_status;

        sch = to_subchannel(cdev->dev.parent);

        /*
         * The driver's handler is called if any of the following holds:
         *  - we received ending status (secondary status, alert status
         *    pending, or status pending alone, i.e. unsolicited)
         *  - the driver requested to see all interrupts (repall)
         *  - we received an intermediate status
         *  - fast notification was requested and primary status arrived
         */
        stctl = cdev->private->irb.scsw.stctl;
        ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
                (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
                (stctl == SCSW_STCTL_STATUS_PEND);
        if (!ending_status &&
            !cdev->private->options.repall &&
            !(stctl & SCSW_STCTL_INTER_STATUS) &&
            !(cdev->private->options.fast &&
              (stctl & SCSW_STCTL_PRIM_STATUS)))
                return 0;

        /*
         * Now we are ready to call the device driver interrupt handler.
         */
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              &cdev->private->irb);

        /*
         * Clear the old and now useless interrupt response block.
         */
        memset(&cdev->private->irb, 0, sizeof(struct irb));

        return 1;
}
220
221 /*
222  * Search for CIW command in extended sense data.
223  */
224 struct ciw *
225 ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
226 {
227         int ciw_cnt;
228
229         if (cdev->private->flags.esid == 0)
230                 return NULL;
231         for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
232                 if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
233                         return cdev->private->senseid.ciw + ciw_cnt;
234         return NULL;
235 }
236
237 __u8
238 ccw_device_get_path_mask(struct ccw_device *cdev)
239 {
240         struct subchannel *sch;
241
242         sch = to_subchannel(cdev->dev.parent);
243         if (!sch)
244                 return 0;
245         else
246                 return sch->vpm;
247 }
248
249 static void
250 ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
251 {
252         if (!ip)
253                 /* unsolicited interrupt */
254                 return;
255
256         /* Abuse intparm for error reporting. */
257         if (IS_ERR(irb))
258                 cdev->private->intparm = -EIO;
259         else if ((irb->scsw.dstat !=
260                   (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
261                  (irb->scsw.cstat != 0)) {
262                 /*
263                  * We didn't get channel end / device end. Check if path
264                  * verification has been started; we can retry after it has
265                  * finished. We also retry unit checks except for command reject
266                  * or intervention required.
267                  */
268                  if (cdev->private->flags.doverify ||
269                          cdev->private->state == DEV_STATE_VERIFY)
270                          cdev->private->intparm = -EAGAIN;
271                  if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
272                      !(irb->ecw[0] &
273                        (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
274                          cdev->private->intparm = -EAGAIN;
275                  else
276                          cdev->private->intparm = -EIO;
277                          
278         } else
279                 cdev->private->intparm = 0;
280         wake_up(&cdev->private->wait_q);
281 }
282
/*
 * Start the channel program @ccw on @cdev's subchannel and wait for it
 * to complete, retrying while the subchannel is busy or a path is
 * temporarily inaccessible.
 *
 * Must be called with sch->lock held (irqs disabled); the lock is
 * dropped while sleeping and while waiting for ccw_device_wake_up to
 * post the result in cdev->private->intparm (see that handler).
 *
 * @magic is stored in intparm to mark the request while it is in
 * flight. Returns 0 on success, -EIO on a non-retryable failure, or
 * another negative error code from cio_start.
 */
static inline int
__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic)
{
        int ret;
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        do {
                ret = cio_start (sch, ccw, 0);
                if ((ret == -EBUSY) || (ret == -EACCES)) {
                        /* Try again later. */
                        spin_unlock_irq(&sch->lock);
                        msleep(10);
                        spin_lock_irq(&sch->lock);
                        continue;
                }
                if (ret != 0)
                        /* Non-retryable error. */
                        break;
                /* Wait for end of request. */
                cdev->private->intparm = magic;
                spin_unlock_irq(&sch->lock);
                wait_event(cdev->private->wait_q,
                           (cdev->private->intparm == -EIO) ||
                           (cdev->private->intparm == -EAGAIN) ||
                           (cdev->private->intparm == 0));
                spin_lock_irq(&sch->lock);
                /* Check at least for channel end / device end */
                if (cdev->private->intparm == -EIO) {
                        /* Non-retryable error. */
                        ret = -EIO;
                        break;
                }
                if (cdev->private->intparm == 0)
                        /* Success. */
                        break;
                /* -EAGAIN: try again later. */
                spin_unlock_irq(&sch->lock);
                msleep(10);
                spin_lock_irq(&sch->lock);
        } while (1);

        return ret;
}
327
/**
 * read_dev_chars() - read device characteristics
 * @param cdev   target ccw device
 * @param buffer pointer to buffer for rdc data (must already be allocated
 *               by the caller; at least @length bytes)
 * @param length size of rdc data
 * @returns 0 for success, negative error value on failure
 *
 * Context:
 *   called for online device, lock not held; may sleep
 **/
int
read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
{
        void (*handler)(struct ccw_device *, unsigned long, struct irb *);
        struct subchannel *sch;
        int ret;
        struct ccw1 *rdc_ccw;

        if (!cdev)
                return -ENODEV;
        if (!buffer || !length)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT (4, "rddevch");
        CIO_TRACE_EVENT (4, sch->dev.bus_id);

        /* CCWs must live in DMA-capable memory. */
        rdc_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
        if (!rdc_ccw)
                return -ENOMEM;
        memset(rdc_ccw, 0, sizeof(struct ccw1));
        rdc_ccw->cmd_code = CCW_CMD_RDC;
        rdc_ccw->count = length;
        rdc_ccw->flags = CCW_FLAG_SLI;
        /* Set up (possibly indirect) data address for the caller's buffer. */
        ret = set_normalized_cda (rdc_ccw, (*buffer));
        if (ret != 0) {
                kfree(rdc_ccw);
                return ret;
        }

        spin_lock_irq(&sch->lock);
        /* Save interrupt handler. */
        handler = cdev->handler;
        /* Temporarily install own handler. */
        cdev->handler = ccw_device_wake_up;
        if (cdev->private->state != DEV_STATE_ONLINE)
                ret = -ENODEV;
        else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
                  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
                 cdev->private->flags.doverify)
                ret = -EBUSY;
        else
                /* 0x00D9C4C3 == ebcdic "RDC" */
                ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3);

        /* Restore interrupt handler. */
        cdev->handler = handler;
        spin_unlock_irq(&sch->lock);

        clear_normalized_cda (rdc_ccw);
        kfree(rdc_ccw);

        return ret;
}
392
/*
 * Read Configuration data.
 *
 * On success, *buffer points to a freshly allocated buffer of *length
 * bytes holding the RCD data; ownership passes to the caller, who must
 * kfree() it. On failure, *buffer is set to NULL and *length to 0.
 *
 * Returns 0 on success, -EOPNOTSUPP if the device advertises no RCD
 * command in its extended SenseID data, or another negative error code.
 * Called for an online device, lock not held; may sleep.
 */
int
read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
{
        void (*handler)(struct ccw_device *, unsigned long, struct irb *);
        struct subchannel *sch;
        struct ciw *ciw;
        char *rcd_buf;
        int ret;
        struct ccw1 *rcd_ccw;

        if (!cdev)
                return -ENODEV;
        if (!buffer || !length)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT (4, "rdconf");
        CIO_TRACE_EVENT (4, sch->dev.bus_id);

        /*
         * scan for RCD command in extended SenseID data
         */
        ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
        if (!ciw || ciw->cmd == 0)
                return -EOPNOTSUPP;

        /* CCW and data buffer must live in DMA-capable memory. */
        rcd_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
        if (!rcd_ccw)
                return -ENOMEM;
        memset(rcd_ccw, 0, sizeof(struct ccw1));
        rcd_buf = kmalloc(ciw->count, GFP_KERNEL | GFP_DMA);
        if (!rcd_buf) {
                kfree(rcd_ccw);
                return -ENOMEM;
        }
        memset (rcd_buf, 0, ciw->count);
        rcd_ccw->cmd_code = ciw->cmd;
        rcd_ccw->cda = (__u32) __pa (rcd_buf);
        rcd_ccw->count = ciw->count;
        rcd_ccw->flags = CCW_FLAG_SLI;

        spin_lock_irq(&sch->lock);
        /* Save interrupt handler. */
        handler = cdev->handler;
        /* Temporarily install own handler. */
        cdev->handler = ccw_device_wake_up;
        if (cdev->private->state != DEV_STATE_ONLINE)
                ret = -ENODEV;
        else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
                  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
                 cdev->private->flags.doverify)
                ret = -EBUSY;
        else
                /* 0x00D9C3C4 == ebcdic "RCD" */
                ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4);

        /* Restore interrupt handler. */
        cdev->handler = handler;
        spin_unlock_irq(&sch->lock);

        /*
         * on success we update the user input parms
         */
        if (ret) {
                kfree (rcd_buf);
                *buffer = NULL;
                *length = 0;
        } else {
                *length = ciw->count;
                *buffer = rcd_buf;
        }
        kfree(rcd_ccw);

        return ret;
}
471
472 /*
473  * Try to break the lock on a boxed device.
474  */
475 int
476 ccw_device_stlck(struct ccw_device *cdev)
477 {
478         void *buf, *buf2;
479         unsigned long flags;
480         struct subchannel *sch;
481         int ret;
482
483         if (!cdev)
484                 return -ENODEV;
485
486         if (cdev->drv && !cdev->private->options.force)
487                 return -EINVAL;
488
489         sch = to_subchannel(cdev->dev.parent);
490         
491         CIO_TRACE_EVENT(2, "stl lock");
492         CIO_TRACE_EVENT(2, cdev->dev.bus_id);
493
494         buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
495         if (!buf)
496                 return -ENOMEM;
497         buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
498         if (!buf2) {
499                 kfree(buf);
500                 return -ENOMEM;
501         }
502         spin_lock_irqsave(&sch->lock, flags);
503         ret = cio_enable_subchannel(sch, 3);
504         if (ret)
505                 goto out_unlock;
506         /*
507          * Setup ccw. We chain an unconditional reserve and a release so we
508          * only break the lock.
509          */
510         cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
511         cdev->private->iccws[0].cda = (__u32) __pa(buf);
512         cdev->private->iccws[0].count = 32;
513         cdev->private->iccws[0].flags = CCW_FLAG_CC;
514         cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
515         cdev->private->iccws[1].cda = (__u32) __pa(buf2);
516         cdev->private->iccws[1].count = 32;
517         cdev->private->iccws[1].flags = 0;
518         ret = cio_start(sch, cdev->private->iccws, 0);
519         if (ret) {
520                 cio_disable_subchannel(sch); //FIXME: return code?
521                 goto out_unlock;
522         }
523         cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
524         spin_unlock_irqrestore(&sch->lock, flags);
525         wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
526         spin_lock_irqsave(&sch->lock, flags);
527         cio_disable_subchannel(sch); //FIXME: return code?
528         if ((cdev->private->irb.scsw.dstat !=
529              (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
530             (cdev->private->irb.scsw.cstat != 0))
531                 ret = -EIO;
532         /* Clear irb. */
533         memset(&cdev->private->irb, 0, sizeof(struct irb));
534 out_unlock:
535         if (buf)
536                 kfree(buf);
537         if (buf2)
538                 kfree(buf2);
539         spin_unlock_irqrestore(&sch->lock, flags);
540         return ret;
541 }
542
543 // FIXME: these have to go:
544
/*
 * Return the subchannel number the device is attached to.
 * Deprecated compatibility accessor (exported for legacy users).
 */
int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
        return cdev->private->irq;
}
550
/*
 * Return the device number of the device.
 * Deprecated compatibility accessor (exported for legacy users).
 */
int
_ccw_device_get_device_number(struct ccw_device *cdev)
{
        return cdev->private->devno;
}
556
557
558 MODULE_LICENSE("GPL");
559 EXPORT_SYMBOL(ccw_device_set_options);
560 EXPORT_SYMBOL(ccw_device_clear);
561 EXPORT_SYMBOL(ccw_device_halt);
562 EXPORT_SYMBOL(ccw_device_resume);
563 EXPORT_SYMBOL(ccw_device_start_timeout);
564 EXPORT_SYMBOL(ccw_device_start);
565 EXPORT_SYMBOL(ccw_device_start_timeout_key);
566 EXPORT_SYMBOL(ccw_device_start_key);
567 EXPORT_SYMBOL(ccw_device_get_ciw);
568 EXPORT_SYMBOL(ccw_device_get_path_mask);
569 EXPORT_SYMBOL(read_conf_data);
570 EXPORT_SYMBOL(read_dev_chars);
571 EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
572 EXPORT_SYMBOL(_ccw_device_get_device_number);