/*
 *  drivers/s390/cio/device_ops.c
 *
 *   $Revision: 1.50 $
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                       IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Cornelia Huck (cohuck@de.ibm.com)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>

#include <asm/ccwdev.h>
#include <asm/idals.h>
#include <asm/qdio.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "qdio.h"

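/*
 * Set the driver-controlled options (early notification, report-all,
 * path grouping, forced online) for a ccw device.
 */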
int
ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
        /*
         * The flag usage is mutually exclusive ...
         */
        if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
            (flags & CCWDEV_REPORT_ALL))
                return -EINVAL;
        cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
        cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
        cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
        cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
        return 0;
}

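/*
 * Terminate the current I/O on the device with a clear subchannel and
 * remember intparm for the resulting interrupt.
 */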
int
ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE &&
            cdev->private->state != DEV_STATE_W4SENSE)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        ret = cio_clear(sch);
        if (ret == 0)
                cdev->private->intparm = intparm;
        return ret;
}

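/*
 * Start a channel program on the device. cpa points to the first ccw,
 * lpm is the logical path mask, and intparm is handed back to the
 * driver's interrupt handler.
 */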
int
ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
                 unsigned long intparm, __u8 lpm, unsigned long flags)
{
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE ||
            ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
             !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
            cdev->private->flags.doverify)
                return -EBUSY;
        ret = cio_set_options (sch, flags);
        if (ret)
                return ret;
        ret = cio_start (sch, cpa, lpm);
        if (ret == 0)
                cdev->private->intparm = intparm;
        return ret;
}

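/*
 * Same as ccw_device_start(), but arm a timeout of 'expires' first;
 * the timeout is cancelled again if the start fails.
 */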
int
ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
                         unsigned long intparm, __u8 lpm, unsigned long flags,
                         int expires)
{
        int ret;

        if (!cdev)
                return -ENODEV;
        ccw_device_set_timeout(cdev, expires);
        ret = ccw_device_start(cdev, cpa, intparm, lpm, flags);
        if (ret != 0)
                ccw_device_set_timeout(cdev, 0);
        return ret;
}

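/*
 * Halt the current I/O on the device's subchannel and remember
 * intparm for the resulting interrupt.
 */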
int
ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE &&
            cdev->private->state != DEV_STATE_W4SENSE)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        ret = cio_halt(sch);
        if (ret == 0)
                cdev->private->intparm = intparm;
        return ret;
}

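/*
 * Resume channel program execution on a suspended subchannel.
 */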
int
ccw_device_resume(struct ccw_device *cdev)
{
        struct subchannel *sch;

        if (!cdev)
                return -ENODEV;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE ||
            !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
                return -EINVAL;
        return cio_resume(sch);
}

/*
 * Pass interrupt to device driver.
 */
int
ccw_device_call_handler(struct ccw_device *cdev)
{
        struct subchannel *sch;
        unsigned int stctl;
        int ending_status;

        sch = to_subchannel(cdev->dev.parent);

        /*
         * We call the device driver's interrupt handler if
         *  - we received ending status,
         *  - the driver requested to see all interrupts,
         *  - we received an intermediate status,
         *  - fast notification was requested (primary status), or
         *  - we received an unsolicited interrupt.
         */
        stctl = cdev->private->irb.scsw.stctl;
        ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
                (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
                (stctl == SCSW_STCTL_STATUS_PEND);
        if (!ending_status &&
            !cdev->private->options.repall &&
            !(stctl & SCSW_STCTL_INTER_STATUS) &&
            !(cdev->private->options.fast &&
              (stctl & SCSW_STCTL_PRIM_STATUS)))
                return 0;

        /*
         * Now we are ready to call the device driver interrupt handler.
         */
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              &cdev->private->irb);

        /*
         * Clear the old and now useless interrupt response block.
         */
        memset(&cdev->private->irb, 0, sizeof(struct irb));

        return 1;
}

/*
 * Search for CIW command in extended sense data.
 */
struct ciw *
ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
        int ciw_cnt;

        if (cdev->private->flags.esid == 0)
                return NULL;
        for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
                if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
                        return cdev->private->senseid.ciw + ciw_cnt;
        return NULL;
}

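/*
 * Return the mask of paths currently usable for the device, or 0 if
 * no subchannel is found.
 */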
__u8
ccw_device_get_path_mask(struct ccw_device *cdev)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return 0;
        else
                return sch->vpm;
}

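/*
 * Interrupt handler that is temporarily installed by read_dev_chars()
 * and read_conf_data(). It maps the interrupt result onto intparm
 * (0, -EAGAIN or -EIO) and wakes up the waiting request.
 */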
static void
ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
{
        if (!ip)
                /* unsolicited interrupt */
                return;

        /* Abuse intparm for error reporting. */
        if (IS_ERR(irb))
                cdev->private->intparm = -EIO;
        else if ((irb->scsw.dstat !=
                  (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
                 (irb->scsw.cstat != 0)) {
                /*
                 * We didn't get channel end / device end. Check if path
                 * verification has been started; we can retry after it has
                 * finished. We also retry unit checks except for command reject
                 * or intervention required.
                 */
                if (cdev->private->flags.doverify ||
                    cdev->private->state == DEV_STATE_VERIFY)
                        cdev->private->intparm = -EAGAIN;
                else if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
                         !(irb->ecw[0] &
                           (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
                        cdev->private->intparm = -EAGAIN;
                else
                        cdev->private->intparm = -EIO;
        } else
                cdev->private->intparm = 0;
        wake_up(&cdev->private->wait_q);
}

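/*
 * Start the given channel program on the subchannel and wait for
 * ccw_device_wake_up() to report the result via intparm, retrying
 * while the subchannel is busy or a retryable error was reported.
 * Must be called with the subchannel lock held.
 */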
static inline int
__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic)
{
        int ret;
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        do {
                ret = cio_start (sch, ccw, 0);
                if ((ret == -EBUSY) || (ret == -EACCES)) {
                        /* Try again later. */
                        spin_unlock_irq(&sch->lock);
                        msleep(10);
                        spin_lock_irq(&sch->lock);
                        continue;
                }
                if (ret != 0)
                        /* Non-retryable error. */
                        break;
                /* Wait for end of request. */
                cdev->private->intparm = magic;
                spin_unlock_irq(&sch->lock);
                wait_event(cdev->private->wait_q,
                           (cdev->private->intparm == -EIO) ||
                           (cdev->private->intparm == -EAGAIN) ||
                           (cdev->private->intparm == 0));
                spin_lock_irq(&sch->lock);
                /* Check at least for channel end / device end */
                if (cdev->private->intparm == -EIO) {
                        /* Non-retryable error. */
                        ret = -EIO;
                        break;
                }
                if (cdev->private->intparm == 0)
                        /* Success. */
                        break;
                /* Try again later. */
                spin_unlock_irq(&sch->lock);
                msleep(10);
                spin_lock_irq(&sch->lock);
        } while (1);

        return ret;
}

/**
 * read_dev_chars() - read device characteristics
 * @param cdev   target ccw device
 * @param buffer pointer to buffer for rdc data
 * @param length size of rdc data
 * @returns 0 for success, negative error value on failure
 *
 * Context:
 *   called for online device, lock not held
 **/
int
read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
{
        void (*handler)(struct ccw_device *, unsigned long, struct irb *);
        struct subchannel *sch;
        int ret;
        struct ccw1 *rdc_ccw;

        if (!cdev)
                return -ENODEV;
        if (!buffer || !length)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT (4, "rddevch");
        CIO_TRACE_EVENT (4, sch->dev.bus_id);

        rdc_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
        if (!rdc_ccw)
                return -ENOMEM;
        memset(rdc_ccw, 0, sizeof(struct ccw1));
        rdc_ccw->cmd_code = CCW_CMD_RDC;
        rdc_ccw->count = length;
        rdc_ccw->flags = CCW_FLAG_SLI;
        ret = set_normalized_cda (rdc_ccw, (*buffer));
        if (ret != 0) {
                kfree(rdc_ccw);
                return ret;
        }

        spin_lock_irq(&sch->lock);
        /* Save interrupt handler. */
        handler = cdev->handler;
        /* Temporarily install own handler. */
        cdev->handler = ccw_device_wake_up;
        if (cdev->private->state != DEV_STATE_ONLINE)
                ret = -ENODEV;
        else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
                  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
                 cdev->private->flags.doverify)
                ret = -EBUSY;
        else
                /* 0x00D9C4C3 == ebcdic "RDC" */
                ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3);

        /* Restore interrupt handler. */
        cdev->handler = handler;
        spin_unlock_irq(&sch->lock);

        clear_normalized_cda (rdc_ccw);
        kfree(rdc_ccw);

        return ret;
}

/*
 *  Read configuration data.
 */
int
read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
{
        void (*handler)(struct ccw_device *, unsigned long, struct irb *);
        struct subchannel *sch;
        struct ciw *ciw;
        char *rcd_buf;
        int ret;
        struct ccw1 *rcd_ccw;

        if (!cdev)
                return -ENODEV;
        if (!buffer || !length)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT (4, "rdconf");
        CIO_TRACE_EVENT (4, sch->dev.bus_id);

        /*
         * Scan for the RCD command in the extended SenseID data.
         */
        ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
        if (!ciw || ciw->cmd == 0)
                return -EOPNOTSUPP;

        rcd_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
        if (!rcd_ccw)
                return -ENOMEM;
        memset(rcd_ccw, 0, sizeof(struct ccw1));
        rcd_buf = kmalloc(ciw->count, GFP_KERNEL | GFP_DMA);
        if (!rcd_buf) {
                kfree(rcd_ccw);
                return -ENOMEM;
        }
        memset (rcd_buf, 0, ciw->count);
        rcd_ccw->cmd_code = ciw->cmd;
        rcd_ccw->cda = (__u32) __pa (rcd_buf);
        rcd_ccw->count = ciw->count;
        rcd_ccw->flags = CCW_FLAG_SLI;

        spin_lock_irq(&sch->lock);
        /* Save interrupt handler. */
        handler = cdev->handler;
        /* Temporarily install own handler. */
        cdev->handler = ccw_device_wake_up;
        if (cdev->private->state != DEV_STATE_ONLINE)
                ret = -ENODEV;
        else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
                  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
                 cdev->private->flags.doverify)
                ret = -EBUSY;
        else
                /* 0x00D9C3C4 == ebcdic "RCD" */
                ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4);

        /* Restore interrupt handler. */
        cdev->handler = handler;
        spin_unlock_irq(&sch->lock);

        /*
         * On success we hand the buffer and its length back to the caller.
         */
        if (ret) {
                kfree (rcd_buf);
                *buffer = NULL;
                *length = 0;
        } else {
                *length = ciw->count;
                *buffer = rcd_buf;
        }
        kfree(rcd_ccw);

        return ret;
}

/*
 * Try to break the lock on a boxed device.
 */
int
ccw_device_stlck(struct ccw_device *cdev)
{
        void *buf, *buf2;
        unsigned long flags;
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;

        if (cdev->drv && !cdev->private->options.force)
                return -EINVAL;

        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT(2, "stl lock");
        CIO_TRACE_EVENT(2, cdev->dev.bus_id);

        buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
        if (!buf2) {
                kfree(buf);
                return -ENOMEM;
        }
        spin_lock_irqsave(&sch->lock, flags);
        ret = cio_enable_subchannel(sch, 3);
        if (ret)
                goto out_unlock;
        /*
         * Setup ccw. We chain an unconditional reserve and a release so we
         * only break the lock.
         */
        cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
        cdev->private->iccws[0].cda = (__u32) __pa(buf);
        cdev->private->iccws[0].count = 32;
        cdev->private->iccws[0].flags = CCW_FLAG_CC;
        cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
        cdev->private->iccws[1].cda = (__u32) __pa(buf2);
        cdev->private->iccws[1].count = 32;
        cdev->private->iccws[1].flags = 0;
        ret = cio_start(sch, cdev->private->iccws, 0);
        if (ret) {
                cio_disable_subchannel(sch); //FIXME: return code?
                goto out_unlock;
        }
        cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
        spin_unlock_irqrestore(&sch->lock, flags);
        wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
        spin_lock_irqsave(&sch->lock, flags);
        cio_disable_subchannel(sch); //FIXME: return code?
        if ((cdev->private->irb.scsw.dstat !=
             (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
            (cdev->private->irb.scsw.cstat != 0))
                ret = -EIO;
        /* Clear irb. */
        memset(&cdev->private->irb, 0, sizeof(struct irb));
out_unlock:
        kfree(buf);
        kfree(buf2);
        spin_unlock_irqrestore(&sch->lock, flags);
        return ret;
}

// FIXME: these have to go:

int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
        return cdev->private->irq;
}

int
_ccw_device_get_device_number(struct ccw_device *cdev)
{
        return cdev->private->devno;
}

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL(read_conf_data);
EXPORT_SYMBOL(read_dev_chars);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL(_ccw_device_get_device_number);