patch-2_6_7-vs1_9_1_12
[linux-2.6.git] / drivers / s390 / cio / css.c
1 /*
2  *  drivers/s390/cio/css.c
3  *  driver for channel subsystem
4  *   $Revision: 1.77 $
5  *
6  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7  *                       IBM Corporation
8  *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
9  *               Cornelia Huck (cohuck@de.ibm.com)
10  */
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/device.h>
14 #include <linux/slab.h>
15 #include <linux/errno.h>
16 #include <linux/list.h>
17
18 #include "css.h"
19 #include "cio.h"
20 #include "cio_debug.h"
21 #include "ioasm.h"
22
/* Highest subchannel number seen so far (raised in css_alloc_subchannel). */
unsigned int highest_subchannel;
/* Non-zero when a full rescan of all subchannels is pending (slow path). */
int need_rescan = 0;
/* Non-zero once init_channel_subsystem() has registered the css bus. */
int css_init_done = 0;
26
/* Root device ("css0") that all subchannel devices hang off of. */
struct device css_bus_device = {
	.bus_id = "css0",
};
30
31 static struct subchannel *
32 css_alloc_subchannel(int irq)
33 {
34         struct subchannel *sch;
35         int ret;
36
37         sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
38         if (sch == NULL)
39                 return ERR_PTR(-ENOMEM);
40         ret = cio_validate_subchannel (sch, irq);
41         if (ret < 0) {
42                 kfree(sch);
43                 return ERR_PTR(ret);
44         }
45         if (irq > highest_subchannel)
46                 highest_subchannel = irq;
47
48         if (sch->st != SUBCHANNEL_TYPE_IO) {
49                 /* For now we ignore all non-io subchannels. */
50                 kfree(sch);
51                 return ERR_PTR(-EINVAL);
52         }
53
54         /* 
55          * Set intparm to subchannel address.
56          * This is fine even on 64bit since the subchannel is always located
57          * under 2G.
58          */
59         sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
60         ret = cio_modify(sch);
61         if (ret) {
62                 kfree(sch);
63                 return ERR_PTR(ret);
64         }
65         return sch;
66 }
67
68 static void
69 css_free_subchannel(struct subchannel *sch)
70 {
71         if (sch) {
72                 /* Reset intparm to zeroes. */
73                 sch->schib.pmcw.intparm = 0;
74                 cio_modify(sch);
75                 kfree(sch);
76         }
77         
78 }
79
80 static void
81 css_subchannel_release(struct device *dev)
82 {
83         struct subchannel *sch;
84
85         sch = to_subchannel(dev);
86         if (!cio_is_console(sch->irq))
87                 kfree(sch);
88 }
89
/* Defined elsewhere in the cio code; fetches subchannel description data. */
extern int css_get_ssd_info(struct subchannel *sch);

/*
 * Hook a subchannel into the driver core: set up parent/bus/release and
 * register the embedded struct device.  Returns 0 or the negative error
 * from device_register().
 */
static int
css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &css_bus_device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;

	/* make it known to the system */
	ret = device_register(&sch->dev);
	if (ret)
		/*
		 * NOTE(review): driver-core convention after a failed
		 * device_register() is put_device(), but the caller
		 * (css_probe_device) frees via css_free_subchannel/kfree
		 * instead — verify this is safe for this kernel version.
		 */
		printk (KERN_WARNING "%s: could not register %s\n",
			__func__, sch->dev.bus_id);
	else
		css_get_ssd_info(sch);
	return ret;
}
111
/*
 * Allocate and register the subchannel with number @irq.
 * Returns 0 on success or a negative error code.
 */
int
css_probe_device(int irq)
{
	struct subchannel *new_sch;
	int rc;

	new_sch = css_alloc_subchannel(irq);
	if (IS_ERR(new_sch))
		return PTR_ERR(new_sch);

	rc = css_register_subchannel(new_sch);
	if (rc != 0)
		css_free_subchannel(new_sch);
	return rc;
}
126
/*
 * Find the registered subchannel with subchannel number @irq.
 *
 * Walks the css bus' device list under the subsystem rwsem.  On success
 * the matching subchannel is returned with an extra device reference
 * held (taken via get_device()); the caller must drop it with
 * put_device().  Returns NULL if no such subchannel is registered.
 */
struct subchannel *
get_subchannel_by_schid(int irq)
{
	struct subchannel *sch;
	struct list_head *entry;
	struct device *dev;

	if (!get_bus(&css_bus_type))
		return NULL;
	down_read(&css_bus_type.subsys.rwsem);
	sch = NULL;
	list_for_each(entry, &css_bus_type.devices.list) {
		dev = get_device(container_of(entry,
					      struct device, bus_list));
		if (!dev)
			continue;
		sch = to_subchannel(dev);
		if (sch->irq == irq)
			break;
		/* Not the subchannel we want: drop the reference again. */
		put_device(dev);
		sch = NULL;
	}
	up_read(&css_bus_type.subsys.rwsem);
	put_bus(&css_bus_type);

	return sch;
}
154
155 static inline int
156 css_get_subchannel_status(struct subchannel *sch, int schid)
157 {
158         struct schib schib;
159         int cc;
160
161         cc = stsch(schid, &schib);
162         if (cc)
163                 return CIO_GONE;
164         if (!schib.pmcw.dnv)
165                 return CIO_GONE;
166         if (sch && sch->schib.pmcw.dnv &&
167             (schib.pmcw.dev != sch->schib.pmcw.dev))
168                 return CIO_REVALIDATE;
169         if (sch && !sch->lpm)
170                 return CIO_NO_PATH;
171         return CIO_OPER;
172 }
173         
/*
 * Re-examine subchannel @irq after a machine check (fast path, slow == 0)
 * or from the slow path worker (slow == 1), and reconcile the driver core
 * view with the current hardware state.
 *
 * Returns 0 on success, -EAGAIN if the event must be reprocessed on the
 * slow path, or a negative error from css_probe_device().
 *
 * NOTE(review): the device reference taken by get_subchannel_by_schid()
 * appears not to be dropped in the CIO_OPER and disconnected
 * CIO_REVALIDATE branches — possible reference leak, verify.
 */
static int
css_evaluate_subchannel(int irq, int slow)
{
	int event, ret, disc;
	struct subchannel *sch;

	sch = get_subchannel_by_schid(irq);
	disc = sch ? device_is_disconnected(sch) : 0;
	if (disc && slow) {
		if (sch)
			put_device(&sch->dev);
		return 0; /* Already processed. */
	}
	if (!disc && !slow) {
		if (sch)
			put_device(&sch->dev);
		return -EAGAIN; /* Will be done on the slow path. */
	}
	event = css_get_subchannel_status(sch, irq);
	CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n",
		      irq, event, sch?(disc?"disconnected":"normal"):"unknown",
		      slow?"slow":"fast");
	switch (event) {
	case CIO_NO_PATH:
	case CIO_GONE:
		if (!sch) {
			/* Never used this subchannel. Ignore. */
			ret = 0;
			break;
		}
		/* Give the driver a chance to keep the (disconnected) device. */
		if (sch->driver && sch->driver->notify &&
		    sch->driver->notify(&sch->dev, event)) {
			cio_disable_subchannel(sch);
			device_set_disconnected(sch);
			ret = 0;
			break;
		}
		/*
		 * Unregister subchannel.
		 * The device will be killed automatically.
		 */
		cio_disable_subchannel(sch);
		device_unregister(&sch->dev);
		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		put_device(&sch->dev);
		ret = 0;
		break;
	case CIO_REVALIDATE:
		/*
		 * Revalidation machine check. Sick.
		 * We don't notify the driver since we have to throw the device
		 * away in any case.
		 */
		if (!disc) {
			device_unregister(&sch->dev);
			/* Reset intparm to zeroes. */
			sch->schib.pmcw.intparm = 0;
			cio_modify(sch);
			put_device(&sch->dev);
			ret = css_probe_device(irq);
		} else {
			/*
			 * We can't immediately deregister the disconnected
			 * device since it might block.
			 */
			device_trigger_reprobe(sch);
			ret = 0;
		}
		break;
	case CIO_OPER:
		if (disc)
			/* Get device operational again. */
			device_trigger_reprobe(sch);
		/* A subchannel we did not know about yet: register it. */
		ret = sch ? 0 : css_probe_device(irq);
		break;
	default:
		BUG();
		ret = 0;
	}
	return ret;
}
257
258 static void
259 css_rescan_devices(void)
260 {
261         int irq, ret;
262
263         for (irq = 0; irq <= __MAX_SUBCHANNELS; irq++) {
264                 ret = css_evaluate_subchannel(irq, 1);
265                 /* No more memory. It doesn't make sense to continue. No
266                  * panic because this can happen in midflight and just
267                  * because we can't use a new device is no reason to crash
268                  * the system. */
269                 if (ret == -ENOMEM)
270                         break;
271                 /* -ENXIO indicates that there are no more subchannels. */
272                 if (ret == -ENXIO)
273                         break;
274         }
275 }
276
/* Queue element for a subchannel deferred to slow path evaluation. */
struct slow_subchannel {
	struct list_head slow_list;	/* linkage on slow_subchannels_head */
	unsigned long schid;		/* subchannel number to evaluate */
};
281
/* List of deferred subchannels; protected by slow_subchannel_lock. */
static LIST_HEAD(slow_subchannels_head);
static spinlock_t slow_subchannel_lock = SPIN_LOCK_UNLOCKED;
284
285 static void
286 css_trigger_slow_path(void)
287 {
288         CIO_TRACE_EVENT(4, "slowpath");
289
290         if (need_rescan) {
291                 need_rescan = 0;
292                 css_rescan_devices();
293                 return;
294         }
295
296         spin_lock_irq(&slow_subchannel_lock);
297         while (!list_empty(&slow_subchannels_head)) {
298                 struct slow_subchannel *slow_sch =
299                         list_entry(slow_subchannels_head.next,
300                                    struct slow_subchannel, slow_list);
301
302                 list_del_init(slow_subchannels_head.next);
303                 spin_unlock_irq(&slow_subchannel_lock);
304                 css_evaluate_subchannel(slow_sch->schid, 1);
305                 spin_lock_irq(&slow_subchannel_lock);
306                 kfree(slow_sch);
307         }
308         spin_unlock_irq(&slow_subchannel_lock);
309 }
310
typedef void (*workfunc)(void *);
/*
 * NOTE(review): css_trigger_slow_path() takes no argument; invoking it
 * through a void (*)(void *) cast is technically undefined behavior in
 * C, though it works on this ABI — a wrapper taking void * would be
 * cleaner.  Verify before changing.
 */
DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
/* Dedicated workqueue the slow path work is queued on (set up elsewhere). */
struct workqueue_struct *slow_path_wq;
314
/*
 * Rescan for new devices. FIXME: This is slow.
 * This function is called when we have lost CRWs due to overflows and we have
 * to do subchannel housekeeping.
 */
void
css_reiterate_subchannels(void)
{
	/* Drop individually queued subchannels first: the full rescan
	 * requested below covers all of them anyway. */
	css_clear_subchannel_slow_list();
	need_rescan = 1;
}
326
327 /*
328  * Called from the machine check handler for subchannel report words.
329  */
330 int
331 css_process_crw(int irq)
332 {
333         int ret;
334
335         CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq);
336
337         if (need_rescan)
338                 /* We need to iterate all subchannels anyway. */
339                 return -EAGAIN;
340         /* 
341          * Since we are always presented with IPI in the CRW, we have to
342          * use stsch() to find out if the subchannel in question has come
343          * or gone.
344          */
345         ret = css_evaluate_subchannel(irq, 0);
346         if (ret == -EAGAIN) {
347                 if (css_enqueue_subchannel_slow(irq)) {
348                         css_clear_subchannel_slow_list();
349                         need_rescan = 1;
350                 }
351         }
352         return ret;
353 }
354
/*
 * some of the initialization has already been done from init_IRQ(),
 * here we do the rest now that the driver core is running.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init
init_channel_subsystem (void)
{
	int ret, irq;

	if ((ret = bus_register(&css_bus_type)))
		goto out;
	if ((ret = device_register (&css_bus_device)))
		goto out_bus;

	css_init_done = 1;

	/* NOTE(review): presumably enables I/O-interruption/CRW reporting
	 * via control register 6, bit 28 — confirm against the s390
	 * architecture documentation before relying on this. */
	ctl_set_bit(6, 28);

	/* Probe every possible subchannel number once at boot. */
	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
		struct subchannel *sch;

		/* The console subchannel was allocated statically early on. */
		if (cio_is_console(irq))
			sch = cio_get_console_subchannel();
		else {
			sch = css_alloc_subchannel(irq);
			if (IS_ERR(sch))
				ret = PTR_ERR(sch);
			else
				ret = 0;
			if (ret == -ENOMEM)
				panic("Out of memory in "
				      "init_channel_subsystem\n");
			/* -ENXIO: no more subchannels. */
			if (ret == -ENXIO)
				break;
			/* Any other error: skip this subchannel number. */
			if (ret)
				continue;
		}
		/*
		 * We register ALL valid subchannels in ioinfo, even those
		 * that have been present before init_channel_subsystem.
		 * These subchannels can't have been registered yet (kmalloc
		 * not working) so we do it now. This is true e.g. for the
		 * console subchannel.
		 */
		css_register_subchannel(sch);
	}
	return 0;

out_bus:
	bus_unregister(&css_bus_type);
out:
	return ret;
}
411
412 /*
413  * find a driver for a subchannel. They identify by the subchannel
414  * type with the exception that the console subchannel driver has its own
415  * subchannel type although the device is an i/o subchannel
416  */
417 static int
418 css_bus_match (struct device *dev, struct device_driver *drv)
419 {
420         struct subchannel *sch = container_of (dev, struct subchannel, dev);
421         struct css_driver *driver = container_of (drv, struct css_driver, drv);
422
423         if (sch->st == driver->subchannel_type)
424                 return 1;
425
426         return 0;
427 }
428
/* The channel subsystem bus; subchannel devices and drivers attach here. */
struct bus_type css_bus_type = {
	.name  = "css",
	.match = &css_bus_match,
};

/* Run after the driver core is up, before device initcalls. */
subsys_initcall(init_channel_subsystem);
435
/*
 * Register root devices for some drivers. The release function must not be
 * in the device drivers, so we do it here.
 */
static void
s390_root_dev_release(struct device *dev)
{
	/* Devices from s390_root_dev_register() are plain kmalloc'ed. */
	kfree(dev);
}
445
446 struct device *
447 s390_root_dev_register(const char *name)
448 {
449         struct device *dev;
450         int ret;
451
452         if (!strlen(name))
453                 return ERR_PTR(-EINVAL);
454         dev = kmalloc(sizeof(struct device), GFP_KERNEL);
455         if (!dev)
456                 return ERR_PTR(-ENOMEM);
457         memset(dev, 0, sizeof(struct device));
458         strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE));
459         dev->release = s390_root_dev_release;
460         ret = device_register(dev);
461         if (ret) {
462                 kfree(dev);
463                 return ERR_PTR(ret);
464         }
465         return dev;
466 }
467
468 void
469 s390_root_dev_unregister(struct device *dev)
470 {
471         if (dev)
472                 device_unregister(dev);
473 }
474
475 int
476 css_enqueue_subchannel_slow(unsigned long schid)
477 {
478         struct slow_subchannel *new_slow_sch;
479         unsigned long flags;
480
481         new_slow_sch = kmalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
482         if (!new_slow_sch)
483                 return -ENOMEM;
484         new_slow_sch->schid = schid;
485         spin_lock_irqsave(&slow_subchannel_lock, flags);
486         list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
487         spin_unlock_irqrestore(&slow_subchannel_lock, flags);
488         return 0;
489 }
490
491 void
492 css_clear_subchannel_slow_list(void)
493 {
494         unsigned long flags;
495
496         spin_lock_irqsave(&slow_subchannel_lock, flags);
497         while (!list_empty(&slow_subchannels_head)) {
498                 struct slow_subchannel *slow_sch =
499                         list_entry(slow_subchannels_head.next,
500                                    struct slow_subchannel, slow_list);
501
502                 list_del_init(slow_subchannels_head.next);
503                 kfree(slow_sch);
504         }
505         spin_unlock_irqrestore(&slow_subchannel_lock, flags);
506 }
507
508
509
/*
 * Return non-zero if subchannels are queued for slow path processing.
 * NOTE(review): reads the list head without taking slow_subchannel_lock;
 * presumably a racy snapshot is acceptable to callers — verify.
 */
int
css_slow_subchannels_exist(void)
{
	return (!list_empty(&slow_subchannels_head));
}
515
MODULE_LICENSE("GPL");
/* Exported for the subchannel device drivers (e.g. the ccw layer). */
EXPORT_SYMBOL(css_bus_type);
EXPORT_SYMBOL(s390_root_dev_register);
EXPORT_SYMBOL(s390_root_dev_unregister);