X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fs390%2Fcio%2Fcss.c;h=9d6c02446863cffa2eb1063962781248cae5f574;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=dba1bfb9aab2692f5159a77b282f11df6df00eed;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git

diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index dba1bfb9a..9d6c02446 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,12 +1,11 @@
 /*
  * drivers/s390/cio/css.c
  * driver for channel subsystem
- * $Revision: 1.80 $
  *
  * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
  *                    IBM Corporation
  *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
- *               Cornelia Huck (cohuck@de.ibm.com)
+ *               Cornelia Huck (cornelia.huck@de.ibm.com)
  */
 #include
 #include
@@ -20,20 +19,38 @@
 #include "cio_debug.h"
 #include "ioasm.h"
 #include "chsc.h"
+#include "device.h"
 
-unsigned int highest_subchannel;
 int need_rescan = 0;
 int css_init_done = 0;
+static int need_reprobe = 0;
+static int max_ssid = 0;
+
+struct channel_subsystem *css[__MAX_CSSID + 1];
 
-struct pgid global_pgid;
 int css_characteristics_avail = 0;
 
-struct device css_bus_device = {
-	.bus_id = "css0",
-};
+inline int
+for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
+{
+	struct subchannel_id schid;
+	int ret;
+
+	init_subchannel_id(&schid);
+	ret = -ENODEV;
+	do {
+		do {
+			ret = fn(schid, data);
+			if (ret)
+				break;
+		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
+		schid.sch_no = 0;
+	} while (schid.ssid++ < max_ssid);
+	return ret;
+}
 
 static struct subchannel *
-css_alloc_subchannel(int irq)
+css_alloc_subchannel(struct subchannel_id schid)
 {
 	struct subchannel *sch;
 	int ret;
@@ -41,13 +58,11 @@ css_alloc_subchannel(int irq)
 	sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
 	if (sch == NULL)
 		return ERR_PTR(-ENOMEM);
-	ret = cio_validate_subchannel (sch, irq);
+	ret = cio_validate_subchannel (sch, schid);
 	if (ret < 0) {
 		kfree(sch);
 		return ERR_PTR(ret);
 	}
-	if (irq > highest_subchannel)
-		highest_subchannel = irq;
 
 	if (sch->st != SUBCHANNEL_TYPE_IO) {
 		/* For now we ignore all non-io subchannels. */
@@ -76,9 +91,9 @@ css_free_subchannel(struct subchannel *sch)
 	/* Reset intparm to zeroes. */
 	sch->schib.pmcw.intparm = 0;
 	cio_modify(sch);
+	kfree(sch->lock);
 	kfree(sch);
 	}
-
 }
 
 static void
@@ -87,39 +102,62 @@ css_subchannel_release(struct device *dev)
 	struct subchannel *sch;
 
 	sch = to_subchannel(dev);
-	if (!cio_is_console(sch->irq))
+	if (!cio_is_console(sch->schid)) {
+		kfree(sch->lock);
 		kfree(sch);
+	}
 }
 
 extern int css_get_ssd_info(struct subchannel *sch);
+
+int css_sch_device_register(struct subchannel *sch)
+{
+	int ret;
+
+	mutex_lock(&sch->reg_mutex);
+	ret = device_register(&sch->dev);
+	mutex_unlock(&sch->reg_mutex);
+	return ret;
+}
+
+void css_sch_device_unregister(struct subchannel *sch)
+{
+	mutex_lock(&sch->reg_mutex);
+	device_unregister(&sch->dev);
+	mutex_unlock(&sch->reg_mutex);
+}
+
 static int
 css_register_subchannel(struct subchannel *sch)
 {
 	int ret;
 
 	/* Initialize the subchannel structure */
-	sch->dev.parent = &css_bus_device;
+	sch->dev.parent = &css[0]->device;
 	sch->dev.bus = &css_bus_type;
 	sch->dev.release = &css_subchannel_release;
-
+	sch->dev.groups = subch_attr_groups;
+
+	css_get_ssd_info(sch);
+
 	/* make it known to the system */
-	ret = device_register(&sch->dev);
-	if (ret)
+	ret = css_sch_device_register(sch);
+	if (ret) {
 		printk (KERN_WARNING "%s: could not register %s\n",
 			__func__, sch->dev.bus_id);
-	else
-		css_get_ssd_info(sch);
+		return ret;
+	}
 	return ret;
 }
 
 int
-css_probe_device(int irq)
+css_probe_device(struct subchannel_id schid)
 {
 	int ret;
 	struct subchannel *sch;
 
-	sch = css_alloc_subchannel(irq);
+	sch = css_alloc_subchannel(schid);
 	if (IS_ERR(sch))
 		return PTR_ERR(sch);
 	ret = css_register_subchannel(sch);
@@ -128,186 +166,186 @@ css_probe_device(int irq)
 	return ret;
 }
 
-struct subchannel *
-get_subchannel_by_schid(int irq)
+static int
+check_subchannel(struct device * dev, void * data)
 {
 	struct subchannel *sch;
-	struct list_head *entry;
+	struct subchannel_id *schid = data;
+
+	sch = to_subchannel(dev);
+	return schid_equal(&sch->schid, schid);
+}
+
+struct subchannel *
+get_subchannel_by_schid(struct subchannel_id schid)
+{
 	struct device *dev;
 
-	if (!get_bus(&css_bus_type))
-		return NULL;
-	down_read(&css_bus_type.subsys.rwsem);
-	sch = NULL;
-	list_for_each(entry, &css_bus_type.devices.list) {
-		dev = get_device(container_of(entry,
-					      struct device, bus_list));
-		if (!dev)
-			continue;
-		sch = to_subchannel(dev);
-		if (sch->irq == irq)
-			break;
-		put_device(dev);
-		sch = NULL;
-	}
-	up_read(&css_bus_type.subsys.rwsem);
-	put_bus(&css_bus_type);
+	dev = bus_find_device(&css_bus_type, NULL,
+			      &schid, check_subchannel);
 
-	return sch;
+	return dev ? to_subchannel(dev) : NULL;
 }
 
-static inline int
-css_get_subchannel_status(struct subchannel *sch, int schid)
+static inline int css_get_subchannel_status(struct subchannel *sch)
 {
 	struct schib schib;
-	int cc;
 
-	cc = stsch(schid, &schib);
-	if (cc)
-		return CIO_GONE;
-	if (!schib.pmcw.dnv)
+	if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
 		return CIO_GONE;
-	if (sch && sch->schib.pmcw.dnv &&
-	    (schib.pmcw.dev != sch->schib.pmcw.dev))
+	if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
 		return CIO_REVALIDATE;
-	if (sch && !sch->lpm)
+	if (!sch->lpm)
 		return CIO_NO_PATH;
 	return CIO_OPER;
 }
-
-static int
-css_evaluate_subchannel(int irq, int slow)
+
+static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
 {
 	int event, ret, disc;
-	struct subchannel *sch;
+	unsigned long flags;
+	enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
 
-	sch = get_subchannel_by_schid(irq);
-	disc = sch ? device_is_disconnected(sch) : 0;
+	spin_lock_irqsave(sch->lock, flags);
+	disc = device_is_disconnected(sch);
 	if (disc && slow) {
-		if (sch)
-			put_device(&sch->dev);
-		return 0; /* Already processed. */
+		/* Disconnected devices are evaluated directly only.*/
+		spin_unlock_irqrestore(sch->lock, flags);
+		return 0;
 	}
+	/* No interrupt after machine check - kill pending timers. */
+	device_kill_pending_timer(sch);
 	if (!disc && !slow) {
-		if (sch)
-			put_device(&sch->dev);
-		return -EAGAIN; /* Will be done on the slow path. */
+		/* Non-disconnected devices are evaluated on the slow path. */
+		spin_unlock_irqrestore(sch->lock, flags);
+		return -EAGAIN;
 	}
-	event = css_get_subchannel_status(sch, irq);
-	CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n",
-		      irq, event, sch?(disc?"disconnected":"normal"):"unknown",
-		      slow?"slow":"fast");
+	event = css_get_subchannel_status(sch);
+	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
+		      sch->schid.ssid, sch->schid.sch_no, event,
+		      disc ? "disconnected" : "normal",
+		      slow ? "slow" : "fast");
+	/* Analyze subchannel status. */
+	action = NONE;
 	switch (event) {
 	case CIO_NO_PATH:
-	case CIO_GONE:
-		if (!sch) {
-			/* Never used this subchannel. Ignore. */
-			ret = 0;
-			break;
-		}
-		if (disc && (event == CIO_NO_PATH)) {
-			/*
-			 * Uargh, hack again. Because we don't get a machine
-			 * check on configure on, our path bookkeeping can
-			 * be out of date here (it's fine while we only do
-			 * logical varying or get chsc machine checks). We
-			 * need to force reprobing or we might miss devices
-			 * coming operational again. It won't do harm in real
-			 * no path situations.
-			 */
-			device_trigger_reprobe(sch);
-			ret = 0;
+		if (disc) {
+			/* Check if paths have become available. */
+			action = REPROBE;
 			break;
 		}
-		if (sch->driver && sch->driver->notify &&
-		    sch->driver->notify(&sch->dev, event)) {
-			cio_disable_subchannel(sch);
-			device_set_disconnected(sch);
-			ret = 0;
-			break;
-		}
-		/*
-		 * Unregister subchannel.
-		 * The device will be killed automatically.
-		 */
+		/* fall through */
+	case CIO_GONE:
+		/* Prevent unwanted effects when opening lock. */
 		cio_disable_subchannel(sch);
-		device_unregister(&sch->dev);
-		/* Reset intparm to zeroes. */
-		sch->schib.pmcw.intparm = 0;
-		cio_modify(sch);
-		put_device(&sch->dev);
-		ret = 0;
+		device_set_disconnected(sch);
+		/* Ask driver what to do with device. */
+		action = UNREGISTER;
+		if (sch->driver && sch->driver->notify) {
+			spin_unlock_irqrestore(sch->lock, flags);
+			ret = sch->driver->notify(&sch->dev, event);
+			spin_lock_irqsave(sch->lock, flags);
+			if (ret)
+				action = NONE;
+		}
 		break;
 	case CIO_REVALIDATE:
-		/*
-		 * Revalidation machine check. Sick.
-		 * We don't notify the driver since we have to throw the device
-		 * away in any case.
-		 */
-		if (!disc) {
-			device_unregister(&sch->dev);
-			/* Reset intparm to zeroes. */
-			sch->schib.pmcw.intparm = 0;
-			cio_modify(sch);
-			put_device(&sch->dev);
-			ret = css_probe_device(irq);
-		} else {
-			/*
-			 * We can't immediately deregister the disconnected
-			 * device since it might block.
-			 */
-			device_trigger_reprobe(sch);
-			ret = 0;
-		}
+		/* Device will be removed, so no notify necessary. */
+		if (disc)
+			/* Reprobe because immediate unregister might block. */
+			action = REPROBE;
+		else
+			action = UNREGISTER_PROBE;
 		break;
 	case CIO_OPER:
 		if (disc)
 			/* Get device operational again. */
-			device_trigger_reprobe(sch);
-		ret = sch ? 0 : css_probe_device(irq);
+			action = REPROBE;
+		break;
+	}
+	/* Perform action. */
+	ret = 0;
+	switch (action) {
+	case UNREGISTER:
+	case UNREGISTER_PROBE:
+		/* Unregister device (will use subchannel lock). */
+		spin_unlock_irqrestore(sch->lock, flags);
+		css_sch_device_unregister(sch);
+		spin_lock_irqsave(sch->lock, flags);
+
+		/* Reset intparm to zeroes. */
+		sch->schib.pmcw.intparm = 0;
+		cio_modify(sch);
+		break;
+	case REPROBE:
+		device_trigger_reprobe(sch);
 		break;
 	default:
-		BUG();
-		ret = 0;
+		break;
 	}
+	spin_unlock_irqrestore(sch->lock, flags);
+	/* Probe if necessary. */
+	if (action == UNREGISTER_PROBE)
+		ret = css_probe_device(sch->schid);
+
 	return ret;
 }
 
-static void
-css_rescan_devices(void)
+static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
 {
-	int irq, ret;
+	struct schib schib;
 
-	for (irq = 0; irq <= __MAX_SUBCHANNELS; irq++) {
-		ret = css_evaluate_subchannel(irq, 1);
-		/* No more memory. It doesn't make sense to continue. No
-		 * panic because this can happen in midflight and just
-		 * because we can't use a new device is no reason to crash
-		 * the system. */
-		if (ret == -ENOMEM)
-			break;
-		/* -ENXIO indicates that there are no more subchannels. */
-		if (ret == -ENXIO)
-			break;
+	if (!slow) {
+		/* Will be done on the slow path. */
+		return -EAGAIN;
+	}
+	if (stsch(schid, &schib) || !schib.pmcw.dnv) {
+		/* Unusable - ignore. */
+		return 0;
 	}
+	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
+		      "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);
+
+	return css_probe_device(schid);
+}
+
+static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
+{
+	struct subchannel *sch;
+	int ret;
+
+	sch = get_subchannel_by_schid(schid);
+	if (sch) {
+		ret = css_evaluate_known_subchannel(sch, slow);
+		put_device(&sch->dev);
+	} else
+		ret = css_evaluate_new_subchannel(schid, slow);
+
+	return ret;
+}
+
+static int
+css_rescan_devices(struct subchannel_id schid, void *data)
+{
+	return css_evaluate_subchannel(schid, 1);
 }
 
 struct slow_subchannel {
 	struct list_head slow_list;
-	unsigned long schid;
+	struct subchannel_id schid;
 };
 
 static LIST_HEAD(slow_subchannels_head);
-static spinlock_t slow_subchannel_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(slow_subchannel_lock);
 
 static void
-css_trigger_slow_path(void)
+css_trigger_slow_path(struct work_struct *unused)
 {
 	CIO_TRACE_EVENT(4, "slowpath");
 	if (need_rescan) {
 		need_rescan = 0;
-		css_rescan_devices();
+		for_each_subchannel(css_rescan_devices, NULL);
 		return;
 	}
 
@@ -326,10 +364,70 @@ css_trigger_slow_path(void)
 	spin_unlock_irq(&slow_subchannel_lock);
 }
 
-typedef void (*workfunc)(void *);
-DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
+DECLARE_WORK(slow_path_work, css_trigger_slow_path);
 struct workqueue_struct *slow_path_wq;
 
+/* Reprobe subchannel if unregistered. */
+static int reprobe_subchannel(struct subchannel_id schid, void *data)
+{
+	struct subchannel *sch;
+	int ret;
+
+	CIO_DEBUG(KERN_INFO, 6, "cio: reprobe 0.%x.%04x\n",
+		  schid.ssid, schid.sch_no);
+	if (need_reprobe)
+		return -EAGAIN;
+
+	sch = get_subchannel_by_schid(schid);
+	if (sch) {
+		/* Already known. */
+		put_device(&sch->dev);
+		return 0;
+	}
+
+	ret = css_probe_device(schid);
+	switch (ret) {
+	case 0:
+		break;
+	case -ENXIO:
+	case -ENOMEM:
+		/* These should abort looping */
+		break;
+	default:
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/* Work function used to reprobe all unregistered subchannels. */
+static void reprobe_all(struct work_struct *unused)
+{
+	int ret;
+
+	CIO_MSG_EVENT(2, "reprobe start\n");
+
+	need_reprobe = 0;
+	/* Make sure initial subchannel scan is done. */
+	wait_event(ccw_device_init_wq,
+		   atomic_read(&ccw_device_init_count) == 0);
+	ret = for_each_subchannel(reprobe_subchannel, NULL);
+
+	CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
+		      need_reprobe);
+}
+
+DECLARE_WORK(css_reprobe_work, reprobe_all);
+
+/* Schedule reprobing of all unregistered subchannels. */
+void css_schedule_reprobe(void)
+{
+	need_reprobe = 1;
+	queue_work(ccw_device_work, &css_reprobe_work);
+}
+
+EXPORT_SYMBOL_GPL(css_schedule_reprobe);
+
 /*
  * Rescan for new devices. FIXME: This is slow.
  * This function is called when we have lost CRWs due to overflows and we have
@@ -346,23 +444,31 @@ css_reiterate_subchannels(void)
  * Called from the machine check handler for subchannel report words.
  */
 int
-css_process_crw(int irq)
+css_process_crw(int rsid1, int rsid2)
 {
 	int ret;
+	struct subchannel_id mchk_schid;
 
-	CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq);
+	CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
+		      rsid1, rsid2);
 
 	if (need_rescan)
 		/* We need to iterate all subchannels anyway. */
 		return -EAGAIN;
+
+	init_subchannel_id(&mchk_schid);
+	mchk_schid.sch_no = rsid1;
+	if (rsid2 != 0)
+		mchk_schid.ssid = (rsid2 >> 8) & 3;
+
 	/*
 	 * Since we are always presented with IPI in the CRW, we have to
 	 * use stsch() to find out if the subchannel in question has come
 	 * or gone.
	 */
-	ret = css_evaluate_subchannel(irq, 0);
+	ret = css_evaluate_subchannel(mchk_schid, 0);
 	if (ret == -EAGAIN) {
-		if (css_enqueue_subchannel_slow(irq)) {
+		if (css_enqueue_subchannel_slow(mchk_schid)) {
 			css_clear_subchannel_slow_list();
 			need_rescan = 1;
 		}
@@ -370,22 +476,134 @@ css_process_crw(int irq)
 	return ret;
 }
 
-static void __init
-css_generate_pgid(void)
+static int __init
+__init_channel_subsystem(struct subchannel_id schid, void *data)
 {
-	/* Let's build our path group ID here. */
-	if (css_characteristics_avail && css_general_characteristics.mcss)
-		global_pgid.cpu_addr = 0x8000;
+	struct subchannel *sch;
+	int ret;
+
+	if (cio_is_console(schid))
+		sch = cio_get_console_subchannel();
 	else {
+		sch = css_alloc_subchannel(schid);
+		if (IS_ERR(sch))
+			ret = PTR_ERR(sch);
+		else
+			ret = 0;
+		switch (ret) {
+		case 0:
+			break;
+		case -ENOMEM:
+			panic("Out of memory in init_channel_subsystem\n");
+		/* -ENXIO: no more subchannels. */
+		case -ENXIO:
+			return ret;
+		/* -EIO: this subchannel set not supported. */
+		case -EIO:
+			return ret;
+		default:
+			return 0;
+		}
+	}
+	/*
+	 * We register ALL valid subchannels in ioinfo, even those
+	 * that have been present before init_channel_subsystem.
+	 * These subchannels can't have been registered yet (kmalloc
+	 * not working) so we do it now. This is true e.g. for the
+	 * console subchannel.
+	 */
+	css_register_subchannel(sch);
+	return 0;
+}
+
+static void __init
+css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
+{
+	if (css_characteristics_avail && css_general_characteristics.mcss) {
+		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
+		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
+	} else {
 #ifdef CONFIG_SMP
-		global_pgid.cpu_addr = hard_smp_processor_id();
+		css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
 #else
-		global_pgid.cpu_addr = 0;
+		css->global_pgid.pgid_high.cpu_addr = 0;
 #endif
 	}
-	global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
-	global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
-	global_pgid.tod_high = (__u32) (get_clock() >> 32);
+	css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
+	css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
+	css->global_pgid.tod_high = tod_high;
+
+}
+
+static void
+channel_subsystem_release(struct device *dev)
+{
+	struct channel_subsystem *css;
+
+	css = to_css(dev);
+	mutex_destroy(&css->mutex);
+	kfree(css);
+}
+
+static ssize_t
+css_cm_enable_show(struct device *dev, struct device_attribute *attr,
+		   char *buf)
+{
+	struct channel_subsystem *css = to_css(dev);
+
+	if (!css)
+		return 0;
+	return sprintf(buf, "%x\n", css->cm_enabled);
+}
+
+static ssize_t
+css_cm_enable_store(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
+{
+	struct channel_subsystem *css = to_css(dev);
+	int ret;
+
+	switch (buf[0]) {
+	case '0':
+		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
+		break;
+	case '1':
+		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
+
+static inline int __init setup_css(int nr)
+{
+	u32 tod_high;
+	int ret;
+
+	memset(css[nr], 0, sizeof(struct channel_subsystem));
+	css[nr]->pseudo_subchannel =
+		kzalloc(sizeof(*css[nr]->pseudo_subchannel), GFP_KERNEL);
+	if (!css[nr]->pseudo_subchannel)
+		return -ENOMEM;
+	css[nr]->pseudo_subchannel->dev.parent = &css[nr]->device;
+	css[nr]->pseudo_subchannel->dev.release = css_subchannel_release;
+	sprintf(css[nr]->pseudo_subchannel->dev.bus_id, "defunct");
+	ret = cio_create_sch_lock(css[nr]->pseudo_subchannel);
+	if (ret) {
+		kfree(css[nr]->pseudo_subchannel);
+		return ret;
+	}
+	mutex_init(&css[nr]->mutex);
+	css[nr]->valid = 1;
+	css[nr]->cssid = nr;
+	sprintf(css[nr]->device.bus_id, "css%x", nr);
+	css[nr]->device.release = channel_subsystem_release;
+	tod_high = (u32) (get_clock() >> 32);
+	css_generate_pgid(css[nr], tod_high);
+	return 0;
+}
 
 /*
@@ -396,59 +614,84 @@ static int __init
 init_channel_subsystem (void)
 {
-	int ret, irq;
+	int ret, i;
 
 	if (chsc_determine_css_characteristics() == 0)
 		css_characteristics_avail = 1;
 
-	css_generate_pgid();
-
 	if ((ret = bus_register(&css_bus_type)))
 		goto out;
-	if ((ret = device_register (&css_bus_device)))
-		goto out_bus;
 
+	/* Try to enable MSS. */
+	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
+	switch (ret) {
+	case 0: /* Success. */
+		max_ssid = __MAX_SSID;
+		break;
+	case -ENOMEM:
+		goto out_bus;
+	default:
+		max_ssid = 0;
+	}
+	/* Setup css structure. */
+	for (i = 0; i <= __MAX_CSSID; i++) {
+		css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
+		if (!css[i]) {
+			ret = -ENOMEM;
+			goto out_unregister;
+		}
+		ret = setup_css(i);
+		if (ret)
+			goto out_free;
+		ret = device_register(&css[i]->device);
+		if (ret)
+			goto out_free_all;
+		if (css_characteristics_avail &&
+		    css_chsc_characteristics.secm) {
+			ret = device_create_file(&css[i]->device,
+						 &dev_attr_cm_enable);
+			if (ret)
+				goto out_device;
+		}
+		ret = device_register(&css[i]->pseudo_subchannel->dev);
+		if (ret)
+			goto out_file;
+	}
 	css_init_done = 1;
 
 	ctl_set_bit(6, 28);
 
-	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
-		struct subchannel *sch;
-
-		if (cio_is_console(irq))
-			sch = cio_get_console_subchannel();
-		else {
-			sch = css_alloc_subchannel(irq);
-			if (IS_ERR(sch))
-				ret = PTR_ERR(sch);
-			else
-				ret = 0;
-			if (ret == -ENOMEM)
-				panic("Out of memory in "
-				      "init_channel_subsystem\n");
-			/* -ENXIO: no more subchannels. */
-			if (ret == -ENXIO)
-				break;
-			if (ret)
-				continue;
-		}
-		/*
-		 * We register ALL valid subchannels in ioinfo, even those
-		 * that have been present before init_channel_subsystem.
-		 * These subchannels can't have been registered yet (kmalloc
-		 * not working) so we do it now. This is true e.g. for the
-		 * console subchannel.
-		 */
-		css_register_subchannel(sch);
-	}
+	for_each_subchannel(__init_channel_subsystem, NULL);
 	return 0;
-
+out_file:
+	device_remove_file(&css[i]->device, &dev_attr_cm_enable);
+out_device:
+	device_unregister(&css[i]->device);
+out_free_all:
+	kfree(css[i]->pseudo_subchannel->lock);
+	kfree(css[i]->pseudo_subchannel);
+out_free:
+	kfree(css[i]);
+out_unregister:
+	while (i > 0) {
+		i--;
+		device_unregister(&css[i]->pseudo_subchannel->dev);
+		if (css_characteristics_avail && css_chsc_characteristics.secm)
+			device_remove_file(&css[i]->device,
+					   &dev_attr_cm_enable);
+		device_unregister(&css[i]->device);
	}
 out_bus:
 	bus_unregister(&css_bus_type);
 out:
 	return ret;
 }
 
+int sch_is_pseudo_sch(struct subchannel *sch)
+{
+	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
+}
+
 /*
  * find a driver for a subchannel. They identify by the subchannel
  * type with the exception that the console subchannel driver has its own
@@ -466,59 +709,52 @@ css_bus_match (struct device *dev, struct device_driver *drv)
 	return 0;
 }
 
-struct bus_type css_bus_type = {
-	.name = "css",
-	.match = &css_bus_match,
-};
-
-subsys_initcall(init_channel_subsystem);
-
-/*
- * Register root devices for some drivers. The release function must not be
- * in the device drivers, so we do it here.
 - */
-static void
-s390_root_dev_release(struct device *dev)
+static int
+css_probe (struct device *dev)
 {
-	kfree(dev);
+	struct subchannel *sch;
+
+	sch = to_subchannel(dev);
+	sch->driver = container_of (dev->driver, struct css_driver, drv);
+	return (sch->driver->probe ? sch->driver->probe(sch) : 0);
 }
 
-struct device *
-s390_root_dev_register(const char *name)
+static int
+css_remove (struct device *dev)
 {
-	struct device *dev;
-	int ret;
+	struct subchannel *sch;
 
-	if (!strlen(name))
-		return ERR_PTR(-EINVAL);
-	dev = kmalloc(sizeof(struct device), GFP_KERNEL);
-	if (!dev)
-		return ERR_PTR(-ENOMEM);
-	memset(dev, 0, sizeof(struct device));
-	strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE));
-	dev->release = s390_root_dev_release;
-	ret = device_register(dev);
-	if (ret) {
-		kfree(dev);
-		return ERR_PTR(ret);
-	}
-	return dev;
+	sch = to_subchannel(dev);
+	return (sch->driver->remove ? sch->driver->remove(sch) : 0);
 }
 
-void
-s390_root_dev_unregister(struct device *dev)
+static void
+css_shutdown (struct device *dev)
 {
-	if (dev)
-		device_unregister(dev);
+	struct subchannel *sch;
+
+	sch = to_subchannel(dev);
+	if (sch->driver->shutdown)
+		sch->driver->shutdown(sch);
 }
 
+struct bus_type css_bus_type = {
+	.name = "css",
+	.match = css_bus_match,
+	.probe = css_probe,
+	.remove = css_remove,
+	.shutdown = css_shutdown,
+};
+
+subsys_initcall(init_channel_subsystem);
+
 int
-css_enqueue_subchannel_slow(unsigned long schid)
+css_enqueue_subchannel_slow(struct subchannel_id schid)
 {
 	struct slow_subchannel *new_slow_sch;
 	unsigned long flags;
 
-	new_slow_sch = kmalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
+	new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
 	if (!new_slow_sch)
 		return -ENOMEM;
 	new_slow_sch->schid = schid;
@@ -555,6 +791,4 @@ css_slow_subchannels_exist(void)
 
 MODULE_LICENSE("GPL");
 EXPORT_SYMBOL(css_bus_type);
-EXPORT_SYMBOL(s390_root_dev_register);
-EXPORT_SYMBOL(s390_root_dev_unregister);
 EXPORT_SYMBOL_GPL(css_characteristics_avail);