linux 2.6.16.38 w/ vs2.0.3-rc1
block/ll_rw_blk.c
index 3ecdb34..1ce88cf 100644
@@ -10,6 +10,7 @@
 /*
  * This handles all read/write requests to block devices
  */
+#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/backing-dev.h>
@@ -27,7 +28,6 @@
 #include <linux/writeback.h>
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
-#include <linux/blktrace_api.h>
 
 /*
  * for max sense size
@@ -784,8 +784,6 @@ void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
        t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
        t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
        t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
-       if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-               clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
 }
 
 EXPORT_SYMBOL(blk_queue_stack_limits);
@@ -907,15 +905,17 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
                                __FUNCTION__, depth);
        }
 
-       tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+       tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
        if (!tag_index)
                goto fail;
 
        nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
-       tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
+       tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                goto fail;
 
+       memset(tag_index, 0, depth * sizeof(struct request *));
+       memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
        tags->real_max_depth = depth;
        tags->max_depth = depth;
        tags->tag_index = tag_index;
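The hunk above swaps kzalloc() for kmalloc() plus memset(). The two forms are equivalent, since kzalloc(size, flags) is allocate-then-zero. A minimal sketch of that equivalence (the helper name is hypothetical, not from this tree):

static inline void *compat_kzalloc(size_t size, gfp_t flags)
{
        void *p = kmalloc(size, flags);

        if (p)
                memset(p, 0, size);
        return p;
}

Note that the backported code defers both memsets until after the second allocation succeeds; that is safe here because the failure path only frees the buffers.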
@@ -1553,13 +1553,11 @@ void blk_plug_device(request_queue_t *q)
         * don't plug a stopped queue, it must be paired with blk_start_queue()
         * which will restart the queueing
         */
-       if (blk_queue_stopped(q))
+       if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
                return;
 
-       if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+       if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-               blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
-       }
 }
 
 EXPORT_SYMBOL(blk_plug_device);
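blk_queue_stopped(q) in the newer tree is, as far as I recall, just a wrapper around the open-coded test this hunk restores, roughly:

#define blk_queue_stopped(q)    test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)

The test_and_set_bit() above is what makes plugging idempotent: only the caller that actually transitions the PLUGGED flag arms the unplug timer.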
@@ -1586,7 +1584,7 @@ EXPORT_SYMBOL(blk_remove_plug);
  */
 void __generic_unplug_device(request_queue_t *q)
 {
-       if (unlikely(blk_queue_stopped(q)))
+       if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)))
                return;
 
        if (!blk_remove_plug(q))
@@ -1623,21 +1621,14 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
        /*
         * devices don't necessarily have an ->unplug_fn defined
         */
-       if (q->unplug_fn) {
-               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-                                       q->rq.count[READ] + q->rq.count[WRITE]);
-
+       if (q->unplug_fn)
                q->unplug_fn(q);
-       }
 }
 
 static void blk_unplug_work(void *data)
 {
        request_queue_t *q = data;
 
-       blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-                               q->rq.count[READ] + q->rq.count[WRITE]);
-
        q->unplug_fn(q);
 }
 
@@ -1645,9 +1636,6 @@ static void blk_unplug_timeout(unsigned long data)
 {
        request_queue_t *q = (request_queue_t *)data;
 
-       blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
-                               q->rq.count[READ] + q->rq.count[WRITE]);
-
        kblockd_schedule_work(&q->unplug_work);
 }
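These two hunks drop the blktrace hooks but keep the underlying deferral: the unplug timer fires in softirq context, so instead of calling into the driver from there it punts to the kblockd workqueue, where ->unplug_fn() runs in process context. A sketch of the same wiring with the 2.6.16-era three-argument INIT_WORK() (names are illustrative, not from this file):

static void my_unplug_work(void *data)
{
        struct request_queue *q = data;

        q->unplug_fn(q);                /* runs in process context */
}

static void my_unplug_timeout(unsigned long data)
{
        struct request_queue *q = (struct request_queue *)data;

        kblockd_schedule_work(&q->unplug_work); /* defer out of softirq */
}

static void my_setup(struct request_queue *q)
{
        INIT_WORK(&q->unplug_work, my_unplug_work, q);
        init_timer(&q->unplug_timer);
        q->unplug_timer.function = my_unplug_timeout;
        q->unplug_timer.data = (unsigned long)q;
}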
 
@@ -1662,8 +1650,6 @@ static void blk_unplug_timeout(unsigned long data)
  **/
 void blk_start_queue(request_queue_t *q)
 {
-       WARN_ON(!irqs_disabled());
-
        clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
 
        /*
@@ -1754,7 +1740,7 @@ EXPORT_SYMBOL(blk_run_queue);
 
 /**
  * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
- * @kobj:    the kobj belonging of the request queue to be released
+ * @q:    the request queue to be released
  *
  * Description:
  *     blk_cleanup_queue is the pair to blk_init_queue() or
@@ -1767,11 +1753,16 @@ EXPORT_SYMBOL(blk_run_queue);
  *     Hopefully the low level driver will have finished any
  *     outstanding requests first...
  **/
-static void blk_release_queue(struct kobject *kobj)
+void blk_cleanup_queue(request_queue_t * q)
 {
-       request_queue_t *q = container_of(kobj, struct request_queue, kobj);
        struct request_list *rl = &q->rq;
 
+       if (!atomic_dec_and_test(&q->refcnt))
+               return;
+
+       if (q->elevator)
+               elevator_exit(q->elevator);
+
        blk_sync_queue(q);
 
        if (rl->rq_pool)
@@ -1780,30 +1771,9 @@ static void blk_release_queue(struct kobject *kobj)
        if (q->queue_tags)
                __blk_queue_free_tags(q);
 
-       if (q->blk_trace)
-               blk_trace_shutdown(q);
-
        kmem_cache_free(requestq_cachep, q);
 }
 
-void blk_put_queue(request_queue_t *q)
-{
-       kobject_put(&q->kobj);
-}
-EXPORT_SYMBOL(blk_put_queue);
-
-void blk_cleanup_queue(request_queue_t * q)
-{
-       mutex_lock(&q->sysfs_lock);
-       set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
-       mutex_unlock(&q->sysfs_lock);
-
-       if (q->elevator)
-               elevator_exit(q->elevator);
-
-       blk_put_queue(q);
-}
-
 EXPORT_SYMBOL(blk_cleanup_queue);
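The backport above trades the kobject release hook for a bare atomic reference count: the queue is created with refcnt = 1 (see blk_alloc_queue_node() below) and torn down by whoever drops the last reference. A minimal sketch of that lifetime pattern with a hypothetical object:

struct my_obj {
        atomic_t refcnt;
        /* ... payload ... */
};

static void my_obj_get(struct my_obj *o)
{
        atomic_inc(&o->refcnt);
}

static void my_obj_put(struct my_obj *o)
{
        /* atomic_dec_and_test() returns true only for the final put */
        if (atomic_dec_and_test(&o->refcnt))
                kfree(o);
}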
 
 static int blk_init_free_list(request_queue_t *q)
@@ -1831,8 +1801,6 @@ request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
-static struct kobj_type queue_ktype;
-
 request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
        request_queue_t *q;
@@ -1843,16 +1811,11 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
        memset(q, 0, sizeof(*q));
        init_timer(&q->unplug_timer);
-
-       snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
-       q->kobj.ktype = &queue_ktype;
-       kobject_init(&q->kobj);
+       atomic_set(&q->refcnt, 1);
 
        q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
        q->backing_dev_info.unplug_io_data = q;
 
-       mutex_init(&q->sysfs_lock);
-
        return q;
 }
 EXPORT_SYMBOL(blk_alloc_queue_node);
@@ -1879,8 +1842,7 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
  *    get dealt with eventually.
  *
  *    The queue spin lock must be held while manipulating the requests on the
- *    request queue; this lock will be taken also from interrupt context, so irq
- *    disabling is needed for it.
+ *    request queue.
  *
  *    Function returns a pointer to the initialized request queue, or NULL if
  *    it didn't succeed.
@@ -1905,10 +1867,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
                return NULL;
 
        q->node = node_id;
-       if (blk_init_free_list(q)) {
-               kmem_cache_free(requestq_cachep, q);
-               return NULL;
-       }
+       if (blk_init_free_list(q))
+               goto out_init;
 
        /*
         * if caller didn't supply a lock, they get per-queue locking with
@@ -1944,7 +1904,9 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
                return q;
        }
 
-       blk_put_queue(q);
+       blk_cleanup_queue(q);
+out_init:
+       kmem_cache_free(requestq_cachep, q);
        return NULL;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
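Per the comment above, the queue spinlock must be held while manipulating requests; whether the irq-disabling variant is required depends on whether the lock is also taken from completion context, which this backport's comment no longer promises. A hedged sketch of a caller honoring the rule (the wrapper function is hypothetical):

static void my_requeue(struct request_queue *q, struct request *rq)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);        /* safe either way */
        blk_requeue_request(q, rq);
        spin_unlock_irqrestore(q->queue_lock, flags);
}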
@@ -1952,7 +1914,7 @@ EXPORT_SYMBOL(blk_init_queue_node);
 int blk_get_queue(request_queue_t *q)
 {
        if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-               kobject_get(&q->kobj);
+               atomic_inc(&q->refcnt);
                return 0;
        }
 
@@ -2160,8 +2122,6 @@ rq_starved:
        
        rq_init(q, rq);
        rq->rl = rl;
-
-       blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
        return rq;
 }
@@ -2190,8 +2150,6 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
                if (!rq) {
                        struct io_context *ioc;
 
-                       blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
-
                        __generic_unplug_device(q);
                        spin_unlock_irq(q->queue_lock);
                        io_schedule();
@@ -2245,8 +2203,6 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_requeue_request(request_queue_t *q, struct request *rq)
 {
-       blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
-
        if (blk_rq_tagged(rq))
                blk_queue_end_tag(q, rq);
 
@@ -2494,12 +2450,10 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
        rq->rq_disk = bd_disk;
        rq->flags |= REQ_NOMERGE;
        rq->end_io = done;
-       WARN_ON(irqs_disabled());
-       spin_lock_irq(q->queue_lock);
-       __elv_add_request(q, rq, where, 1);
-       __generic_unplug_device(q);
-       spin_unlock_irq(q->queue_lock);
+       elv_add_request(q, rq, where, 1);
+       generic_unplug_device(q);
 }
+
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
 /**
@@ -2516,7 +2470,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
                   struct request *rq, int at_head)
 {
-       DECLARE_COMPLETION_ONSTACK(wait);
+       DECLARE_COMPLETION(wait);
        char sense[SCSI_SENSE_BUFFERSIZE];
        int err = 0;
 
@@ -2744,7 +2698,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
                return 0;
 
        /*
         * not contiguous
         */
        if (req->sector + req->nr_sectors != next->sector)
                return 0;
@@ -2826,9 +2780,6 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
        if (unlikely(bio_barrier(bio)))
                req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
-       if (bio_sync(bio))
-               req->flags |= REQ_RW_SYNC;
-
        req->errors = 0;
        req->hard_sector = req->sector = bio->bi_sector;
        req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
@@ -2886,8 +2837,6 @@ static int __make_request(request_queue_t *q, struct bio *bio)
                        if (!q->back_merge_fn(q, req, bio))
                                break;
 
-                       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
-
                        req->biotail->bi_next = bio;
                        req->biotail = bio;
                        req->nr_sectors = req->hard_nr_sectors += nr_sectors;
@@ -2903,8 +2852,6 @@ static int __make_request(request_queue_t *q, struct bio *bio)
                        if (!q->front_merge_fn(q, req, bio))
                                break;
 
-                       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
-
                        bio->bi_next = req->bio;
                        req->bio = bio;
 
@@ -3021,9 +2968,7 @@ void generic_make_request(struct bio *bio)
 {
        request_queue_t *q;
        sector_t maxsector;
-       sector_t old_sector;
        int ret, nr_sectors = bio_sectors(bio);
-       dev_t old_dev;
 
        might_sleep();
        /* Test device or partition size, when known. */
@@ -3050,8 +2995,6 @@ void generic_make_request(struct bio *bio)
         * NOTE: we don't repeat the blk_size check for each new device.
         * Stacking drivers are expected to know what they are doing.
         */
-       old_sector = -1;
-       old_dev = 0;
        do {
                char b[BDEVNAME_SIZE];
 
@@ -3084,30 +3027,6 @@ end_io:
                 */
                blk_partition_remap(bio);
 
-               if (old_sector != -1)
-                       blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, 
-                                           old_sector);
-
-               blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
-
-               old_sector = bio->bi_sector;
-               old_dev = bio->bi_bdev->bd_dev;
-
-               maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-               if (maxsector) {
-                       sector_t sector = bio->bi_sector;
-
-                       if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
-                               /*
-                                * This may well happen - partitions are not checked
-                                * to make sure they are within the size of the
-                                * whole device.
-                                */
-                               handle_bad_sector(bio);
-                               goto end_io;
-                       }
-               }
-
                ret = q->make_request_fn(q, bio);
        } while (ret);
 }
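The do/while loop above is what makes stacking drivers work: a make_request_fn may redirect the bio to another device and return nonzero, and generic_make_request() then resubmits the bio against the new queue. A sketch of such a remapping driver (the backing device and offset are hypothetical):

static struct block_device *my_backing_bdev;    /* hypothetical */
static sector_t my_start_sector;                /* hypothetical */

static int my_remap_request(struct request_queue *q, struct bio *bio)
{
        bio->bi_bdev = my_backing_bdev;         /* retarget the bio */
        bio->bi_sector += my_start_sector;      /* shift into that device */

        return 1;       /* nonzero: resubmit against the new queue */
}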
@@ -3132,9 +3051,9 @@ void submit_bio(int rw, struct bio *bio)
        BIO_BUG_ON(!bio->bi_io_vec);
        bio->bi_rw |= rw;
        if (rw & WRITE)
-               count_vm_events(PGPGOUT, count);
+               mod_page_state(pgpgout, count);
        else
-               count_vm_events(PGPGIN, count);
+               mod_page_state(pgpgin, count);
 
        if (unlikely(block_dump)) {
                char b[BDEVNAME_SIZE];
@@ -3227,8 +3146,6 @@ static int __end_that_request_first(struct request *req, int uptodate,
        int total_bytes, bio_nbytes, error, next_idx = 0;
        struct bio *bio;
 
-       blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
-
        /*
         * extend uptodate bool to allow < 0 value to be direct io error
         */
@@ -3380,11 +3297,12 @@ EXPORT_SYMBOL(end_that_request_chunk);
  */
 static void blk_done_softirq(struct softirq_action *h)
 {
-       struct list_head *cpu_list, local_list;
+       struct list_head *cpu_list;
+       LIST_HEAD(local_list);
 
        local_irq_disable();
        cpu_list = &__get_cpu_var(blk_cpu_done);
-       list_replace_init(cpu_list, &local_list);
+       list_splice_init(cpu_list, &local_list);
        local_irq_enable();
 
        while (!list_empty(&local_list)) {
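The hunk above swaps list_replace_init() for list_splice_init(); both move the per-CPU completion list onto a stack-local head with interrupts off so the (possibly long) walk can run with interrupts enabled. list_splice_init() needs the destination pre-initialized, hence the LIST_HEAD() change. The drain pattern in isolation (the item type is hypothetical):

struct my_item {
        struct list_head list;
};

static void my_drain(struct list_head *shared)
{
        LIST_HEAD(local);

        local_irq_disable();
        list_splice_init(shared, &local);       /* shared is left empty */
        local_irq_enable();

        while (!list_empty(&local)) {
                struct my_item *it = list_entry(local.next,
                                                struct my_item, list);

                list_del(&it->list);
                /* ... process it with irqs enabled ... */
        }
}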
@@ -3430,7 +3348,7 @@ static struct notifier_block __devinitdata blk_cpu_notifier = {
  *
  * Description:
  *     Ends all I/O on a request. It does not handle partial completions,
  *     unless the driver actually implements this in its completion callback
  *     through requeueing. The actual completion happens out-of-order,
  *     through a softirq handler. The user must have registered a completion
  *     callback through blk_queue_softirq_done().
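The softirq completion scheme the comment describes: the driver registers a callback once at setup, then its interrupt handler hands finished requests to blk_complete_request(), which raises BLOCK_SOFTIRQ and invokes the callback later. A sketch with hypothetical driver names and the 2.6.16 three-argument IRQ handler signature:

static struct request *mydrv_fetch_done(void *dev_id);  /* hypothetical */

static void mydrv_softirq_done(struct request *rq)
{
        /* runs out-of-order, in softirq context: do the heavier
         * per-request cleanup here instead of in the hard IRQ */
}

static irqreturn_t mydrv_isr(int irq, void *dev_id, struct pt_regs *regs)
{
        struct request *rq = mydrv_fetch_done(dev_id);

        blk_complete_request(rq);       /* defer to BLOCK_SOFTIRQ */
        return IRQ_HANDLED;
}

static void mydrv_setup(struct request_queue *q)
{
        blk_queue_softirq_done(q, mydrv_softirq_done);
}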
@@ -3472,12 +3390,7 @@ void end_that_request_last(struct request *req, int uptodate)
        if (unlikely(laptop_mode) && blk_fs_request(req))
                laptop_io_completion();
 
-       /*
-        * Account IO completion.  bar_rq isn't accounted as a normal
-        * IO on queueing nor completion.  Accounting the containing
-        * request is enough.
-        */
-       if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
+       if (disk && blk_fs_request(req)) {
                unsigned long duration = jiffies - req->start_time;
                const int rw = rq_data_dir(req);
 
@@ -3507,8 +3420,8 @@ EXPORT_SYMBOL(end_request);
 
 void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
 {
-       /* first two bits are identical in rq->flags and bio->bi_rw */
-       rq->flags |= (bio->bi_rw & 3);
+       /* first three bits are identical in rq->flags and bio->bi_rw */
+       rq->flags |= (bio->bi_rw & 7);
 
        rq->nr_phys_segments = bio_phys_segments(q, bio);
        rq->nr_hw_segments = bio_hw_segments(q, bio);
@@ -3552,11 +3465,13 @@ int __init blk_dev_init(void)
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-       for_each_possible_cpu(i)
+       for_each_cpu(i)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
-       register_hotcpu_notifier(&blk_cpu_notifier);
+#ifdef CONFIG_HOTPLUG_CPU
+       register_cpu_notifier(&blk_cpu_notifier);
+#endif
 
        blk_max_low_pfn = max_low_pfn;
        blk_max_pfn = max_pfn;
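for_each_cpu() in this tree walks every possible CPU (later kernels renamed it for_each_possible_cpu(), which is what the hunk reverts), so each CPU's completion list is initialized even before that CPU comes online. The per-CPU setup pattern in isolation:

static DEFINE_PER_CPU(struct list_head, my_cpu_done);

static int __init my_init(void)
{
        int i;

        for_each_cpu(i)
                INIT_LIST_HEAD(&per_cpu(my_cpu_done, i));
        return 0;
}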
@@ -3575,18 +3490,10 @@ void put_io_context(struct io_context *ioc)
        BUG_ON(atomic_read(&ioc->refcount) == 0);
 
        if (atomic_dec_and_test(&ioc->refcount)) {
-               struct cfq_io_context *cic;
-
-               rcu_read_lock();
                if (ioc->aic && ioc->aic->dtor)
                        ioc->aic->dtor(ioc->aic);
-               if (ioc->cic_root.rb_node != NULL) {
-                       struct rb_node *n = rb_first(&ioc->cic_root);
-
-                       cic = rb_entry(n, struct cfq_io_context, rb_node);
-                       cic->dtor(ioc);
-               }
-               rcu_read_unlock();
+               if (ioc->cic && ioc->cic->dtor)
+                       ioc->cic->dtor(ioc->cic);
 
                kmem_cache_free(iocontext_cachep, ioc);
        }
@@ -3598,7 +3505,6 @@ void exit_io_context(void)
 {
        unsigned long flags;
        struct io_context *ioc;
-       struct cfq_io_context *cic;
 
        local_irq_save(flags);
        task_lock(current);
@@ -3610,11 +3516,9 @@ void exit_io_context(void)
 
        if (ioc->aic && ioc->aic->exit)
                ioc->aic->exit(ioc->aic);
-       if (ioc->cic_root.rb_node != NULL) {
-               cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
-               cic->exit(ioc);
-       }
+       if (ioc->cic && ioc->cic->exit)
+               ioc->cic->exit(ioc->cic);
+
        put_io_context(ioc);
 }
 
@@ -3643,9 +3547,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
                ret->last_waited = jiffies; /* doesn't matter... */
                ret->nr_batch_requests = 0; /* because this is 0 */
                ret->aic = NULL;
-               ret->cic_root.rb_node = NULL;
-               /* make sure set_task_ioprio() sees the settings above */
-               smp_wmb();
+               ret->cic = NULL;
                tsk->io_context = ret;
        }
 
@@ -3725,13 +3627,10 @@ static ssize_t
 queue_requests_store(struct request_queue *q, const char *page, size_t count)
 {
        struct request_list *rl = &q->rq;
-       unsigned long nr;
-       int ret = queue_var_store(&nr, page, count);
-       if (nr < BLKDEV_MIN_RQ)
-               nr = BLKDEV_MIN_RQ;
 
-       spin_lock_irq(q->queue_lock);
-       q->nr_requests = nr;
+       int ret = queue_var_store(&q->nr_requests, page, count);
+       if (q->nr_requests < BLKDEV_MIN_RQ)
+               q->nr_requests = BLKDEV_MIN_RQ;
        blk_queue_congestion_threshold(q);
 
        if (rl->count[READ] >= queue_congestion_on_threshold(q))
@@ -3757,7 +3656,6 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
                blk_clear_queue_full(q, WRITE);
                wake_up(&rl->wait[WRITE]);
        }
-       spin_unlock_irq(q->queue_lock);
        return ret;
 }
 
@@ -3873,19 +3771,13 @@ static ssize_t
 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
        struct queue_sysfs_entry *entry = to_queue(attr);
-       request_queue_t *q = container_of(kobj, struct request_queue, kobj);
-       ssize_t res;
+       struct request_queue *q;
 
+       q = container_of(kobj, struct request_queue, kobj);
        if (!entry->show)
                return -EIO;
-       mutex_lock(&q->sysfs_lock);
-       if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
-               mutex_unlock(&q->sysfs_lock);
-               return -ENOENT;
-       }
-       res = entry->show(q, page);
-       mutex_unlock(&q->sysfs_lock);
-       return res;
+
+       return entry->show(q, page);
 }
 
 static ssize_t
@@ -3893,20 +3785,13 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
                    const char *page, size_t length)
 {
        struct queue_sysfs_entry *entry = to_queue(attr);
-       request_queue_t *q = container_of(kobj, struct request_queue, kobj);
-
-       ssize_t res;
+       struct request_queue *q;
 
+       q = container_of(kobj, struct request_queue, kobj);
        if (!entry->store)
                return -EIO;
-       mutex_lock(&q->sysfs_lock);
-       if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
-               mutex_unlock(&q->sysfs_lock);
-               return -ENOENT;
-       }
-       res = entry->store(q, page, length);
-       mutex_unlock(&q->sysfs_lock);
-       return res;
+
+       return entry->store(q, page, length);
 }
 
 static struct sysfs_ops queue_sysfs_ops = {
@@ -3917,7 +3802,6 @@ static struct sysfs_ops queue_sysfs_ops = {
 static struct kobj_type queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .default_attrs  = default_attrs,
-       .release        = blk_release_queue,
 };
 
 int blk_register_queue(struct gendisk *disk)
@@ -3930,17 +3814,19 @@ int blk_register_queue(struct gendisk *disk)
                return -ENXIO;
 
        q->kobj.parent = kobject_get(&disk->kobj);
+       if (!q->kobj.parent)
+               return -EBUSY;
 
-       ret = kobject_add(&q->kobj);
+       snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
+       q->kobj.ktype = &queue_ktype;
+
+       ret = kobject_register(&q->kobj);
        if (ret < 0)
                return ret;
 
-       kobject_uevent(&q->kobj, KOBJ_ADD);
-
        ret = elv_register_queue(q);
        if (ret) {
-               kobject_uevent(&q->kobj, KOBJ_REMOVE);
-               kobject_del(&q->kobj);
+               kobject_unregister(&q->kobj);
                return ret;
        }
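kobject_register() in this era does kobject_init() plus kobject_add() and, if that succeeds, emits the KOBJ_ADD uevent, which is why the backport can drop the separate kobject_add()/kobject_uevent() pair. A registration sketch against the same queue_ktype (the function and object are hypothetical):

static int my_register(struct kobject *k, struct kobject *parent)
{
        memset(k, 0, sizeof(*k));
        snprintf(k->name, KOBJ_NAME_LEN, "%s", "example");
        k->ktype = &queue_ktype;
        k->parent = parent;

        return kobject_register(k);     /* init + add + KOBJ_ADD uevent */
}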
 
@@ -3954,8 +3840,7 @@ void blk_unregister_queue(struct gendisk *disk)
        if (q && q->request_fn) {
                elv_unregister_queue(q);
 
-               kobject_uevent(&q->kobj, KOBJ_REMOVE);
-               kobject_del(&q->kobj);
+               kobject_unregister(&q->kobj);
                kobject_put(&disk->kobj);
        }
 }