X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Fblock_dev.c;h=fc7028b685f29ab94d5db4ef26dbb897573c6af5;hb=refs%2Fheads%2Fvserver;hp=6e50346fb1eeea70da97a38d2059249362c10a0f;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git

diff --git a/fs/block_dev.c b/fs/block_dev.c
index 6e50346fb..fc7028b68 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -5,25 +5,25 @@
  *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
  */
 
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/fcntl.h>
 #include <linux/slab.h>
 #include <linux/kmod.h>
 #include <linux/major.h>
-#include <linux/devfs_fs_kernel.h>
 #include <linux/smp_lock.h>
 #include <linux/highmem.h>
 #include <linux/blkdev.h>
 #include <linux/module.h>
 #include <linux/blkpg.h>
 #include <linux/buffer_head.h>
+#include <linux/writeback.h>
 #include <linux/mpage.h>
 #include <linux/mount.h>
 #include <linux/uio.h>
 #include <linux/namei.h>
 #include <asm/uaccess.h>
+#include "internal.h"
 
 struct bdev_inode {
 	struct block_device bdev;
@@ -86,16 +86,12 @@ EXPORT_SYMBOL(set_blocksize);
 
 int sb_set_blocksize(struct super_block *sb, int size)
 {
-	int bits = 9; /* 2^9 = 512 */
-
 	if (set_blocksize(sb->s_bdev, size))
 		return 0;
 	/* If we get here, we know size is power of two
 	 * and its value is between 512 and PAGE_SIZE */
 	sb->s_blocksize = size;
-	for (size >>= 10; size; size >>= 1)
-		++bits;
-	sb->s_blocksize_bits = bits;
+	sb->s_blocksize_bits = blksize_bits(size);
 	return sb->s_blocksize;
 }
 
@@ -135,9 +131,10 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
 
 static int
 blkdev_get_blocks(struct inode *inode, sector_t iblock,
-		unsigned long max_blocks, struct buffer_head *bh, int create)
+		struct buffer_head *bh, int create)
 {
 	sector_t end_block = max_block(I_BDEV(inode));
+	unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
 
 	if ((iblock + max_blocks) > end_block) {
 		max_blocks = end_block - iblock;
@@ -171,6 +168,203 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 				iov, offset, nr_segs, blkdev_get_blocks, NULL);
 }
 
+#if 0
+static int blk_end_aio(struct bio *bio, unsigned int bytes_done, int error)
+{
+	struct kiocb *iocb = bio->bi_private;
+	atomic_t *bio_count = &iocb->ki_bio_count;
+
+	if (bio_data_dir(bio) == READ)
+		bio_check_pages_dirty(bio);
+	else {
+		bio_release_pages(bio);
+		bio_put(bio);
+	}
+
+	/* iocb->ki_nbytes stores error code from LLDD */
+	if (error)
+		iocb->ki_nbytes = -EIO;
+
+	if (atomic_dec_and_test(bio_count)) {
+		if ((long)iocb->ki_nbytes < 0)
+			aio_complete(iocb, iocb->ki_nbytes, 0);
+		else
+			aio_complete(iocb, iocb->ki_left, 0);
+	}
+
+	return 0;
+}
+
+#define VEC_SIZE	16
+struct pvec {
+	unsigned short nr;
+	unsigned short idx;
+	struct page *page[VEC_SIZE];
+};
+
+#define PAGES_SPANNED(addr, len)	\
+	(DIV_ROUND_UP((addr) + (len), PAGE_SIZE) - (addr) / PAGE_SIZE);
+
+/*
+ * get page pointer for user addr, we internally cache struct page array for
+ * (addr, count) range in pvec to avoid frequent call to get_user_pages.  If
+ * internal page list is exhausted, a batch count of up to VEC_SIZE is used
+ * to get next set of page struct.
+ */
+static struct page *blk_get_page(unsigned long addr, size_t count, int rw,
+				 struct pvec *pvec)
+{
+	int ret, nr_pages;
+	if (pvec->idx == pvec->nr) {
+		nr_pages = PAGES_SPANNED(addr, count);
+		nr_pages = min(nr_pages, VEC_SIZE);
+		down_read(&current->mm->mmap_sem);
+		ret = get_user_pages(current, current->mm, addr, nr_pages,
+				     rw == READ, 0, pvec->page, NULL);
+		up_read(&current->mm->mmap_sem);
+		if (ret < 0)
+			return ERR_PTR(ret);
+		pvec->nr = ret;
+		pvec->idx = 0;
+	}
+	return pvec->page[pvec->idx++];
+}
+
+/* return a page back to pvec array */
+static void blk_unget_page(struct page *page, struct pvec *pvec)
+{
+	pvec->page[--pvec->idx] = page;
+}
+
+static ssize_t
+blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+		 loff_t pos, unsigned long nr_segs)
+{
+	struct inode *inode = iocb->ki_filp->f_mapping->host;
+	unsigned blkbits = blksize_bits(bdev_hardsect_size(I_BDEV(inode)));
+	unsigned blocksize_mask = (1 << blkbits) - 1;
+	unsigned long seg = 0;	/* iov segment iterator */
+	unsigned long nvec;	/* number of bio vec needed */
+	unsigned long cur_off;	/* offset into current page */
+	unsigned long cur_len;	/* I/O len of current page, up to PAGE_SIZE */
+
+	unsigned long addr;	/* user iovec address */
+	size_t count;		/* user iovec len */
+	size_t nbytes = iocb->ki_nbytes = iocb->ki_left;	/* total xfer size */
+	loff_t size;		/* size of block device */
+	struct bio *bio;
+	atomic_t *bio_count = &iocb->ki_bio_count;
+	struct page *page;
+	struct pvec pvec;
+
+	pvec.nr = 0;
+	pvec.idx = 0;
+
+	if (pos & blocksize_mask)
+		return -EINVAL;
+
+	size = i_size_read(inode);
+	if (pos + nbytes > size) {
+		nbytes = size - pos;
+		iocb->ki_left = nbytes;
+	}
+
+	/*
+	 * check first non-zero iov alignment, the remaining
+	 * iov alignment is checked inside bio loop below.
+	 */
+	do {
+		addr = (unsigned long) iov[seg].iov_base;
+		count = min(iov[seg].iov_len, nbytes);
+		if (addr & blocksize_mask || count & blocksize_mask)
+			return -EINVAL;
+	} while (!count && ++seg < nr_segs);
+	atomic_set(bio_count, 1);
+
+	while (nbytes) {
+		/* roughly estimate number of bio vec needed */
+		nvec = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
+		nvec = max(nvec, nr_segs - seg);
+		nvec = min(nvec, (unsigned long) BIO_MAX_PAGES);
+
+		/* bio_alloc should not fail with GFP_KERNEL flag */
+		bio = bio_alloc(GFP_KERNEL, nvec);
+		bio->bi_bdev = I_BDEV(inode);
+		bio->bi_end_io = blk_end_aio;
+		bio->bi_private = iocb;
+		bio->bi_sector = pos >> blkbits;
+same_bio:
+		cur_off = addr & ~PAGE_MASK;
+		cur_len = PAGE_SIZE - cur_off;
+		if (count < cur_len)
+			cur_len = count;
+
+		page = blk_get_page(addr, count, rw, &pvec);
+		if (unlikely(IS_ERR(page)))
+			goto backout;
+
+		if (bio_add_page(bio, page, cur_len, cur_off)) {
+			pos += cur_len;
+			addr += cur_len;
+			count -= cur_len;
+			nbytes -= cur_len;
+
+			if (count)
+				goto same_bio;
+			while (++seg < nr_segs) {
+				addr = (unsigned long) iov[seg].iov_base;
+				count = iov[seg].iov_len;
+				if (!count)
+					continue;
+				if (unlikely(addr & blocksize_mask ||
+					     count & blocksize_mask)) {
+					page = ERR_PTR(-EINVAL);
+					goto backout;
+				}
+				count = min(count, nbytes);
+				goto same_bio;
+			}
+		} else {
+			blk_unget_page(page, &pvec);
+		}
+
+		/* bio is ready, submit it */
+		if (rw == READ)
+			bio_set_pages_dirty(bio);
+		atomic_inc(bio_count);
+		submit_bio(rw, bio);
+	}
+
+completion:
+	iocb->ki_left -= nbytes;
+	nbytes = iocb->ki_left;
+	iocb->ki_pos += nbytes;
+
+	blk_run_address_space(inode->i_mapping);
+	if (atomic_dec_and_test(bio_count))
+		aio_complete(iocb, nbytes, 0);
+
+	return -EIOCBQUEUED;
+
+backout:
+	/*
+	 * back out nbytes count constructed so far for this bio,
+	 * we will throw away current bio.
+	 */
+	nbytes += bio->bi_size;
+	bio_release_pages(bio);
+	bio_put(bio);
+
+	/*
+	 * if no bio was submitted, return the error code.
+	 * otherwise, proceed with pending I/O completion.
+	 */
+	if (atomic_read(bio_count) == 1)
+		return PTR_ERR(page);
+	goto completion;
+}
+#endif
+
 static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
 {
 	return block_write_full_page(page, blkdev_get_block, wbc);
@@ -193,7 +387,7 @@ static int blkdev_commit_write(struct file *file, struct page *page, unsigned fr
 
 /*
  * private llseek:
- * for a block special file file->f_dentry->d_inode->i_size is zero
+ * for a block special file file->f_path.dentry->d_inode->i_size is zero
  * so we compute the size by hand (just as in block_read/write above)
  */
 static loff_t block_llseek(struct file *file, loff_t offset, int origin)
@@ -238,11 +432,11 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
  */
 
 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
-static kmem_cache_t * bdev_cachep;
+static struct kmem_cache * bdev_cachep __read_mostly;
 
 static struct inode *bdev_alloc_inode(struct super_block *sb)
 {
-	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL);
+	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -256,7 +450,7 @@ static void bdev_destroy_inode(struct inode *inode)
 	kmem_cache_free(bdev_cachep, bdi);
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct bdev_inode *ei = (struct bdev_inode *) foo;
 	struct block_device *bdev = &ei->bdev;
@@ -265,10 +459,13 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
 	    SLAB_CTOR_CONSTRUCTOR)
 	{
 		memset(bdev, 0, sizeof(*bdev));
-		sema_init(&bdev->bd_sem, 1);
+		mutex_init(&bdev->bd_mutex);
 		sema_init(&bdev->bd_mount_sem, 1);
 		INIT_LIST_HEAD(&bdev->bd_inodes);
 		INIT_LIST_HEAD(&bdev->bd_list);
+#ifdef CONFIG_SYSFS
+		INIT_LIST_HEAD(&bdev->bd_holder_list);
+#endif
 		inode_init_once(&ei->vfs_inode);
 	}
 }
@@ -300,10 +497,10 @@ static struct super_operations bdev_sops = {
 	.clear_inode = bdev_clear_inode,
 };
 
-static struct super_block *bd_get_sb(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *data)
+static int bd_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
 {
-	return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576);
+	return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576, mnt);
 }
 
 static struct file_system_type bd_type = {
@@ -312,14 +509,15 @@ static struct file_system_type bd_type = {
 	.kill_sb	= kill_anon_super,
 };
 
-static struct vfsmount *bd_mnt;
+static struct vfsmount *bd_mnt __read_mostly;
 struct super_block *blockdev_superblock;
 
 void __init bdev_cache_init(void)
 {
 	int err;
 	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
-			0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
+			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
+				SLAB_MEM_SPREAD|SLAB_PANIC),
 			init_once, NULL);
 	err = register_filesystem(&bd_type);
 	if (err)
@@ -413,21 +611,31 @@ EXPORT_SYMBOL(bdput);
 static struct block_device *bd_acquire(struct inode *inode)
 {
 	struct block_device *bdev;
+
 	spin_lock(&bdev_lock);
 	bdev = inode->i_bdev;
-	if (bdev && igrab(bdev->bd_inode)) {
+	if (bdev) {
+		atomic_inc(&bdev->bd_inode->i_count);
 		spin_unlock(&bdev_lock);
 		return bdev;
 	}
 	spin_unlock(&bdev_lock);
+
 	bdev = bdget(inode->i_rdev);
 	if (bdev) {
 		spin_lock(&bdev_lock);
-		if (inode->i_bdev)
-			__bd_forget(inode);
-		inode->i_bdev = bdev;
-		inode->i_mapping = bdev->bd_inode->i_mapping;
-		list_add(&inode->i_devices, &bdev->bd_inodes);
+		if (!inode->i_bdev) {
+			/*
+			 * We take an additional bd_inode->i_count for inode,
+			 * and it's released in clear_inode() of inode.
+			 * So, we can access it via ->i_mapping always
+			 * without igrab().
+			 */
+			atomic_inc(&bdev->bd_inode->i_count);
+			inode->i_bdev = bdev;
+			inode->i_mapping = bdev->bd_inode->i_mapping;
+			list_add(&inode->i_devices, &bdev->bd_inodes);
+		}
 		spin_unlock(&bdev_lock);
 	}
 	return bdev;
@@ -437,10 +645,18 @@ static struct block_device *bd_acquire(struct inode *inode)
 
 void bd_forget(struct inode *inode)
 {
+	struct block_device *bdev = NULL;
+
 	spin_lock(&bdev_lock);
-	if (inode->i_bdev)
+	if (inode->i_bdev) {
+		if (inode->i_sb != blockdev_superblock)
+			bdev = inode->i_bdev;
 		__bd_forget(inode);
+	}
 	spin_unlock(&bdev_lock);
+
+	if (bdev)
+		iput(bdev->bd_inode);
 }
 
 int bd_claim(struct block_device *bdev, void *holder)
@@ -492,6 +708,330 @@ void bd_release(struct block_device *bdev)
 
 EXPORT_SYMBOL(bd_release);
 
+#ifdef CONFIG_SYSFS
+/*
+ * Functions for bd_claim_by_kobject / bd_release_from_kobject
+ *
+ *     If a kobject is passed to bd_claim_by_kobject()
+ *     and the kobject has a parent directory,
+ *     following symlinks are created:
+ *        o from the kobject to the claimed bdev
+ *        o from "holders" directory of the bdev to the parent of the kobject
+ *     bd_release_from_kobject() removes these symlinks.
+ *
+ *     Example:
+ *        If /dev/dm-0 maps to /dev/sda, kobject corresponding to
+ *        /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then:
+ *           /sys/block/dm-0/slaves/sda --> /sys/block/sda
+ *           /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
+ */
+
+static struct kobject *bdev_get_kobj(struct block_device *bdev)
+{
+	if (bdev->bd_contains != bdev)
+		return kobject_get(&bdev->bd_part->kobj);
+	else
+		return kobject_get(&bdev->bd_disk->kobj);
+}
+
+static struct kobject *bdev_get_holder(struct block_device *bdev)
+{
+	if (bdev->bd_contains != bdev)
+		return kobject_get(bdev->bd_part->holder_dir);
+	else
+		return kobject_get(bdev->bd_disk->holder_dir);
+}
+
+static int add_symlink(struct kobject *from, struct kobject *to)
+{
+	if (!from || !to)
+		return 0;
+	return sysfs_create_link(from, to, kobject_name(to));
+}
+
+static void del_symlink(struct kobject *from, struct kobject *to)
+{
+	if (!from || !to)
+		return;
+	sysfs_remove_link(from, kobject_name(to));
+}
+
+/*
+ * 'struct bd_holder' contains pointers to kobjects symlinked by
+ * bd_claim_by_kobject.
+ * It's connected to bd_holder_list which is protected by bdev->bd_mutex.
+ */
+struct bd_holder {
+	struct list_head list;	/* chain of holders of the bdev */
+	int count;		/* references from the holder */
+	struct kobject *sdir;	/* holder object, e.g. "/block/dm-0/slaves" */
+	struct kobject *hdev;	/* e.g. "/block/dm-0" */
+	struct kobject *hdir;	/* e.g. "/block/sda/holders" */
+	struct kobject *sdev;	/* e.g. "/block/sda" */
+};
+
+/*
+ * Get references of related kobjects at once.
+ * Returns 1 on success. 0 on failure.
+ *
+ * Should call bd_holder_release_dirs() after successful use.
+ */
+static int bd_holder_grab_dirs(struct block_device *bdev,
+			struct bd_holder *bo)
+{
+	if (!bdev || !bo)
+		return 0;
+
+	bo->sdir = kobject_get(bo->sdir);
+	if (!bo->sdir)
+		return 0;
+
+	bo->hdev = kobject_get(bo->sdir->parent);
+	if (!bo->hdev)
+		goto fail_put_sdir;
+
+	bo->sdev = bdev_get_kobj(bdev);
+	if (!bo->sdev)
+		goto fail_put_hdev;
+
+	bo->hdir = bdev_get_holder(bdev);
+	if (!bo->hdir)
+		goto fail_put_sdev;
+
+	return 1;
+
+fail_put_sdev:
+	kobject_put(bo->sdev);
+fail_put_hdev:
+	kobject_put(bo->hdev);
+fail_put_sdir:
+	kobject_put(bo->sdir);
+
+	return 0;
+}
+
+/* Put references of related kobjects at once. */
+static void bd_holder_release_dirs(struct bd_holder *bo)
+{
+	kobject_put(bo->hdir);
+	kobject_put(bo->sdev);
+	kobject_put(bo->hdev);
+	kobject_put(bo->sdir);
+}
+
+static struct bd_holder *alloc_bd_holder(struct kobject *kobj)
+{
+	struct bd_holder *bo;
+
+	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+	if (!bo)
+		return NULL;
+
+	bo->count = 1;
+	bo->sdir = kobj;
+
+	return bo;
+}
+
+static void free_bd_holder(struct bd_holder *bo)
+{
+	kfree(bo);
+}
+
+/**
+ * find_bd_holder - find matching struct bd_holder from the block device
+ *
+ * @bdev:	struct block device to be searched
+ * @bo:		target struct bd_holder
+ *
+ * Returns matching entry with @bo in @bdev->bd_holder_list.
+ * If found, increment the reference count and return the pointer.
+ * If not found, returns NULL.
+ */
+static struct bd_holder *find_bd_holder(struct block_device *bdev,
+					struct bd_holder *bo)
+{
+	struct bd_holder *tmp;
+
+	list_for_each_entry(tmp, &bdev->bd_holder_list, list)
+		if (tmp->sdir == bo->sdir) {
+			tmp->count++;
+			return tmp;
+		}
+
+	return NULL;
+}
+
+/**
+ * add_bd_holder - create sysfs symlinks for bd_claim() relationship
+ *
+ * @bdev:	block device to be bd_claimed
+ * @bo:		preallocated and initialized by alloc_bd_holder()
+ *
+ * Add @bo to @bdev->bd_holder_list, create symlinks.
+ *
+ * Returns 0 if symlinks are created.
+ * Returns -ve if something fails.
+ */
+static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo)
+{
+	int ret;
+
+	if (!bo)
+		return -EINVAL;
+
+	if (!bd_holder_grab_dirs(bdev, bo))
+		return -EBUSY;
+
+	ret = add_symlink(bo->sdir, bo->sdev);
+	if (ret == 0) {
+		ret = add_symlink(bo->hdir, bo->hdev);
+		if (ret)
+			del_symlink(bo->sdir, bo->sdev);
+	}
+	if (ret == 0)
+		list_add_tail(&bo->list, &bdev->bd_holder_list);
+	return ret;
+}
+
+/**
+ * del_bd_holder - delete sysfs symlinks for bd_claim() relationship
+ *
+ * @bdev:	block device to be bd_claimed
+ * @kobj:	holder's kobject
+ *
+ * If there is matching entry with @kobj in @bdev->bd_holder_list
+ * and no other bd_claim() from the same kobject,
+ * remove the struct bd_holder from the list, delete symlinks for it.
+ *
+ * Returns a pointer to the struct bd_holder when it's removed from the list
+ * and ready to be freed.
+ * Returns NULL if matching claim isn't found or there is other bd_claim()
+ * by the same kobject.
+ */
+static struct bd_holder *del_bd_holder(struct block_device *bdev,
+					struct kobject *kobj)
+{
+	struct bd_holder *bo;
+
+	list_for_each_entry(bo, &bdev->bd_holder_list, list) {
+		if (bo->sdir == kobj) {
+			bo->count--;
+			BUG_ON(bo->count < 0);
+			if (!bo->count) {
+				list_del(&bo->list);
+				del_symlink(bo->sdir, bo->sdev);
+				del_symlink(bo->hdir, bo->hdev);
+				bd_holder_release_dirs(bo);
+				return bo;
+			}
+			break;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * bd_claim_by_kobject - bd_claim() with additional kobject signature
+ *
+ * @bdev:	block device to be claimed
+ * @holder:	holder's signature
+ * @kobj:	holder's kobject
+ *
+ * Do bd_claim() and if it succeeds, create sysfs symlinks between
+ * the bdev and the holder's kobject.
+ * Use bd_release_from_kobject() when releasing the claimed bdev.
+ *
+ * Returns 0 on success. (same as bd_claim())
+ * Returns errno on failure.
+ */
+static int bd_claim_by_kobject(struct block_device *bdev, void *holder,
+				struct kobject *kobj)
+{
+	int res;
+	struct bd_holder *bo, *found;
+
+	if (!kobj)
+		return -EINVAL;
+
+	bo = alloc_bd_holder(kobj);
+	if (!bo)
+		return -ENOMEM;
+
+	mutex_lock(&bdev->bd_mutex);
+	res = bd_claim(bdev, holder);
+	if (res == 0) {
+		found = find_bd_holder(bdev, bo);
+		if (found == NULL) {
+			res = add_bd_holder(bdev, bo);
+			if (res)
+				bd_release(bdev);
+		}
+	}
+
+	if (res || found)
+		free_bd_holder(bo);
+	mutex_unlock(&bdev->bd_mutex);
+
+	return res;
+}
+
+/**
+ * bd_release_from_kobject - bd_release() with additional kobject signature
+ *
+ * @bdev:	block device to be released
+ * @kobj:	holder's kobject
+ *
+ * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject().
+ */
+static void bd_release_from_kobject(struct block_device *bdev,
+					struct kobject *kobj)
+{
+	struct bd_holder *bo;
+
+	if (!kobj)
+		return;
+
+	mutex_lock(&bdev->bd_mutex);
+	bd_release(bdev);
+	if ((bo = del_bd_holder(bdev, kobj)))
+		free_bd_holder(bo);
+	mutex_unlock(&bdev->bd_mutex);
+}
+
+/**
+ * bd_claim_by_disk - wrapper function for bd_claim_by_kobject()
+ *
+ * @bdev:	block device to be claimed
+ * @holder:	holder's signature
+ * @disk:	holder's gendisk
+ *
+ * Call bd_claim_by_kobject() with getting @disk->slave_dir.
+ */
+int bd_claim_by_disk(struct block_device *bdev, void *holder,
+			struct gendisk *disk)
+{
+	return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir));
+}
+EXPORT_SYMBOL_GPL(bd_claim_by_disk);
+
+/**
+ * bd_release_from_disk - wrapper function for bd_release_from_kobject()
+ *
+ * @bdev:	block device to be claimed
+ * @disk:	holder's gendisk
+ *
+ * Call bd_release_from_kobject() and put @disk->slave_dir.
+ */
+void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk)
+{
+	bd_release_from_kobject(bdev, disk->slave_dir);
+	kobject_put(disk->slave_dir);
+}
+EXPORT_SYMBOL_GPL(bd_release_from_disk);
+#endif
+
 /*
  * Tries to open block device by device number. Use it ONLY if you
 * really do not have anything better - i.e. when you are behind a
@@ -557,7 +1097,11 @@ void bd_set_size(struct block_device *bdev, loff_t size)
 }
 EXPORT_SYMBOL(bd_set_size);
 
-static int do_open(struct block_device *bdev, struct file *file)
+static int __blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags,
+			int for_part);
+static int __blkdev_put(struct block_device *bdev, int for_part);
+
+static int do_open(struct block_device *bdev, struct file *file, int for_part)
 {
 	struct module *owner = NULL;
 	struct gendisk *disk;
@@ -574,7 +1118,7 @@ static int do_open(struct block_device *bdev, struct file *file)
 	}
 	owner = disk->fops->owner;
 
-	down(&bdev->bd_sem);
+	mutex_lock_nested(&bdev->bd_mutex, for_part);
 	if (!bdev->bd_openers) {
 		bdev->bd_disk = disk;
 		bdev->bd_contains = bdev;
@@ -601,25 +1145,21 @@ static int do_open(struct block_device *bdev, struct file *file)
 			ret = -ENOMEM;
 			if (!whole)
 				goto out_first;
-			ret = blkdev_get(whole, file->f_mode, file->f_flags);
+			BUG_ON(for_part);
+			ret = __blkdev_get(whole, file->f_mode, file->f_flags, 1);
 			if (ret)
 				goto out_first;
 			bdev->bd_contains = whole;
-			down(&whole->bd_sem);
-			whole->bd_part_count++;
 			p = disk->part[part - 1];
 			bdev->bd_inode->i_data.backing_dev_info =
 			   whole->bd_inode->i_data.backing_dev_info;
 			if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) {
-				whole->bd_part_count--;
-				up(&whole->bd_sem);
 				ret = -ENXIO;
 				goto out_first;
 			}
 			kobject_get(&p->kobj);
 			bdev->bd_part = p;
 			bd_set_size(bdev, (loff_t) p->nr_sects << 9);
-			up(&whole->bd_sem);
 		}
 	} else {
 		put_disk(disk);
@@ -632,14 +1172,12 @@ static int do_open(struct block_device *bdev, struct file *file)
 		}
 		if (bdev->bd_invalidated)
 			rescan_partitions(bdev->bd_disk, bdev);
-	} else {
-		down(&bdev->bd_contains->bd_sem);
-		bdev->bd_contains->bd_part_count++;
-		up(&bdev->bd_contains->bd_sem);
 		}
 	}
 	bdev->bd_openers++;
-	up(&bdev->bd_sem);
+	if (for_part)
+		bdev->bd_part_count++;
+	mutex_unlock(&bdev->bd_mutex);
 	unlock_kernel();
 	return 0;
 
@@ -647,19 +1185,20 @@ out_first:
 	bdev->bd_disk = NULL;
 	bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
 	if (bdev != bdev->bd_contains)
-		blkdev_put(bdev->bd_contains);
+		__blkdev_put(bdev->bd_contains, 1);
 	bdev->bd_contains = NULL;
 	put_disk(disk);
 	module_put(owner);
out:
-	up(&bdev->bd_sem);
+	mutex_unlock(&bdev->bd_mutex);
 	unlock_kernel();
 	if (ret)
 		bdput(bdev);
 	return ret;
 }
 
-int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags)
+static int __blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags,
+			int for_part)
 {
 	/*
 	 * This crockload is due to bad choice of ->open() type.
@@ -671,12 +1210,16 @@ int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags)
 	struct dentry fake_dentry = {};
 	fake_file.f_mode = mode;
 	fake_file.f_flags = flags;
-	fake_file.f_dentry = &fake_dentry;
+	fake_file.f_path.dentry = &fake_dentry;
 	fake_dentry.d_inode = bdev->bd_inode;
 
-	return do_open(bdev, &fake_file);
+	return do_open(bdev, &fake_file, for_part);
 }
 
+int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags)
+{
+	return __blkdev_get(bdev, mode, flags, 0);
+}
 EXPORT_SYMBOL(blkdev_get);
 
 static int blkdev_open(struct inode * inode, struct file * filp)
@@ -693,8 +1236,10 @@ static int blkdev_open(struct inode * inode, struct file * filp)
 	filp->f_flags |= O_LARGEFILE;
 
 	bdev = bd_acquire(inode);
+	if (bdev == NULL)
+		return -ENOMEM;
 
-	res = do_open(bdev, filp);
+	res = do_open(bdev, filp, 0);
 	if (res)
 		return res;
 
@@ -708,14 +1253,18 @@ static int blkdev_open(struct inode * inode, struct file * filp)
 	return res;
 }
 
-int blkdev_put(struct block_device *bdev)
+static int __blkdev_put(struct block_device *bdev, int for_part)
 {
 	int ret = 0;
 	struct inode *bd_inode = bdev->bd_inode;
 	struct gendisk *disk = bdev->bd_disk;
+	struct block_device *victim = NULL;
 
-	down(&bdev->bd_sem);
+	mutex_lock_nested(&bdev->bd_mutex, for_part);
 	lock_kernel();
+	if (for_part)
+		bdev->bd_part_count--;
+
 	if (!--bdev->bd_openers) {
 		sync_blockdev(bdev);
 		kill_bdev(bdev);
@@ -723,10 +1272,6 @@ int blkdev_put(struct block_device *bdev)
 		if (bdev->bd_contains == bdev) {
 			if (disk->fops->release)
 				ret = disk->fops->release(bd_inode, NULL);
-		} else {
-			down(&bdev->bd_contains->bd_sem);
-			bdev->bd_contains->bd_part_count--;
-			up(&bdev->bd_contains->bd_sem);
 		}
 		if (!bdev->bd_openers) {
 			struct module *owner = disk->fops->owner;
@@ -740,17 +1285,22 @@ int blkdev_put(struct block_device *bdev)
 		}
 		bdev->bd_disk = NULL;
 		bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
-		if (bdev != bdev->bd_contains) {
-			blkdev_put(bdev->bd_contains);
-		}
+		if (bdev != bdev->bd_contains)
+			victim = bdev->bd_contains;
 		bdev->bd_contains = NULL;
 	}
 	unlock_kernel();
-	up(&bdev->bd_sem);
+	mutex_unlock(&bdev->bd_mutex);
 	bdput(bdev);
+	if (victim)
+		__blkdev_put(victim, 1);
 	return ret;
 }
 
+int blkdev_put(struct block_device *bdev)
+{
+	return __blkdev_put(bdev, 0);
+}
 EXPORT_SYMBOL(blkdev_put);
 
 static int blkdev_close(struct inode * inode, struct file * filp)
@@ -761,28 +1311,12 @@ static int blkdev_close(struct inode * inode, struct file * filp)
 	return blkdev_put(bdev);
 }
 
-static ssize_t blkdev_file_write(struct file *file, const char __user *buf,
-				   size_t count, loff_t *ppos)
-{
-	struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };
-
-	return generic_file_write_nolock(file, &local_iov, 1, ppos);
-}
-
-static ssize_t blkdev_file_aio_write(struct kiocb *iocb, const char __user *buf,
-				   size_t count, loff_t pos)
-{
-	struct iovec local_iov = { .iov_base = (void __user *)buf, .iov_len = count };
-
-	return generic_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
-}
-
 static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 {
 	return blkdev_ioctl(file->f_mapping->host, file, cmd, arg);
 }
 
-struct address_space_operations def_blk_aops = {
+const struct address_space_operations def_blk_aops = {
 	.readpage	= blkdev_readpage,
 	.writepage	= blkdev_writepage,
 	.sync_page	= block_sync_page,
@@ -792,23 +1326,23 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	.direct_IO	= blkdev_direct_IO,
 };
 
-struct file_operations def_blk_fops = {
+const struct file_operations def_blk_fops = {
 	.open		= blkdev_open,
 	.release	= blkdev_close,
 	.llseek		= block_llseek,
-	.read		= generic_file_read,
-	.write		= blkdev_file_write,
+	.read		= do_sync_read,
+	.write		= do_sync_write,
 	.aio_read	= generic_file_aio_read,
-	.aio_write	= blkdev_file_aio_write,
+	.aio_write	= generic_file_aio_write_nolock,
 	.mmap		= generic_file_mmap,
 	.fsync		= block_fsync,
 	.unlocked_ioctl	= block_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= compat_blkdev_ioctl,
 #endif
-	.readv		= generic_file_readv,
-	.writev		= generic_file_write_nolock,
 	.sendfile	= generic_file_sendfile,
+	.splice_read	= generic_file_splice_read,
+	.splice_write	= generic_file_splice_write,
 };
 
 int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
@@ -920,3 +1454,24 @@ void close_bdev_excl(struct block_device *bdev)
 }
 
 EXPORT_SYMBOL(close_bdev_excl);
+
+int __invalidate_device(struct block_device *bdev)
+{
+	struct super_block *sb = get_super(bdev);
+	int res = 0;
+
+	if (sb) {
+		/*
+		 * no need to lock the super, get_super holds the
+		 * read mutex so the filesystem cannot go away
+		 * under us (->put_super runs with the write lock
+		 * held).
+		 */
+		shrink_dcache_sb(sb);
+		res = invalidate_inodes(sb);
+		drop_super(sb);
+	}
+	invalidate_bdev(bdev, 0);
+	return res;
+}
+EXPORT_SYMBOL(__invalidate_device);
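
For reference, a minimal sketch of how a stacking driver (dm/md style) would consume the bd_claim_by_disk()/bd_release_from_disk() interface that this patch adds; the example_* names and the attach/detach flow are hypothetical and not part of the diff:

/*
 * Illustrative sketch only -- not part of the patch above.  It shows the
 * intended usage of bd_claim_by_disk()/bd_release_from_disk(): claiming a
 * slave device on behalf of a stacked gendisk so that the sysfs links
 *   /sys/block/<stack>/slaves/<slave> --> /sys/block/<slave>
 *   /sys/block/<slave>/holders/<stack> --> /sys/block/<stack>
 * are created and later torn down.
 */
#include <linux/fs.h>
#include <linux/genhd.h>

/* Attach a slave below stack_disk; holder is an opaque claim signature. */
static int example_attach_slave(struct gendisk *stack_disk,
				struct block_device *slave, void *holder)
{
	/* Claims the bdev (bd_claim) and creates both symlinks. */
	return bd_claim_by_disk(slave, holder, stack_disk);
}

/* Detach again: drops the claim and removes both symlinks. */
static void example_detach_slave(struct gendisk *stack_disk,
				 struct block_device *slave)
{
	bd_release_from_disk(slave, stack_disk);
}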