}
EXPORT_SYMBOL(sync_blockdev);
-static void __fsync_super(struct super_block *sb)
+/*
+ * Write out and wait upon all dirty data associated with this
+ * superblock. Filesystem data as well as the underlying block
+ * device. Takes the superblock lock.
+ */
+int fsync_super(struct super_block *sb)
{
sync_inodes_sb(sb, 0);
DQUOT_SYNC(sb);
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
sync_blockdev(sb->s_bdev);
sync_inodes_sb(sb, 1);
-}
-/*
- * Write out and wait upon all dirty data associated with this
- * superblock. Filesystem data as well as the underlying block
- * device. Takes the superblock lock.
- */
-int fsync_super(struct super_block *sb)
-{
- __fsync_super(sb);
return sync_blockdev(sb->s_bdev);
}
* freeze_bdev -- lock a filesystem and force it into a consistent state
* @bdev: blockdevice to lock
*
- * This takes the block device bd_mount_mutex to make sure no new mounts
+ * This takes the block device bd_mount_sem to make sure no new mounts
* happen on bdev until thaw_bdev() is called.
* If a superblock is found on this device, we take the s_umount semaphore
* on it to make sure nobody unmounts until the snapshot creation is done.
{
struct super_block *sb;
- mutex_lock(&bdev->bd_mount_mutex);
+ down(&bdev->bd_mount_sem);
sb = get_super(bdev);
if (sb && !(sb->s_flags & MS_RDONLY)) {
sb->s_frozen = SB_FREEZE_WRITE;
smp_wmb();
- __fsync_super(sb);
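+		/*
+		 * Flush dirty inodes, quota and the superblock, then write
+		 * out and wait on the block device, so the frozen image is
+		 * self-consistent.
+		 */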
+ sync_inodes_sb(sb, 0);
+ DQUOT_SYNC(sb);
+
+ lock_super(sb);
+ if (sb->s_dirt && sb->s_op->write_super)
+ sb->s_op->write_super(sb);
+ unlock_super(sb);
+
+ if (sb->s_op->sync_fs)
+ sb->s_op->sync_fs(sb, 1);
+
+ sync_blockdev(sb->s_bdev);
+ sync_inodes_sb(sb, 1);
sb->s_frozen = SB_FREEZE_TRANS;
smp_wmb();
drop_super(sb);
}
- mutex_unlock(&bdev->bd_mount_mutex);
+ up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);
return ret;
}
-long do_fsync(struct file *file, int datasync)
+static long do_fsync(unsigned int fd, int datasync)
{
- int ret;
- int err;
- struct address_space *mapping = file->f_mapping;
+ struct file * file;
+ struct address_space *mapping;
+ int ret, err;
+
+ ret = -EBADF;
+ file = fget(fd);
+ if (!file)
+ goto out;
+ ret = -EINVAL;
if (!file->f_op || !file->f_op->fsync) {
/* Why? We can still call filemap_fdatawrite */
- ret = -EINVAL;
- goto out;
+ goto out_putf;
}
+ mapping = file->f_mapping;
+
current->flags |= PF_SYNCWRITE;
ret = filemap_fdatawrite(mapping);
/*
- * We need to protect against concurrent writers, which could cause
- * livelocks in fsync_buffers_list().
+ * We need to protect against concurrent writers,
+ * which could cause livelocks in fsync_buffers_list
*/
mutex_lock(&mapping->host->i_mutex);
err = file->f_op->fsync(file, file->f_dentry, datasync);
if (!ret)
ret = err;
current->flags &= ~PF_SYNCWRITE;
-out:
- return ret;
-}
-
-static long __do_fsync(unsigned int fd, int datasync)
-{
- struct file *file;
- int ret = -EBADF;
- file = fget(fd);
- if (file) {
- ret = do_fsync(file, datasync);
- fput(file);
- }
+out_putf:
+ fput(file);
+out:
return ret;
}
asmlinkage long sys_fsync(unsigned int fd)
{
- return __do_fsync(fd, 0);
+ return do_fsync(fd, 0);
}
asmlinkage long sys_fdatasync(unsigned int fd)
{
- return __do_fsync(fd, 1);
+ return do_fsync(fd, 1);
}
/*
if (all_mapped) {
printk("__find_get_block_slow() failed. "
"block=%llu, b_blocknr=%llu\n",
- (unsigned long long)block,
- (unsigned long long)bh->b_blocknr);
- printk("b_state=0x%08lx, b_size=%zu\n",
- bh->b_state, bh->b_size);
+ (unsigned long long)block, (unsigned long long)bh->b_blocknr);
+ printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
}
out_unlock:
wakeup_pdflush(1024);
yield();
- for_each_online_pgdat(pgdat) {
+ for_each_pgdat(pgdat) {
zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
if (*zones)
try_to_free_pages(zones, GFP_NOFS);
if (!mapping->assoc_mapping) {
mapping->assoc_mapping = buffer_mapping;
} else {
- BUG_ON(mapping->assoc_mapping != buffer_mapping);
+ if (mapping->assoc_mapping != buffer_mapping)
+ BUG();
}
if (list_empty(&bh->b_assoc_buffers)) {
spin_lock(&buffer_mapping->private_lock);
}
write_unlock_irq(&mapping->tree_lock);
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- return 1;
}
+
return 0;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
if (!page)
return NULL;
- BUG_ON(!PageLocked(page));
+ if (!PageLocked(page))
+ BUG();
if (page_has_buffers(page)) {
bh = page_buffers(page);
} while ((size << sizebits) < PAGE_SIZE);
index = block >> sizebits;
- block = index << sizebits;
+ /*
+ * Check for a block which wants to lie outside our maximum possible
+ * pagecache index. (this comparison is done using sector_t types).
+ */
+ if (unlikely(index != block >> sizebits)) {
+ char b[BDEVNAME_SIZE];
+
+ printk(KERN_ERR "%s: requested out-of-range block %llu for "
+ "device %s\n",
+ __FUNCTION__, (unsigned long long)block,
+ bdevname(bdev, b));
+ return -EIO;
+ }
+ block = index << sizebits;
/* Create a page with the proper size buffers.. */
page = grow_dev_page(bdev, block, index, size);
if (!page)
for (;;) {
struct buffer_head * bh;
+ int ret;
bh = __find_get_block(bdev, block, size);
if (bh)
return bh;
- if (!grow_buffers(bdev, block, size))
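+		/*
+		 * grow_buffers() returns a negative value for a hard error
+		 * (out-of-range block) and 0 when no memory was available:
+		 * give up on the former, reclaim and retry on the latter.
+		 */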
+ ret = grow_buffers(bdev, block, size);
+ if (ret < 0)
+ return NULL;
+ if (ret == 0)
free_more_memory();
}
}
struct page *page, unsigned long offset)
{
bh->b_page = page;
- BUG_ON(offset >= PAGE_SIZE);
+ if (offset >= PAGE_SIZE)
+ BUG();
if (PageHighMem(page))
/*
* This catches illegal uses and preserves the offset:
* point. Because the caller is about to free (and possibly reuse) those
* blocks on-disk.
*/
-void block_invalidatepage(struct page *page, unsigned long offset)
+int block_invalidatepage(struct page *page, unsigned long offset)
{
struct buffer_head *head, *bh, *next;
unsigned int curr_off = 0;
+ int ret = 1;
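+	/* 1, or the result of releasing the page's buffers when offset == 0 */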
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
* so real IO is not possible anymore.
*/
if (offset == 0)
- try_to_release_page(page, 0);
+ ret = try_to_release_page(page, 0);
out:
- return;
+ return ret;
}
EXPORT_SYMBOL(block_invalidatepage);
-void do_invalidatepage(struct page *page, unsigned long offset)
+int do_invalidatepage(struct page *page, unsigned long offset)
{
- void (*invalidatepage)(struct page *, unsigned long);
- invalidatepage = page->mapping->a_ops->invalidatepage ? :
- block_invalidatepage;
- (*invalidatepage)(page, offset);
+ int (*invalidatepage)(struct page *, unsigned long);
+ invalidatepage = page->mapping->a_ops->invalidatepage;
+ if (invalidatepage == NULL)
+ invalidatepage = block_invalidatepage;
+ return (*invalidatepage)(page, offset);
}
/*
sector_t block;
sector_t last_block;
struct buffer_head *bh, *head;
- const unsigned blocksize = 1 << inode->i_blkbits;
int nr_underway = 0;
BUG_ON(!PageLocked(page));
last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
if (!page_has_buffers(page)) {
- create_empty_buffers(page, blocksize,
+ create_empty_buffers(page, 1 << inode->i_blkbits,
(1 << BH_Dirty)|(1 << BH_Uptodate));
}
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
- WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
goto recover;
if (buffer_new(bh))
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
- WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
break;
fully_mapped = 0;
if (iblock < lblock) {
- WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
if (err)
SetPageError(page);
create = 1;
if (block_start >= to)
create = 0;
- map_bh.b_size = blocksize;
ret = get_block(inode, block_in_file + block_in_page,
&map_bh, create);
if (ret)
err = 0;
if (!buffer_mapped(bh)) {
- WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
if (err)
goto unlock;
struct inode *inode = mapping->host;
tmp.b_state = 0;
tmp.b_blocknr = 0;
- tmp.b_size = 1 << inode->i_blkbits;
get_block(inode, block, &tmp, 0);
return tmp.b_blocknr;
}
}
EXPORT_SYMBOL(try_to_free_buffers);
-void block_sync_page(struct page *page)
+int block_sync_page(struct page *page)
{
struct address_space *mapping;
mapping = page_mapping(page);
if (mapping)
blk_run_backing_dev(mapping->backing_dev_info, page);
+ return 0;
}
/*
return 0;
}
+/*
+ * Migration function for pages with buffers. This function can only be used
+ * if the underlying filesystem guarantees that no other references to "page"
+ * exist.
+ */
+#ifdef CONFIG_MIGRATION
+int buffer_migrate_page(struct page *newpage, struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct buffer_head *bh, *head;
+ int rc;
+
+ if (!mapping)
+ return -EAGAIN;
+
+ if (!page_has_buffers(page))
+ return migrate_page(newpage, page);
+
+ head = page_buffers(page);
+
+ rc = migrate_page_remove_references(newpage, page, 3);
+ if (rc)
+ return rc;
+
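+	/* Take a reference on each buffer head and lock it. */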
+ bh = head;
+ do {
+ get_bh(bh);
+ lock_buffer(bh);
+ bh = bh->b_this_page;
+
+ } while (bh != head);
+
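+	/* Move page->private and its page reference over to the new page. */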
+ ClearPagePrivate(page);
+ set_page_private(newpage, page_private(page));
+ set_page_private(page, 0);
+ put_page(page);
+ get_page(newpage);
+
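+	/* Repoint each buffer head at the same offset within the new page. */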
+ bh = head;
+ do {
+ set_bh_page(bh, newpage, bh_offset(bh));
+ bh = bh->b_this_page;
+
+ } while (bh != head);
+
+ SetPagePrivate(newpage);
+
+ migrate_page_copy(newpage, page);
+
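+	/* Drop the buffer locks and references taken above. */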
+ bh = head;
+ do {
+ unlock_buffer(bh);
+ put_bh(bh);
+ bh = bh->b_this_page;
+
+ } while (bh != head);
+
+ return 0;
+}
+EXPORT_SYMBOL(buffer_migrate_page);
+#endif
+
/*
* Buffer-head allocation
*/
if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
return;
__get_cpu_var(bh_accounting).ratelimit = 0;
- for_each_online_cpu(i)
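+	/* Sum the per-CPU buffer_head counts to check against the limit. */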
+ for_each_cpu(i)
tot += per_cpu(bh_accounting, i).nr;
buffer_heads_over_limit = (tot > max_buffer_heads);
}
brelse(b->bhs[i]);
b->bhs[i] = NULL;
}
- get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
- per_cpu(bh_accounting, cpu).nr = 0;
- put_cpu_var(bh_accounting);
}
static int buffer_cpu_notify(struct notifier_block *self,
int nrpages;
bh_cachep = kmem_cache_create("buffer_head",
- sizeof(struct buffer_head), 0,
- (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
- SLAB_MEM_SPREAD),
- init_buffer_head,
- NULL);
+ sizeof(struct buffer_head), 0,
+ SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
/*
* Limit the bh occupancy to 10% of ZONE_NORMAL