#include "jfs_txnmgr.h"
#include "jfs_debug.h"
-static spinlock_t meta_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(meta_lock);
#ifdef CONFIG_JFS_STATISTICS
-struct {
+static struct {
uint pagealloc; /* # of page allocations */
uint pagefree; /* # of page frees */
uint lockwait; /* # of sleeping lock_metapage() calls */
} mpStat;
-static inline struct metapage *alloc_metapage(int no_wait)
+static inline struct metapage *alloc_metapage(int gfp_mask)
{
- return mempool_alloc(metapage_mempool, no_wait ? GFP_ATOMIC : GFP_NOFS);
+ return mempool_alloc(metapage_mempool, gfp_mask);
}
static inline void free_metapage(struct metapage *mp)
if (absolute)
mapping = inode->i_sb->s_bdev->bd_inode->i_mapping;
- else
+ else {
+ /*
+ * If an nfs client tries to read an inode that is larger
+ * than any existing inodes, we may try to read past the
+ * end of the inode map
+ */
+ if ((lblock << inode->i_blkbits) >= inode->i_size)
+ return NULL;
mapping = inode->i_mapping;
+ }
- spin_lock(&meta_lock);
hash_ptr = meta_hash(mapping, lblock);
+again:
+ spin_lock(&meta_lock);
mp = search_hash(hash_ptr, mapping, lblock);
if (mp) {
page_found:
+ mp->count++;
+ lock_metapage(mp);
+ spin_unlock(&meta_lock);
+ if (test_bit(META_stale, &mp->flag)) {
+ release_metapage(mp);
+ yield(); /* Let other waiters release it, too */
+ goto again;
+ }
if (test_bit(META_discard, &mp->flag)) {
if (!new) {
- spin_unlock(&meta_lock);
jfs_error(inode->i_sb,
"__get_metapage: using a "
"discarded metapage");
+ release_metapage(mp);
return NULL;
}
clear_bit(META_discard, &mp->flag);
}
- mp->count++;
jfs_info("__get_metapage: found 0x%p, in hash", mp);
if (mp->logical_size != size) {
- spin_unlock(&meta_lock);
jfs_error(inode->i_sb,
"__get_metapage: mp->logical_size != size");
+ release_metapage(mp);
return NULL;
}
- lock_metapage(mp);
- spin_unlock(&meta_lock);
} else {
l2bsize = inode->i_blkbits;
l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
*/
mp = NULL;
if (JFS_IP(inode)->fileset == AGGREGATE_I) {
- mp = mempool_alloc(metapage_mempool, GFP_ATOMIC);
+ mp = alloc_metapage(GFP_ATOMIC);
if (!mp) {
/*
* mempool is supposed to protect us from
struct metapage *mp2;
spin_unlock(&meta_lock);
- mp = mempool_alloc(metapage_mempool, GFP_NOFS);
+ mp = alloc_metapage(GFP_NOFS);
spin_lock(&meta_lock);
/* we dropped the meta_lock, we need to search the
atomic_set(&mp->nohomeok,0);
mp->mapping = mapping;
mp->index = lblock;
- mp->page = 0;
+ mp->page = NULL;
mp->logical_size = size;
add_to_hash(mp, hash_ptr);
spin_unlock(&meta_lock);
jfs_info("__write_metapage: mp = 0x%p", mp);
- if (test_bit(META_discard, &mp->flag)) {
- /*
- * This metadata is no longer valid
- */
- clear_bit(META_dirty, &mp->flag);
- return;
- }
-
page_index = mp->page->index;
page_offset =
(mp->index - (page_index << l2BlocksPerPage)) << l2bsize;
if (--mp->count || atomic_read(&mp->nohomeok)) {
unlock_metapage(mp);
spin_unlock(&meta_lock);
- } else {
- remove_from_hash(mp, meta_hash(mp->mapping, mp->index));
- spin_unlock(&meta_lock);
-
- if (mp->page) {
- kunmap(mp->page);
- mp->data = 0;
- if (test_bit(META_dirty, &mp->flag))
- __write_metapage(mp);
- if (test_bit(META_sync, &mp->flag)) {
- sync_metapage(mp);
- clear_bit(META_sync, &mp->flag);
- }
-
- if (test_bit(META_discard, &mp->flag)) {
- lock_page(mp->page);
- block_invalidatepage(mp->page, 0);
- unlock_page(mp->page);
- }
+ return;
+ }
- page_cache_release(mp->page);
- INCREMENT(mpStat.pagefree);
+ if (mp->page) {
+ /* Releasing spinlock, we have to check mp->count later */
+ set_bit(META_stale, &mp->flag);
+ spin_unlock(&meta_lock);
+ kunmap(mp->page);
+ mp->data = NULL;
+ if (test_bit(META_dirty, &mp->flag))
+ __write_metapage(mp);
+ if (test_bit(META_sync, &mp->flag)) {
+ sync_metapage(mp);
+ clear_bit(META_sync, &mp->flag);
}
- if (mp->lsn) {
- /*
- * Remove metapage from logsynclist.
- */
- log = mp->log;
- LOGSYNC_LOCK(log);
- mp->log = 0;
- mp->lsn = 0;
- mp->clsn = 0;
- log->count--;
- list_del(&mp->synclist);
- LOGSYNC_UNLOCK(log);
+ if (test_bit(META_discard, &mp->flag)) {
+ lock_page(mp->page);
+ block_invalidatepage(mp->page, 0);
+ unlock_page(mp->page);
}
- free_metapage(mp);
+ page_cache_release(mp->page);
+ mp->page = NULL;
+ INCREMENT(mpStat.pagefree);
+ spin_lock(&meta_lock);
}
+
+ if (mp->lsn) {
+ /*
+ * Remove metapage from logsynclist.
+ */
+ log = mp->log;
+ LOGSYNC_LOCK(log);
+ mp->log = NULL;
+ mp->lsn = 0;
+ mp->clsn = 0;
+ log->count--;
+ list_del(&mp->synclist);
+ LOGSYNC_UNLOCK(log);
+ }
+ if (mp->count) {
+ /* Someone else is trying to get this metapage */
+ unlock_metapage(mp);
+ spin_unlock(&meta_lock);
+ return;
+ }
+ remove_from_hash(mp, meta_hash(mp->mapping, mp->index));
+ spin_unlock(&meta_lock);
+
+ free_metapage(mp);
}
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
for (lblock = addr; lblock < addr + len;
lblock += 1 << l2BlocksPerPage) {
hash_ptr = meta_hash(mapping, lblock);
+again:
spin_lock(&meta_lock);
mp = search_hash(hash_ptr, mapping, lblock);
if (mp) {
+ if (test_bit(META_stale, &mp->flag)) {
+ /* Racing with release_metapage */
+ mp->count++;
+ lock_metapage(mp);
+ spin_unlock(&meta_lock);
+ /* racing release_metapage should be done now */
+ release_metapage(mp);
+ goto again;
+ }
+
+ clear_bit(META_dirty, &mp->flag);
set_bit(META_discard, &mp->flag);
spin_unlock(&meta_lock);
} else {
if (page) {
block_invalidatepage(page, 0);
unlock_page(page);
+ page_cache_release(page);
}
}
}