X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Fmbcache.c;h=e4fde1ab22cdb0a5af105cdea66cccf9473ac08e;hb=e3c38d9d170af5eb313a7d19a18a7c11ed63882d;hp=a7109f9c2caa6aa1a82524ec387a68be728060bd;hpb=ec9397bab20a628530ce3051167d3d0fcc2c1af7;p=linux-2.6.git

diff --git a/fs/mbcache.c b/fs/mbcache.c
index a7109f9c2..e4fde1ab2 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -57,7 +57,7 @@
 
 #define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
 
-DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
+static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
 
 MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
 MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
@@ -76,6 +76,20 @@ EXPORT_SYMBOL(mb_cache_entry_find_first);
 EXPORT_SYMBOL(mb_cache_entry_find_next);
 #endif
 
 
+struct mb_cache {
+	struct list_head		c_cache_list;
+	const char			*c_name;
+	struct mb_cache_op		c_op;
+	atomic_t			c_entry_count;
+	int				c_bucket_bits;
+#ifndef MB_CACHE_INDEXES_COUNT
+	int				c_indexes_count;
+#endif
+	kmem_cache_t			*c_entry_cache;
+	struct list_head		*c_block_hash;
+	struct list_head		*c_indexes_hash[0];
+};
+
 /*
  * Global data: list of all mbcache's, lru list, and a spinlock for
@@ -85,7 +99,7 @@ EXPORT_SYMBOL(mb_cache_entry_find_next);
 
 static LIST_HEAD(mb_cache_list);
 static LIST_HEAD(mb_cache_lru_list);
-static spinlock_t mb_cache_spinlock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(mb_cache_spinlock);
 static struct shrinker *mb_shrinker;
 
 static inline int
@@ -102,7 +116,7 @@ mb_cache_indexes(struct mb_cache *cache)
  * What the mbcache registers as to get shrunk dynamically.
  */
 
-static int mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask);
+static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
 
 
 static inline int
@@ -112,7 +126,7 @@ __mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
 }
 
 
-static inline void
+static void
 __mb_cache_entry_unhash(struct mb_cache_entry *ce)
 {
 	int n;
@@ -125,13 +139,12 @@ __mb_cache_entry_unhash(struct mb_cache_entry *ce)
 }
 
 
-static inline void
-__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask)
+static void
+__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
 {
 	struct mb_cache *cache = ce->e_cache;
 
-	mb_assert(!ce->e_used);
-	mb_assert(!ce->e_queued);
+	mb_assert(!(ce->e_used || ce->e_queued));
 	if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
 		/* free failed -- put back on the lru list
 		   for freeing later. */
@@ -145,7 +158,7 @@ __mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask)
 }
 
 
-static inline void
+static void
 __mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
 {
 	/* Wake up all processes queuing for this cache entry. */
@@ -157,6 +170,7 @@ __mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
 	if (!(ce->e_used || ce->e_queued)) {
 		if (!__mb_cache_entry_is_hashed(ce))
 			goto forget;
+		mb_assert(list_empty(&ce->e_lru_list));
 		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
 	}
 	spin_unlock(&mb_cache_spinlock);
@@ -179,7 +193,7 @@ forget:
 * Returns the number of objects which are present in the cache.
 */
 static int
-mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask)
+mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
 {
 	LIST_HEAD(free_list);
 	struct list_head *l, *ltmp;
@@ -211,7 +225,7 @@ mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask)
 					   e_lru_list), gfp_mask);
 	}
 out:
-	return count;
+	return (count / 100) * sysctl_vfs_cache_pressure;
 }
 
 
@@ -240,7 +254,7 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
 	struct mb_cache *cache = NULL;
 
 	if(entry_size < sizeof(struct mb_cache_entry) +
-	   indexes_count * sizeof(struct mb_cache_entry_index))
+	   indexes_count * sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]))
 		return NULL;
 
 	cache = kmalloc(sizeof(struct mb_cache) +
@@ -274,7 +288,7 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
 			INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
 	}
 	cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
-		SLAB_RECLAIM_ACCOUNT, NULL, NULL);
+		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL, NULL);
 	if (!cache->c_entry_cache)
 		goto fail;
 
@@ -287,8 +301,7 @@ fail:
 	if (cache) {
 		while (--m >= 0)
 			kfree(cache->c_indexes_hash[m]);
-		if (cache->c_block_hash)
-			kfree(cache->c_block_hash);
+		kfree(cache->c_block_hash);
 		kfree(cache);
 	}
 	return NULL;
@@ -298,15 +311,14 @@ fail:
 /*
  * mb_cache_shrink()
  *
- * Removes all cache entires of a device from the cache. All cache entries
+ * Removes all cache entries of a device from the cache. All cache entries
  * currently in use cannot be freed, and thus remain in the cache. All others
  * are freed.
  *
- * @cache: which cache to shrink
 * @bdev: which device's cache entries to shrink
 */
 void
-mb_cache_shrink(struct mb_cache *cache, struct block_device *bdev)
+mb_cache_shrink(struct block_device *bdev)
 {
 	LIST_HEAD(free_list);
 	struct list_head *l, *ltmp;
@@ -505,6 +517,9 @@ mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
 		if (ce->e_bdev == bdev && ce->e_block == block) {
 			DEFINE_WAIT(wait);
 
+			if (!list_empty(&ce->e_lru_list))
+				list_del_init(&ce->e_lru_list);
+
 			while (ce->e_used > 0) {
 				ce->e_queued++;
 				prepare_to_wait(&mb_cache_queue, &wait,
@@ -516,13 +531,11 @@ mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
 			}
 			finish_wait(&mb_cache_queue, &wait);
 			ce->e_used += 1 + MB_CACHE_WRITER;
-	
+
 			if (!__mb_cache_entry_is_hashed(ce)) {
 				__mb_cache_entry_release_unlock(ce);
 				return NULL;
 			}
-			if (!list_empty(&ce->e_lru_list))
-				list_del_init(&ce->e_lru_list);
 			goto cleanup;
 		}
 	}
@@ -539,8 +552,6 @@ static struct mb_cache_entry *
 __mb_cache_entry_find(struct list_head *l, struct list_head *head,
 		      int index, struct block_device *bdev, unsigned int key)
 {
-	DEFINE_WAIT(wait);
-
 	while (l != head) {
 		struct mb_cache_entry *ce =
 			list_entry(l, struct mb_cache_entry,
@@ -548,10 +559,12 @@ __mb_cache_entry_find(struct list_head *l, struct list_head *head,
 		if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
 			DEFINE_WAIT(wait);
 
+			if (!list_empty(&ce->e_lru_list))
+				list_del_init(&ce->e_lru_list);
+
 			/* Incrementing before holding the lock gives readers
 			   priority over writers. */
 			ce->e_used++;
-
 			while (ce->e_used >= MB_CACHE_WRITER) {
 				ce->e_queued++;
 				prepare_to_wait(&mb_cache_queue, &wait,
@@ -562,14 +575,12 @@ __mb_cache_entry_find(struct list_head *l, struct list_head *head,
 				ce->e_queued--;
 			}
 			finish_wait(&mb_cache_queue, &wait);
-	
+
 			if (!__mb_cache_entry_is_hashed(ce)) {
 				__mb_cache_entry_release_unlock(ce);
 				spin_lock(&mb_cache_spinlock);
 				return ERR_PTR(-EAGAIN);
 			}
-			if (!list_empty(&ce->e_lru_list))
-				list_del_init(&ce->e_lru_list);
 			return ce;
 		}
 		l = l->next;
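
A note on the reader/writer accounting in the wait loops above: mbcache
open-codes reader/writer counting in ce->e_used. Each reader adds a single
reference, while a writer (mb_cache_entry_get()) adds 1 + MB_CACHE_WRITER, so
e_used >= MB_CACHE_WRITER means a writer holds or has claimed the entry;
readers bump the count before blocking, which the in-line comment describes as
giving readers priority over writers. The stand-alone user-space sketch below
illustrates only the counting scheme (plain ints instead of the
spinlock-protected fields; the names mirror mbcache.c, but none of this is
kernel code):

	#include <assert.h>
	#include <stdio.h>

	/* Same definition as in mbcache.c: 0x7fff for a 16-bit short. */
	#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)

	struct entry {
		unsigned int e_used;	/* readers add 1; a writer adds 1 + MB_CACHE_WRITER */
	};

	/* The condition the kernel's wait loops test before sleeping. */
	static int writer_present(const struct entry *ce)
	{
		return ce->e_used >= MB_CACHE_WRITER;
	}

	int main(void)
	{
		struct entry ce = { 0 };

		ce.e_used++;			/* first reader */
		ce.e_used++;			/* second reader */
		assert(!writer_present(&ce));	/* readers alone stay below the threshold */

		ce.e_used += 1 + MB_CACHE_WRITER;	/* writer, as in mb_cache_entry_get() */
		assert(writer_present(&ce));	/* new readers would now sleep on mb_cache_queue */

		printf("e_used=0x%x writer_present=%d\n",
		       ce.e_used, writer_present(&ce));
		return 0;
	}

The related change in these hunks moves the list_del_init() of
ce->e_lru_list ahead of the wait loops, so an entry a task is sleeping on can
no longer be freed off the LRU list by mb_cache_shrink_fn() in the meantime.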