#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
-DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
+static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif
+struct mb_cache {
+ struct list_head c_cache_list;
+ const char *c_name;
+ struct mb_cache_op c_op;
+ atomic_t c_entry_count;
+ int c_bucket_bits;
+#ifndef MB_CACHE_INDEXES_COUNT
+ int c_indexes_count;
+#endif
+ kmem_cache_t *c_entry_cache;
+ struct list_head *c_block_hash;
+ struct list_head *c_indexes_hash[0];
+};
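Moving struct mb_cache out of the header also makes the allocation trick visible: c_indexes_hash[0] is a zero-length array, so the pointer slots are carved out of the same kmalloc() as the descriptor, and each slot then points at its own bucket array. A minimal sketch of that setup loop, assuming the locals (indexes_count, bucket_count, m, n) of mb_cache_create():

	for (m = 0; m < indexes_count; m++) {
		/* one hash table of 1 << bucket_bits list heads per index */
		cache->c_indexes_hash[m] = kmalloc(bucket_count *
						   sizeof(struct list_head),
						   GFP_KERNEL);
		if (!cache->c_indexes_hash[m])
			goto fail;	/* unwound via --m on failure */
		for (n = 0; n < bucket_count; n++)
			INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
	}

This is also why the failure path below walks backwards with --m before freeing c_block_hash and the cache itself.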
+
/*
 * Global data: list of all mbcache's, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */
static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
-static spinlock_t mb_cache_spinlock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(mb_cache_spinlock);
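(DEFINE_SPINLOCK() is the preferred replacement for SPIN_LOCK_UNLOCKED initializers: it declares and statically initializes the lock in one step and keeps each lock distinct for the spinlock debugging code.)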
static struct shrinker *mb_shrinker;
static inline int
mb_cache_indexes(struct mb_cache *cache)
{
#ifdef MB_CACHE_INDEXES_COUNT
	return MB_CACHE_INDEXES_COUNT;
#else
	return cache->c_indexes_count;
#endif
}

/*
 * What the mbcache registers as to get shrunk dynamically.
 */
-static int mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask);
+static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
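The gfp_t here follows the tree-wide sparse annotation of allocation flags; shrinker_t carries the matching signature. For context, a sketch of how this callback is tied to the mb_shrinker handle above with the 2.6-era API (the init/exit function names are assumptions):

	static int __init init_mbcache(void)
	{
		mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
		return 0;
	}

	static void __exit exit_mbcache(void)
	{
		remove_shrinker(mb_shrinker);
	}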
static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
	return !list_empty(&ce->e_block_list);
}
-static inline void
+static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
int n;
}
-static inline void
-__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask)
+static void
+__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
struct mb_cache *cache = ce->e_cache;
- mb_assert(!ce->e_used);
- mb_assert(!ce->e_queued);
+ mb_assert(!(ce->e_used || ce->e_queued));
if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
/* free failed -- put back on the lru list
for freeing later. */
}
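Folding the two assertions into one reads better, since e_used and e_queued are only ever checked together here. For reference, the branch the comment describes puts the entry back under the global lock, while the success path releases it to the slab; a sketch assuming the elided body follows the usual pattern:

	if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
		/* free failed -- put back on the lru list
		   for freeing later. */
		spin_lock(&mb_cache_spinlock);
		list_add(&ce->e_lru_list, &mb_cache_lru_list);
		spin_unlock(&mb_cache_spinlock);
	} else {
		kmem_cache_free(cache->c_entry_cache, ce);
		atomic_dec(&cache->c_entry_count);
	}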
-static inline void
+static void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
{
/* Wake up all processes queuing for this cache entry. */
if (!(ce->e_used || ce->e_queued)) {
if (!__mb_cache_entry_is_hashed(ce))
goto forget;
+ mb_assert(list_empty(&ce->e_lru_list));
list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
}
spin_unlock(&mb_cache_spinlock);
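The new mb_assert documents the invariant that an unused, unqueued entry can never already sit on the LRU (the get/find paths below now unlink it up front). The wake-up named in the comment pairs with the prepare_to_wait() loops further down; a sketch of how the head of this function reads, assuming the usual pattern:

	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
	/* drop a writer's extra reference, then the plain one */
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	ce->e_used--;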
/*
 * Returns the number of objects which are present in the cache.
 */
static int
-mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask)
+mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
{
LIST_HEAD(free_list);
struct list_head *l, *ltmp;
e_lru_list), gfp_mask);
}
out:
- return count;
+ return (count / 100) * sysctl_vfs_cache_pressure;
}
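This makes the shrinker honour /proc/sys/vm/vfs_cache_pressure the way the dcache and icache do: at the default of 100 the report is count unchanged, at 50 only half the entries are reported freeable, and at 200 twice as many. Dividing before multiplying loses a little precision but avoids overflow for large counts.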
struct mb_cache *cache = NULL;
if(entry_size < sizeof(struct mb_cache_entry) +
- indexes_count * sizeof(struct mb_cache_entry_index))
+ indexes_count * sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]))
return NULL;
cache = kmalloc(sizeof(struct mb_cache) +
if (cache) {
while (--m >= 0)
kfree(cache->c_indexes_hash[m]);
- if (cache->c_block_hash)
- kfree(cache->c_block_hash);
+ kfree(cache->c_block_hash);
kfree(cache);
}
return NULL;
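Dropping the conditional around kfree(cache->c_block_hash) is safe because kfree(NULL) is a no-op by definition.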
/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. All cache entries
 * currently in use cannot be freed, and thus remain in the cache. All others
 * are freed.
*
- * @cache: which cache to shrink
* @bdev: which device's cache entries to shrink
*/
void
-mb_cache_shrink(struct mb_cache *cache, struct block_device *bdev)
+mb_cache_shrink(struct block_device *bdev)
{
LIST_HEAD(free_list);
struct list_head *l, *ltmp;
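With one global LRU shared by all mbcaches, shrinking by device needs no cache argument, so every caller shrinks by bdev alone. A sketch of the matching caller-side update, assuming the ext2 xattr call site:

	void
	ext2_xattr_put_super(struct super_block *sb)
	{
		mb_cache_shrink(sb->s_bdev);
	}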
if (ce->e_bdev == bdev && ce->e_block == block) {
DEFINE_WAIT(wait);
+ if (!list_empty(&ce->e_lru_list))
+ list_del_init(&ce->e_lru_list);
+
while (ce->e_used > 0) {
ce->e_queued++;
prepare_to_wait(&mb_cache_queue, &wait,
}
finish_wait(&mb_cache_queue, &wait);
ce->e_used += 1 + MB_CACHE_WRITER;
-
+
if (!__mb_cache_entry_is_hashed(ce)) {
__mb_cache_entry_release_unlock(ce);
return NULL;
}
- if (!list_empty(&ce->e_lru_list))
- list_del_init(&ce->e_lru_list);
goto cleanup;
}
}
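Unlinking from the LRU before the prepare_to_wait() loop closes a race: while the task sleeps, the entry is no longer visible to mb_cache_shrink_fn(), so it cannot be forgotten out from under the waiter. The e_used arithmetic doubles as a reader/writer count via MB_CACHE_WRITER from the top of the file; the two acquisition flavours seen in this diff are

	ce->e_used++;				/* shared (reader) hold  */
	ce->e_used += 1 + MB_CACHE_WRITER;	/* exclusive (writer) hold */

so e_used >= MB_CACHE_WRITER means a writer is active, and the release path subtracts the same amounts.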
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
int index, struct block_device *bdev, unsigned int key)
{
- DEFINE_WAIT(wait);
-
while (l != head) {
struct mb_cache_entry *ce =
list_entry(l, struct mb_cache_entry,
if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
DEFINE_WAIT(wait);
+ if (!list_empty(&ce->e_lru_list))
+ list_del_init(&ce->e_lru_list);
+
/* Incrementing before holding the lock gives readers
priority over writers. */
ce->e_used++;
-
while (ce->e_used >= MB_CACHE_WRITER) {
ce->e_queued++;
prepare_to_wait(&mb_cache_queue, &wait,
ce->e_queued--;
}
finish_wait(&mb_cache_queue, &wait);
-
+
if (!__mb_cache_entry_is_hashed(ce)) {
__mb_cache_entry_release_unlock(ce);
spin_lock(&mb_cache_spinlock);
return ERR_PTR(-EAGAIN);
}
- if (!list_empty(&ce->e_lru_list))
- list_del_init(&ce->e_lru_list);
return ce;
}
l = l->next;
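For reference, this helper is driven from the exported lookup wrappers, which hold mb_cache_spinlock around the bucket walk; a sketch along the lines of mb_cache_entry_find_first(), consistent with the mb_cache_entry_find_next export above:

	struct mb_cache_entry *
	mb_cache_entry_find_first(struct mb_cache *cache, int index,
				  struct block_device *bdev, unsigned int key)
	{
		unsigned int bucket = hash_long(key, cache->c_bucket_bits);
		struct list_head *l;
		struct mb_cache_entry *ce;

		mb_assert(index < mb_cache_indexes(cache));
		spin_lock(&mb_cache_spinlock);
		l = cache->c_indexes_hash[index][bucket].next;
		ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
					   index, bdev, key);
		spin_unlock(&mb_cache_spinlock);
		return ce;
	}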