/*
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */
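/*
 * A typical entry life cycle, tying the states above to the interface
 * below (cache, bdev, block and keys are placeholders supplied by the
 * caller):
 *
 *	ce = mb_cache_entry_alloc(cache);		(invalid, one handle held)
 *	mb_cache_entry_insert(ce, bdev, block, keys);	(entry is now valid)
 *	mb_cache_entry_release(ce);			(no handles: moved to the lru list)
 *	...
 *	ce = mb_cache_entry_get(cache, bdev, block);	(taken off the lru list again)
 *	mb_cache_entry_free(ce);			(invalidated and freed)
 */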
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>
#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { printk(KERN_DEBUG f); printk("\n"); } while (0)
# define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while (0)
#else
# define mb_debug(f...) do { } while (0)
# define mb_assert(c) do { } while (0)
#endif

#define mb_error(f...) do { printk(KERN_ERR f); printk("\n"); } while (0)
#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
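/*
 * Handle accounting: e_used counts the handles held on an entry.  A shared
 * (reader) handle adds 1 and an exclusive (writer) handle adds
 * 1 + MB_CACHE_WRITER, so e_used >= MB_CACHE_WRITER means a writer
 * currently holds the entry.
 */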
DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif
/*
 * Global data: list of all mbcache's, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */
static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static spinlock_t mb_cache_spinlock = SPIN_LOCK_UNLOCKED;
static struct shrinker *mb_shrinker;
static inline int mb_cache_indexes(struct mb_cache *cache)
{
#ifdef MB_CACHE_INDEXES_COUNT
	return MB_CACHE_INDEXES_COUNT;
#else
	return cache->c_indexes_count;
#endif
}
/*
 * The callback that the mbcache registers with the kernel shrinker
 * interface so that its entries can be reclaimed dynamically under
 * memory pressure.
 */
static int mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask);

__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
	return !list_empty(&ce->e_block_list);

__mb_cache_entry_unhash(struct mb_cache_entry *ce)
	if (__mb_cache_entry_is_hashed(ce)) {
		list_del_init(&ce->e_block_list);
		for (n=0; n<mb_cache_indexes(ce->e_cache); n++)
			list_del(&ce->e_indexes[n].o_list);

__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask)
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!ce->e_used);
	mb_assert(!ce->e_queued);
	if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
		/* free failed -- put back on the lru list
		   for freeing later. */
		spin_lock(&mb_cache_spinlock);
		list_add(&ce->e_lru_list, &mb_cache_lru_list);
		spin_unlock(&mb_cache_spinlock);
	kmem_cache_free(cache->c_entry_cache, ce);
	atomic_dec(&cache->c_entry_count);

__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
	/* Wake up all processes queuing for this cache entry. */
	wake_up_all(&mb_cache_queue);
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	if (!(ce->e_used || ce->e_queued)) {
		if (!__mb_cache_entry_is_hashed(ce))
		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
	spin_unlock(&mb_cache_spinlock);
	spin_unlock(&mb_cache_spinlock);
	__mb_cache_entry_forget(ce, GFP_KERNEL);
/*
 * mb_cache_shrink_fn()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @nr_to_scan: Number of objects to scan
 * @gfp_mask: (ignored)
 *
 * Returns the number of objects which are present in the cache.
 */
mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask)
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &mb_cache_list) {
		struct mb_cache *cache =
			list_entry(l, struct mb_cache, c_cache_list);
		mb_debug("cache %s (%d)", cache->c_name,
			 atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	mb_debug("trying to free %d entries", nr_to_scan);
	if (nr_to_scan == 0) {
		spin_unlock(&mb_cache_spinlock);
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_move_tail(&ce->e_lru_list, &free_list);
		__mb_cache_entry_unhash(ce);
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), gfp_mask);
/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are of equal size. Cache entries may be from
 * multiple devices. If this is the first mbcache created, registers
 * the cache with kernel memory management. Returns NULL if no more
 * memory was available.
 *
 * @name: name of the cache (informal)
 * @cache_op: contains the callback called when freeing a cache entry
 * @entry_size: The size of a cache entry, including
 *              struct mb_cache_entry
 * @indexes_count: number of additional indexes in the cache. Must equal
 *                 MB_CACHE_INDEXES_COUNT if the number of indexes is
 *                 hardwired at compile time.
 * @bucket_bits: log2(number of hash buckets)
 */
mb_cache_create(const char *name, struct mb_cache_op *cache_op,
		size_t entry_size, int indexes_count, int bucket_bits)
	int m=0, n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	if (entry_size < sizeof(struct mb_cache_entry) +
	    indexes_count * sizeof(struct mb_cache_entry_index))
	cache = kmalloc(sizeof(struct mb_cache) +
			indexes_count * sizeof(struct list_head), GFP_KERNEL);
	cache->c_name = name;
	cache->c_op.free = NULL;
	cache->c_op.free = cache_op->free;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
#ifdef MB_CACHE_INDEXES_COUNT
	mb_assert(indexes_count == MB_CACHE_INDEXES_COUNT);
	cache->c_indexes_count = indexes_count;
	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
	if (!cache->c_block_hash)
	for (n=0; n<bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_block_hash[n]);
	for (m=0; m<indexes_count; m++) {
		cache->c_indexes_hash[m] = kmalloc(bucket_count *
						   sizeof(struct list_head),
		if (!cache->c_indexes_hash[m])
		for (n=0; n<bucket_count; n++)
			INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
	cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
						 SLAB_RECLAIM_ACCOUNT, NULL, NULL);
	if (!cache->c_entry_cache)
	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);

	kfree(cache->c_indexes_hash[m]);
	if (cache->c_block_hash)
		kfree(cache->c_block_hash);
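/*
 * For example, a filesystem that indexes its extended attribute blocks by
 * a single content hash might set up its cache roughly like this (the
 * "myfs" names are placeholders, not part of this file):
 *
 *	myfs_xattr_cache = mb_cache_create("myfs_xattr", NULL,
 *			sizeof(struct mb_cache_entry) +
 *			sizeof(struct mb_cache_entry_index), 1, 6);
 *	if (!myfs_xattr_cache)
 *		return -ENOMEM;
 */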
/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. All cache entries
 * currently in use cannot be freed, and thus remain in the cache. All others
 * are freed.
 *
 * @cache: which cache to shrink
 * @bdev: which device's cache entries to shrink
 */
mb_cache_shrink(struct mb_cache *cache, struct block_device *bdev)
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it. If this was the last mbcache, un-registers the
 * mbcache from kernel memory management.
 */
mb_cache_destroy(struct mb_cache *cache)
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_cache == cache) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);

	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			 atomic_read(&cache->c_entry_count));

	kmem_cache_destroy(cache->c_entry_cache);

	for (n=0; n < mb_cache_indexes(cache); n++)
		kfree(cache->c_indexes_hash[n]);
	kfree(cache->c_block_hash);
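/*
 * A filesystem would typically shrink the cache when a device goes away
 * (for instance from its put_super callback) and destroy it when the
 * module is unloaded; roughly (placeholder names again):
 *
 *	static void myfs_put_super(struct super_block *sb)
 *	{
 *		mb_cache_shrink(myfs_xattr_cache, sb->s_bdev);
 *		...
 *	}
 *
 *	static void __exit exit_myfs_fs(void)
 *	{
 *		mb_cache_destroy(myfs_xattr_cache);
 *		...
 *	}
 */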
/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache)
	struct mb_cache_entry *ce;

	atomic_inc(&cache->c_entry_count);
	ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL);
	INIT_LIST_HEAD(&ce->e_lru_list);
	INIT_LIST_HEAD(&ce->e_block_list);
	ce->e_used = 1 + MB_CACHE_WRITER;
/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block exists
 * already (this may happen after a failed lookup, but when another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @keys: array of additional keys. There must be indexes_count entries
 *        in the array (as specified when creating the cache).
 */
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int keys[])
	struct mb_cache *cache = ce->e_cache;
	int error = -EBUSY, n;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block)
	__mb_cache_entry_unhash(ce);
	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
	for (n=0; n<mb_cache_indexes(cache); n++) {
		ce->e_indexes[n].o_key = keys[n];
		bucket = hash_long(keys[n], cache->c_bucket_bits);
		list_add(&ce->e_indexes[n].o_list,
			 &cache->c_indexes_hash[n][bucket]);
	spin_unlock(&mb_cache_spinlock);
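/*
 * A typical insertion path allocates an entry, inserts it, and drops it
 * again if another process raced and inserted the same device/block first
 * (cache, bdev, block and key are placeholders):
 *
 *	ce = mb_cache_entry_alloc(cache);
 *	if (!ce)
 *		return -ENOMEM;
 *	error = mb_cache_entry_insert(ce, bdev, block, &key);
 *	if (error) {
 *		mb_cache_entry_free(ce);
 *		if (error == -EBUSY)		(an equivalent entry already exists)
 *			error = 0;
 *	} else
 *		mb_cache_entry_release(ce);
 *	return error;
 */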
/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
mb_cache_entry_release(struct mb_cache_entry *ce)
	spin_lock(&mb_cache_spinlock);
	__mb_cache_entry_release_unlock(ce);
/*
 * mb_cache_entry_free()
 *
 * This is equivalent to invalidating the entry (removing it from the hash
 * tables) and then releasing the handle with mb_cache_entry_release();
 * once the last handle is gone the entry is freed.
 */
mb_cache_entry_free(struct mb_cache_entry *ce)
	spin_lock(&mb_cache_spinlock);
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_unhash(ce);
	__mb_cache_entry_release_unlock(ce);
/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists. The returned cache entry is locked for exclusive access ("single
 * writer").
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
	struct mb_cache_entry *ce;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &cache->c_block_hash[bucket]) {
		ce = list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			while (ce->e_used > 0) {
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				spin_lock(&mb_cache_spinlock);
			finish_wait(&mb_cache_queue, &wait);
			ce->e_used += 1 + MB_CACHE_WRITER;
			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);
	spin_unlock(&mb_cache_spinlock);
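/*
 * When a cached disk block is freed or reused, its cache entry (if any)
 * can be looked up by device/block and invalidated, e.g.:
 *
 *	ce = mb_cache_entry_get(cache, bdev, block);
 *	if (ce)
 *		mb_cache_entry_free(ce);
 */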
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
		      int index, struct block_device *bdev, unsigned int key)
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry,
				   e_indexes[index].o_list);
		if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
			/* Incrementing before holding the lock gives readers
			   priority over writers. */
			while (ce->e_used >= MB_CACHE_WRITER) {
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				spin_lock(&mb_cache_spinlock);
			finish_wait(&mb_cache_queue, &wait);
			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				spin_lock(&mb_cache_spinlock);
				return ERR_PTR(-EAGAIN);
			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);
/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found. The
 * returned cache entry is locked for shared access ("multiple readers").
 *
 * @cache: the cache to search
 * @index: the number of the additional index to search (0<=index<indexes_count)
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, int index,
			  struct block_device *bdev, unsigned int key)
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct mb_cache_entry *ce;

	mb_assert(index < mb_cache_indexes(cache));
	spin_lock(&mb_cache_spinlock);
	l = cache->c_indexes_hash[index][bucket].next;
	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
	spin_unlock(&mb_cache_spinlock);
/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 * 	...
 *	entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @index: the number of the additional index to search (0<=index<indexes_count)
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
			 struct block_device *bdev, unsigned int key)
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct mb_cache_entry *ce;

	mb_assert(index < mb_cache_indexes(cache));
	spin_lock(&mb_cache_spinlock);
	l = prev->e_indexes[index].o_list.next;
	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
	__mb_cache_entry_release_unlock(prev);

#endif /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */
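/*
 * Putting the two find functions together, a lookup over a single
 * additional index typically looks like the sketch below.  The find
 * functions may also return an ERR_PTR value (e.g. -EAGAIN), so callers
 * should be prepared for that as well as NULL; entry_matches() stands in
 * for a caller-supplied check, and a matching entry is returned with its
 * shared handle still held (release it with mb_cache_entry_release() when
 * done):
 *
 *	ce = mb_cache_entry_find_first(cache, 0, bdev, key);
 *	while (ce && !IS_ERR(ce)) {
 *		if (entry_matches(ce))
 *			return ce;
 *		ce = mb_cache_entry_find_next(ce, 0, bdev, key);
 *	}
 */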
static int __init init_mbcache(void)
{
	mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
	return 0;
}

static void __exit exit_mbcache(void)
{
	remove_shrinker(mb_shrinker);
}
module_init(init_mbcache)
module_exit(exit_mbcache)