/*
 * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

/*
 * The xfs_buf.c code provides an abstract buffer cache model on top
 * of the Linux page cache.  Cached metadata blocks for a file system
 * are hashed to the inode for the block device.  xfs_buf.c assembles
 * buffers (xfs_buf_t) on demand to aggregate such cached pages for I/O.
 *
 * Written by Steve Lord, Jim Mostek, Russell Cattelan
 * and Rajagopal Ananthanarayanan ("ananth") at SGI.
 */
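
/*
 * Illustrative usage sketch (the target pointer and disk range below
 * are placeholders, not code from this file): a typical locked
 * metadata read through this layer looks like
 *
 *      bp = xfs_buf_read_flags(target, blkno, nblks,
 *                              PBF_LOCK | PBF_MAPPED);
 *      if (bp && !bp->pb_error) {
 *              ... metadata is mapped at bp->pb_addr ...
 *              pagebuf_unlock(bp);
 *              pagebuf_rele(bp);
 *      }
 *
 * using the lookup, locking and release routines defined below.
 */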

#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/suspend.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>

#include "xfs_linux.h"

STATIC kmem_cache_t *pagebuf_cache;
STATIC kmem_shaker_t pagebuf_shake;
STATIC int pagebuf_daemon_wakeup(int, unsigned int);
STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
STATIC struct workqueue_struct *pagebuf_logio_workqueue;
STATIC struct workqueue_struct *pagebuf_dataio_workqueue;

#ifdef PAGEBUF_TRACE
void
pagebuf_trace(
        xfs_buf_t *pb,
        char *id,
        void *data,
        void *ra)
{
        ktrace_enter(pagebuf_trace_buf,
                pb, id,
                (void *)(unsigned long)pb->pb_flags,
                (void *)(unsigned long)pb->pb_hold.counter,
                (void *)(unsigned long)pb->pb_sema.count.counter,
                (void *)current,
                data, ra,
                (void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff),
                (void *)(unsigned long)(pb->pb_file_offset & 0xffffffff),
                (void *)(unsigned long)pb->pb_buffer_length,
                NULL, NULL, NULL, NULL, NULL);
}
ktrace_t *pagebuf_trace_buf;
#define PAGEBUF_TRACE_SIZE      4096
#define PB_TRACE(pb, id, data)  \
        pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0))
#else
#define PB_TRACE(pb, id, data)  do { } while (0)
#endif

#ifdef PAGEBUF_LOCK_TRACKING
# define PB_SET_OWNER(pb)       ((pb)->pb_last_holder = current->pid)
# define PB_CLEAR_OWNER(pb)     ((pb)->pb_last_holder = -1)
# define PB_GET_OWNER(pb)       ((pb)->pb_last_holder)
#else
# define PB_SET_OWNER(pb)       do { } while (0)
# define PB_CLEAR_OWNER(pb)     do { } while (0)
# define PB_GET_OWNER(pb)       do { } while (0)
#endif

/*
 * Pagebuf allocation / freeing.
 */

#define pb_to_gfp(flags) \
        ((((flags) & PBF_READ_AHEAD) ? __GFP_NORETRY : \
          ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define pb_to_km(flags) \
        (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

#define pagebuf_allocate(flags) \
        kmem_zone_alloc(pagebuf_cache, pb_to_km(flags))
#define pagebuf_deallocate(pb) \
        kmem_zone_free(pagebuf_cache, (pb));

/*
 * Hash table globals
 */
#define NBITS   8
#define NHASH   (1<<NBITS)

typedef struct {
        struct list_head pb_hash;
        spinlock_t pb_hash_lock;
} pb_hash_t;

STATIC pb_hash_t pbhash[NHASH];
#define pb_hash(pb) &pbhash[pb->pb_hash_index]

STATIC int
_bhash(
        struct block_device *bdev,
        loff_t base)
{
        int bit, hval;

        base >>= 9;
        base ^= (unsigned long)bdev / L1_CACHE_BYTES;
        for (bit = hval = 0; base && bit < sizeof(base) * 8; bit += NBITS) {
                hval ^= (int)base & (NHASH-1);
                base >>= NBITS;
        }
        return hval;
}
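
/*
 * Worked example (illustrative): with NBITS == 8 as defined above,
 * the loop folds the block address one byte at a time.  After the
 * ">> 9" and the bdev xor, a value of 0x123456 contributes
 * 0x56 ^ 0x34 ^ 0x12 == 0x70, so neighbouring block ranges scatter
 * across the NHASH (256) buckets rather than clustering in one.
 */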

/*
 * Mapping of multi-page buffers into contiguous virtual space
 */

typedef struct a_list {
        void *vm_addr;
        struct a_list *next;
} a_list_t;

STATIC a_list_t *as_free_head;
STATIC int as_list_len;
STATIC spinlock_t as_lock = SPIN_LOCK_UNLOCKED;

/*
 * Try to batch vunmaps because they are costly.
 */
STATIC void
free_address(
        void *addr)
{
        a_list_t *aentry;

        aentry = kmalloc(sizeof(a_list_t), GFP_ATOMIC);
        if (aentry) {
                spin_lock(&as_lock);
                aentry->next = as_free_head;
                aentry->vm_addr = addr;
                as_free_head = aentry;
                as_list_len++;
                spin_unlock(&as_lock);
        } else {
                vunmap(addr);
        }
}

STATIC void
purge_addresses(void)
{
        a_list_t *aentry, *old;

        if (as_free_head == NULL)
                return;

        spin_lock(&as_lock);
        aentry = as_free_head;
        as_free_head = NULL;
        as_list_len = 0;
        spin_unlock(&as_lock);

        while ((old = aentry) != NULL) {
                vunmap(aentry->vm_addr);
                aentry = aentry->next;
                kfree(old);
        }
}
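
/*
 * Usage pattern (illustrative): teardown of a mapped multi-page
 * buffer stays cheap by deferring the expensive vunmap():
 *
 *      free_address(bp->pb_addr - bp->pb_offset);      (queue it)
 *      ...
 *      if (as_list_len > 64)
 *              purge_addresses();                      (drain in batch)
 *
 * _pagebuf_map_pages() drains opportunistically as above, and the
 * delwri daemon below purges any remainder, so queued mappings are
 * reclaimed even without further buffer lookups.
 */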

/*
 * Internal pagebuf object manipulation
 */

STATIC void
_pagebuf_initialize(
        xfs_buf_t *pb,
        xfs_buftarg_t *target,
        loff_t range_base,
        size_t range_length,
        page_buf_flags_t flags)
{
        /*
         * We don't want certain flags to appear in pb->pb_flags.
         */
        flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);

        memset(pb, 0, sizeof(xfs_buf_t));
        atomic_set(&pb->pb_hold, 1);
        init_MUTEX_LOCKED(&pb->pb_iodonesema);
        INIT_LIST_HEAD(&pb->pb_list);
        INIT_LIST_HEAD(&pb->pb_hash_list);
        init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */
        PB_SET_OWNER(pb);
        pb->pb_target = target;
        pb->pb_file_offset = range_base;
        /*
         * Set buffer_length and count_desired to the same value initially.
         * I/O routines should use count_desired, which will be the same in
         * most cases but may be reset (e.g. XFS recovery).
         */
        pb->pb_buffer_length = pb->pb_count_desired = range_length;
        pb->pb_flags = flags | PBF_NONE;
        pb->pb_bn = XFS_BUF_DADDR_NULL;
        atomic_set(&pb->pb_pin_count, 0);
        init_waitqueue_head(&pb->pb_waiters);

        XFS_STATS_INC(pb_create);
        PB_TRACE(pb, "initialize", target);
}

/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_pagebuf_get_pages(
        xfs_buf_t *pb,
        int page_count,
        page_buf_flags_t flags)
{
        /* Make sure that we have a page list */
        if (pb->pb_pages == NULL) {
                pb->pb_offset = page_buf_poff(pb->pb_file_offset);
                pb->pb_page_count = page_count;
                if (page_count <= PB_PAGES) {
                        pb->pb_pages = pb->pb_page_array;
                } else {
                        pb->pb_pages = kmem_alloc(sizeof(struct page *) *
                                        page_count, pb_to_km(flags));
                        if (pb->pb_pages == NULL)
                                return -ENOMEM;
                }
                memset(pb->pb_pages, 0, sizeof(struct page *) * page_count);
        }
        return 0;
}

/*
 * Frees pb_pages if it was malloced.
 */
STATIC void
_pagebuf_free_pages(
        xfs_buf_t *bp)
{
        if (bp->pb_pages != bp->pb_page_array) {
                kmem_free(bp->pb_pages,
                          bp->pb_page_count * sizeof(struct page *));
        }
}

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use pagebuf_rele instead for
 * hashed and refcounted buffers.
 */
void
pagebuf_free(
        xfs_buf_t *bp)
{
        PB_TRACE(bp, "free", 0);

        ASSERT(list_empty(&bp->pb_hash_list));

        if (bp->pb_flags & _PBF_PAGE_CACHE) {
                uint i;

                if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1))
                        free_address(bp->pb_addr - bp->pb_offset);

                for (i = 0; i < bp->pb_page_count; i++)
                        page_cache_release(bp->pb_pages[i]);
                _pagebuf_free_pages(bp);
        } else if (bp->pb_flags & _PBF_KMEM_ALLOC) {
                /*
                 * XXX(hch): bp->pb_count_desired might be incorrect (see
                 * pagebuf_associate_memory for details), but fortunately
                 * the Linux version of kmem_free ignores the len argument..
                 */
                kmem_free(bp->pb_addr, bp->pb_count_desired);
                _pagebuf_free_pages(bp);
        }

        pagebuf_deallocate(bp);
}

/*
 * Finds all pages for the buffer in question and builds its page list.
 */
STATIC int
_pagebuf_lookup_pages(
        xfs_buf_t *bp,
        uint flags)
{
        struct address_space *mapping = bp->pb_target->pbr_mapping;
        unsigned int sectorshift = bp->pb_target->pbr_sshift;
        size_t blocksize = bp->pb_target->pbr_bsize;
        size_t size = bp->pb_count_desired;
        size_t nbytes, offset;
        int gfp_mask = pb_to_gfp(flags);
        unsigned short page_count, i;
        pgoff_t first;
        loff_t end;
        int error;

        end = bp->pb_file_offset + bp->pb_buffer_length;
        page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset);

        error = _pagebuf_get_pages(bp, page_count, flags);
        if (unlikely(error))
                return error;
        bp->pb_flags |= _PBF_PAGE_CACHE;

        offset = bp->pb_offset;
        first = bp->pb_file_offset >> PAGE_CACHE_SHIFT;

        for (i = 0; i < bp->pb_page_count; i++) {
                struct page *page;
                uint retries = 0;

              retry:
                page = find_or_create_page(mapping, first + i, gfp_mask);
                if (unlikely(page == NULL)) {
                        if (flags & PBF_READ_AHEAD) {
                                bp->pb_page_count = i;
                                for (i = 0; i < bp->pb_page_count; i++)
                                        unlock_page(bp->pb_pages[i]);
                                return -ENOMEM;
                        }

                        /*
                         * This could deadlock.
                         *
                         * But until all the XFS lowlevel code is revamped to
                         * handle buffer allocation failures we can't do much.
                         */
                        if (!(++retries % 100))
                                printk(KERN_ERR
                                        "XFS: possible memory allocation "
                                        "deadlock in %s (mode:0x%x)\n",
                                        __FUNCTION__, gfp_mask);

                        XFS_STATS_INC(pb_page_retries);
                        pagebuf_daemon_wakeup(0, gfp_mask);
                        blk_congestion_wait(WRITE, HZ/50);
                        goto retry;
                }

                XFS_STATS_INC(pb_page_found);

                nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
                size -= nbytes;

                if (!PageUptodate(page)) {
                        page_count--;
                        if (blocksize == PAGE_CACHE_SIZE) {
                                if (flags & PBF_READ)
                                        bp->pb_locked = 1;
                        } else if (!PagePrivate(page)) {
                                unsigned long j, range;

                                /*
                                 * In this case page->private holds a bitmap
                                 * of uptodate sectors within the page.
                                 */
                                ASSERT(blocksize < PAGE_CACHE_SIZE);
                                range = (offset + nbytes) >> sectorshift;
                                for (j = offset >> sectorshift; j < range; j++)
                                        if (!test_bit(j, &page->private))
                                                break;
                                if (j == range)
                                        page_count++;
                        }
                }

                bp->pb_pages[i] = page;
                offset = 0;
        }

        if (!bp->pb_locked) {
                for (i = 0; i < bp->pb_page_count; i++)
                        unlock_page(bp->pb_pages[i]);
        }

        if (page_count) {
                /* if we have any uptodate pages, mark that in the buffer */
                bp->pb_flags &= ~PBF_NONE;

                /* if some pages aren't uptodate, mark that in the buffer */
                if (page_count != bp->pb_page_count)
                        bp->pb_flags |= PBF_PARTIAL;
        }

        PB_TRACE(bp, "lookup_pages", (long)page_count);
        return error;
}

/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_pagebuf_map_pages(
        xfs_buf_t *bp,
        uint flags)
{
        /* A single page buffer is always mappable */
        if (bp->pb_page_count == 1) {
                bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset;
                bp->pb_flags |= PBF_MAPPED;
        } else if (flags & PBF_MAPPED) {
                if (as_list_len > 64)
                        purge_addresses();
                bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count,
                                VM_MAP, PAGE_KERNEL);
                if (unlikely(bp->pb_addr == NULL))
                        return -ENOMEM;
                bp->pb_addr += bp->pb_offset;
                bp->pb_flags |= PBF_MAPPED;
        }

        return 0;
}

/*
 * Finding and Reading Buffers
 */

/*
 * _pagebuf_find
 *
 * Looks up, and creates if absent, a lockable buffer for
 * a given range of an inode.  The buffer is returned
 * locked.  If other overlapping buffers exist, they are
 * released before the new buffer is created and locked,
 * which may imply that this call will block until those buffers
 * are unlocked.  No I/O is implied by this call.
 */
STATIC xfs_buf_t *
_pagebuf_find(                          /* find buffer for block        */
        xfs_buftarg_t *target,          /* target for block             */
        loff_t ioff,                    /* starting offset of range     */
        size_t isize,                   /* length of range              */
        page_buf_flags_t flags,         /* PBF_TRYLOCK                  */
        xfs_buf_t *new_pb)              /* newly allocated buffer       */
{
        loff_t range_base;
        size_t range_length;
        int hval;
        pb_hash_t *h;
        xfs_buf_t *pb, *n;
        int not_locked;

        range_base = (ioff << BBSHIFT);
        range_length = (isize << BBSHIFT);

        /* Ensure we never do IOs smaller than the sector size */
        BUG_ON(range_length < (1 << target->pbr_sshift));

        /* Ensure we never do IOs that are not sector aligned */
        BUG_ON(range_base & (loff_t)target->pbr_smask);

        hval = _bhash(target->pbr_bdev, range_base);
        h = &pbhash[hval];

        spin_lock(&h->pb_hash_lock);
        list_for_each_entry_safe(pb, n, &h->pb_hash, pb_hash_list) {
                if (pb->pb_target == target &&
                    pb->pb_file_offset == range_base &&
                    pb->pb_buffer_length == range_length) {
                        /* If we look at something, bring it to the
                         * front of the list for next time.
                         */
                        atomic_inc(&pb->pb_hold);
                        list_move(&pb->pb_hash_list, &h->pb_hash);
                        goto found;
                }
        }

        /* No match found */
        if (new_pb) {
                _pagebuf_initialize(new_pb, target, range_base,
                                range_length, flags);
                new_pb->pb_hash_index = hval;
                list_add(&new_pb->pb_hash_list, &h->pb_hash);
        } else {
                XFS_STATS_INC(pb_miss_locked);
        }

        spin_unlock(&h->pb_hash_lock);
        return new_pb;

found:
        spin_unlock(&h->pb_hash_lock);

        /* Attempt to get the semaphore without sleeping,
         * if this does not work then we need to drop the
         * spinlock and do a hard attempt on the semaphore.
         */
        not_locked = down_trylock(&pb->pb_sema);
        if (not_locked) {
                if (!(flags & PBF_TRYLOCK)) {
                        /* wait for buffer ownership */
                        PB_TRACE(pb, "get_lock", 0);
                        pagebuf_lock(pb);
                        XFS_STATS_INC(pb_get_locked_waited);
                } else {
                        /* We asked for a trylock and failed, no need
                         * to look at file offset and length here, we
                         * know that this pagebuf at least overlaps our
                         * pagebuf and is locked, therefore our buffer
                         * either does not exist, or is this buffer.
                         */
                        pagebuf_rele(pb);
                        XFS_STATS_INC(pb_busy_locked);
                        return NULL;
                }
        } else {
                /* trylock worked */
                PB_SET_OWNER(pb);
        }

        if (pb->pb_flags & PBF_STALE)
                pb->pb_flags &= PBF_MAPPED;
        PB_TRACE(pb, "got_lock", 0);
        XFS_STATS_INC(pb_get_locked);
        return pb;
}

/*
 * xfs_buf_get_flags assembles a buffer covering the specified range.
 *
 * Storage in memory for all portions of the buffer will be allocated,
 * although backing storage may not be.
 */
xfs_buf_t *
xfs_buf_get_flags(                      /* allocate a buffer            */
        xfs_buftarg_t *target,          /* target for buffer            */
        loff_t ioff,                    /* starting offset of range     */
        size_t isize,                   /* length of range              */
        page_buf_flags_t flags)         /* PBF_TRYLOCK                  */
{
        xfs_buf_t *pb, *new_pb;
        int error = 0, i;

        new_pb = pagebuf_allocate(flags);
        if (unlikely(!new_pb))
                return NULL;

        pb = _pagebuf_find(target, ioff, isize, flags, new_pb);
        if (pb == new_pb) {
                error = _pagebuf_lookup_pages(pb, flags);
                if (error)
                        goto no_buffer;
        } else {
                pagebuf_deallocate(new_pb);
                if (unlikely(pb == NULL))
                        return NULL;
        }

        for (i = 0; i < pb->pb_page_count; i++)
                mark_page_accessed(pb->pb_pages[i]);

        if (!(pb->pb_flags & PBF_MAPPED)) {
                error = _pagebuf_map_pages(pb, flags);
                if (unlikely(error)) {
                        printk(KERN_WARNING "%s: failed to map pages\n",
                                        __FUNCTION__);
                        goto no_buffer;
                }
        }

        XFS_STATS_INC(pb_get);

        /*
         * Always fill in the block number now, the mapped cases can do
         * their own overlay of this later.
         */
        pb->pb_bn = ioff;
        pb->pb_count_desired = pb->pb_buffer_length;

        PB_TRACE(pb, "get", (unsigned long)flags);
        return pb;

no_buffer:
        if (flags & (PBF_LOCK | PBF_TRYLOCK))
                pagebuf_unlock(pb);
        pagebuf_rele(pb);
        return NULL;
}

xfs_buf_t *
xfs_buf_read_flags(
        xfs_buftarg_t *target,
        loff_t ioff,
        size_t isize,
        page_buf_flags_t flags)
{
        xfs_buf_t *pb;

        flags |= PBF_READ;

        pb = xfs_buf_get_flags(target, ioff, isize, flags);
        if (pb) {
                if (PBF_NOT_DONE(pb)) {
                        PB_TRACE(pb, "read", (unsigned long)flags);
                        XFS_STATS_INC(pb_get_read);
                        pagebuf_iostart(pb, flags);
                } else if (flags & PBF_ASYNC) {
                        PB_TRACE(pb, "read_async", (unsigned long)flags);
                        /*
                         * Read ahead call which is already satisfied,
                         * drop the buffer.
                         */
                        goto no_buffer;
                } else {
                        PB_TRACE(pb, "read_done", (unsigned long)flags);
                        /* We do not want read in the flags */
                        pb->pb_flags &= ~PBF_READ;
                }
        }

        return pb;

no_buffer:
        if (flags & (PBF_LOCK | PBF_TRYLOCK))
                pagebuf_unlock(pb);
        pagebuf_rele(pb);
        return NULL;
}

/*
 * Create a skeletal pagebuf (no pages associated with it).
 */
xfs_buf_t *
pagebuf_lookup(
        xfs_buftarg_t *target,
        loff_t ioff,
        size_t isize,
        page_buf_flags_t flags)
{
        xfs_buf_t *pb;

        pb = pagebuf_allocate(flags);
        if (pb)
                _pagebuf_initialize(pb, target, ioff, isize, flags);
        return pb;
}

/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
void
pagebuf_readahead(
        xfs_buftarg_t *target,
        loff_t ioff,
        size_t isize,
        page_buf_flags_t flags)
{
        struct backing_dev_info *bdi;

        bdi = target->pbr_mapping->backing_dev_info;
        if (bdi_read_congested(bdi))
                return;
        if (bdi_write_congested(bdi))
                return;

        flags |= (PBF_TRYLOCK|PBF_ASYNC|PBF_READ_AHEAD);
        xfs_buf_read_flags(target, ioff, isize, flags);
}
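
/*
 * Usage sketch (illustrative; next_blkno/nblks are placeholders):
 * readahead is fire and forget, since PBF_TRYLOCK|PBF_ASYNC mean a
 * congested device or a contended buffer is simply skipped:
 *
 *      pagebuf_readahead(target, next_blkno, nblks, 0);
 *
 * No buffer pointer is returned; a later xfs_buf_read_flags() on the
 * same range finds the pages already up to date if the I/O completed.
 */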

xfs_buf_t *
pagebuf_get_empty(
        size_t len,
        xfs_buftarg_t *target)
{
        xfs_buf_t *pb;

        pb = pagebuf_allocate(0);
        if (pb)
                _pagebuf_initialize(pb, target, 0, len, 0);
        return pb;
}

static inline struct page *
mem_to_page(
        void *addr)
{
        if (((unsigned long)addr < VMALLOC_START) ||
            ((unsigned long)addr >= VMALLOC_END)) {
                return virt_to_page(addr);
        } else {
                return vmalloc_to_page(addr);
        }
}

int
pagebuf_associate_memory(
        xfs_buf_t *pb,
        void *mem,
        size_t len)
{
        int rval;
        int i = 0;
        size_t ptr;
        size_t end;
        off_t offset;
        int page_count;

        page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
        offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
        if (offset && (len > PAGE_CACHE_SIZE))
                page_count++;

        /* Free any previous set of page pointers */
        if (pb->pb_pages)
                _pagebuf_free_pages(pb);

        pb->pb_pages = NULL;
        pb->pb_addr = mem;

        rval = _pagebuf_get_pages(pb, page_count, 0);
        if (rval)
                return rval;

        pb->pb_offset = offset;
        ptr = (size_t) mem & PAGE_CACHE_MASK;
        end = PAGE_CACHE_ALIGN((size_t) mem + len);

        /* set up first page */
        pb->pb_pages[0] = mem_to_page(mem);

        ptr += PAGE_CACHE_SIZE;
        pb->pb_page_count = ++i;
        while (ptr < end) {
                pb->pb_pages[i] = mem_to_page((void *)ptr);
                pb->pb_page_count = ++i;
                ptr += PAGE_CACHE_SIZE;
        }
        pb->pb_locked = 0;

        pb->pb_count_desired = pb->pb_buffer_length = len;
        pb->pb_flags |= PBF_MAPPED;

        return 0;
}
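
/*
 * Usage sketch (illustrative; kbuf/len are placeholders): wrapping
 * caller-supplied memory in a skeletal buffer so the I/O path can
 * treat it like any other pagebuf:
 *
 *      bp = pagebuf_get_empty(len, target);
 *      if (bp && pagebuf_associate_memory(bp, kbuf, len) == 0) {
 *              ... I/O against bp now targets kbuf ...
 *              pagebuf_free(bp);       (unhashed, so not pagebuf_rele)
 *      }
 */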

xfs_buf_t *
pagebuf_get_no_daddr(
        size_t len,
        xfs_buftarg_t *target)
{
        size_t malloc_len = len;
        xfs_buf_t *bp;
        void *data;
        int error;

        bp = pagebuf_allocate(0);
        if (unlikely(bp == NULL))
                goto fail;
        _pagebuf_initialize(bp, target, 0, len, PBF_FORCEIO);

try_again:
        data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
        if (unlikely(data == NULL))
                goto fail_free_buf;

        /* check whether alignment matches.. */
        if ((__psunsigned_t)data !=
            ((__psunsigned_t)data & ~target->pbr_smask)) {
                /* .. else double the size and try again */
                kmem_free(data, malloc_len);
                malloc_len <<= 1;
                goto try_again;
        }

        error = pagebuf_associate_memory(bp, data, len);
        if (error)
                goto fail_free_mem;
        bp->pb_flags |= _PBF_KMEM_ALLOC;

        pagebuf_unlock(bp);

        PB_TRACE(bp, "no_daddr", data);
        return bp;

fail_free_mem:
        kmem_free(data, malloc_len);
fail_free_buf:
        pagebuf_free(bp);
fail:
        return NULL;
}

/*
 * pagebuf_hold
 *
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 *
 * Must hold the buffer already to call this function.
 */
void
pagebuf_hold(
        xfs_buf_t *pb)
{
        atomic_inc(&pb->pb_hold);
        PB_TRACE(pb, "hold", 0);
}

/*
 * pagebuf_rele
 *
 * pagebuf_rele releases a hold on the specified buffer.  If the
 * hold count is 1, pagebuf_rele calls pagebuf_free.
 */
void
pagebuf_rele(
        xfs_buf_t *pb)
{
        pb_hash_t *hash = pb_hash(pb);

        PB_TRACE(pb, "rele", pb->pb_relse);

        if (atomic_dec_and_lock(&pb->pb_hold, &hash->pb_hash_lock)) {
                int do_free = 1;

                if (pb->pb_relse) {
                        atomic_inc(&pb->pb_hold);
                        spin_unlock(&hash->pb_hash_lock);
                        (*(pb->pb_relse)) (pb);
                        spin_lock(&hash->pb_hash_lock);
                        do_free = 0;
                }

                if (pb->pb_flags & PBF_DELWRI) {
                        pb->pb_flags |= PBF_ASYNC;
                        atomic_inc(&pb->pb_hold);
                        pagebuf_delwri_queue(pb, 0);
                        do_free = 0;
                } else if (pb->pb_flags & PBF_FS_MANAGED) {
                        do_free = 0;
                }

                if (do_free) {
                        list_del_init(&pb->pb_hash_list);
                        spin_unlock(&hash->pb_hash_lock);
                        pagebuf_free(pb);
                } else {
                        spin_unlock(&hash->pb_hash_lock);
                }
        }
}

/*
 * Mutual exclusion on buffers.  Locking model:
 *
 * Buffers associated with inodes for which buffer locking
 * is not enabled are not protected by semaphores, and are
 * assumed to be exclusively owned by the caller.  There is a
 * spinlock in the buffer, used by the caller when concurrent
 * access is possible.
 */

/*
 * pagebuf_cond_lock
 *
 * pagebuf_cond_lock locks a buffer object, if it is not already locked.
 * Note that this in no way locks the underlying pages, so it is only
 * useful for synchronizing concurrent use of page buffer objects, not
 * for synchronizing independent access to the underlying pages.
 */
int
pagebuf_cond_lock(                      /* lock buffer, if not locked   */
                                        /* (returns -EBUSY if locked)   */
        xfs_buf_t *pb)
{
        int locked;

        locked = down_trylock(&pb->pb_sema) == 0;
        if (locked) {
                PB_SET_OWNER(pb);
        }
        PB_TRACE(pb, "cond_lock", (long)locked);
        return locked ? 0 : -EBUSY;
}
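
/*
 * Usage sketch (illustrative): the trylock/backoff pattern used by
 * the delwri daemon later in this file:
 *
 *      if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) {
 *              ... pb is locked by us ...
 *              pagebuf_unlock(pb);
 *      }
 *
 * A return of 0 means the semaphore was taken; -EBUSY means another
 * holder owns it and the caller must not touch the buffer.
 */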

/*
 * pagebuf_lock_value
 *
 * Return lock value for a pagebuf.
 */
int
pagebuf_lock_value(
        xfs_buf_t *pb)
{
        return atomic_read(&pb->pb_sema.count);
}

/*
 * pagebuf_lock
 *
 * pagebuf_lock locks a buffer object.  Note that this in no way
 * locks the underlying pages, so it is only useful for synchronizing
 * concurrent use of page buffer objects, not for synchronizing independent
 * access to the underlying pages.
 */
int
pagebuf_lock(
        xfs_buf_t *pb)
{
        PB_TRACE(pb, "lock", 0);
        if (atomic_read(&pb->pb_io_remaining))
                blk_run_address_space(pb->pb_target->pbr_mapping);
        down(&pb->pb_sema);
        PB_SET_OWNER(pb);
        PB_TRACE(pb, "locked", 0);
        return 0;
}

/*
 * pagebuf_unlock
 *
 * pagebuf_unlock releases the lock on the buffer object created by
 * pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying
 * pages created by pagebuf_pin).
 */
void
pagebuf_unlock(                         /* unlock buffer                */
        xfs_buf_t *pb)                  /* buffer to unlock             */
{
        PB_CLEAR_OWNER(pb);
        up(&pb->pb_sema);
        PB_TRACE(pb, "unlock", 0);
}

/*
 * Pinning Buffer Storage in Memory
 */

/*
 * pagebuf_pin
 *
 * pagebuf_pin locks all of the memory represented by a buffer in
 * memory.  Multiple calls to pagebuf_pin and pagebuf_unpin, for
 * the same or different buffers affecting a given page, will
 * properly count the number of outstanding "pin" requests.  The
 * buffer may be released after the pagebuf_pin and a different
 * buffer used when calling pagebuf_unpin, if desired.
 * pagebuf_pin should be used by the file system when it wants to be
 * assured that no attempt will be made to force the affected
 * memory to disk.  It does not assure that a given logical page
 * will not be moved to a different physical page.
 */
void
pagebuf_pin(
        xfs_buf_t *pb)
{
        atomic_inc(&pb->pb_pin_count);
        PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter);
}

/*
 * pagebuf_unpin
 *
 * pagebuf_unpin reverses the locking of memory performed by
 * pagebuf_pin.  Note that both functions affect the logical
 * pages associated with the buffer, not the buffer itself.
 */
void
pagebuf_unpin(
        xfs_buf_t *pb)
{
        if (atomic_dec_and_test(&pb->pb_pin_count)) {
                wake_up_all(&pb->pb_waiters);
        }
        PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter);
}
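
/*
 * Usage sketch (illustrative): pin counts nest, so a caller can hold
 * pages in memory across an arbitrary window:
 *
 *      pagebuf_pin(pb);
 *      ... modify pages; forced writeback is held off ...
 *      pagebuf_unpin(pb);      (wakes pb_waiters when count hits 0)
 *
 * Writers honour the pin via _pagebuf_wait_unpin() before issuing
 * I/O; see pagebuf_iorequest() below.
 */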

int
pagebuf_ispin(
        xfs_buf_t *pb)
{
        return atomic_read(&pb->pb_pin_count);
}

/*
 * pagebuf_wait_unpin
 *
 * pagebuf_wait_unpin waits until all of the memory associated
 * with the buffer is no longer locked in memory.  It returns
 * immediately if none of the affected pages are locked.
 */
static inline void
_pagebuf_wait_unpin(
        xfs_buf_t *pb)
{
        DECLARE_WAITQUEUE(wait, current);

        if (atomic_read(&pb->pb_pin_count) == 0)
                return;

        add_wait_queue(&pb->pb_waiters, &wait);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (atomic_read(&pb->pb_pin_count) == 0)
                        break;
                if (atomic_read(&pb->pb_io_remaining))
                        blk_run_address_space(pb->pb_target->pbr_mapping);
                schedule();
        }
        remove_wait_queue(&pb->pb_waiters, &wait);
        set_current_state(TASK_RUNNING);
}

/*
 * Buffer Utility Routines
 */

/*
 * pagebuf_iodone
 *
 * pagebuf_iodone marks a buffer for which I/O is in progress
 * done with respect to that I/O.  The pb_iodone routine, if
 * present, will be called as a side-effect.
 */
void
pagebuf_iodone_work(
        void *v)
{
        xfs_buf_t *bp = (xfs_buf_t *)v;

        if (bp->pb_iodone)
                (*(bp->pb_iodone))(bp);
        else if (bp->pb_flags & PBF_ASYNC)
                xfs_buf_relse(bp);
}

void
pagebuf_iodone(
        xfs_buf_t *pb,
        int dataio,
        int schedule)
{
        pb->pb_flags &= ~(PBF_READ | PBF_WRITE);
        if (pb->pb_error == 0) {
                pb->pb_flags &= ~(PBF_PARTIAL | PBF_NONE);
        }

        PB_TRACE(pb, "iodone", pb->pb_iodone);

        if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
                if (schedule) {
                        INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
                        queue_work(dataio ? pagebuf_dataio_workqueue :
                                pagebuf_logio_workqueue, &pb->pb_iodone_work);
                } else {
                        pagebuf_iodone_work(pb);
                }
        } else {
                up(&pb->pb_iodonesema);
        }
}

/*
 * pagebuf_ioerror
 *
 * pagebuf_ioerror sets the error code for a buffer.
 */
void
pagebuf_ioerror(                        /* mark/clear buffer error flag */
        xfs_buf_t *pb,                  /* buffer to mark               */
        int error)                      /* error to store (0 if none)   */
{
        ASSERT(error >= 0 && error <= 0xffff);
        pb->pb_error = (unsigned short)error;
        PB_TRACE(pb, "ioerror", (unsigned long)error);
}

/*
 * pagebuf_iostart
 *
 * pagebuf_iostart initiates I/O on a buffer, based on the flags supplied.
 * If necessary, it will arrange for any disk space allocation required,
 * and it will break up the request if the block mappings require it.
 * The pb_iodone routine in the buffer supplied will only be called
 * when all of the subsidiary I/O requests, if any, have been completed.
 * pagebuf_iostart calls the pagebuf_ioinitiate routine or
 * pagebuf_iorequest, if the former routine is not defined, to start
 * the I/O on a given low-level request.
 */
int
pagebuf_iostart(                        /* start I/O on a buffer          */
        xfs_buf_t *pb,                  /* buffer to start                */
        page_buf_flags_t flags)         /* PBF_LOCK, PBF_ASYNC, PBF_READ, */
                                        /* PBF_WRITE, PBF_DELWRI,         */
                                        /* PBF_DONT_BLOCK                 */
{
        int status = 0;

        PB_TRACE(pb, "iostart", (unsigned long)flags);

        if (flags & PBF_DELWRI) {
                pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC);
                pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC);
                pagebuf_delwri_queue(pb, 1);
                return status;
        }

        pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI |
                        PBF_READ_AHEAD | _PBF_RUN_QUEUES);
        pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC |
                        PBF_READ_AHEAD | _PBF_RUN_QUEUES);

        BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL);

        /* For writes allow an alternate strategy routine to precede
         * the actual I/O request (which may not be issued at all in
         * a shutdown situation, for example).
         */
        status = (flags & PBF_WRITE) ?
                pagebuf_iostrategy(pb) : pagebuf_iorequest(pb);

        /* Wait for I/O if we are not an async request.
         * Note: async I/O request completion will release the buffer,
         * and that can already be done by this point.  So using the
         * buffer pointer from here on, after async I/O, is invalid.
         */
        if (!status && !(flags & PBF_ASYNC))
                status = pagebuf_iowait(pb);

        return status;
}
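
/*
 * Usage sketch (illustrative): synchronous callers block in
 * pagebuf_iowait() and may keep using the buffer; async callers must
 * not touch it once iostart returns:
 *
 *      error = pagebuf_iostart(bp, PBF_WRITE);         (synchronous)
 *      if (!error)
 *              ... bp is still valid here ...
 *
 *      pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC);     (fire and forget)
 *      ... bp may already have been released ...
 */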

/*
 * Helper routine for pagebuf_iorequest
 */
STATIC __inline__ int
_pagebuf_iolocked(
        xfs_buf_t *pb)
{
        ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE));
        if (pb->pb_flags & PBF_READ)
                return pb->pb_locked;
        return 0;
}

STATIC __inline__ void
_pagebuf_iodone(
        xfs_buf_t *pb,
        int schedule)
{
        if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
                pb->pb_locked = 0;
                pagebuf_iodone(pb, (pb->pb_flags & PBF_FS_DATAIOD), schedule);
        }
}

STATIC int
bio_end_io_pagebuf(
        struct bio *bio,
        unsigned int bytes_done,
        int error)
{
        xfs_buf_t *pb = (xfs_buf_t *)bio->bi_private;
        unsigned int i, blocksize = pb->pb_target->pbr_bsize;
        unsigned int sectorshift = pb->pb_target->pbr_sshift;
        struct bio_vec *bvec = bio->bi_io_vec;

        if (bio->bi_size)
                return 1;

        if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                pb->pb_error = EIO;

        for (i = 0; i < bio->bi_vcnt; i++, bvec++) {
                struct page *page = bvec->bv_page;

                if (pb->pb_error) {
                        SetPageError(page);
                } else if (blocksize == PAGE_CACHE_SIZE) {
                        SetPageUptodate(page);
                } else if (!PagePrivate(page) &&
                                (pb->pb_flags & _PBF_PAGE_CACHE)) {
                        unsigned long j, range;

                        ASSERT(blocksize < PAGE_CACHE_SIZE);
                        range = (bvec->bv_offset + bvec->bv_len) >> sectorshift;
                        for (j = bvec->bv_offset >> sectorshift; j < range; j++)
                                set_bit(j, &page->private);
                        if (page->private == (unsigned long)(PAGE_CACHE_SIZE-1))
                                SetPageUptodate(page);
                }

                if (_pagebuf_iolocked(pb)) {
                        unlock_page(page);
                }
        }

        _pagebuf_iodone(pb, 1);
        bio_put(bio);
        return 0;
}

STATIC void
_pagebuf_ioapply(
        xfs_buf_t *pb)
{
        int i, map_i, total_nr_pages, nr_pages;
        struct bio *bio;
        int offset = pb->pb_offset;
        int size = pb->pb_count_desired;
        sector_t sector = pb->pb_bn;
        unsigned int blocksize = pb->pb_target->pbr_bsize;
        int locking = _pagebuf_iolocked(pb);

        total_nr_pages = pb->pb_page_count;
        map_i = 0;

        /* Special code path for reading a sub page size pagebuf in --
         * we populate the whole page, and hence the other metadata
         * in the same page.  This optimization is only valid when the
         * filesystem block size and the page size are equal.
         */
        if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) &&
            (pb->pb_flags & PBF_READ) && locking &&
            (blocksize == PAGE_CACHE_SIZE)) {
                bio = bio_alloc(GFP_NOIO, 1);

                bio->bi_bdev = pb->pb_target->pbr_bdev;
                bio->bi_sector = sector - (offset >> BBSHIFT);
                bio->bi_end_io = bio_end_io_pagebuf;
                bio->bi_private = pb;

                bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0);
                size = 0;

                atomic_inc(&pb->pb_io_remaining);

                goto submit_io;
        }

        /* Lock down the pages which we need to for the request */
        if (locking && (pb->pb_flags & PBF_WRITE) && (pb->pb_locked == 0)) {
                for (i = 0; size; i++) {
                        int nbytes = PAGE_CACHE_SIZE - offset;
                        struct page *page = pb->pb_pages[i];

                        if (nbytes > size)
                                nbytes = size;

                        lock_page(page);

                        size -= nbytes;
                        offset = 0;
                }
                offset = pb->pb_offset;
                size = pb->pb_count_desired;
        }

next_chunk:
        atomic_inc(&pb->pb_io_remaining);
        nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
        if (nr_pages > total_nr_pages)
                nr_pages = total_nr_pages;

        bio = bio_alloc(GFP_NOIO, nr_pages);
        bio->bi_bdev = pb->pb_target->pbr_bdev;
        bio->bi_sector = sector;
        bio->bi_end_io = bio_end_io_pagebuf;
        bio->bi_private = pb;

        for (; size && nr_pages; nr_pages--, map_i++) {
                int nbytes = PAGE_CACHE_SIZE - offset;

                if (nbytes > size)
                        nbytes = size;

                if (bio_add_page(bio, pb->pb_pages[map_i],
                                nbytes, offset) < nbytes)
                        break;

                offset = 0;
                sector += nbytes >> BBSHIFT;
                size -= nbytes;
                total_nr_pages--;
        }

submit_io:
        if (likely(bio->bi_size)) {
                submit_bio((pb->pb_flags & PBF_READ) ? READ : WRITE, bio);
                if (size)
                        goto next_chunk;
        } else {
                bio_put(bio);
                pagebuf_ioerror(pb, EIO);
        }

        if (pb->pb_flags & _PBF_RUN_QUEUES) {
                pb->pb_flags &= ~_PBF_RUN_QUEUES;
                if (atomic_read(&pb->pb_io_remaining) > 1)
                        blk_run_address_space(pb->pb_target->pbr_mapping);
        }
}

/*
 * pagebuf_iorequest -- the core I/O request routine.
 */
int
pagebuf_iorequest(                      /* start real I/O               */
        xfs_buf_t *pb)                  /* buffer to convey to device   */
{
        PB_TRACE(pb, "iorequest", 0);

        if (pb->pb_flags & PBF_DELWRI) {
                pagebuf_delwri_queue(pb, 1);
                return 0;
        }

        if (pb->pb_flags & PBF_WRITE) {
                _pagebuf_wait_unpin(pb);
        }

        pagebuf_hold(pb);

        /* Set the count to 1 initially, this will stop an I/O
         * completion callout which happens before we have started
         * all the I/O from calling pagebuf_iodone too early.
         */
        atomic_set(&pb->pb_io_remaining, 1);
        _pagebuf_ioapply(pb);
        _pagebuf_iodone(pb, 0);

        pagebuf_rele(pb);
        return 0;
}

/*
 * pagebuf_iowait
 *
 * pagebuf_iowait waits for I/O to complete on the buffer supplied.
 * It returns immediately if no I/O is pending.  In any case, it returns
 * the error code, if any, or 0 if there is no error.
 */
int
pagebuf_iowait(
        xfs_buf_t *pb)
{
        PB_TRACE(pb, "iowait", 0);
        if (atomic_read(&pb->pb_io_remaining))
                blk_run_address_space(pb->pb_target->pbr_mapping);
        down(&pb->pb_iodonesema);
        PB_TRACE(pb, "iowaited", (long)pb->pb_error);
        return pb->pb_error;
}

caddr_t
pagebuf_offset(
        xfs_buf_t *pb,
        size_t offset)
{
        struct page *page;

        offset += pb->pb_offset;

        page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT];
        return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1));
}

/*
 * pagebuf_iomove
 *
 * Move data into or out of a buffer.
 */
void
pagebuf_iomove(
        xfs_buf_t *pb,                  /* buffer to process            */
        size_t boff,                    /* starting buffer offset       */
        size_t bsize,                   /* length to copy               */
        caddr_t data,                   /* data address                 */
        page_buf_rw_t mode)             /* read/write flag              */
{
        size_t bend, cpoff, csize;
        struct page *page;

        bend = boff + bsize;
        while (boff < bend) {
                page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)];
                cpoff = page_buf_poff(boff + pb->pb_offset);
                csize = min_t(size_t,
                              PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff);

                ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));

                switch (mode) {
                case PBRW_ZERO:
                        memset(page_address(page) + cpoff, 0, csize);
                        break;
                case PBRW_READ:
                        memcpy(data, page_address(page) + cpoff, csize);
                        break;
                case PBRW_WRITE:
                        memcpy(page_address(page) + cpoff, data, csize);
                }

                boff += csize;
                data += csize;
        }
}
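
/*
 * Usage sketch (illustrative): pagebuf_iomove() hides the page
 * boundaries of an unmapped buffer, e.g. zeroing or filling a
 * sub-range:
 *
 *      pagebuf_iomove(bp, boff, len, NULL, PBRW_ZERO);
 *      pagebuf_iomove(bp, boff, len, data, PBRW_WRITE);
 *
 * PBRW_ZERO ignores the data pointer; the loop above clamps each
 * chunk to both the page boundary and pb_count_desired.
 */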

/*
 * Handling of buftargs.
 */

/*
 * Wait for any bufs with callbacks that have been submitted but
 * have not yet returned... walk the hash list for the target.
 */
void
xfs_wait_buftarg(
        xfs_buftarg_t *target)
{
        xfs_buf_t *pb, *n;
        pb_hash_t *h;
        int i;

        for (i = 0; i < NHASH; i++) {
                h = &pbhash[i];
again:
                spin_lock(&h->pb_hash_lock);
                list_for_each_entry_safe(pb, n, &h->pb_hash, pb_hash_list) {
                        if (pb->pb_target == target &&
                            !(pb->pb_flags & PBF_FS_MANAGED)) {
                                spin_unlock(&h->pb_hash_lock);
                                delay(100);
                                goto again;
                        }
                }
                spin_unlock(&h->pb_hash_lock);
        }
}

void
xfs_free_buftarg(
        xfs_buftarg_t *btp,
        int external)
{
        xfs_flush_buftarg(btp, 1);
        if (external)
                xfs_blkdev_put(btp->pbr_bdev);
        iput(btp->pbr_mapping->host);
        kmem_free(btp, sizeof(*btp));
}

void
xfs_incore_relse(
        xfs_buftarg_t *btp,
        int delwri_only,
        int wait)
{
        invalidate_bdev(btp->pbr_bdev, 1);
        truncate_inode_pages(btp->pbr_mapping, 0LL);
}

int
xfs_setsize_buftarg(
        xfs_buftarg_t *btp,
        unsigned int blocksize,
        unsigned int sectorsize)
{
        btp->pbr_bsize = blocksize;
        btp->pbr_sshift = ffs(sectorsize) - 1;
        btp->pbr_smask = sectorsize - 1;

        if (set_blocksize(btp->pbr_bdev, sectorsize)) {
                printk(KERN_WARNING
                        "XFS: Cannot set_blocksize to %u on device %s\n",
                        sectorsize, XFS_BUFTARG_NAME(btp));
                return EINVAL;
        }
        return 0;
}

STATIC int
xfs_mapping_buftarg(
        xfs_buftarg_t *btp,
        struct block_device *bdev)
{
        struct backing_dev_info *bdi;
        struct inode *inode;
        struct address_space *mapping;
        static struct address_space_operations mapping_aops = {
                .sync_page = block_sync_page,
        };

        inode = new_inode(bdev->bd_inode->i_sb);
        if (!inode) {
                printk(KERN_WARNING
                        "XFS: Cannot allocate mapping inode for device %s\n",
                        XFS_BUFTARG_NAME(btp));
                return ENOMEM;
        }
        inode->i_mode = S_IFBLK;
        inode->i_bdev = bdev;
        inode->i_rdev = bdev->bd_dev;
        bdi = blk_get_backing_dev_info(bdev);
        if (!bdi)
                bdi = &default_backing_dev_info;
        mapping = &inode->i_data;
        mapping->a_ops = &mapping_aops;
        mapping->backing_dev_info = bdi;
        mapping_set_gfp_mask(mapping, GFP_KERNEL);
        btp->pbr_mapping = mapping;
        return 0;
}

xfs_buftarg_t *
xfs_alloc_buftarg(
        struct block_device *bdev)
{
        xfs_buftarg_t *btp;

        btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

        btp->pbr_dev = bdev->bd_dev;
        btp->pbr_bdev = bdev;
        if (xfs_setsize_buftarg(btp, PAGE_CACHE_SIZE, bdev_hardsect_size(bdev)))
                goto error;
        if (xfs_mapping_buftarg(btp, bdev))
                goto error;
        return btp;

error:
        kmem_free(btp, sizeof(*btp));
        return NULL;
}

/*
 * Pagebuf delayed write buffer handling
 */

STATIC LIST_HEAD(pbd_delwrite_queue);
STATIC spinlock_t pbd_delwrite_lock = SPIN_LOCK_UNLOCKED;

STATIC void
pagebuf_delwri_queue(
        xfs_buf_t *pb,
        int unlock)
{
        PB_TRACE(pb, "delwri_q", (long)unlock);
        ASSERT(pb->pb_flags & PBF_DELWRI);

        spin_lock(&pbd_delwrite_lock);
        /* If already in the queue, dequeue and place at tail */
        if (!list_empty(&pb->pb_list)) {
                if (unlock) {
                        atomic_dec(&pb->pb_hold);
                }
                list_del(&pb->pb_list);
        }

        list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
        pb->pb_queuetime = jiffies;
        spin_unlock(&pbd_delwrite_lock);

        if (unlock)
                pagebuf_unlock(pb);
}

void
pagebuf_delwri_dequeue(
        xfs_buf_t *pb)
{
        int dequeued = 0;

        spin_lock(&pbd_delwrite_lock);
        if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
                list_del_init(&pb->pb_list);
                dequeued = 1;
        }
        pb->pb_flags &= ~PBF_DELWRI;
        spin_unlock(&pbd_delwrite_lock);

        if (dequeued)
                pagebuf_rele(pb);

        PB_TRACE(pb, "delwri_dq", (long)dequeued);
}
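
/*
 * Lifecycle sketch (illustrative): a delayed-write buffer carries an
 * extra hold while it sits on pbd_delwrite_queue:
 *
 *      pagebuf_iostart(bp, PBF_DELWRI | PBF_ASYNC);    (queue + unlock)
 *      ...                                             (daemon ages it)
 *      pagebuf_delwri_dequeue(bp);                     (cancel, drop hold)
 *
 * The daemon below moves aged buffers off the queue, flips PBF_DELWRI
 * to PBF_WRITE and issues the I/O via pagebuf_iostrategy().
 */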

STATIC void
pagebuf_runall_queues(
        struct workqueue_struct *queue)
{
        flush_workqueue(queue);
}

/* Defines for pagebuf daemon */
STATIC DECLARE_COMPLETION(pagebuf_daemon_done);
STATIC struct task_struct *pagebuf_daemon_task;
STATIC int pagebuf_daemon_active;
STATIC int force_flush;

STATIC int
pagebuf_daemon_wakeup(
        int priority,
        unsigned int mask)
{
        force_flush = 1;
        barrier();
        wake_up_process(pagebuf_daemon_task);
        return 0;
}

STATIC int
pagebuf_daemon(
        void *data)
{
        struct list_head tmp;
        unsigned long age;
        xfs_buftarg_t *target;
        xfs_buf_t *pb, *n;

        /* Set up the thread */
        daemonize("xfsbufd");
        current->flags |= PF_MEMALLOC;

        pagebuf_daemon_task = current;
        pagebuf_daemon_active = 1;
        barrier();

        INIT_LIST_HEAD(&tmp);
        do {
                if (current->flags & PF_FREEZE)
                        refrigerator(PF_FREEZE);

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout((xfs_buf_timer_centisecs * HZ) / 100);

                age = (xfs_buf_age_centisecs * HZ) / 100;
                spin_lock(&pbd_delwrite_lock);
                list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
                        PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
                        ASSERT(pb->pb_flags & PBF_DELWRI);

                        if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) {
                                if (!force_flush &&
                                    time_before(jiffies,
                                                pb->pb_queuetime + age)) {
                                        pagebuf_unlock(pb);
                                        break;
                                }

                                pb->pb_flags &= ~PBF_DELWRI;
                                pb->pb_flags |= PBF_WRITE;
                                list_move(&pb->pb_list, &tmp);
                        }
                }
                spin_unlock(&pbd_delwrite_lock);

                while (!list_empty(&tmp)) {
                        pb = list_entry(tmp.next, xfs_buf_t, pb_list);
                        target = pb->pb_target;

                        list_del_init(&pb->pb_list);
                        pagebuf_iostrategy(pb);

                        blk_run_address_space(target->pbr_mapping);
                }

                if (as_list_len > 0)
                        purge_addresses();

                force_flush = 0;
        } while (pagebuf_daemon_active);

        complete_and_exit(&pagebuf_daemon_done, 0);
}

/*
 * Go through all incore buffers, and release buffers if they belong to
 * the given device.  This is used in filesystem error handling to
 * preserve the consistency of its metadata.
 */
int
xfs_flush_buftarg(
        xfs_buftarg_t *target,
        int wait)
{
        struct list_head tmp;
        xfs_buf_t *pb, *n;
        int pincount = 0;

        pagebuf_runall_queues(pagebuf_dataio_workqueue);
        pagebuf_runall_queues(pagebuf_logio_workqueue);

        INIT_LIST_HEAD(&tmp);
        spin_lock(&pbd_delwrite_lock);
        list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {

                if (pb->pb_target != target)
                        continue;

                ASSERT(pb->pb_flags & PBF_DELWRI);
                PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
                if (pagebuf_ispin(pb)) {
                        pincount++;
                        continue;
                }

                pb->pb_flags &= ~PBF_DELWRI;
                pb->pb_flags |= PBF_WRITE;
                list_move(&pb->pb_list, &tmp);
        }
        spin_unlock(&pbd_delwrite_lock);

        /*
         * Dropped the delayed write list lock, now walk the temporary list
         */
        list_for_each_entry_safe(pb, n, &tmp, pb_list) {
                if (wait)
                        pb->pb_flags &= ~PBF_ASYNC;
                else
                        list_del_init(&pb->pb_list);

                pagebuf_lock(pb);
                pagebuf_iostrategy(pb);
        }

        /*
         * Remaining list items must be flushed before returning
         */
        while (!list_empty(&tmp)) {
                pb = list_entry(tmp.next, xfs_buf_t, pb_list);

                list_del_init(&pb->pb_list);
                xfs_iowait(pb);
                xfs_buf_relse(pb);
        }

        if (wait)
                blk_run_address_space(target->pbr_mapping);

        return pincount;
}

STATIC int
pagebuf_daemon_start(void)
{
        int rval;

        pagebuf_logio_workqueue = create_workqueue("xfslogd");
        if (!pagebuf_logio_workqueue)
                return -ENOMEM;

        pagebuf_dataio_workqueue = create_workqueue("xfsdatad");
        if (!pagebuf_dataio_workqueue) {
                destroy_workqueue(pagebuf_logio_workqueue);
                return -ENOMEM;
        }

        rval = kernel_thread(pagebuf_daemon, NULL, CLONE_FS|CLONE_FILES);
        if (rval < 0) {
                destroy_workqueue(pagebuf_logio_workqueue);
                destroy_workqueue(pagebuf_dataio_workqueue);
        }

        return rval;
}

/*
 * pagebuf_daemon_stop
 *
 * Note: do not mark as __exit, it is called from pagebuf_terminate.
 */
STATIC void
pagebuf_daemon_stop(void)
{
        pagebuf_daemon_active = 0;
        barrier();
        wait_for_completion(&pagebuf_daemon_done);

        destroy_workqueue(pagebuf_logio_workqueue);
        destroy_workqueue(pagebuf_dataio_workqueue);
}

/*
 * Initialization and Termination
 */

int __init
pagebuf_init(void)
{
        int i;

        pagebuf_cache = kmem_cache_create("xfs_buf_t", sizeof(xfs_buf_t), 0,
                        SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (pagebuf_cache == NULL) {
                printk("XFS: couldn't init xfs_buf_t cache\n");
                pagebuf_terminate();
                return -ENOMEM;
        }

#ifdef PAGEBUF_TRACE
        pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP);
#endif

        pagebuf_daemon_start();

        pagebuf_shake = kmem_shake_register(pagebuf_daemon_wakeup);
        if (pagebuf_shake == NULL) {
                pagebuf_terminate();
                return -ENOMEM;
        }

        for (i = 0; i < NHASH; i++) {
                spin_lock_init(&pbhash[i].pb_hash_lock);
                INIT_LIST_HEAD(&pbhash[i].pb_hash);
        }

        return 0;
}

/*
 * pagebuf_terminate.
 *
 * Note: do not mark as __exit, this is also called from the __init code.
 */
void
pagebuf_terminate(void)
{
        pagebuf_daemon_stop();

#ifdef PAGEBUF_TRACE
        ktrace_free(pagebuf_trace_buf);
#endif

        kmem_zone_destroy(pagebuf_cache);
        kmem_shake_deregister(pagebuf_shake);
}