[linux-2.6.git] fs/xfs/linux-2.6/xfs_buf.c
1 /*
2  * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it would be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11  *
12  * Further, this software is distributed without any warranty that it is
13  * free of the rightful claim of any third person regarding infringement
14  * or the like.  Any license provided herein, whether implied or
15  * otherwise, applies only to this software file.  Patent licenses, if
16  * any, provided herein do not apply to combinations of this program with
17  * other software, or any other product whatsoever.
18  *
19  * You should have received a copy of the GNU General Public License along
20  * with this program; if not, write the Free Software Foundation, Inc., 59
21  * Temple Place - Suite 330, Boston MA 02111-1307, USA.
22  *
23  * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24  * Mountain View, CA  94043, or:
25  *
26  * http://www.sgi.com
27  *
28  * For further information regarding this notice, see:
29  *
30  * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31  */
32
33 /*
34  *      The xfs_buf.c code provides an abstract buffer cache model on top
35  *      of the Linux page cache.  Cached metadata blocks for a file system
36  *      are hashed to the inode for the block device.  xfs_buf.c assembles
37  *      buffers (xfs_buf_t) on demand to aggregate such cached pages for I/O.
38  *
39  *      Written by Steve Lord, Jim Mostek, Russell Cattelan
40  *                  and Rajagopal Ananthanarayanan ("ananth") at SGI.
41  *
42  */
43
44 #include <linux/stddef.h>
45 #include <linux/errno.h>
46 #include <linux/slab.h>
47 #include <linux/pagemap.h>
48 #include <linux/init.h>
49 #include <linux/vmalloc.h>
50 #include <linux/bio.h>
51 #include <linux/sysctl.h>
52 #include <linux/proc_fs.h>
53 #include <linux/workqueue.h>
54 #include <linux/suspend.h>
55 #include <linux/percpu.h>
56
57 #include "xfs_linux.h"
58
59 #ifndef GFP_READAHEAD
60 #define GFP_READAHEAD   (__GFP_NOWARN|__GFP_NORETRY)
61 #endif
62
63 /*
64  * File wide globals
65  */
66
67 STATIC kmem_cache_t *pagebuf_cache;
68 STATIC kmem_shaker_t pagebuf_shake;
69 STATIC int pagebuf_daemon_wakeup(int, unsigned int);
70 STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
71 STATIC struct workqueue_struct *pagebuf_logio_workqueue;
72 STATIC struct workqueue_struct *pagebuf_dataio_workqueue;
73
74 /*
75  * Pagebuf debugging
76  */
77
78 #ifdef PAGEBUF_TRACE
79 void
80 pagebuf_trace(
81         xfs_buf_t       *pb,
82         char            *id,
83         void            *data,
84         void            *ra)
85 {
86         ktrace_enter(pagebuf_trace_buf,
87                 pb, id,
88                 (void *)(unsigned long)pb->pb_flags,
89                 (void *)(unsigned long)pb->pb_hold.counter,
90                 (void *)(unsigned long)pb->pb_sema.count.counter,
91                 (void *)current,
92                 data, ra,
93                 (void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff),
94                 (void *)(unsigned long)(pb->pb_file_offset & 0xffffffff),
95                 (void *)(unsigned long)pb->pb_buffer_length,
96                 NULL, NULL, NULL, NULL, NULL);
97 }
98 ktrace_t *pagebuf_trace_buf;
99 #define PAGEBUF_TRACE_SIZE      4096
100 #define PB_TRACE(pb, id, data)  \
101         pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0))
102 #else
103 #define PB_TRACE(pb, id, data)  do { } while (0)
104 #endif
105
106 #ifdef PAGEBUF_LOCK_TRACKING
107 # define PB_SET_OWNER(pb)       ((pb)->pb_last_holder = current->pid)
108 # define PB_CLEAR_OWNER(pb)     ((pb)->pb_last_holder = -1)
109 # define PB_GET_OWNER(pb)       ((pb)->pb_last_holder)
110 #else
111 # define PB_SET_OWNER(pb)       do { } while (0)
112 # define PB_CLEAR_OWNER(pb)     do { } while (0)
113 # define PB_GET_OWNER(pb)       do { } while (0)
114 #endif
115
116 /*
117  * Pagebuf allocation / freeing.
118  */
119
120 #define pb_to_gfp(flags) \
121         (((flags) & PBF_READ_AHEAD) ? GFP_READAHEAD : \
122          ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL)
123
124 #define pb_to_km(flags) \
125          (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
126
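/*
 * Illustrative expansion of the macros above: a read-ahead buffer
 * allocates its pages with GFP_READAHEAD, a PBF_DONT_BLOCK buffer with
 * GFP_NOFS (and KM_NOFS on the slab side), and everything else with
 * GFP_KERNEL / KM_SLEEP.  For example:
 *
 *      pb_to_gfp(PBF_READ | PBF_READ_AHEAD)    == GFP_READAHEAD
 *      pb_to_gfp(PBF_READ | PBF_DONT_BLOCK)    == GFP_NOFS
 *      pb_to_km(PBF_LOCK)                      == KM_SLEEP
 */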
127
128 #define pagebuf_allocate(flags) \
129         kmem_zone_alloc(pagebuf_cache, pb_to_km(flags))
130 #define pagebuf_deallocate(pb) \
131         kmem_zone_free(pagebuf_cache, (pb));
132
133 /*
134  * Pagebuf hashing
135  */
136
137 #define NBITS   8
138 #define NHASH   (1<<NBITS)
139
140 typedef struct {
141         struct list_head        pb_hash;
142         spinlock_t              pb_hash_lock;
143 } pb_hash_t;
144
145 STATIC pb_hash_t        pbhash[NHASH];
146 #define pb_hash(pb)     &pbhash[pb->pb_hash_index]
147
148 STATIC int
149 _bhash(
150         struct block_device *bdev,
151         loff_t          base)
152 {
153         int             bit, hval;
154
155         base >>= 9;
156         base ^= (unsigned long)bdev / L1_CACHE_BYTES;
157         for (bit = hval = 0; base && bit < sizeof(base) * 8; bit += NBITS) {
158                 hval ^= (int)base & (NHASH-1);
159                 base >>= NBITS;
160         }
161         return hval;
162 }
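
/*
 * Worked example of the fold above (ignoring the bdev mixing for
 * clarity): a byte offset of 0x12345600 gives base = 0x91a2b after the
 * shift by 9, and XOR-folding the 8-bit chunks 0x2b ^ 0x1a ^ 0x09
 * yields hval = 0x38, i.e. bucket 56 of the NHASH (256) buckets.
 */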
163
164 /*
165  * Mapping of multi-page buffers into contiguous virtual space
166  */
167
168 typedef struct a_list {
169         void            *vm_addr;
170         struct a_list   *next;
171 } a_list_t;
172
173 STATIC a_list_t         *as_free_head;
174 STATIC int              as_list_len;
175 STATIC spinlock_t       as_lock = SPIN_LOCK_UNLOCKED;
176
177 /*
178  * Try to batch vunmaps because they are costly.
179  */
180 STATIC void
181 free_address(
182         void            *addr)
183 {
184         a_list_t        *aentry;
185
186         aentry = kmalloc(sizeof(a_list_t), GFP_ATOMIC);
187         if (aentry) {
188                 spin_lock(&as_lock);
189                 aentry->next = as_free_head;
190                 aentry->vm_addr = addr;
191                 as_free_head = aentry;
192                 as_list_len++;
193                 spin_unlock(&as_lock);
194         } else {
195                 vunmap(addr);
196         }
197 }
198
199 STATIC void
200 purge_addresses(void)
201 {
202         a_list_t        *aentry, *old;
203
204         if (as_free_head == NULL)
205                 return;
206
207         spin_lock(&as_lock);
208         aentry = as_free_head;
209         as_free_head = NULL;
210         as_list_len = 0;
211         spin_unlock(&as_lock);
212
213         while ((old = aentry) != NULL) {
214                 vunmap(aentry->vm_addr);
215                 aentry = aentry->next;
216                 kfree(old);
217         }
218 }
219
220 /*
221  *      Internal pagebuf object manipulation
222  */
223
224 STATIC void
225 _pagebuf_initialize(
226         xfs_buf_t               *pb,
227         xfs_buftarg_t           *target,
228         loff_t                  range_base,
229         size_t                  range_length,
230         page_buf_flags_t        flags)
231 {
232         /*
233          * We don't want certain flags to appear in pb->pb_flags.
234          */
235         flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);
236
237         memset(pb, 0, sizeof(xfs_buf_t));
238         atomic_set(&pb->pb_hold, 1);
239         init_MUTEX_LOCKED(&pb->pb_iodonesema);
240         INIT_LIST_HEAD(&pb->pb_list);
241         INIT_LIST_HEAD(&pb->pb_hash_list);
242         init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */
243         PB_SET_OWNER(pb);
244         pb->pb_target = target;
245         pb->pb_file_offset = range_base;
246         /*
247          * Set buffer_length and count_desired to the same value initially.
248          * I/O routines should use count_desired, which will be the same in
249          * most cases but may be reset (e.g. XFS recovery).
250          */
251         pb->pb_buffer_length = pb->pb_count_desired = range_length;
252         pb->pb_flags = flags | PBF_NONE;
253         pb->pb_bn = XFS_BUF_DADDR_NULL;
254         atomic_set(&pb->pb_pin_count, 0);
255         init_waitqueue_head(&pb->pb_waiters);
256
257         XFS_STATS_INC(pb_create);
258         PB_TRACE(pb, "initialize", target);
259 }
260
261 /*
262  * Allocate a page array capable of holding a specified number
263  * of pages, and point the page buf at it.
264  */
265 STATIC int
266 _pagebuf_get_pages(
267         xfs_buf_t               *pb,
268         int                     page_count,
269         page_buf_flags_t        flags)
270 {
271         /* Make sure that we have a page list */
272         if (pb->pb_pages == NULL) {
273                 pb->pb_offset = page_buf_poff(pb->pb_file_offset);
274                 pb->pb_page_count = page_count;
275                 if (page_count <= PB_PAGES) {
276                         pb->pb_pages = pb->pb_page_array;
277                 } else {
278                         pb->pb_pages = kmem_alloc(sizeof(struct page *) *
279                                         page_count, pb_to_km(flags));
280                         if (pb->pb_pages == NULL)
281                                 return -ENOMEM;
282                 }
283                 memset(pb->pb_pages, 0, sizeof(struct page *) * page_count);
284         }
285         return 0;
286 }
287
288 /*
289  *      Frees pb_pages if it was malloced.
290  */
291 STATIC void
292 _pagebuf_free_pages(
293         xfs_buf_t       *bp)
294 {
295         if (bp->pb_pages != bp->pb_page_array) {
296                 kmem_free(bp->pb_pages,
297                           bp->pb_page_count * sizeof(struct page *));
298         }
299 }
300
301 /*
302  *      Releases the specified buffer.
303  *
304  *      The modification state of any associated pages is left unchanged.
305  *      The buffer must not be on any hash - use pagebuf_rele instead for
306  *      hashed and refcounted buffers
307  */
308 void
309 pagebuf_free(
310         xfs_buf_t               *bp)
311 {
312         PB_TRACE(bp, "free", 0);
313
314         ASSERT(list_empty(&bp->pb_hash_list));
315
316         if (bp->pb_flags & _PBF_PAGE_CACHE) {
317                 uint            i;
318
319                 if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1))
320                         free_address(bp->pb_addr - bp->pb_offset);
321
322                 for (i = 0; i < bp->pb_page_count; i++)
323                         page_cache_release(bp->pb_pages[i]);
324                 _pagebuf_free_pages(bp);
325         } else if (bp->pb_flags & _PBF_KMEM_ALLOC) {
326                  /*
327                   * XXX(hch): bp->pb_count_desired might be incorrect (see
328                   * pagebuf_associate_memory for details), but fortunately
329                   * the Linux version of kmem_free ignores the len argument..
330                   */
331                 kmem_free(bp->pb_addr, bp->pb_count_desired);
332                 _pagebuf_free_pages(bp);
333         }
334
335         pagebuf_deallocate(bp);
336 }
337
338 /*
339  *      Finds all pages for the buffer in question and builds its page list.
340  */
341 STATIC int
342 _pagebuf_lookup_pages(
343         xfs_buf_t               *bp,
344         uint                    flags)
345 {
346         struct address_space    *mapping = bp->pb_target->pbr_mapping;
347         unsigned int            sectorshift = bp->pb_target->pbr_sshift;
348         size_t                  blocksize = bp->pb_target->pbr_bsize;
349         size_t                  size = bp->pb_count_desired;
350         size_t                  nbytes, offset;
351         int                     gfp_mask = pb_to_gfp(flags);
352         unsigned short          page_count, i;
353         pgoff_t                 first;
354         loff_t                  end;
355         int                     error;
356
357         end = bp->pb_file_offset + bp->pb_buffer_length;
358         page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset);
359
360         error = _pagebuf_get_pages(bp, page_count, flags);
361         if (unlikely(error))
362                 return error;
363         bp->pb_flags |= _PBF_PAGE_CACHE;
364
365         offset = bp->pb_offset;
366         first = bp->pb_file_offset >> PAGE_CACHE_SHIFT;
367
368         for (i = 0; i < bp->pb_page_count; i++) {
369                 struct page     *page;
370                 uint            retries = 0;
371
372               retry:
373                 page = find_or_create_page(mapping, first + i, gfp_mask);
374                 if (unlikely(page == NULL)) {
375                         if (flags & PBF_READ_AHEAD) {
376                                 bp->pb_page_count = i;
377                                 for (i = 0; i < bp->pb_page_count; i++)
378                                         unlock_page(bp->pb_pages[i]);
379                                 return -ENOMEM;
380                         }
381
382                         /*
383                          * This could deadlock.
384                          *
385                          * But until all the XFS lowlevel code is revamped to
386                          * handle buffer allocation failures we can't do much.
387                          */
388                         if (!(++retries % 100))
389                                 printk(KERN_ERR
390                                         "possible deadlock in %s (mode:0x%x)\n",
391                                         __FUNCTION__, gfp_mask);
392
393                         XFS_STATS_INC(pb_page_retries);
394                         pagebuf_daemon_wakeup(0, gfp_mask);
395                         set_current_state(TASK_UNINTERRUPTIBLE);
396                         schedule_timeout(10);
397                         goto retry;
398                 }
399
400                 XFS_STATS_INC(pb_page_found);
401
402                 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
403                 size -= nbytes;
404
405                 if (!PageUptodate(page)) {
406                         page_count--;
407                         if (blocksize == PAGE_CACHE_SIZE) {
408                                 if (flags & PBF_READ)
409                                         bp->pb_locked = 1;
410                         } else if (!PagePrivate(page)) {
411                                 unsigned long   j, range;
412
413                                 /*
414                                  * In this case page->private holds a bitmap
415                                  * of uptodate sectors within the page
416                                  */
417                                 ASSERT(blocksize < PAGE_CACHE_SIZE);
418                                 range = (offset + nbytes) >> sectorshift;
419                                 for (j = offset >> sectorshift; j < range; j++)
420                                         if (!test_bit(j, &page->private))
421                                                 break;
422                                 if (j == range)
423                                         page_count++;
424                         }
425                 }
426
427                 bp->pb_pages[i] = page;
428                 offset = 0;
429         }
430
431         if (!bp->pb_locked) {
432                 for (i = 0; i < bp->pb_page_count; i++)
433                         unlock_page(bp->pb_pages[i]);
434         }
435
436         if (page_count) {
437                 /* if we have any uptodate pages, mark that in the buffer */
438                 bp->pb_flags &= ~PBF_NONE;
439
440                 /* if some pages aren't uptodate, mark that in the buffer */
441                 if (page_count != bp->pb_page_count)
442                         bp->pb_flags |= PBF_PARTIAL;
443         }
444
445         PB_TRACE(bp, "lookup_pages", (long)page_count);
446         return error;
447 }
448
449 /*
450  *      Map buffer into kernel address-space if necessary.
451  */
452 STATIC int
453 _pagebuf_map_pages(
454         xfs_buf_t               *bp,
455         uint                    flags)
456 {
457         /* A single page buffer is always mappable */
458         if (bp->pb_page_count == 1) {
459                 bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset;
460                 bp->pb_flags |= PBF_MAPPED;
461         } else if (flags & PBF_MAPPED) {
462                 if (as_list_len > 64)
463                         purge_addresses();
464                 bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count,
465                                 VM_MAP, PAGE_KERNEL);
466                 if (unlikely(bp->pb_addr == NULL))
467                         return -ENOMEM;
468                 bp->pb_addr += bp->pb_offset;
469                 bp->pb_flags |= PBF_MAPPED;
470         }
471
472         return 0;
473 }
474
475 /*
476  *      Finding and Reading Buffers
477  */
478
479 /*
480  *      _pagebuf_find
481  *
482  *      Looks up, and creates if absent, a lockable buffer for
483  *      a given range of an inode.  The buffer is returned
484  *      locked.  If other overlapping buffers exist, they are
485  *      released before the new buffer is created and locked,
486  *      which may imply that this call will block until those buffers
487  *      are unlocked.  No I/O is implied by this call.
488  */
489 STATIC xfs_buf_t *
490 _pagebuf_find(                          /* find buffer for block        */
491         xfs_buftarg_t           *target,/* target for block             */
492         loff_t                  ioff,   /* starting offset of range     */
493         size_t                  isize,  /* length of range              */
494         page_buf_flags_t        flags,  /* PBF_TRYLOCK                  */
495         xfs_buf_t               *new_pb)/* newly allocated buffer       */
496 {
497         loff_t                  range_base;
498         size_t                  range_length;
499         int                     hval;
500         pb_hash_t               *h;
501         xfs_buf_t               *pb, *n;
502         int                     not_locked;
503
504         range_base = (ioff << BBSHIFT);
505         range_length = (isize << BBSHIFT);
506
507         /* Ensure we never do IOs smaller than the sector size */
508         BUG_ON(range_length < (1 << target->pbr_sshift));
509
510         /* Ensure we never do IOs that are not sector aligned */
511         BUG_ON(range_base & (loff_t)target->pbr_smask);
512
513         hval = _bhash(target->pbr_bdev, range_base);
514         h = &pbhash[hval];
515
516         spin_lock(&h->pb_hash_lock);
517         list_for_each_entry_safe(pb, n, &h->pb_hash, pb_hash_list) {
518                 if (pb->pb_target == target &&
519                     pb->pb_file_offset == range_base &&
520                     pb->pb_buffer_length == range_length) {
521                         /* If we look at something bring it to the
522                          * front of the list for next time
523                          */
524                         atomic_inc(&pb->pb_hold);
525                         list_move(&pb->pb_hash_list, &h->pb_hash);
526                         goto found;
527                 }
528         }
529
530         /* No match found */
531         if (new_pb) {
532                 _pagebuf_initialize(new_pb, target, range_base,
533                                 range_length, flags);
534                 new_pb->pb_hash_index = hval;
535                 list_add(&new_pb->pb_hash_list, &h->pb_hash);
536         } else {
537                 XFS_STATS_INC(pb_miss_locked);
538         }
539
540         spin_unlock(&h->pb_hash_lock);
541         return (new_pb);
542
543 found:
544         spin_unlock(&h->pb_hash_lock);
545
546         /* Attempt to get the semaphore without sleeping,
547          * if this does not work then we need to drop the
548          * spinlock and do a hard attempt on the semaphore.
549          */
550         not_locked = down_trylock(&pb->pb_sema);
551         if (not_locked) {
552                 if (!(flags & PBF_TRYLOCK)) {
553                         /* wait for buffer ownership */
554                         PB_TRACE(pb, "get_lock", 0);
555                         pagebuf_lock(pb);
556                         XFS_STATS_INC(pb_get_locked_waited);
557                 } else {
558                         /* We asked for a trylock and failed; no need
559                          * to look at file offset and length here, we
560                          * know that this pagebuf at least overlaps our
561                          * pagebuf and is locked, therefore our buffer
562                          * either does not exist, or is this buffer
563                          */
564
565                         pagebuf_rele(pb);
566                         XFS_STATS_INC(pb_busy_locked);
567                         return (NULL);
568                 }
569         } else {
570                 /* trylock worked */
571                 PB_SET_OWNER(pb);
572         }
573
574         if (pb->pb_flags & PBF_STALE)
575                 pb->pb_flags &= PBF_MAPPED;
576         PB_TRACE(pb, "got_lock", 0);
577         XFS_STATS_INC(pb_get_locked);
578         return (pb);
579 }
580
581
582 /*
583  *      pagebuf_find
584  *
585  *      pagebuf_find returns a buffer matching the specified range of
586  *      data for the specified target, if any of the relevant blocks
587  *      are in memory.  The buffer may have unallocated holes, if
588  *      some, but not all, of the blocks are in memory.  Even where
589  *      pages are present in the buffer, not all of every page may be
590  *      valid.
591  */
592 xfs_buf_t *
593 pagebuf_find(                           /* find buffer for block        */
594                                         /* if the block is in memory    */
595         xfs_buftarg_t           *target,/* target for block             */
596         loff_t                  ioff,   /* starting offset of range     */
597         size_t                  isize,  /* length of range              */
598         page_buf_flags_t        flags)  /* PBF_TRYLOCK                  */
599 {
600         return _pagebuf_find(target, ioff, isize, flags, NULL);
601 }
602
603 /*
604  *      pagebuf_get
605  *
606  *      pagebuf_get assembles a buffer covering the specified range.
607  *      Some or all of the blocks in the range may be valid.  Storage
608  *      in memory for all portions of the buffer will be allocated,
609  *      although backing storage may not be.  If PBF_READ is set in
610  *      flags, pagebuf_iostart is called also.
611  */
612 xfs_buf_t *
613 pagebuf_get(                            /* allocate a buffer            */
614         xfs_buftarg_t           *target,/* target for buffer            */
615         loff_t                  ioff,   /* starting offset of range     */
616         size_t                  isize,  /* length of range              */
617         page_buf_flags_t        flags)  /* PBF_TRYLOCK                  */
618 {
619         xfs_buf_t               *pb, *new_pb;
620         int                     error = 0, i;
621
622         new_pb = pagebuf_allocate(flags);
623         if (unlikely(!new_pb))
624                 return NULL;
625
626         pb = _pagebuf_find(target, ioff, isize, flags, new_pb);
627         if (pb == new_pb) {
628                 error = _pagebuf_lookup_pages(pb, flags);
629                 if (error)
630                         goto no_buffer;
631         } else {
632                 pagebuf_deallocate(new_pb);
633                 if (unlikely(pb == NULL))
634                         return NULL;
635         }
636
637         for (i = 0; i < pb->pb_page_count; i++)
638                 mark_page_accessed(pb->pb_pages[i]);
639
640         if (!(pb->pb_flags & PBF_MAPPED)) {
641                 error = _pagebuf_map_pages(pb, flags);
642                 if (unlikely(error)) {
643                         printk(KERN_WARNING
644                                "pagebuf_get: failed to map pages\n");
645                         goto no_buffer;
646                 }
647         }
648
649         XFS_STATS_INC(pb_get);
650
651         /*
652          * Always fill in the block number now, the mapped cases can do
653          * their own overlay of this later.
654          */
655         pb->pb_bn = ioff;
656         pb->pb_count_desired = pb->pb_buffer_length;
657
658         if (flags & PBF_READ) {
659                 if (PBF_NOT_DONE(pb)) {
660                         PB_TRACE(pb, "get_read", (unsigned long)flags);
661                         XFS_STATS_INC(pb_get_read);
662                         pagebuf_iostart(pb, flags);
663                 } else if (flags & PBF_ASYNC) {
664                         PB_TRACE(pb, "get_read_async", (unsigned long)flags);
665                         /*
666                          * Read ahead call which is already satisfied,
667                          * drop the buffer
668                          */
669                         goto no_buffer;
670                 } else {
671                         PB_TRACE(pb, "get_read_done", (unsigned long)flags);
672                         /* We do not want read in the flags */
673                         pb->pb_flags &= ~PBF_READ;
674                 }
675         } else {
676                 PB_TRACE(pb, "get_write", (unsigned long)flags);
677         }
678
679         return pb;
680
681 no_buffer:
682         if (flags & (PBF_LOCK | PBF_TRYLOCK))
683                 pagebuf_unlock(pb);
684         pagebuf_rele(pb);
685         return NULL;
686 }
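
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * read eight basic blocks (4k) of metadata synchronously, consume the
 * mapped contents, then unlock and drop the reference.  "target",
 * "blkno" and consume_metadata() are placeholders; real XFS code
 * normally reaches this routine through wrappers such as xfs_buf_read.
 *
 *      xfs_buf_t       *bp;
 *
 *      bp = pagebuf_get(target, blkno, 8,
 *                       PBF_LOCK | PBF_READ | PBF_MAPPED);
 *      if (bp) {
 *              if (!bp->pb_error)
 *                      consume_metadata(bp->pb_addr);
 *              pagebuf_unlock(bp);
 *              pagebuf_rele(bp);
 *      }
 */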
687
688 /*
689  * Create a skeletal pagebuf (no pages associated with it).
690  */
691 xfs_buf_t *
692 pagebuf_lookup(
693         xfs_buftarg_t           *target,
694         loff_t                  ioff,
695         size_t                  isize,
696         page_buf_flags_t        flags)
697 {
698         xfs_buf_t               *pb;
699
700         pb = pagebuf_allocate(flags);
701         if (pb) {
702                 _pagebuf_initialize(pb, target, ioff, isize, flags);
703         }
704         return pb;
705 }
706
707 /*
708  * If we are not low on memory, then do the readahead in a
709  * deadlock-safe manner.
710  */
711 void
712 pagebuf_readahead(
713         xfs_buftarg_t           *target,
714         loff_t                  ioff,
715         size_t                  isize,
716         page_buf_flags_t        flags)
717 {
718         struct backing_dev_info *bdi;
719
720         bdi = target->pbr_mapping->backing_dev_info;
721         if (bdi_read_congested(bdi))
722                 return;
723         if (bdi_write_congested(bdi))
724                 return;
725
726         flags |= (PBF_TRYLOCK|PBF_READ|PBF_ASYNC|PBF_READ_AHEAD);
727         pagebuf_get(target, ioff, isize, flags);
728 }
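
/*
 * Readahead sketch (hypothetical caller): kick off an asynchronous,
 * non-blocking read for a range expected to be needed shortly;
 * "target" and "next_blkno" are placeholders.  If the backing device
 * is congested, the routine above returns without queueing any I/O.
 *
 *      pagebuf_readahead(target, next_blkno, 8, 0);
 */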
729
730 xfs_buf_t *
731 pagebuf_get_empty(
732         size_t                  len,
733         xfs_buftarg_t           *target)
734 {
735         xfs_buf_t               *pb;
736
737         pb = pagebuf_allocate(0);
738         if (pb)
739                 _pagebuf_initialize(pb, target, 0, len, 0);
740         return pb;
741 }
742
743 static inline struct page *
744 mem_to_page(
745         void                    *addr)
746 {
747         if (((unsigned long)addr < VMALLOC_START) ||
748             ((unsigned long)addr >= VMALLOC_END)) {
749                 return virt_to_page(addr);
750         } else {
751                 return vmalloc_to_page(addr);
752         }
753 }
754
755 int
756 pagebuf_associate_memory(
757         xfs_buf_t               *pb,
758         void                    *mem,
759         size_t                  len)
760 {
761         int                     rval;
762         int                     i = 0;
763         size_t                  ptr;
764         size_t                  end, end_cur;
765         off_t                   offset;
766         int                     page_count;
767
768         page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
769         offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
770         if (offset && (len > PAGE_CACHE_SIZE))
771                 page_count++;
772
773         /* Free any previous set of page pointers */
774         if (pb->pb_pages)
775                 _pagebuf_free_pages(pb);
776
777         pb->pb_pages = NULL;
778         pb->pb_addr = mem;
779
780         rval = _pagebuf_get_pages(pb, page_count, 0);
781         if (rval)
782                 return rval;
783
784         pb->pb_offset = offset;
785         ptr = (size_t) mem & PAGE_CACHE_MASK;
786         end = PAGE_CACHE_ALIGN((size_t) mem + len);
787         end_cur = end;
788         /* set up first page */
789         pb->pb_pages[0] = mem_to_page(mem);
790
791         ptr += PAGE_CACHE_SIZE;
792         pb->pb_page_count = ++i;
793         while (ptr < end) {
794                 pb->pb_pages[i] = mem_to_page((void *)ptr);
795                 pb->pb_page_count = ++i;
796                 ptr += PAGE_CACHE_SIZE;
797         }
798         pb->pb_locked = 0;
799
800         pb->pb_count_desired = pb->pb_buffer_length = len;
801         pb->pb_flags |= PBF_MAPPED;
802
803         return 0;
804 }
805
806 xfs_buf_t *
807 pagebuf_get_no_daddr(
808         size_t                  len,
809         xfs_buftarg_t           *target)
810 {
811         size_t                  malloc_len = len;
812         xfs_buf_t               *bp;
813         void                    *data;
814         int                     error;
815
816         bp = pagebuf_allocate(0);
817         if (unlikely(bp == NULL))
818                 goto fail;
819         _pagebuf_initialize(bp, target, 0, len, PBF_FORCEIO);
820
821  try_again:
822         data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
823         if (unlikely(data == NULL))
824                 goto fail_free_buf;
825
826         /* check whether alignment matches.. */
827         if ((__psunsigned_t)data !=
828             ((__psunsigned_t)data & ~target->pbr_smask)) {
829                 /* .. else double the size and try again */
830                 kmem_free(data, malloc_len);
831                 malloc_len <<= 1;
832                 goto try_again;
833         }
834
835         error = pagebuf_associate_memory(bp, data, len);
836         if (error)
837                 goto fail_free_mem;
838         bp->pb_flags |= _PBF_KMEM_ALLOC;
839
840         pagebuf_unlock(bp);
841
842         PB_TRACE(bp, "no_daddr", data);
843         return bp;
844  fail_free_mem:
845         kmem_free(data, malloc_len);
846  fail_free_buf:
847         pagebuf_free(bp);
848  fail:
849         return NULL;
850 }
851
852 /*
853  *      pagebuf_hold
854  *
855  *      Increment reference count on buffer, to hold the buffer concurrently
856  *      with another thread which may release (free) the buffer asynchronously.
857  *
858  *      Must hold the buffer already to call this function.
859  */
860 void
861 pagebuf_hold(
862         xfs_buf_t               *pb)
863 {
864         atomic_inc(&pb->pb_hold);
865         PB_TRACE(pb, "hold", 0);
866 }
867
868 /*
869  *      pagebuf_rele
870  *
871  *      pagebuf_rele releases a hold on the specified buffer.  If the
872  *      hold count is 1, pagebuf_rele calls pagebuf_free.
873  */
874 void
875 pagebuf_rele(
876         xfs_buf_t               *pb)
877 {
878         pb_hash_t               *hash = pb_hash(pb);
879
880         PB_TRACE(pb, "rele", pb->pb_relse);
881
882         if (atomic_dec_and_lock(&pb->pb_hold, &hash->pb_hash_lock)) {
883                 int             do_free = 1;
884
885                 if (pb->pb_relse) {
886                         atomic_inc(&pb->pb_hold);
887                         spin_unlock(&hash->pb_hash_lock);
888                         (*(pb->pb_relse)) (pb);
889                         spin_lock(&hash->pb_hash_lock);
890                         do_free = 0;
891                 }
892
893                 if (pb->pb_flags & PBF_DELWRI) {
894                         pb->pb_flags |= PBF_ASYNC;
895                         atomic_inc(&pb->pb_hold);
896                         pagebuf_delwri_queue(pb, 0);
897                         do_free = 0;
898                 } else if (pb->pb_flags & PBF_FS_MANAGED) {
899                         do_free = 0;
900                 }
901
902                 if (do_free) {
903                         list_del_init(&pb->pb_hash_list);
904                         spin_unlock(&hash->pb_hash_lock);
905                         pagebuf_free(pb);
906                 } else {
907                         spin_unlock(&hash->pb_hash_lock);
908                 }
909         }
910 }
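
/*
 * Reference counting sketch (hypothetical caller): take an extra hold
 * before handing the buffer to another context and let that context
 * drop it when done; the final pagebuf_rele either frees the buffer
 * or, for PBF_DELWRI buffers, requeues it for delayed write.
 * start_async_work() is a placeholder whose completion path is assumed
 * to call pagebuf_rele itself.
 *
 *      pagebuf_hold(bp);
 *      start_async_work(bp);
 *      ...
 *      pagebuf_rele(bp);
 */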
911
912
913 /*
914  *      Mutual exclusion on buffers.  Locking model:
915  *
916  *      Buffers associated with inodes for which buffer locking
917  *      is not enabled are not protected by semaphores, and are
918  *      assumed to be exclusively owned by the caller.  There is a
919  *      spinlock in the buffer, used by the caller when concurrent
920  *      access is possible.
921  */
922
923 /*
924  *      pagebuf_cond_lock
925  *
926  *      pagebuf_cond_lock locks a buffer object, if it is not already locked.
927  *      Note that this in no way locks the underlying pages, so it is
928  *      only useful for synchronizing concurrent use of page buffer
929  *      objects, not for synchronizing independent access to the
930  *      underlying pages.
931  */
932 int
933 pagebuf_cond_lock(                      /* lock buffer, if not locked   */
934                                         /* returns -EBUSY if locked)    */
935         xfs_buf_t               *pb)
936 {
937         int                     locked;
938
939         locked = down_trylock(&pb->pb_sema) == 0;
940         if (locked) {
941                 PB_SET_OWNER(pb);
942         }
943         PB_TRACE(pb, "cond_lock", (long)locked);
944         return(locked ? 0 : -EBUSY);
945 }
946
947 /*
948  *      pagebuf_lock_value
949  *
950  *      Return lock value for a pagebuf
951  */
952 int
953 pagebuf_lock_value(
954         xfs_buf_t               *pb)
955 {
956         return(atomic_read(&pb->pb_sema.count));
957 }
958
959 /*
960  *      pagebuf_lock
961  *
962  *      pagebuf_lock locks a buffer object.  Note that this in no way
963  *      locks the underlying pages, so it is only useful for synchronizing
964  *      concurrent use of page buffer objects, not for synchronizing independent
965  *      access to the underlying pages.
966  */
967 int
968 pagebuf_lock(
969         xfs_buf_t               *pb)
970 {
971         PB_TRACE(pb, "lock", 0);
972         if (atomic_read(&pb->pb_io_remaining))
973                 blk_run_address_space(pb->pb_target->pbr_mapping);
974         down(&pb->pb_sema);
975         PB_SET_OWNER(pb);
976         PB_TRACE(pb, "locked", 0);
977         return 0;
978 }
979
980 /*
981  *      pagebuf_unlock
982  *
983  *      pagebuf_unlock releases the lock on the buffer object created by
984  *      pagebuf_lock or pagebuf_cond_lock (not any
985  *      pinning of underlying pages created by pagebuf_pin).
986  */
987 void
988 pagebuf_unlock(                         /* unlock buffer                */
989         xfs_buf_t               *pb)    /* buffer to unlock             */
990 {
991         PB_CLEAR_OWNER(pb);
992         up(&pb->pb_sema);
993         PB_TRACE(pb, "unlock", 0);
994 }
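
/*
 * Locking sketch (hypothetical caller): try for the buffer lock without
 * sleeping, fall back to a blocking pagebuf_lock if that fails, and
 * always pair the lock with pagebuf_unlock.  As noted above, this
 * serializes use of the buffer object only, not access to the
 * underlying pages.
 *
 *      if (pagebuf_cond_lock(bp) == -EBUSY)
 *              pagebuf_lock(bp);
 *      ...
 *      pagebuf_unlock(bp);
 */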
995
996
997 /*
998  *      Pinning Buffer Storage in Memory
999  */
1000
1001 /*
1002  *      pagebuf_pin
1003  *
1004  *      pagebuf_pin locks all of the memory represented by a buffer in
1005  *      memory.  Multiple calls to pagebuf_pin and pagebuf_unpin, for
1006  *      the same or different buffers affecting a given page, will
1007  *      properly count the number of outstanding "pin" requests.  The
1008  *      buffer may be released after the pagebuf_pin and a different
1009  *      buffer used when calling pagebuf_unpin, if desired.
1010  *      pagebuf_pin should be used by the file system when it wants to be
1011  *      assured that no attempt will be made to force the affected
1012  *      memory to disk.  It does not assure that a given logical page
1013  *      will not be moved to a different physical page.
1014  */
1015 void
1016 pagebuf_pin(
1017         xfs_buf_t               *pb)
1018 {
1019         atomic_inc(&pb->pb_pin_count);
1020         PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter);
1021 }
1022
1023 /*
1024  *      pagebuf_unpin
1025  *
1026  *      pagebuf_unpin reverses the locking of memory performed by
1027  *      pagebuf_pin.  Note that both functions affect the logical
1028  *      pages associated with the buffer, not the buffer itself.
1029  */
1030 void
1031 pagebuf_unpin(
1032         xfs_buf_t               *pb)
1033 {
1034         if (atomic_dec_and_test(&pb->pb_pin_count)) {
1035                 wake_up_all(&pb->pb_waiters);
1036         }
1037         PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter);
1038 }
1039
1040 int
1041 pagebuf_ispin(
1042         xfs_buf_t               *pb)
1043 {
1044         return atomic_read(&pb->pb_pin_count);
1045 }
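
/*
 * Pinning sketch (hypothetical caller): keep the buffer from being
 * forced to disk, e.g. while it is covered by an uncommitted log
 * transaction, then release the pin.  Writers can use pagebuf_ispin,
 * or _pagebuf_wait_unpin below, to check for or wait out outstanding
 * pins.
 *
 *      pagebuf_pin(bp);
 *      ...
 *      pagebuf_unpin(bp);
 */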
1046
1047 /*
1048  *      pagebuf_wait_unpin
1049  *
1050  *      pagebuf_wait_unpin waits until all of the memory associated
1051  *      with the buffer is no longer locked in memory.  It returns
1052  *      immediately if none of the affected pages are locked.
1053  */
1054 static inline void
1055 _pagebuf_wait_unpin(
1056         xfs_buf_t               *pb)
1057 {
1058         DECLARE_WAITQUEUE       (wait, current);
1059
1060         if (atomic_read(&pb->pb_pin_count) == 0)
1061                 return;
1062
1063         add_wait_queue(&pb->pb_waiters, &wait);
1064         for (;;) {
1065                 set_current_state(TASK_UNINTERRUPTIBLE);
1066                 if (atomic_read(&pb->pb_pin_count) == 0)
1067                         break;
1068                 if (atomic_read(&pb->pb_io_remaining))
1069                         blk_run_address_space(pb->pb_target->pbr_mapping);
1070                 schedule();
1071         }
1072         remove_wait_queue(&pb->pb_waiters, &wait);
1073         set_current_state(TASK_RUNNING);
1074 }
1075
1076 /*
1077  *      Buffer Utility Routines
1078  */
1079
1080 /*
1081  *      pagebuf_iodone
1082  *
1083  *      pagebuf_iodone marks a buffer for which I/O is in progress
1084  *      done with respect to that I/O.  The pb_iodone routine, if
1085  *      present, will be called as a side-effect.
1086  */
1087 void
1088 pagebuf_iodone_work(
1089         void                    *v)
1090 {
1091         xfs_buf_t               *bp = (xfs_buf_t *)v;
1092
1093         if (bp->pb_iodone)
1094                 (*(bp->pb_iodone))(bp);
1095         else if (bp->pb_flags & PBF_ASYNC)
1096                 xfs_buf_relse(bp);
1097 }
1098
1099 void
1100 pagebuf_iodone(
1101         xfs_buf_t               *pb,
1102         int                     dataio,
1103         int                     schedule)
1104 {
1105         pb->pb_flags &= ~(PBF_READ | PBF_WRITE);
1106         if (pb->pb_error == 0) {
1107                 pb->pb_flags &= ~(PBF_PARTIAL | PBF_NONE);
1108         }
1109
1110         PB_TRACE(pb, "iodone", pb->pb_iodone);
1111
1112         if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
1113                 if (schedule) {
1114                         INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
1115                         queue_work(dataio ? pagebuf_dataio_workqueue :
1116                                 pagebuf_logio_workqueue, &pb->pb_iodone_work);
1117                 } else {
1118                         pagebuf_iodone_work(pb);
1119                 }
1120         } else {
1121                 up(&pb->pb_iodonesema);
1122         }
1123 }
1124
1125 /*
1126  *      pagebuf_ioerror
1127  *
1128  *      pagebuf_ioerror sets the error code for a buffer.
1129  */
1130 void
1131 pagebuf_ioerror(                        /* mark/clear buffer error flag */
1132         xfs_buf_t               *pb,    /* buffer to mark               */
1133         int                     error)  /* error to store (0 if none)   */
1134 {
1135         ASSERT(error >= 0 && error <= 0xffff);
1136         pb->pb_error = (unsigned short)error;
1137         PB_TRACE(pb, "ioerror", (unsigned long)error);
1138 }
1139
1140 /*
1141  *      pagebuf_iostart
1142  *
1143  *      pagebuf_iostart initiates I/O on a buffer, based on the flags supplied.
1144  *      If necessary, it will arrange for any disk space allocation required,
1145  *      and it will break up the request if the block mappings require it.
1146  *      The pb_iodone routine in the buffer supplied will only be called
1147  *      when all of the subsidiary I/O requests, if any, have been completed.
1148  *      pagebuf_iostart calls the pagebuf_ioinitiate routine or
1149  *      pagebuf_iorequest, if the former routine is not defined, to start
1150  *      the I/O on a given low-level request.
1151  */
1152 int
1153 pagebuf_iostart(                        /* start I/O on a buffer          */
1154         xfs_buf_t               *pb,    /* buffer to start                */
1155         page_buf_flags_t        flags)  /* PBF_LOCK, PBF_ASYNC, PBF_READ, */
1156                                         /* PBF_WRITE, PBF_DELWRI,         */
1157                                         /* PBF_DONT_BLOCK                 */
1158 {
1159         int                     status = 0;
1160
1161         PB_TRACE(pb, "iostart", (unsigned long)flags);
1162
1163         if (flags & PBF_DELWRI) {
1164                 pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC);
1165                 pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC);
1166                 pagebuf_delwri_queue(pb, 1);
1167                 return status;
1168         }
1169
1170         pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \
1171                         PBF_READ_AHEAD | _PBF_RUN_QUEUES);
1172         pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \
1173                         PBF_READ_AHEAD | _PBF_RUN_QUEUES);
1174
1175         BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL);
1176
1177         /* For writes allow an alternate strategy routine to precede
1178          * the actual I/O request (which may not be issued at all in
1179          * a shutdown situation, for example).
1180          */
1181         status = (flags & PBF_WRITE) ?
1182                 pagebuf_iostrategy(pb) : pagebuf_iorequest(pb);
1183
1184         /* Wait for I/O if we are not an async request.
1185          * Note: async I/O request completion will release the buffer,
1186          * and that can already have happened by this point.  So using the
1187          * buffer pointer from here on, after async I/O, is invalid.
1188          */
1189         if (!status && !(flags & PBF_ASYNC))
1190                 status = pagebuf_iowait(pb);
1191
1192         return status;
1193 }
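
/*
 * I/O start sketch (hypothetical caller): issue a synchronous write of
 * an already assembled, locked buffer.  Without PBF_ASYNC the call
 * waits for completion via pagebuf_iowait and returns the buffer's
 * error code; with PBF_ASYNC the buffer may already have been released
 * by the time the call returns, so it must not be touched afterwards.
 *
 *      int error = pagebuf_iostart(bp, PBF_WRITE);
 */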
1194
1195 /*
1196  * Helper routine for pagebuf_iorequest
1197  */
1198
1199 STATIC __inline__ int
1200 _pagebuf_iolocked(
1201         xfs_buf_t               *pb)
1202 {
1203         ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE));
1204         if (pb->pb_flags & PBF_READ)
1205                 return pb->pb_locked;
1206         return 0;
1207 }
1208
1209 STATIC __inline__ void
1210 _pagebuf_iodone(
1211         xfs_buf_t               *pb,
1212         int                     schedule)
1213 {
1214         if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
1215                 pb->pb_locked = 0;
1216                 pagebuf_iodone(pb, (pb->pb_flags & PBF_FS_DATAIOD), schedule);
1217         }
1218 }
1219
1220 STATIC int
1221 bio_end_io_pagebuf(
1222         struct bio              *bio,
1223         unsigned int            bytes_done,
1224         int                     error)
1225 {
1226         xfs_buf_t               *pb = (xfs_buf_t *)bio->bi_private;
1227         unsigned int            i, blocksize = pb->pb_target->pbr_bsize;
1228         unsigned int            sectorshift = pb->pb_target->pbr_sshift;
1229         struct bio_vec          *bvec = bio->bi_io_vec;
1230
1231         if (bio->bi_size)
1232                 return 1;
1233
1234         if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1235                 pb->pb_error = EIO;
1236
1237         for (i = 0; i < bio->bi_vcnt; i++, bvec++) {
1238                 struct page     *page = bvec->bv_page;
1239
1240                 if (pb->pb_error) {
1241                         SetPageError(page);
1242                 } else if (blocksize == PAGE_CACHE_SIZE) {
1243                         SetPageUptodate(page);
1244                 } else if (!PagePrivate(page) &&
1245                                 (pb->pb_flags & _PBF_PAGE_CACHE)) {
1246                         unsigned long   j, range;
1247
1248                         ASSERT(blocksize < PAGE_CACHE_SIZE);
1249                         range = (bvec->bv_offset + bvec->bv_len) >> sectorshift;
1250                         for (j = bvec->bv_offset >> sectorshift; j < range; j++)
1251                                 set_bit(j, &page->private);
1252                         if (page->private == (unsigned long)(PAGE_CACHE_SIZE-1))
1253                                 SetPageUptodate(page);
1254                 }
1255
1256                 if (_pagebuf_iolocked(pb)) {
1257                         unlock_page(page);
1258                 }
1259         }
1260
1261         _pagebuf_iodone(pb, 1);
1262         bio_put(bio);
1263         return 0;
1264 }
1265
1266 void
1267 _pagebuf_ioapply(
1268         xfs_buf_t               *pb)
1269 {
1270         int                     i, map_i, total_nr_pages, nr_pages;
1271         struct bio              *bio;
1272         int                     offset = pb->pb_offset;
1273         int                     size = pb->pb_count_desired;
1274         sector_t                sector = pb->pb_bn;
1275         unsigned int            blocksize = pb->pb_target->pbr_bsize;
1276         int                     locking = _pagebuf_iolocked(pb);
1277
1278         total_nr_pages = pb->pb_page_count;
1279         map_i = 0;
1280
1281         /* Special code path for reading in a sub-page-size pagebuf --
1282          * we populate the whole page, and hence the other metadata
1283          * in the same page.  This optimization is only valid when the
1284          * filesystem block size and the page size are equal.
1285          */
1286         if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) &&
1287             (pb->pb_flags & PBF_READ) && locking &&
1288             (blocksize == PAGE_CACHE_SIZE)) {
1289                 bio = bio_alloc(GFP_NOIO, 1);
1290
1291                 bio->bi_bdev = pb->pb_target->pbr_bdev;
1292                 bio->bi_sector = sector - (offset >> BBSHIFT);
1293                 bio->bi_end_io = bio_end_io_pagebuf;
1294                 bio->bi_private = pb;
1295
1296                 bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0);
1297                 size = 0;
1298
1299                 atomic_inc(&pb->pb_io_remaining);
1300
1301                 goto submit_io;
1302         }
1303
1304         /* Lock down the pages which we need to for the request */
1305         if (locking && (pb->pb_flags & PBF_WRITE) && (pb->pb_locked == 0)) {
1306                 for (i = 0; size; i++) {
1307                         int             nbytes = PAGE_CACHE_SIZE - offset;
1308                         struct page     *page = pb->pb_pages[i];
1309
1310                         if (nbytes > size)
1311                                 nbytes = size;
1312
1313                         lock_page(page);
1314
1315                         size -= nbytes;
1316                         offset = 0;
1317                 }
1318                 offset = pb->pb_offset;
1319                 size = pb->pb_count_desired;
1320         }
1321
1322 next_chunk:
1323         atomic_inc(&pb->pb_io_remaining);
1324         nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1325         if (nr_pages > total_nr_pages)
1326                 nr_pages = total_nr_pages;
1327
1328         bio = bio_alloc(GFP_NOIO, nr_pages);
1329         bio->bi_bdev = pb->pb_target->pbr_bdev;
1330         bio->bi_sector = sector;
1331         bio->bi_end_io = bio_end_io_pagebuf;
1332         bio->bi_private = pb;
1333
1334         for (; size && nr_pages; nr_pages--, map_i++) {
1335                 int     nbytes = PAGE_CACHE_SIZE - offset;
1336
1337                 if (nbytes > size)
1338                         nbytes = size;
1339
1340                 if (bio_add_page(bio, pb->pb_pages[map_i],
1341                                         nbytes, offset) < nbytes)
1342                         break;
1343
1344                 offset = 0;
1345                 sector += nbytes >> BBSHIFT;
1346                 size -= nbytes;
1347                 total_nr_pages--;
1348         }
1349
1350 submit_io:
1351         if (likely(bio->bi_size)) {
1352                 submit_bio((pb->pb_flags & PBF_READ) ? READ : WRITE, bio);
1353                 if (size)
1354                         goto next_chunk;
1355         } else {
1356                 bio_put(bio);
1357                 pagebuf_ioerror(pb, EIO);
1358         }
1359
1360         if (pb->pb_flags & _PBF_RUN_QUEUES) {
1361                 pb->pb_flags &= ~_PBF_RUN_QUEUES;
1362                 if (atomic_read(&pb->pb_io_remaining) > 1)
1363                         blk_run_address_space(pb->pb_target->pbr_mapping);
1364         }
1365 }
1366
1367 /*
1368  *      pagebuf_iorequest -- the core I/O request routine.
1369  */
1370 int
1371 pagebuf_iorequest(                      /* start real I/O               */
1372         xfs_buf_t               *pb)    /* buffer to convey to device   */
1373 {
1374         PB_TRACE(pb, "iorequest", 0);
1375
1376         if (pb->pb_flags & PBF_DELWRI) {
1377                 pagebuf_delwri_queue(pb, 1);
1378                 return 0;
1379         }
1380
1381         if (pb->pb_flags & PBF_WRITE) {
1382                 _pagebuf_wait_unpin(pb);
1383         }
1384
1385         pagebuf_hold(pb);
1386
1387         /* Set the count to 1 initially; this will stop an I/O
1388          * completion callout which happens before we have started
1389          * all the I/O from calling pagebuf_iodone too early.
1390          */
1391         atomic_set(&pb->pb_io_remaining, 1);
1392         _pagebuf_ioapply(pb);
1393         _pagebuf_iodone(pb, 0);
1394
1395         pagebuf_rele(pb);
1396         return 0;
1397 }
1398
1399 /*
1400  *      pagebuf_iowait
1401  *
1402  *      pagebuf_iowait waits for I/O to complete on the buffer supplied.
1403  *      It returns immediately if no I/O is pending.  In any case, it returns
1404  *      the error code, if any, or 0 if there is no error.
1405  */
1406 int
1407 pagebuf_iowait(
1408         xfs_buf_t               *pb)
1409 {
1410         PB_TRACE(pb, "iowait", 0);
1411         if (atomic_read(&pb->pb_io_remaining))
1412                 blk_run_address_space(pb->pb_target->pbr_mapping);
1413         down(&pb->pb_iodonesema);
1414         PB_TRACE(pb, "iowaited", (long)pb->pb_error);
1415         return pb->pb_error;
1416 }
1417
1418 caddr_t
1419 pagebuf_offset(
1420         xfs_buf_t               *pb,
1421         size_t                  offset)
1422 {
1423         struct page             *page;
1424
1425         offset += pb->pb_offset;
1426
1427         page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT];
1428         return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1));
1429 }
1430
1431 /*
1432  *      pagebuf_iomove
1433  *
1434  *      Move data into or out of a buffer.
1435  */
1436 void
1437 pagebuf_iomove(
1438         xfs_buf_t               *pb,    /* buffer to process            */
1439         size_t                  boff,   /* starting buffer offset       */
1440         size_t                  bsize,  /* length to copy               */
1441         caddr_t                 data,   /* data address                 */
1442         page_buf_rw_t           mode)   /* read/write flag              */
1443 {
1444         size_t                  bend, cpoff, csize;
1445         struct page             *page;
1446
1447         bend = boff + bsize;
1448         while (boff < bend) {
1449                 page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)];
1450                 cpoff = page_buf_poff(boff + pb->pb_offset);
1451                 csize = min_t(size_t,
1452                               PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff);
1453
1454                 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1455
1456                 switch (mode) {
1457                 case PBRW_ZERO:
1458                         memset(page_address(page) + cpoff, 0, csize);
1459                         break;
1460                 case PBRW_READ:
1461                         memcpy(data, page_address(page) + cpoff, csize);
1462                         break;
1463                 case PBRW_WRITE:
1464                         memcpy(page_address(page) + cpoff, data, csize);
1465                 }
1466
1467                 boff += csize;
1468                 data += csize;
1469         }
1470 }
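
/*
 * Data movement sketch (hypothetical caller): zero everything beyond
 * the valid portion of a buffer, or copy a header structure out of it,
 * without caring whether the buffer is contiguously mapped.
 * "valid_len" and "hdr" are placeholders.
 *
 *      pagebuf_iomove(pb, valid_len, pb->pb_count_desired - valid_len,
 *                      NULL, PBRW_ZERO);
 *      pagebuf_iomove(pb, 0, sizeof(hdr), (caddr_t)&hdr, PBRW_READ);
 */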
1471
1472 /*
1473  *      Handling of buftargs.
1474  */
1475
1476 void
1477 xfs_free_buftarg(
1478         xfs_buftarg_t           *btp,
1479         int                     external)
1480 {
1481         xfs_flush_buftarg(btp, 1);
1482         if (external)
1483                 xfs_blkdev_put(btp->pbr_bdev);
1484         iput(btp->pbr_mapping->host);
1485         kmem_free(btp, sizeof(*btp));
1486 }
1487
1488 void
1489 xfs_incore_relse(
1490         xfs_buftarg_t           *btp,
1491         int                     delwri_only,
1492         int                     wait)
1493 {
1494         invalidate_bdev(btp->pbr_bdev, 1);
1495         truncate_inode_pages(btp->pbr_mapping, 0LL);
1496 }
1497
1498 int
1499 xfs_setsize_buftarg(
1500         xfs_buftarg_t           *btp,
1501         unsigned int            blocksize,
1502         unsigned int            sectorsize)
1503 {
1504         btp->pbr_bsize = blocksize;
1505         btp->pbr_sshift = ffs(sectorsize) - 1;
1506         btp->pbr_smask = sectorsize - 1;
1507
1508         if (set_blocksize(btp->pbr_bdev, sectorsize)) {
1509                 printk(KERN_WARNING
1510                         "XFS: Cannot set_blocksize to %u on device %s\n",
1511                         sectorsize, XFS_BUFTARG_NAME(btp));
1512                 return EINVAL;
1513         }
1514         return 0;
1515 }
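
/*
 * Worked example of the values set above: for a device with 512 byte
 * hardware sectors, pbr_sshift becomes ffs(512) - 1 = 9 and pbr_smask
 * becomes 0x1ff, so any request whose byte offset has one of the low
 * nine bits set will trip the sector-alignment BUG_ON in _pagebuf_find.
 */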
1516
1517 STATIC int
1518 xfs_mapping_buftarg(
1519         xfs_buftarg_t           *btp,
1520         struct block_device     *bdev)
1521 {
1522         struct backing_dev_info *bdi;
1523         struct inode            *inode;
1524         struct address_space    *mapping;
1525         static struct address_space_operations mapping_aops = {
1526                 .sync_page = block_sync_page,
1527         };
1528
1529         inode = new_inode(bdev->bd_inode->i_sb);
1530         if (!inode) {
1531                 printk(KERN_WARNING
1532                         "XFS: Cannot allocate mapping inode for device %s\n",
1533                         XFS_BUFTARG_NAME(btp));
1534                 return ENOMEM;
1535         }
1536         inode->i_mode = S_IFBLK;
1537         inode->i_bdev = bdev;
1538         inode->i_rdev = bdev->bd_dev;
1539         bdi = blk_get_backing_dev_info(bdev);
1540         if (!bdi)
1541                 bdi = &default_backing_dev_info;
1542         mapping = &inode->i_data;
1543         mapping->a_ops = &mapping_aops;
1544         mapping->backing_dev_info = bdi;
1545         mapping_set_gfp_mask(mapping, GFP_KERNEL);
1546         btp->pbr_mapping = mapping;
1547         return 0;
1548 }
1549
1550 xfs_buftarg_t *
1551 xfs_alloc_buftarg(
1552         struct block_device     *bdev)
1553 {
1554         xfs_buftarg_t           *btp;
1555
1556         btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1557
1558         btp->pbr_dev =  bdev->bd_dev;
1559         btp->pbr_bdev = bdev;
1560         if (xfs_setsize_buftarg(btp, PAGE_CACHE_SIZE, bdev_hardsect_size(bdev)))
1561                 goto error;
1562         if (xfs_mapping_buftarg(btp, bdev))
1563                 goto error;
1564         return btp;
1565
1566 error:
1567         kmem_free(btp, sizeof(*btp));
1568         return NULL;
1569 }
1570
1571
1572 /*
1573  * Pagebuf delayed write buffer handling
1574  */
1575
1576 STATIC LIST_HEAD(pbd_delwrite_queue);
1577 STATIC spinlock_t pbd_delwrite_lock = SPIN_LOCK_UNLOCKED;
1578
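/*
 * Queue a buffer for delayed write: (re)insert it at the tail of the
 * global delwri queue and timestamp it so the flush daemon can age it.
 * The buffer is unlocked before return when 'unlock' is set.
 */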
1579 STATIC void
1580 pagebuf_delwri_queue(
1581         xfs_buf_t               *pb,
1582         int                     unlock)
1583 {
1584         PB_TRACE(pb, "delwri_q", (long)unlock);
1585         ASSERT(pb->pb_flags & PBF_DELWRI);
1586
1587         spin_lock(&pbd_delwrite_lock);
1588         /* If already in the queue, dequeue and place at tail */
1589         if (!list_empty(&pb->pb_list)) {
1590                 if (unlock) {
1591                         atomic_dec(&pb->pb_hold);
1592                 }
1593                 list_del(&pb->pb_list);
1594         }
1595
1596         list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
1597         pb->pb_queuetime = jiffies;
1598         spin_unlock(&pbd_delwrite_lock);
1599
1600         if (unlock)
1601                 pagebuf_unlock(pb);
1602 }
1603
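/*
 * Take a buffer off the delayed write queue and clear its PBF_DELWRI
 * flag, dropping the reference the queue held if it was actually queued.
 */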
1604 void
1605 pagebuf_delwri_dequeue(
1606         xfs_buf_t               *pb)
1607 {
1608         int                     dequeued = 0;
1609
1610         spin_lock(&pbd_delwrite_lock);
1611         if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
1612                 list_del_init(&pb->pb_list);
1613                 dequeued = 1;
1614         }
1615         pb->pb_flags &= ~PBF_DELWRI;
1616         spin_unlock(&pbd_delwrite_lock);
1617
1618         if (dequeued)
1619                 pagebuf_rele(pb);
1620
1621         PB_TRACE(pb, "delwri_dq", (long)dequeued);
1622 }
1623
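/*
 * Drain the given workqueue: wait until all work items queued on it so
 * far have run, so outstanding I/O completion work is finished before
 * the caller goes on to examine buffer state.
 */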
1624 STATIC void
1625 pagebuf_runall_queues(
1626         struct workqueue_struct *queue)
1627 {
1628         flush_workqueue(queue);
1629 }
1630
1631 /* State for the delayed write flush daemon (xfsbufd) */
1632 STATIC DECLARE_COMPLETION(pagebuf_daemon_done);
1633 STATIC struct task_struct *pagebuf_daemon_task;
1634 STATIC int pagebuf_daemon_active;
1635 STATIC int force_flush;
1636
1637
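/*
 * Callback registered with the kernel memory shaker: under memory
 * pressure, ask the delayed write daemon to flush all queued buffers
 * immediately, regardless of their age.
 */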
1638 STATIC int
1639 pagebuf_daemon_wakeup(
1640         int                     priority,
1641         unsigned int            mask)
1642 {
1643         force_flush = 1;
1644         barrier();
1645         wake_up_process(pagebuf_daemon_task);
1646         return 0;
1647 }
1648
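/*
 * The delayed write flush daemon (xfsbufd).  It wakes up periodically,
 * moves delayed write buffers that have aged sufficiently (or all of
 * them when a flush is forced) onto a private list, issues the writes,
 * and unplugs the target devices.
 */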
1649 STATIC int
1650 pagebuf_daemon(
1651         void                    *data)
1652 {
1653         struct list_head        tmp;
1654         unsigned long           age;
1655         xfs_buftarg_t           *target;
1656         xfs_buf_t               *pb, *n;
1657
1658         /*  Set up the thread  */
1659         daemonize("xfsbufd");
1660         current->flags |= PF_MEMALLOC;
1661
1662         pagebuf_daemon_task = current;
1663         pagebuf_daemon_active = 1;
1664         barrier();
1665
1666         INIT_LIST_HEAD(&tmp);
1667         do {
1668                 /* software suspend (swsusp) freeze point */
1669                 if (current->flags & PF_FREEZE)
1670                         refrigerator(PF_FREEZE);
1671
1672                 set_current_state(TASK_INTERRUPTIBLE);
1673                 schedule_timeout((xfs_buf_timer_centisecs * HZ) / 100);
1674
1675                 age = (xfs_buf_age_centisecs * HZ) / 100;
1676                 spin_lock(&pbd_delwrite_lock);
1677                 list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
1678                         PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
1679                         ASSERT(pb->pb_flags & PBF_DELWRI);
1680
1681                         if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) {
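                                /*
                                 * The queue is kept in queueing order
                                 * (tail insertion), so once one buffer
                                 * is too young to flush, all buffers
                                 * behind it are too.
                                 */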
1682                                 if (!force_flush &&
1683                                     time_before(jiffies,
1684                                                 pb->pb_queuetime + age)) {
1685                                         pagebuf_unlock(pb);
1686                                         break;
1687                                 }
1688
1689                                 pb->pb_flags &= ~PBF_DELWRI;
1690                                 pb->pb_flags |= PBF_WRITE;
1691                                 list_move(&pb->pb_list, &tmp);
1692                         }
1693                 }
1694                 spin_unlock(&pbd_delwrite_lock);
1695
1696                 while (!list_empty(&tmp)) {
1697                         pb = list_entry(tmp.next, xfs_buf_t, pb_list);
1698                         target = pb->pb_target;
1699
1700                         list_del_init(&pb->pb_list);
1701                         pagebuf_iostrategy(pb);
1702
1703                         blk_run_address_space(target->pbr_mapping);
1704                 }
1705
1706                 if (as_list_len > 0)
1707                         purge_addresses();
1708
1709                 force_flush = 0;
1710         } while (pagebuf_daemon_active);
1711
1712         complete_and_exit(&pagebuf_daemon_done, 0);
1713 }
1714
1715 /*
1716  * Flush out all delayed write buffers queued against the given target,
1717  * optionally waiting for I/O completion.  Pinned buffers are skipped;
1718  * the number of pinned buffers encountered is returned to the caller.
1719  */
1720 int
1721 xfs_flush_buftarg(
1722         xfs_buftarg_t           *target,
1723         int                     wait)
1724 {
1725         struct list_head        tmp;
1726         xfs_buf_t               *pb, *n;
1727         int                     pincount = 0;
1728
1729         pagebuf_runall_queues(pagebuf_dataio_workqueue);
1730         pagebuf_runall_queues(pagebuf_logio_workqueue);
1731
1732         INIT_LIST_HEAD(&tmp);
1733         spin_lock(&pbd_delwrite_lock);
1734         list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
1735
1736                 if (pb->pb_target != target)
1737                         continue;
1738
1739                 ASSERT(pb->pb_flags & PBF_DELWRI);
1740                 PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
1741                 if (pagebuf_ispin(pb)) {
1742                         pincount++;
1743                         continue;
1744                 }
1745
1746                 pb->pb_flags &= ~PBF_DELWRI;
1747                 pb->pb_flags |= PBF_WRITE;
1748                 list_move(&pb->pb_list, &tmp);
1749         }
1750         spin_unlock(&pbd_delwrite_lock);
1751
1752         /*
1753          * Dropped the delayed write list lock; now walk the temporary list
1754          */
1755         list_for_each_entry_safe(pb, n, &tmp, pb_list) {
1756                 if (wait)
1757                         pb->pb_flags &= ~PBF_ASYNC;
1758                 else
1759                         list_del_init(&pb->pb_list);
1760
1761                 pagebuf_lock(pb);
1762                 pagebuf_iostrategy(pb);
1763         }
1764
1765         /*
1766          * Remaining (synchronous) list items must be waited on before returning
1767          */
1768         while (!list_empty(&tmp)) {
1769                 pb = list_entry(tmp.next, xfs_buf_t, pb_list);
1770
1771                 list_del_init(&pb->pb_list);
1772                 xfs_iowait(pb);
1773                 xfs_buf_relse(pb);
1774         }
1775
1776         if (wait)
1777                 blk_run_address_space(target->pbr_mapping);
1778
1779         return pincount;
1780 }
1781
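/*
 * Create the I/O completion workqueues (xfslogd and xfsdatad) and start
 * the delayed write flush daemon thread.  Returns the daemon's pid on
 * success, or a negative errno if the workqueues or the thread cannot
 * be created.
 */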
1782 STATIC int
1783 pagebuf_daemon_start(void)
1784 {
1785         int             rval;
1786
1787         pagebuf_logio_workqueue = create_workqueue("xfslogd");
1788         if (!pagebuf_logio_workqueue)
1789                 return -ENOMEM;
1790
1791         pagebuf_dataio_workqueue = create_workqueue("xfsdatad");
1792         if (!pagebuf_dataio_workqueue) {
1793                 destroy_workqueue(pagebuf_logio_workqueue);
1794                 return -ENOMEM;
1795         }
1796
1797         rval = kernel_thread(pagebuf_daemon, NULL, CLONE_FS|CLONE_FILES);
1798         if (rval < 0) {
1799                 destroy_workqueue(pagebuf_logio_workqueue);
1800                 destroy_workqueue(pagebuf_dataio_workqueue);
1801         }
1802
1803         return rval;
1804 }
1805
1806 /*
1807  * pagebuf_daemon_stop
1808  *
1809  * Note: do not mark as __exit, it is called from pagebuf_terminate.
1810  */
1811 STATIC void
1812 pagebuf_daemon_stop(void)
1813 {
1814         pagebuf_daemon_active = 0;
1815         barrier();
1816         wait_for_completion(&pagebuf_daemon_done);
1817
1818         destroy_workqueue(pagebuf_logio_workqueue);
1819         destroy_workqueue(pagebuf_dataio_workqueue);
1820 }
1821
1822 /*
1823  *      Initialization and Termination
1824  */
1825
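/*
 * Initialize the pagebuf subsystem: create the xfs_buf_t slab cache,
 * start the flush daemon and its workqueues, register the memory shaker
 * and set up the buffer hash chains.
 */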
1826 int __init
1827 pagebuf_init(void)
1828 {
1829         int                     i;
1830
1831         pagebuf_cache = kmem_cache_create("xfs_buf_t", sizeof(xfs_buf_t), 0,
1832                         SLAB_HWCACHE_ALIGN, NULL, NULL);
1833         if (pagebuf_cache == NULL) {
1834                 printk(KERN_ERR "XFS: couldn't init xfs_buf_t cache\n");
1835                 /* nothing has been set up yet, so there is nothing to tear down */
1836                 return -ENOMEM;
1837         }
1838
1839 #ifdef PAGEBUF_TRACE
1840         pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP);
1841 #endif
1842
1843         pagebuf_daemon_start();
1844
1845         pagebuf_shake = kmem_shake_register(pagebuf_daemon_wakeup);
1846         if (pagebuf_shake == NULL) {
1847                 /* unwind by hand: the shaker was never registered, so
1848                  * pagebuf_terminate() must not try to deregister it */
1849                 pagebuf_daemon_stop();
1850                 kmem_zone_destroy(pagebuf_cache);
1851                 return -ENOMEM;
1852         }
1850
1851         for (i = 0; i < NHASH; i++) {
1852                 spin_lock_init(&pbhash[i].pb_hash_lock);
1853                 INIT_LIST_HEAD(&pbhash[i].pb_hash);
1854         }
1855
1856         return 0;
1857 }
1858
1859
1860 /*
1861  *      pagebuf_terminate.
1862  *
1863  *      Note: do not mark as __exit, this is also called from the __init code.
1864  */
1865 void
1866 pagebuf_terminate(void)
1867 {
1868         pagebuf_daemon_stop();
1869
1870 #ifdef PAGEBUF_TRACE
1871         ktrace_free(pagebuf_trace_buf);
1872 #endif
1873
1874         kmem_zone_destroy(pagebuf_cache);
1875         kmem_shake_deregister(pagebuf_shake);
1876 }