X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Fnfs%2Fpagelist.c;h=ca4b1d4ff42b0f7d337ad01fa7c1675f959c4134;hb=refs%2Fheads%2Fvserver;hp=c82fc852840ead607eb1ac6d2ee404bd1bfbbaf4;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index c82fc8528..ca4b1d4ff 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -9,7 +9,6 @@ * */ -#include #include #include #include @@ -18,25 +17,20 @@ #include #include #include +#include #define NFS_PARANOIA 1 -/* - * Spinlock - */ -spinlock_t nfs_wreq_lock = SPIN_LOCK_UNLOCKED; - -static kmem_cache_t *nfs_page_cachep; +static struct kmem_cache *nfs_page_cachep; static inline struct nfs_page * nfs_page_alloc(void) { struct nfs_page *p; - p = kmem_cache_alloc(nfs_page_cachep, SLAB_KERNEL); + p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL); if (p) { memset(p, 0, sizeof(*p)); INIT_LIST_HEAD(&p->wb_list); - init_waitqueue_head(&p->wb_wait); } return p; } @@ -62,7 +56,7 @@ nfs_page_free(struct nfs_page *p) * User should ensure it is safe to sleep in this function. */ struct nfs_page * -nfs_create_request(struct file *file, struct inode *inode, +nfs_create_request(struct nfs_open_context *ctx, struct inode *inode, struct page *page, unsigned int offset, unsigned int count) { @@ -91,38 +85,76 @@ nfs_create_request(struct file *file, struct inode *inode, atomic_set(&req->wb_complete, 0); req->wb_index = page->index; page_cache_get(page); + BUG_ON(PagePrivate(page)); + BUG_ON(!PageLocked(page)); + BUG_ON(page->mapping->host != inode); req->wb_offset = offset; req->wb_pgbase = offset; req->wb_bytes = count; - req->wb_inode = inode; - req->wb_count = 1; - server->rpc_ops->request_init(req, file); + atomic_set(&req->wb_count, 1); + req->wb_context = get_nfs_open_context(ctx); return req; } +/** + * nfs_unlock_request - Unlock request and wake up sleepers. + * @req: + */ +void nfs_unlock_request(struct nfs_page *req) +{ + if (!NFS_WBACK_BUSY(req)) { + printk(KERN_ERR "NFS: Invalid unlock attempted\n"); + BUG(); + } + smp_mb__before_clear_bit(); + clear_bit(PG_BUSY, &req->wb_flags); + smp_mb__after_clear_bit(); + wake_up_bit(&req->wb_flags, PG_BUSY); + nfs_release_request(req); +} + +/** + * nfs_set_page_writeback_locked - Lock a request for writeback + * @req: + */ +int nfs_set_page_writeback_locked(struct nfs_page *req) +{ + struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode); + + if (!nfs_lock_request(req)) + return 0; + radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK); + return 1; +} + +/** + * nfs_clear_page_writeback - Unlock request and wake up sleepers + */ +void nfs_clear_page_writeback(struct nfs_page *req) +{ + struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode); + + if (req->wb_page != NULL) { + spin_lock(&nfsi->req_lock); + radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK); + spin_unlock(&nfsi->req_lock); + } + nfs_unlock_request(req); +} + /** * nfs_clear_request - Free up all resources allocated to the request * @req: * - * Release all resources associated with a write request after it + * Release page resources associated with a write request after it * has completed. 
*/ void nfs_clear_request(struct nfs_page *req) { - if (req->wb_state) - req->wb_state = NULL; - /* Release struct file or cached credential */ - if (req->wb_file) { - fput(req->wb_file); - req->wb_file = NULL; - } - if (req->wb_cred) { - put_rpccred(req->wb_cred); - req->wb_cred = NULL; - } - if (req->wb_page) { - page_cache_release(req->wb_page); + struct page *page = req->wb_page; + if (page != NULL) { + page_cache_release(page); req->wb_page = NULL; } } @@ -137,12 +169,8 @@ void nfs_clear_request(struct nfs_page *req) void nfs_release_request(struct nfs_page *req) { - spin_lock(&nfs_wreq_lock); - if (--req->wb_count) { - spin_unlock(&nfs_wreq_lock); + if (!atomic_dec_and_test(&req->wb_count)) return; - } - spin_unlock(&nfs_wreq_lock); #ifdef NFS_PARANOIA BUG_ON (!list_empty(&req->wb_list)); @@ -151,37 +179,19 @@ nfs_release_request(struct nfs_page *req) /* Release struct file or cached credential */ nfs_clear_request(req); + put_nfs_open_context(req->wb_context); nfs_page_free(req); } -/** - * nfs_list_add_request - Insert a request into a sorted list - * @req: request - * @head: head of list into which to insert the request. - * - * Note that the wb_list is sorted by page index in order to facilitate - * coalescing of requests. - * We use an insertion sort that is optimized for the case of appended - * writes. - */ -void -nfs_list_add_request(struct nfs_page *req, struct list_head *head) +static int nfs_wait_bit_interruptible(void *word) { - struct list_head *pos; + int ret = 0; -#ifdef NFS_PARANOIA - if (!list_empty(&req->wb_list)) { - printk(KERN_ERR "NFS: Add to list failed!\n"); - BUG(); - } -#endif - list_for_each_prev(pos, head) { - struct nfs_page *p = nfs_list_entry(pos); - if (p->wb_index < req->wb_index) - break; - } - list_add(&req->wb_list, pos); - req->wb_list_head = head; + if (signal_pending(current)) + ret = -ERESTARTSYS; + else + schedule(); + return ret; } /** @@ -194,12 +204,22 @@ nfs_list_add_request(struct nfs_page *req, struct list_head *head) int nfs_wait_on_request(struct nfs_page *req) { - struct inode *inode = req->wb_inode; - struct rpc_clnt *clnt = NFS_CLIENT(inode); - - if (!NFS_WBACK_BUSY(req)) - return 0; - return nfs_wait_event(clnt, req->wb_wait, !NFS_WBACK_BUSY(req)); + struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->dentry->d_inode); + sigset_t oldmask; + int ret = 0; + + if (!test_bit(PG_BUSY, &req->wb_flags)) + goto out; + /* + * Note: the call to rpc_clnt_sigmask() suffices to ensure that we + * are not interrupted if intr flag is not set + */ + rpc_clnt_sigmask(clnt, &oldmask); + ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY, + nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE); + rpc_clnt_sigunmask(clnt, &oldmask); +out: + return ret; } /** @@ -224,7 +244,11 @@ nfs_coalesce_requests(struct list_head *head, struct list_head *dst, req = nfs_list_entry(head->next); if (prev) { - if (req->wb_cred != prev->wb_cred) + if (req->wb_context->cred != prev->wb_context->cred) + break; + if (req->wb_context->lockowner != prev->wb_context->lockowner) + break; + if (req->wb_context->state != prev->wb_context->state) break; if (req->wb_index != (prev->wb_index + 1)) break; @@ -243,8 +267,82 @@ nfs_coalesce_requests(struct list_head *head, struct list_head *dst, return npages; } +#define NFS_SCAN_MAXENTRIES 16 +/** + * nfs_scan_dirty - Scan the radix tree for dirty requests + * @mapping: pointer to address space + * @wbc: writeback_control structure + * @dst: Destination list + * + * Moves elements from one of the inode request lists. 
+ * If the number of requests is set to 0, the entire address_space + * starting at index idx_start, is scanned. + * The requests are *not* checked to ensure that they form a contiguous set. + * You must be holding the inode's req_lock when calling this function + */ +long nfs_scan_dirty(struct address_space *mapping, + struct writeback_control *wbc, + struct list_head *dst) +{ + struct nfs_inode *nfsi = NFS_I(mapping->host); + struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES]; + struct nfs_page *req; + pgoff_t idx_start, idx_end; + long res = 0; + int found, i; + + if (nfsi->ndirty == 0) + return 0; + if (wbc->range_cyclic) { + idx_start = 0; + idx_end = ULONG_MAX; + } else if (wbc->range_end == 0) { + idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; + idx_end = ULONG_MAX; + } else { + idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; + idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; + } + + for (;;) { + unsigned int toscan = NFS_SCAN_MAXENTRIES; + + found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, + (void **)&pgvec[0], idx_start, toscan, + NFS_PAGE_TAG_DIRTY); + + /* Did we make progress? */ + if (found <= 0) + break; + + for (i = 0; i < found; i++) { + req = pgvec[i]; + if (!wbc->range_cyclic && req->wb_index > idx_end) + goto out; + + /* Try to lock request and mark it for writeback */ + if (!nfs_set_page_writeback_locked(req)) + goto next; + radix_tree_tag_clear(&nfsi->nfs_page_tree, + req->wb_index, NFS_PAGE_TAG_DIRTY); + nfsi->ndirty--; + nfs_list_remove_request(req); + nfs_list_add_request(req, dst); + res++; + if (res == LONG_MAX) + goto out; +next: + idx_start = req->wb_index + 1; + } + } +out: + WARN_ON ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty)); + return res; +} + /** * nfs_scan_list - Scan a list for matching requests + * @nfsi: NFS inode * @head: One of the NFS inode request lists * @dst: Destination list * @idx_start: lower bound of page->index to scan @@ -254,16 +352,17 @@ nfs_coalesce_requests(struct list_head *head, struct list_head *dst, * If the number of requests is set to 0, the entire address_space * starting at index idx_start, is scanned. * The requests are *not* checked to ensure that they form a contiguous set. 
- * You must be holding the nfs_wreq_lock when calling this function + * You must be holding the inode's req_lock when calling this function */ -int -nfs_scan_list(struct list_head *head, struct list_head *dst, - unsigned long idx_start, unsigned int npages) +int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, + struct list_head *dst, unsigned long idx_start, + unsigned int npages) { - struct list_head *pos, *tmp; - struct nfs_page *req; - unsigned long idx_end; - int res; + struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES]; + struct nfs_page *req; + unsigned long idx_end; + int found, i; + int res; res = 0; if (npages == 0) @@ -271,25 +370,32 @@ nfs_scan_list(struct list_head *head, struct list_head *dst, else idx_end = idx_start + npages - 1; - list_for_each_safe(pos, tmp, head) { - - req = nfs_list_entry(pos); - - if (req->wb_index < idx_start) - continue; - if (req->wb_index > idx_end) + for (;;) { + found = radix_tree_gang_lookup(&nfsi->nfs_page_tree, + (void **)&pgvec[0], idx_start, + NFS_SCAN_MAXENTRIES); + if (found <= 0) break; + for (i = 0; i < found; i++) { + req = pgvec[i]; + if (req->wb_index > idx_end) + goto out; + idx_start = req->wb_index + 1; + if (req->wb_list_head != head) + continue; + if (nfs_set_page_writeback_locked(req)) { + nfs_list_remove_request(req); + nfs_list_add_request(req, dst); + res++; + } + } - if (!nfs_lock_request(req)) - continue; - nfs_list_remove_request(req); - nfs_list_add_request(req, dst); - res++; } +out: return res; } -int nfs_init_nfspagecache(void) +int __init nfs_init_nfspagecache(void) { nfs_page_cachep = kmem_cache_create("nfs_page", sizeof(struct nfs_page), @@ -303,7 +409,6 @@ int nfs_init_nfspagecache(void) void nfs_destroy_nfspagecache(void) { - if (kmem_cache_destroy(nfs_page_cachep)) - printk(KERN_INFO "nfs_page: not all structures were freed\n"); + kmem_cache_destroy(nfs_page_cachep); }
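
The central locking change in this patch is that nfs_release_request() no longer takes the global nfs_wreq_lock just to decrement wb_count: the count becomes an atomic_t, and atomic_dec_and_test() decides which caller performs the teardown. The standalone userspace sketch below is not part of the patch; the demo_* names, the payload field, and the four-thread scenario are invented purely for illustration. It shows the same pattern with C11 atomics: whichever thread observes the count reaching zero is the unique owner of the free, with no external lock required.

/*
 * Illustration only (not kernel code): the atomic decrement-and-test
 * refcount pattern that replaces the spinlock-protected "--req->wb_count"
 * in nfs_release_request().  All demo_* names are invented.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_request {
	atomic_int refcount;	/* plays the role of wb_count */
	int payload;
};

static struct demo_request *demo_alloc(int payload)
{
	struct demo_request *req = malloc(sizeof(*req));

	if (!req)
		abort();
	atomic_init(&req->refcount, 1);	/* like atomic_set(&req->wb_count, 1) */
	req->payload = payload;
	return req;
}

static void demo_get(struct demo_request *req)
{
	atomic_fetch_add(&req->refcount, 1);
}

static void demo_put(struct demo_request *req)
{
	/*
	 * atomic_fetch_sub() returns the previous value, so "== 1" means this
	 * caller dropped the last reference.  Exactly one thread can see that,
	 * which is the guarantee atomic_dec_and_test() gives the kernel code
	 * without any spinlock around the decrement.
	 */
	if (atomic_fetch_sub(&req->refcount, 1) == 1) {
		printf("last reference dropped, freeing payload %d\n",
		       req->payload);
		free(req);
	}
}

static void *worker(void *arg)
{
	demo_put(arg);		/* each thread drops the reference it was given */
	return NULL;
}

int main(void)
{
	struct demo_request *req = demo_alloc(42);
	pthread_t threads[4];
	int i;

	for (i = 0; i < 4; i++) {
		demo_get(req);		/* one reference per thread */
		pthread_create(&threads[i], NULL, worker, req);
	}
	demo_put(req);			/* drop the initial allocation reference */
	for (i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);
	return 0;
}

Built with, for example, "cc -pthread demo.c", the program prints the free message exactly once regardless of scheduling; that single-winner property is what lets the patch delete nfs_wreq_lock from the release path.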