X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=net%2Fsunrpc%2Fcache.c;h=14274490f92e8d9b521b5396320c5e76178adb5e;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=dcaa0c4453ff4b0a7eb532a5676bfe3d27349e5c;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git

diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index dcaa0c445..14274490f 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -26,6 +26,7 @@
 #include <linux/proc_fs.h>
 #include <linux/net.h>
 #include <linux/workqueue.h>
+#include <linux/mutex.h>
 #include <asm/ioctls.h>
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/cache.h>
@@ -33,19 +34,146 @@

 #define RPCDBG_FACILITY RPCDBG_CACHE

-static void cache_defer_req(struct cache_req *req, struct cache_head *item);
+static int cache_defer_req(struct cache_req *req, struct cache_head *item);
 static void cache_revisit_request(struct cache_head *item);

-void cache_init(struct cache_head *h)
+static void cache_init(struct cache_head *h)
 {
 time_t now = get_seconds();
 h->next = NULL;
 h->flags = 0;
- atomic_set(&h->refcnt, 1);
+ kref_init(&h->ref);
 h->expiry_time = now + CACHE_NEW_EXPIRY;
 h->last_refresh = now;
 }

+struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
+ struct cache_head *key, int hash)
+{
+ struct cache_head **head, **hp;
+ struct cache_head *new = NULL;
+
+ head = &detail->hash_table[hash];
+
+ read_lock(&detail->hash_lock);
+
+ for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
+ struct cache_head *tmp = *hp;
+ if (detail->match(tmp, key)) {
+ cache_get(tmp);
+ read_unlock(&detail->hash_lock);
+ return tmp;
+ }
+ }
+ read_unlock(&detail->hash_lock);
+ /* Didn't find anything, insert an empty entry */
+
+ new = detail->alloc();
+ if (!new)
+ return NULL;
+ /* must fully initialise 'new', else
+ * we might lose it if we need to
+ * cache_put it soon.
+ */
+ cache_init(new);
+ detail->init(new, key);
+
+ write_lock(&detail->hash_lock);
+
+ /* check if entry appeared while we slept */
+ for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
+ struct cache_head *tmp = *hp;
+ if (detail->match(tmp, key)) {
+ cache_get(tmp);
+ write_unlock(&detail->hash_lock);
+ cache_put(new, detail);
+ return tmp;
+ }
+ }
+ new->next = *head;
+ *head = new;
+ detail->entries++;
+ cache_get(new);
+ write_unlock(&detail->hash_lock);
+
+ return new;
+}
+EXPORT_SYMBOL(sunrpc_cache_lookup);
+
+
+static void queue_loose(struct cache_detail *detail, struct cache_head *ch);
+
+static int cache_fresh_locked(struct cache_head *head, time_t expiry)
+{
+ head->expiry_time = expiry;
+ head->last_refresh = get_seconds();
+ return !test_and_set_bit(CACHE_VALID, &head->flags);
+}
+
+static void cache_fresh_unlocked(struct cache_head *head,
+ struct cache_detail *detail, int new)
+{
+ if (new)
+ cache_revisit_request(head);
+ if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
+ cache_revisit_request(head);
+ queue_loose(detail, head);
+ }
+}
+
+struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
+ struct cache_head *new, struct cache_head *old, int hash)
+{
+ /* The 'old' entry is to be replaced by 'new'.
+ * If 'old' is not VALID, we update it directly, + * otherwise we need to replace it + */ + struct cache_head **head; + struct cache_head *tmp; + int is_new; + + if (!test_bit(CACHE_VALID, &old->flags)) { + write_lock(&detail->hash_lock); + if (!test_bit(CACHE_VALID, &old->flags)) { + if (test_bit(CACHE_NEGATIVE, &new->flags)) + set_bit(CACHE_NEGATIVE, &old->flags); + else + detail->update(old, new); + is_new = cache_fresh_locked(old, new->expiry_time); + write_unlock(&detail->hash_lock); + cache_fresh_unlocked(old, detail, is_new); + return old; + } + write_unlock(&detail->hash_lock); + } + /* We need to insert a new entry */ + tmp = detail->alloc(); + if (!tmp) { + cache_put(old, detail); + return NULL; + } + cache_init(tmp); + detail->init(tmp, old); + head = &detail->hash_table[hash]; + + write_lock(&detail->hash_lock); + if (test_bit(CACHE_NEGATIVE, &new->flags)) + set_bit(CACHE_NEGATIVE, &tmp->flags); + else + detail->update(tmp, new); + tmp->next = *head; + *head = tmp; + detail->entries++; + cache_get(tmp); + is_new = cache_fresh_locked(tmp, new->expiry_time); + cache_fresh_locked(old, 0); + write_unlock(&detail->hash_lock); + cache_fresh_unlocked(tmp, detail, is_new); + cache_fresh_unlocked(old, detail, 0); + cache_put(old, detail); + return tmp; +} +EXPORT_SYMBOL(sunrpc_cache_update); static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h); /* @@ -57,6 +185,7 @@ static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h); * * Returns 0 if the cache_head can be used, or cache_puts it and returns * -EAGAIN if upcall is pending, + * -ETIMEDOUT if upcall failed and should be retried, * -ENOENT if cache entry was negative */ int cache_check(struct cache_detail *detail, @@ -93,7 +222,8 @@ int cache_check(struct cache_detail *detail, clear_bit(CACHE_PENDING, &h->flags); if (rv == -EAGAIN) { set_bit(CACHE_NEGATIVE, &h->flags); - cache_fresh(detail, h, get_seconds()+CACHE_NEW_EXPIRY); + cache_fresh_unlocked(h, detail, + cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY)); rv = -ENOENT; } break; @@ -107,27 +237,14 @@ int cache_check(struct cache_detail *detail, } if (rv == -EAGAIN) - cache_defer_req(rqstp, h); + if (cache_defer_req(rqstp, h) != 0) + rv = -ETIMEDOUT; - if (rv && h) - detail->cache_put(h, detail); + if (rv) + cache_put(h, detail); return rv; } -static void queue_loose(struct cache_detail *detail, struct cache_head *ch); - -void cache_fresh(struct cache_detail *detail, - struct cache_head *head, time_t expiry) -{ - - head->expiry_time = expiry; - head->last_refresh = get_seconds(); - if (!test_and_set_bit(CACHE_VALID, &head->flags)) - cache_revisit_request(head); - if (test_and_clear_bit(CACHE_PENDING, &head->flags)) - queue_loose(detail, head); -} - /* * caches need to be periodically cleaned. 
* For this we maintain a list of cache_detail and @@ -169,8 +286,8 @@ static struct file_operations cache_file_operations; static struct file_operations content_file_operations; static struct file_operations cache_flush_operations; -static void do_cache_clean(void *data); -static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL); +static void do_cache_clean(struct work_struct *work); +static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean); void cache_register(struct cache_detail *cd) { @@ -222,7 +339,7 @@ void cache_register(struct cache_detail *cd) spin_unlock(&cache_list_lock); /* start the cleaning process */ - schedule_work(&cache_cleaner); + schedule_delayed_work(&cache_cleaner, 0); } int cache_unregister(struct cache_detail *cd) @@ -321,7 +438,7 @@ static int cache_clean(void) if (test_and_clear_bit(CACHE_PENDING, &ch->flags)) queue_loose(current_detail, ch); - if (atomic_read(&ch->refcnt) == 1) + if (atomic_read(&ch->ref.refcount) == 1) break; } if (ch) { @@ -336,7 +453,7 @@ static int cache_clean(void) current_index ++; spin_unlock(&cache_list_lock); if (ch) - d->cache_put(ch, d); + cache_put(ch, d); } else spin_unlock(&cache_list_lock); @@ -346,7 +463,7 @@ static int cache_clean(void) /* * We want to regularly clean the cache, so we need to schedule some work ... */ -static void do_cache_clean(void *data) +static void do_cache_clean(struct work_struct *work) { int delay = 5; if (cache_clean() == -1) @@ -408,14 +525,21 @@ static LIST_HEAD(cache_defer_list); static struct list_head cache_defer_hash[DFR_HASHSIZE]; static int cache_defer_cnt; -static void cache_defer_req(struct cache_req *req, struct cache_head *item) +static int cache_defer_req(struct cache_req *req, struct cache_head *item) { struct cache_deferred_req *dreq; int hash = DFR_HASH(item); + if (cache_defer_cnt >= DFR_MAX) { + /* too much in the cache, randomly drop this one, + * or continue and drop the oldest below + */ + if (net_random()&1) + return -ETIMEDOUT; + } dreq = req->defer(req); if (dreq == NULL) - return; + return -ETIMEDOUT; dreq->item = item; dreq->recv_time = get_seconds(); @@ -431,17 +555,8 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item) /* it is in, now maybe clean up */ dreq = NULL; if (++cache_defer_cnt > DFR_MAX) { - /* too much in the cache, randomly drop - * first or last - */ - if (net_random()&1) - dreq = list_entry(cache_defer_list.next, - struct cache_deferred_req, - recent); - else - dreq = list_entry(cache_defer_list.prev, - struct cache_deferred_req, - recent); + dreq = list_entry(cache_defer_list.prev, + struct cache_deferred_req, recent); list_del(&dreq->recent); list_del(&dreq->hash); cache_defer_cnt--; @@ -452,10 +567,11 @@ static void cache_defer_req(struct cache_req *req, struct cache_head *item) /* there was one too many */ dreq->revisit(dreq, 1); } - if (test_bit(CACHE_VALID, &item->flags)) { + if (!test_bit(CACHE_PENDING, &item->flags)) { /* must have just been validated... 
*/ cache_revisit_request(item); } + return 0; } static void cache_revisit_request(struct cache_head *item) @@ -532,7 +648,7 @@ void cache_clean_deferred(void *owner) */ static DEFINE_SPINLOCK(queue_lock); -static DECLARE_MUTEX(queue_io_sem); +static DEFINE_MUTEX(queue_io_mutex); struct cache_queue { struct list_head list; @@ -555,13 +671,13 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { struct cache_reader *rp = filp->private_data; struct cache_request *rq; - struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data; + struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data; int err; if (count == 0) return 0; - down(&queue_io_sem); /* protect against multiple concurrent + mutex_lock(&queue_io_mutex); /* protect against multiple concurrent * readers on this file */ again: spin_lock(&queue_lock); @@ -574,7 +690,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) } if (rp->q.list.next == &cd->queue) { spin_unlock(&queue_lock); - up(&queue_io_sem); + mutex_unlock(&queue_io_mutex); BUG_ON(rp->offset); return 0; } @@ -613,7 +729,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) !test_bit(CACHE_PENDING, &rq->item->flags)) { list_del(&rq->q.list); spin_unlock(&queue_lock); - cd->cache_put(rq->item, cd); + cache_put(rq->item, cd); kfree(rq->buf); kfree(rq); } else @@ -621,28 +737,28 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) } if (err == -EAGAIN) goto again; - up(&queue_io_sem); + mutex_unlock(&queue_io_mutex); return err ? err : count; } -static char write_buf[8192]; /* protected by queue_io_sem */ +static char write_buf[8192]; /* protected by queue_io_mutex */ static ssize_t cache_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { int err; - struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data; + struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data; if (count == 0) return 0; if (count >= sizeof(write_buf)) return -EINVAL; - down(&queue_io_sem); + mutex_lock(&queue_io_mutex); if (copy_from_user(write_buf, buf, count)) { - up(&queue_io_sem); + mutex_unlock(&queue_io_mutex); return -EFAULT; } write_buf[count] = '\0'; @@ -651,7 +767,7 @@ cache_write(struct file *filp, const char __user *buf, size_t count, else err = -EINVAL; - up(&queue_io_sem); + mutex_unlock(&queue_io_mutex); return err ? 
err : count; } @@ -663,7 +779,7 @@ cache_poll(struct file *filp, poll_table *wait) unsigned int mask; struct cache_reader *rp = filp->private_data; struct cache_queue *cq; - struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data; + struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data; poll_wait(filp, &queue_wait, wait); @@ -793,10 +909,10 @@ static void queue_loose(struct cache_detail *detail, struct cache_head *ch) if (cr->item != ch) continue; if (cr->readers != 0) - break; + continue; list_del(&cr->q.list); spin_unlock(&queue_lock); - detail->cache_put(cr->item, detail); + cache_put(cr->item, detail); kfree(cr->buf); kfree(cr); return; @@ -1081,8 +1197,8 @@ static int c_show(struct seq_file *m, void *p) return cd->cache_show(m, cd, NULL); ifdebug(CACHE) - seq_printf(m, "# expiry=%ld refcnt=%d\n", - cp->expiry_time, atomic_read(&cp->refcnt)); + seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n", + cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags); cache_get(cp); if (cache_check(cd, cp, NULL)) /* cache_check does a cache_put on failure */ @@ -1139,7 +1255,7 @@ static struct file_operations content_file_operations = { static ssize_t read_flush(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data; + struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data; char tbuf[20]; unsigned long p = *ppos; int len; @@ -1160,7 +1276,7 @@ static ssize_t read_flush(struct file *file, char __user *buf, static ssize_t write_flush(struct file * file, const char __user * buf, size_t count, loff_t *ppos) { - struct cache_detail *cd = PDE(file->f_dentry->d_inode)->data; + struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data; char tbuf[20]; char *ep; long flushtime;
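
For reference, the two exported helpers above take over the find-or-create and update-or-replace steps that each cache type previously open-coded. The sketch below shows how a cache might wire into them; it is a hedged illustration only, not part of the patch. Every demo_* name, the key/value layout, the table size and the hash function are invented for this example, while sunrpc_cache_lookup(), sunrpc_cache_update(), cache_get()/cache_put() and the alloc/match/init/update methods are the interfaces added or relied on in the diff. Real in-tree users (for example the ip_map cache) follow the same shape.

/* Hedged sketch, not part of the patch: a minimal cache type using the
 * new helpers.  All demo_* identifiers and sizes are hypothetical; the
 * fields not needed here (.owner, .cache_put, .cache_request, .cache_parse,
 * .cache_show and the procfs plumbing) are omitted.
 */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/sunrpc/cache.h>

#define DEMO_HASHSIZE 128

struct demo_ent {
	struct cache_head	h;		/* generic part, embedded first */
	char			key[32];	/* what entries are looked up by */
	int			value;		/* what an upcall reply fills in */
};

static struct cache_head *demo_table[DEMO_HASHSIZE];

static struct cache_head *demo_alloc(void)
{
	struct demo_ent *e = kzalloc(sizeof(*e), GFP_KERNEL);
	return e ? &e->h : NULL;
}

static int demo_match(struct cache_head *a, struct cache_head *b)
{
	return strcmp(container_of(a, struct demo_ent, h)->key,
		      container_of(b, struct demo_ent, h)->key) == 0;
}

/* init() copies only the key; it runs under hash_lock so it must not sleep */
static void demo_init(struct cache_head *dst, struct cache_head *key)
{
	strcpy(container_of(dst, struct demo_ent, h)->key,
	       container_of(key, struct demo_ent, h)->key);
}

/* update() copies the content from a freshly parsed item */
static void demo_update(struct cache_head *dst, struct cache_head *src)
{
	container_of(dst, struct demo_ent, h)->value =
		container_of(src, struct demo_ent, h)->value;
}

static struct cache_detail demo_cache = {
	.hash_size	= DEMO_HASHSIZE,
	.hash_table	= demo_table,
	.name		= "demo",
	.match		= demo_match,
	.init		= demo_init,
	.update		= demo_update,
	.alloc		= demo_alloc,
};

static int demo_hash(const char *key)
{
	unsigned int h = 0;

	while (*key)
		h = h * 31 + *key++;
	return h & (DEMO_HASHSIZE - 1);
}

/* Find-or-create: only the key of the stack copy matters; the helper
 * returns a referenced entry, possibly a new one that is not yet VALID. */
static struct demo_ent *demo_lookup(const char *key)
{
	struct demo_ent tmp;		/* key is assumed to fit in key[32] */
	struct cache_head *ch;

	strcpy(tmp.key, key);
	ch = sunrpc_cache_lookup(&demo_cache, &tmp.h, demo_hash(key));
	return ch ? container_of(ch, struct demo_ent, h) : NULL;
}

/* Commit a parsed reply.  sunrpc_cache_update() either fills 'old' in
 * place (if it was never valid) or inserts a replacement and expires
 * 'old'; in both cases it consumes our reference on 'old' and hands back
 * a referenced entry, which we drop once we are done with it. */
static int demo_commit(struct demo_ent *old, int value, time_t expiry)
{
	struct demo_ent new;
	struct cache_head *ch;

	memset(&new, 0, sizeof(new));
	new.value = value;
	new.h.expiry_time = expiry;	/* or set_bit(CACHE_NEGATIVE, &new.h.flags) */
	ch = sunrpc_cache_update(&demo_cache, &new.h, &old->h,
				 demo_hash(old->key));
	if (!ch)
		return -ENOMEM;
	cache_put(ch, &demo_cache);
	return 0;
}

A real module would also supply the upcall/parse/show methods and register the detail with cache_register(&demo_cache). The point of the sketch is the reference flow: sunrpc_cache_lookup() hands back a referenced entry, and sunrpc_cache_update() consumes the caller's reference on the old entry and returns a referenced entry carrying the fresh content, so the caller only ever releases what it was handed back.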