#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
+#include <linux/list.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
/*
 * Size of the reply cache and of its hash table.
 */
#define CACHESIZE 1024
#define HASHSIZE 64
-#define REQHASH(xid) ((((xid) >> 24) ^ (xid)) & (HASHSIZE-1))
+#define REQHASH(xid) (((((__force __u32)xid) >> 24) ^ ((__force __u32)xid)) & (HASHSIZE-1))
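+/*
+ * For illustration: the hash folds the top byte of the XID into the low
+ * bits before masking down to a bucket index.  An xid of 0x12345678
+ * gives ((0x12345678 >> 24) ^ 0x12345678) & (HASHSIZE-1)
+ *	= (0x12 ^ 0x12345678) & 0x3f
+ *	= 0x1234566a & 0x3f
+ *	= bucket 42.
+ * Most clients allocate XIDs sequentially, so consecutive requests land
+ * in neighbouring buckets.
+ */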
-struct nfscache_head {
- struct svc_cacherep * next;
- struct svc_cacherep * prev;
-};
-
-static struct nfscache_head * hash_list;
-static struct svc_cacherep * lru_head;
-static struct svc_cacherep * lru_tail;
-static struct svc_cacherep * nfscache;
+static struct hlist_head * hash_list;
+static struct list_head lru_head;
static int cache_disabled = 1;
-static int nfsd_cache_append(struct svc_rqst *rqstp, struct iovec *vec);
+static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
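+/*
+ * struct kvec has the same iov_base/iov_len layout as struct iovec but
+ * is reserved for kernel-internal buffers, so user-space and
+ * kernel-space I/O vectors cannot be mixed up by accident.
+ */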
/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG.
 * Otherwise, cache_lock must be held whenever the hash chains or the
 * LRU list are traversed or modified.
 */
-static spinlock_t cache_lock = SPIN_LOCK_UNLOCKED;
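+/*
+ * DEFINE_SPINLOCK() declares and initialises the lock in one step; the
+ * old SPIN_LOCK_UNLOCKED initialiser is on its way out because a shared
+ * static initialiser cannot give each lock the unique key that spinlock
+ * debugging wants.
+ */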
+static DEFINE_SPINLOCK(cache_lock);
void
nfsd_cache_init(void)
{
struct svc_cacherep *rp;
- struct nfscache_head *rh;
- size_t i;
- unsigned long order;
-
-
- i = CACHESIZE * sizeof (struct svc_cacherep);
- for (order = 0; (PAGE_SIZE << order) < i; order++)
- ;
- nfscache = (struct svc_cacherep *)
- __get_free_pages(GFP_KERNEL, order);
- if (!nfscache) {
- printk (KERN_ERR "nfsd: cannot allocate %Zd bytes for reply cache\n", i);
- return;
+ int i;
+
+ INIT_LIST_HEAD(&lru_head);
+ i = CACHESIZE;
+	while (i) {
+		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
+		if (!rp)
+			break;
+ list_add(&rp->c_lru, &lru_head);
+ rp->c_state = RC_UNUSED;
+ rp->c_type = RC_NOCACHE;
+ INIT_HLIST_NODE(&rp->c_hash);
+ i--;
}
- memset(nfscache, 0, i);
- i = HASHSIZE * sizeof (struct nfscache_head);
- hash_list = kmalloc (i, GFP_KERNEL);
+	if (i)
+		printk(KERN_ERR "nfsd: cannot allocate all %d cache entries, only got %d\n",
+		       CACHESIZE, CACHESIZE - i);
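+	/*
+	 * A short allocation is tolerated: the entries already queued on
+	 * lru_head are still usable, and nfsd_cache_shutdown() frees
+	 * exactly the entries that were allocated.
+	 */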
+
+	hash_list = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
if (!hash_list) {
- free_pages ((unsigned long)nfscache, order);
- nfscache = NULL;
- printk (KERN_ERR "nfsd: cannot allocate %Zd bytes for hash list\n", i);
+		nfsd_cache_shutdown();
+		printk(KERN_ERR "nfsd: cannot allocate %Zd bytes for hash list\n",
+		       HASHSIZE * sizeof(struct hlist_head));
return;
}
- for (i = 0, rh = hash_list; i < HASHSIZE; i++, rh++)
- rh->next = rh->prev = (struct svc_cacherep *) rh;
-
- for (i = 0, rp = nfscache; i < CACHESIZE; i++, rp++) {
- rp->c_state = RC_UNUSED;
- rp->c_type = RC_NOCACHE;
- rp->c_hash_next =
- rp->c_hash_prev = rp;
- rp->c_lru_next = rp + 1;
- rp->c_lru_prev = rp - 1;
- }
- lru_head = nfscache;
- lru_tail = nfscache + CACHESIZE - 1;
- lru_head->c_lru_prev = NULL;
- lru_tail->c_lru_next = NULL;
-
cache_disabled = 0;
}
void
nfsd_cache_shutdown(void)
{
struct svc_cacherep *rp;
- size_t i;
- unsigned long order;
- for (rp = lru_head; rp; rp = rp->c_lru_next) {
+ while (!list_empty(&lru_head)) {
+ rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
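+		/*
+		 * list_entry() is container_of(): it maps the embedded
+		 * c_lru member back to the enclosing svc_cacherep.
+		 */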
if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF)
kfree(rp->c_replvec.iov_base);
+ list_del(&rp->c_lru);
+ kfree(rp);
}
cache_disabled = 1;
- i = CACHESIZE * sizeof (struct svc_cacherep);
- for (order = 0; (PAGE_SIZE << order) < i; order++)
- ;
- free_pages ((unsigned long)nfscache, order);
- nfscache = NULL;
kfree (hash_list);
hash_list = NULL;
}
/*
- * Move cache entry to front of LRU list
+ * Move cache entry to end of LRU list
*/
static void
-lru_put_front(struct svc_cacherep *rp)
+lru_put_end(struct svc_cacherep *rp)
{
- struct svc_cacherep *prev = rp->c_lru_prev,
- *next = rp->c_lru_next;
-
- if (prev)
- prev->c_lru_next = next;
- else
- lru_head = next;
- if (next)
- next->c_lru_prev = prev;
- else
- lru_tail = prev;
-
- rp->c_lru_next = lru_head;
- rp->c_lru_prev = NULL;
- if (lru_head)
- lru_head->c_lru_prev = rp;
- lru_head = rp;
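+	/*
+	 * In effect list_move_tail() is list_del() followed by
+	 * list_add_tail(): the entry becomes the most recently used
+	 * element, leaving the oldest entry at the head of the list.
+	 */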
+ list_move_tail(&rp->c_lru, &lru_head);
}
/*
 * Move a cache entry from one hash chain to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
- struct svc_cacherep *prev = rp->c_hash_prev,
- *next = rp->c_hash_next;
- struct nfscache_head *head = hash_list + REQHASH(rp->c_xid);
-
- prev->c_hash_next = next;
- next->c_hash_prev = prev;
-
- rp->c_hash_next = head->next;
- rp->c_hash_prev = (struct svc_cacherep *) head;
- head->next->c_hash_prev = rp;
- head->next = rp;
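+	/*
+	 * hlist_del_init() rather than hlist_del(): an entry fresh from
+	 * INIT_HLIST_NODE() that was never hashed can be refiled safely,
+	 * whereas plain hlist_del() would dereference its NULL pprev.
+	 */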
+ hlist_del_init(&rp->c_hash);
+ hlist_add_head(&rp->c_hash, hash_list + REQHASH(rp->c_xid));
}
/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we grab the oldest unlocked entry off the LRU list.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp, int type)
{
- struct svc_cacherep *rh, *rp;
- u32 xid = rqstp->rq_xid,
- proto = rqstp->rq_prot,
+ struct hlist_node *hn;
+ struct hlist_head *rh;
+ struct svc_cacherep *rp;
+ __be32 xid = rqstp->rq_xid;
+ u32 proto = rqstp->rq_prot,
vers = rqstp->rq_vers,
proc = rqstp->rq_proc;
unsigned long age;
spin_lock(&cache_lock);
rtn = RC_DOIT;
- rp = rh = (struct svc_cacherep *) &hash_list[REQHASH(xid)];
- while ((rp = rp->c_hash_next) != rh) {
+ rh = &hash_list[REQHASH(xid)];
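+	/*
+	 * The extra cursor (hn) tracks the raw hlist_node: hlist chains
+	 * are NULL-terminated, so the macro advances and tests hn rather
+	 * than comparing container pointers against a sentinel head.
+	 */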
+ hlist_for_each_entry(rp, hn, rh, c_hash) {
if (rp->c_state != RC_UNUSED &&
xid == rp->c_xid && proc == rp->c_proc &&
proto == rp->c_prot && vers == rp->c_vers &&
/* This loop shouldn't take more than a few iterations normally */
{
int safe = 0;
- for (rp = lru_tail; rp; rp = rp->c_lru_prev) {
+ list_for_each_entry(rp, &lru_head, c_lru) {
if (rp->c_state != RC_INPROG)
break;
if (safe++ > CACHESIZE) {
/* We found a matching entry which is either in progress or done. */
age = jiffies - rp->c_timestamp;
rp->c_timestamp = jiffies;
- lru_put_front(rp);
+ lru_put_end(rp);
rtn = RC_DROPIT;
/* Request being processed or excessive rexmits */
/*
 * Note that a cachetype of RC_NOCACHE can legally be passed when nfsd
 * failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
-nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, u32 *statp)
+nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
struct svc_cacherep *rp;
- struct iovec *resv = &rqstp->rq_res.head[0], *cachv;
+ struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
int len;
if (!(rp = rqstp->rq_cacherep) || cache_disabled)
		return;
spin_lock(&cache_lock);
- lru_put_front(rp);
+ lru_put_end(rp);
rp->c_secure = rqstp->rq_secure;
rp->c_type = cachetype;
rp->c_state = RC_DONE;
/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as the reply is in a page, we could just attach the page and
 * keep a refcount....
 */
static int
-nfsd_cache_append(struct svc_rqst *rqstp, struct iovec *data)
+nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
- struct iovec *vec = &rqstp->rq_res.head[0];
+ struct kvec *vec = &rqstp->rq_res.head[0];
if (vec->iov_len + data->iov_len > PAGE_SIZE) {
printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",