X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=net%2Fipv4%2Finetpeer.c;h=711eb6d0285a8a4709a8e89a9c1b88697dcce7ee;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=d7da79d5b20595b53ace58e96bab72162a90d1c6;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d7da79d5b..711eb6d02 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 #include
 
 /*
@@ -70,9 +71,9 @@
  */
 
 /* Exported for inet_getid inline function.  */
-spinlock_t inet_peer_idlock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(inet_peer_idlock);
 
-static kmem_cache_t *peer_cachep;
+static struct kmem_cache *peer_cachep __read_mostly;
 
 #define node_height(x) x->avl_height
 static struct inet_peer peer_fake_node = {
@@ -82,25 +83,22 @@ static struct inet_peer peer_fake_node = {
 };
 #define peer_avl_empty (&peer_fake_node)
 static struct inet_peer *peer_root = peer_avl_empty;
-static rwlock_t peer_pool_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(peer_pool_lock);
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 
-static volatile int peer_total;
+static int peer_total;
 /* Exported for sysctl_net_ipv4.  */
 int inet_peer_threshold = 65536 + 128;	/* start to throw entries more
 					 * aggressively at this stage */
 int inet_peer_minttl = 120 * HZ;	/* TTL under high load: 120 sec */
 int inet_peer_maxttl = 10 * 60 * HZ;	/* usual time to live: 10 min */
 
-/* Exported for inet_putpeer inline function.  */
-struct inet_peer *inet_peer_unused_head,
-		**inet_peer_unused_tailp = &inet_peer_unused_head;
-spinlock_t inet_peer_unused_lock = SPIN_LOCK_UNLOCKED;
-#define PEER_MAX_CLEANUP_WORK 30
+static struct inet_peer *inet_peer_unused_head;
+static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
+static DEFINE_SPINLOCK(inet_peer_unused_lock);
 
 static void peer_check_expire(unsigned long dummy);
-static struct timer_list peer_periodic_timer =
-	TIMER_INITIALIZER(peer_check_expire, 0, 0);
+static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
 
 /* Exported for sysctl_net_ipv4.  */
 int inet_peer_gc_mintime = 10 * HZ,
@@ -126,12 +124,9 @@ void __init inet_initpeers(void)
 
 	peer_cachep = kmem_cache_create("inet_peer_cache",
 			sizeof(struct inet_peer),
-			0, SLAB_HWCACHE_ALIGN,
+			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
 			NULL, NULL);
 
-	if (!peer_cachep)
-		panic("cannot create inet_peer_cache");
-
 	/* All the timers, started at system startup tend
 	   to synchronize. Perturb it a bit.
 	 */
@@ -166,7 +161,7 @@ static void unlink_from_unused(struct inet_peer *p)
 	for (u = peer_root; u != peer_avl_empty; ) {		\
 		if (daddr == u->v4daddr)			\
 			break;					\
-		if (daddr < u->v4daddr)				\
+		if ((__force __u32)daddr < (__force __u32)u->v4daddr)	\
 			v = &u->avl_left;			\
 		else						\
 			v = &u->avl_right;			\
@@ -304,8 +299,7 @@ static void unlink_from_pool(struct inet_peer *p)
 			/* look for a node to insert instead of p */
 			struct inet_peer *t;
 			t = lookup_rightempty(p);
-			if (*stackptr[-1] != t)
-				BUG();
+			BUG_ON(*stackptr[-1] != t);
 			**--stackptr = t->avl_left;
 			/* t is removed, t->v4daddr > x->v4daddr for any
 			 * x in p->avl_left subtree.
@@ -314,8 +308,7 @@ static void unlink_from_pool(struct inet_peer *p)
 			t->avl_left = p->avl_left;
 			t->avl_right = p->avl_right;
 			t->avl_height = p->avl_height;
-			if (delp[1] != &p->avl_left)
-				BUG();
+			BUG_ON(delp[1] != &p->avl_left);
 			delp[1] = &t->avl_left; /* was &p->avl_left */
 		}
 		peer_avl_rebalance(stack, stackptr);
@@ -345,7 +338,8 @@ static int cleanup_once(unsigned long ttl)
 	spin_lock_bh(&inet_peer_unused_lock);
 	p = inet_peer_unused_head;
 	if (p != NULL) {
-		if (time_after(p->dtime + ttl, jiffies)) {
+		__u32 delta = (__u32)jiffies - p->dtime;
+		if (delta < ttl) {
 			/* Do not prune fresh entries. */
 			spin_unlock_bh(&inet_peer_unused_lock);
 			return -1;
@@ -373,7 +367,7 @@ static int cleanup_once(unsigned long ttl)
 }
 
 /* Called with or without local BH being disabled. */
-struct inet_peer *inet_getpeer(__u32 daddr, int create)
+struct inet_peer *inet_getpeer(__be32 daddr, int create)
 {
 	struct inet_peer *p, *n;
 	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
@@ -401,6 +395,7 @@ struct inet_peer *inet_getpeer(__u32 daddr, int create)
 		return NULL;
 	n->v4daddr = daddr;
 	atomic_set(&n->refcnt, 1);
+	atomic_set(&n->rid, 0);
 	n->ip_id_count = secure_ip_id(daddr);
 	n->tcp_ts_stamp = 0;
 
@@ -436,7 +431,7 @@ out_free:
 /* Called with local BH disabled. */
 static void peer_check_expire(unsigned long dummy)
 {
-	int i;
+	unsigned long now = jiffies;
 	int ttl;
 
 	if (peer_total >= inet_peer_threshold)
@@ -445,16 +440,33 @@ static void peer_check_expire(unsigned long dummy)
 		ttl = inet_peer_maxttl
 				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
 					peer_total / inet_peer_threshold * HZ;
-	for (i = 0; i < PEER_MAX_CLEANUP_WORK && !cleanup_once(ttl); i++);
+	while (!cleanup_once(ttl)) {
+		if (jiffies != now)
+			break;
+	}
 
 	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
 	 * interval depending on the total number of entries (more entries,
 	 * less interval). */
-	peer_periodic_timer.expires = jiffies
-		+ inet_peer_gc_maxtime
-		- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
-			peer_total / inet_peer_threshold * HZ;
+	if (peer_total >= inet_peer_threshold)
+		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
+	else
+		peer_periodic_timer.expires = jiffies
+			+ inet_peer_gc_maxtime
+			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
+				peer_total / inet_peer_threshold * HZ;
 	add_timer(&peer_periodic_timer);
 }
 
-EXPORT_SYMBOL(inet_peer_idlock);
+void inet_putpeer(struct inet_peer *p)
+{
+	spin_lock_bh(&inet_peer_unused_lock);
+	if (atomic_dec_and_test(&p->refcnt)) {
+		p->unused_prevp = inet_peer_unused_tailp;
+		p->unused_next = NULL;
+		*inet_peer_unused_tailp = p;
+		inet_peer_unused_tailp = &p->unused_next;
+		p->dtime = (__u32)jiffies;
+	}
+	spin_unlock_bh(&inet_peer_unused_lock);
+}
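Note on the cleanup_once() hunk above: the patched test computes the entry's age as an unsigned 32-bit difference, (__u32)jiffies - p->dtime, and compares it against ttl, instead of comparing p->dtime + ttl against jiffies. Below is a minimal userspace sketch of that wraparound-safe age test; it only illustrates the idiom, it is not kernel code, and the names (jiffies_now, entry_expired) are invented for the example.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's 32-bit timestamp; it is allowed to wrap. */
static uint32_t jiffies_now;

/* Non-zero when the entry stamped at 'dtime' is at least 'ttl' ticks old.
 * The unsigned subtraction yields the correct age modulo 2^32, so the
 * test keeps working across a counter wraparound. */
static int entry_expired(uint32_t dtime, uint32_t ttl)
{
	uint32_t delta = jiffies_now - dtime;	/* age in ticks, wrap-safe */
	return delta >= ttl;
}

int main(void)
{
	/* The counter has wrapped: the entry was stamped near UINT32_MAX
	 * and the current value is small again, 0x30 ticks later. */
	uint32_t dtime = 0xfffffff0u;
	jiffies_now = 0x00000020u;

	printf("age=0x%x expired(ttl=0x10)=%d expired(ttl=0x100)=%d\n",
	       (unsigned)(jiffies_now - dtime),
	       entry_expired(dtime, 0x10),
	       entry_expired(dtime, 0x100));
	return 0;
}

Compiled and run, this prints age=0x30, expired for the short TTL and not expired for the long one, which is the behaviour the patched check relies on.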
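Note on the inet_putpeer() addition in the last hunk: the function appends the peer to the unused list through a pointer-to-pointer tail (inet_peer_unused_tailp), so an empty list needs no special case. The following is a small userspace sketch of the same append idiom; struct peer and unused_append are invented names for illustration only.

#include <stdio.h>

/* Minimal stand-in for struct inet_peer's unused-list linkage. */
struct peer {
	struct peer *unused_next;
	struct peer **unused_prevp;
	int id;
};

/* 'unused_tailp' always points at the pointer that should receive the
 * next appended node (initially the head pointer itself), mirroring the
 * inet_peer_unused_head / inet_peer_unused_tailp pair. */
static struct peer *unused_head;
static struct peer **unused_tailp = &unused_head;

static void unused_append(struct peer *p)
{
	p->unused_prevp = unused_tailp;	/* remember which slot links us */
	p->unused_next = NULL;
	*unused_tailp = p;		/* link after the current tail */
	unused_tailp = &p->unused_next;	/* the new node's next is the tail slot */
}

int main(void)
{
	struct peer a = { .id = 1 }, b = { .id = 2 };

	unused_append(&a);
	unused_append(&b);

	for (struct peer *p = unused_head; p; p = p->unused_next)
		printf("peer %d\n", p->id);
	return 0;
}

Keeping a pointer to the tail slot rather than to the tail node is what lets both the empty and non-empty cases share one unconditional append, the same property the kernel list relies on under inet_peer_unused_lock.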