fedora core 6 1.2949 + vserver 2.2.0
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index d622ddf..cb29ba7 100644
@@ -3,7 +3,7 @@
    extension. */
 
 /* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2005 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
  * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
  *
  * This program is free software; you can redistribute it and/or modify
  *     - generalize L3 protocol dependent part.
  * 23 Mar 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
  *     - add support various size of conntrack structures.
+ * 26 Jan 2006: Harald Welte <laforge@netfilter.org>
+ *     - restructure nf_conn (introduce nf_conn_help)
+ *     - redesign 'features' how they were originally intended
+ * 26 Feb 2006: Pablo Neira Ayuso <pablo@eurodev.net>
+ *     - add support for L3 protocol module load on demand.
  *
  * Derived from net/ipv4/netfilter/ip_conntrack_core.c
  */
 
-#include <linux/config.h>
 #include <linux/types.h>
 #include <linux/netfilter.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/socket.h>
-
-/* This rwlock protects the main hash table, protocol/helper/expected
-   registrations, conntrack timers*/
-#define ASSERT_READ_LOCK(x)
-#define ASSERT_WRITE_LOCK(x)
+#include <linux/mm.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
-#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_expect.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_core.h>
-#include <linux/netfilter_ipv4/listhelp.h>
 
-#define NF_CONNTRACK_VERSION   "0.4.1"
+#define NF_CONNTRACK_VERSION   "0.5.0"
 
 #if 0
 #define DEBUGP printk
 #endif
 
 DEFINE_RWLOCK(nf_conntrack_lock);
+EXPORT_SYMBOL_GPL(nf_conntrack_lock);
 
 /* nf_conntrack_standalone needs this */
 atomic_t nf_conntrack_count = ATOMIC_INIT(0);
+EXPORT_SYMBOL_GPL(nf_conntrack_count);
 
-void (*nf_conntrack_destroyed)(struct nf_conn *conntrack) = NULL;
-LIST_HEAD(nf_conntrack_expect_list);
-struct nf_conntrack_protocol **nf_ct_protos[PF_MAX];
-struct nf_conntrack_l3proto *nf_ct_l3protos[PF_MAX];
-static LIST_HEAD(helpers);
-unsigned int nf_conntrack_htable_size = 0;
-int nf_conntrack_max;
-struct list_head *nf_conntrack_hash;
-static kmem_cache_t *nf_conntrack_expect_cachep;
-struct nf_conn nf_conntrack_untracked;
-unsigned int nf_ct_log_invalid;
-static LIST_HEAD(unconfirmed);
-static int nf_conntrack_vmalloc;
-
-static unsigned int nf_conntrack_next_id = 1;
-static unsigned int nf_conntrack_expect_next_id = 1;
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-struct notifier_block *nf_conntrack_chain;
-struct notifier_block *nf_conntrack_expect_chain;
-
-DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
-
-/* deliver cached events and clear cache entry - must be called with locally
- * disabled softirqs */
-static inline void
-__nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
-{
-       DEBUGP("ecache: delivering events for %p\n", ecache->ct);
-       if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct)
-           && ecache->events)
-               notifier_call_chain(&nf_conntrack_chain, ecache->events,
-                                   ecache->ct);
-
-       ecache->events = 0;
-       nf_ct_put(ecache->ct);
-       ecache->ct = NULL;
-}
+void (*nf_conntrack_destroyed)(struct nf_conn *conntrack);
+EXPORT_SYMBOL_GPL(nf_conntrack_destroyed);
 
-/* Deliver all cached events for a particular conntrack. This is called
- * by code prior to async packet handling for freeing the skb */
-void nf_ct_deliver_cached_events(const struct nf_conn *ct)
-{
-       struct nf_conntrack_ecache *ecache;
+unsigned int nf_conntrack_htable_size __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
 
-       local_bh_disable();
-       ecache = &__get_cpu_var(nf_conntrack_ecache);
-       if (ecache->ct == ct)
-               __nf_ct_deliver_cached_events(ecache);
-       local_bh_enable();
-}
+int nf_conntrack_max __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_max);
 
-/* Deliver cached events for old pending events, if current conntrack != old */
-void __nf_ct_event_cache_init(struct nf_conn *ct)
-{
-       struct nf_conntrack_ecache *ecache;
-       
-       /* take care of delivering potentially old events */
-       ecache = &__get_cpu_var(nf_conntrack_ecache);
-       BUG_ON(ecache->ct == ct);
-       if (ecache->ct)
-               __nf_ct_deliver_cached_events(ecache);
-       /* initialize for this conntrack/packet */
-       ecache->ct = ct;
-       nf_conntrack_get(&ct->ct_general);
-}
+struct list_head *nf_conntrack_hash __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_hash);
 
-/* flush the event cache - touches other CPU's data and must not be called
- * while packets are still passing through the code */
-static void nf_ct_event_cache_flush(void)
-{
-       struct nf_conntrack_ecache *ecache;
-       int cpu;
+struct nf_conn nf_conntrack_untracked __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
 
-       for_each_cpu(cpu) {
-               ecache = &per_cpu(nf_conntrack_ecache, cpu);
-               if (ecache->ct)
-                       nf_ct_put(ecache->ct);
-       }
-}
-#else
-static inline void nf_ct_event_cache_flush(void) {}
-#endif /* CONFIG_NF_CONNTRACK_EVENTS */
+unsigned int nf_ct_log_invalid __read_mostly;
+LIST_HEAD(unconfirmed);
+static int nf_conntrack_vmalloc __read_mostly;
+
+static unsigned int nf_conntrack_next_id;
 
 DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);
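
The hunk above folds the per-CPU event cache out of this file (to the event-cache
infrastructure elsewhere in the tree) and converts the remaining globals to
exported, __read_mostly data. A minimal sketch of that annotation pattern (the
variable name is illustrative):

	/* __read_mostly groups rarely-written variables into their own
	 * section, so fast-path reads don't share cachelines with hot,
	 * frequently written state. */
	static unsigned int example_htable_size __read_mostly = 1024;
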
@@ -168,75 +108,18 @@ static struct {
        size_t size;
 
        /* slab cache pointer */
-       kmem_cache_t *cachep;
+       struct kmem_cache *cachep;
 
        /* allocated slab cache + modules which use this slab cache */
        int use;
 
-       /* Initialization */
-       int (*init_conntrack)(struct nf_conn *, u_int32_t);
-
 } nf_ct_cache[NF_CT_F_NUM];
 
 /* protect members of nf_ct_cache except of "use" */
 DEFINE_RWLOCK(nf_ct_cache_lock);
 
 /* This avoids calling kmem_cache_create() with the same name simultaneously */
-DECLARE_MUTEX(nf_ct_cache_mutex);
-
-extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;
-struct nf_conntrack_protocol *
-__nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol)
-{
-       if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
-               return &nf_conntrack_generic_protocol;
-
-       return nf_ct_protos[l3proto][protocol];
-}
-
-/* this is guaranteed to always return a valid protocol helper, since
- * it falls back to generic_protocol */
-struct nf_conntrack_protocol *
-nf_ct_proto_find_get(u_int16_t l3proto, u_int8_t protocol)
-{
-       struct nf_conntrack_protocol *p;
-
-       preempt_disable();
-       p = __nf_ct_proto_find(l3proto, protocol);
-       if (p) {
-               if (!try_module_get(p->me))
-                       p = &nf_conntrack_generic_protocol;
-       }
-       preempt_enable();
-       
-       return p;
-}
-
-void nf_ct_proto_put(struct nf_conntrack_protocol *p)
-{
-       module_put(p->me);
-}
-
-struct nf_conntrack_l3proto *
-nf_ct_l3proto_find_get(u_int16_t l3proto)
-{
-       struct nf_conntrack_l3proto *p;
-
-       preempt_disable();
-       p = __nf_ct_l3proto_find(l3proto);
-       if (p) {
-               if (!try_module_get(p->me))
-                       p = &nf_conntrack_generic_l3proto;
-       }
-       preempt_enable();
-
-       return p;
-}
-
-void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p)
-{
-       module_put(p->me);
-}
+static DEFINE_MUTEX(nf_ct_cache_mutex);
 
 static int nf_conntrack_hash_rnd_initted;
 static unsigned int nf_conntrack_hash_rnd;
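
DECLARE_MUTEX()/down()/up() above give way to the real mutex API; the rest of
the patch converts each critical section accordingly. A minimal sketch of the
converted pattern, with illustrative names:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_mutex);	/* replaces DECLARE_MUTEX() */

	static void example_update(void)
	{
		mutex_lock(&example_mutex);	/* was down() */
		/* ... serialized section ... */
		mutex_unlock(&example_mutex);	/* was up() */
	}
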
@@ -259,25 +142,12 @@ static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
                                nf_conntrack_hash_rnd);
 }
 
-/* Initialize "struct nf_conn" which has spaces for helper */
-static int
-init_conntrack_for_helper(struct nf_conn *conntrack, u_int32_t features)
-{
-
-       conntrack->help = (union nf_conntrack_help *)
-               (((unsigned long)conntrack->data
-                 + (__alignof__(union nf_conntrack_help) - 1))
-                & (~((unsigned long)(__alignof__(union nf_conntrack_help) -1))));
-       return 0;
-}
-
 int nf_conntrack_register_cache(u_int32_t features, const char *name,
-                               size_t size,
-                               int (*init)(struct nf_conn *, u_int32_t))
+                               size_t size)
 {
        int ret = 0;
        char *cache_name;
-       kmem_cache_t *cachep;
+       struct kmem_cache *cachep;
 
        DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n",
               features, name, size);
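
With init_conntrack_for_helper() deleted, the helper area is reached through
the nfct_help() accessor used later in this patch. A hypothetical sketch of
such an accessor, assuming the helper data still sits at the first suitably
aligned offset inside ct->data, as the deleted callback computed (the real
accessor lives in the conntrack headers):

	static inline struct nf_conn_help *example_nfct_help(struct nf_conn *ct)
	{
		unsigned long p = (unsigned long)ct->data;

		if (!(ct->features & NF_CT_F_HELP))
			return NULL;	/* no helper area allocated */
		p = ALIGN(p, __alignof__(struct nf_conn_help));
		return (struct nf_conn_help *)p;
	}
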
@@ -288,7 +158,7 @@ int nf_conntrack_register_cache(u_int32_t features, const char *name,
                return -EINVAL;
        }
 
-       down(&nf_ct_cache_mutex);
+       mutex_lock(&nf_ct_cache_mutex);
 
        write_lock_bh(&nf_ct_cache_lock);
        /* e.g.: multiple helpers are loaded */
@@ -296,8 +166,7 @@ int nf_conntrack_register_cache(u_int32_t features, const char *name,
                DEBUGP("nf_conntrack_register_cache: already registered.\n");
                if ((!strncmp(nf_ct_cache[features].name, name,
                              NF_CT_FEATURES_NAMELEN))
-                   && nf_ct_cache[features].size == size
-                   && nf_ct_cache[features].init_conntrack == init) {
+                   && nf_ct_cache[features].size == size) {
                        DEBUGP("nf_conntrack_register_cache: reusing.\n");
                        nf_ct_cache[features].use++;
                        ret = 0;
@@ -305,7 +174,7 @@ int nf_conntrack_register_cache(u_int32_t features, const char *name,
                        ret = -EBUSY;
 
                write_unlock_bh(&nf_ct_cache_lock);
-               up(&nf_ct_cache_mutex);
+               mutex_unlock(&nf_ct_cache_mutex);
                return ret;
        }
        write_unlock_bh(&nf_ct_cache_lock);
@@ -340,7 +209,6 @@ int nf_conntrack_register_cache(u_int32_t features, const char *name,
        write_lock_bh(&nf_ct_cache_lock);
        nf_ct_cache[features].use = 1;
        nf_ct_cache[features].size = size;
-       nf_ct_cache[features].init_conntrack = init;
        nf_ct_cache[features].cachep = cachep;
        nf_ct_cache[features].name = cache_name;
        write_unlock_bh(&nf_ct_cache_lock);
@@ -350,14 +218,15 @@ int nf_conntrack_register_cache(u_int32_t features, const char *name,
 out_free_name:
        kfree(cache_name);
 out_up_mutex:
-       up(&nf_ct_cache_mutex);
+       mutex_unlock(&nf_ct_cache_mutex);
        return ret;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_register_cache);
 
 /* FIXME: Currently, only nf_conntrack_cleanup() can call this function. */
 void nf_conntrack_unregister_cache(u_int32_t features)
 {
-       kmem_cache_t *cachep;
+       struct kmem_cache *cachep;
        char *name;
 
        /*
@@ -365,19 +234,18 @@ void nf_conntrack_unregister_cache(u_int32_t features)
         * slab cache.
         */
        DEBUGP("nf_conntrack_unregister_cache: 0x%04x\n", features);
-       down(&nf_ct_cache_mutex);
+       mutex_lock(&nf_ct_cache_mutex);
 
        write_lock_bh(&nf_ct_cache_lock);
        if (--nf_ct_cache[features].use > 0) {
                write_unlock_bh(&nf_ct_cache_lock);
-               up(&nf_ct_cache_mutex);
+               mutex_unlock(&nf_ct_cache_mutex);
                return;
        }
        cachep = nf_ct_cache[features].cachep;
        name = nf_ct_cache[features].name;
        nf_ct_cache[features].cachep = NULL;
        nf_ct_cache[features].name = NULL;
-       nf_ct_cache[features].init_conntrack = NULL;
        nf_ct_cache[features].size = 0;
        write_unlock_bh(&nf_ct_cache_lock);
 
@@ -386,8 +254,9 @@ void nf_conntrack_unregister_cache(u_int32_t features)
        kmem_cache_destroy(cachep);
        kfree(name);
 
-       up(&nf_ct_cache_mutex);
+       mutex_unlock(&nf_ct_cache_mutex);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_unregister_cache);
 
 int
 nf_ct_get_tuple(const struct sk_buff *skb,
@@ -397,7 +266,7 @@ nf_ct_get_tuple(const struct sk_buff *skb,
                u_int8_t protonum,
                struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_l3proto *l3proto,
-               const struct nf_conntrack_protocol *protocol)
+               const struct nf_conntrack_l4proto *l4proto)
 {
        NF_CT_TUPLE_U_BLANK(tuple);
 
@@ -408,14 +277,15 @@ nf_ct_get_tuple(const struct sk_buff *skb,
        tuple->dst.protonum = protonum;
        tuple->dst.dir = IP_CT_DIR_ORIGINAL;
 
-       return protocol->pkt_to_tuple(skb, dataoff, tuple);
+       return l4proto->pkt_to_tuple(skb, dataoff, tuple);
 }
+EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
 
 int
 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                   const struct nf_conntrack_tuple *orig,
                   const struct nf_conntrack_l3proto *l3proto,
-                  const struct nf_conntrack_protocol *protocol)
+                  const struct nf_conntrack_l4proto *l4proto)
 {
        NF_CT_TUPLE_U_BLANK(inverse);
 
@@ -426,113 +296,16 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
        inverse->dst.dir = !orig->dst.dir;
 
        inverse->dst.protonum = orig->dst.protonum;
-       return protocol->invert_tuple(inverse, orig);
-}
-
-/* nf_conntrack_expect helper functions */
-void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
-{
-       ASSERT_WRITE_LOCK(&nf_conntrack_lock);
-       NF_CT_ASSERT(!timer_pending(&exp->timeout));
-       list_del(&exp->list);
-       NF_CT_STAT_INC(expect_delete);
-       exp->master->expecting--;
-       nf_conntrack_expect_put(exp);
-}
-
-static void expectation_timed_out(unsigned long ul_expect)
-{
-       struct nf_conntrack_expect *exp = (void *)ul_expect;
-
-       write_lock_bh(&nf_conntrack_lock);
-       nf_ct_unlink_expect(exp);
-       write_unlock_bh(&nf_conntrack_lock);
-       nf_conntrack_expect_put(exp);
-}
-
-struct nf_conntrack_expect *
-__nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple)
-{
-       struct nf_conntrack_expect *i;
-       
-       list_for_each_entry(i, &nf_conntrack_expect_list, list) {
-               if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) {
-                       atomic_inc(&i->use);
-                       return i;
-               }
-       }
-       return NULL;
-}
-
-/* Just find a expectation corresponding to a tuple. */
-struct nf_conntrack_expect *
-nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple)
-{
-       struct nf_conntrack_expect *i;
-       
-       read_lock_bh(&nf_conntrack_lock);
-       i = __nf_conntrack_expect_find(tuple);
-       read_unlock_bh(&nf_conntrack_lock);
-
-       return i;
-}
-
-/* If an expectation for this connection is found, it gets delete from
- * global list then returned. */
-static struct nf_conntrack_expect *
-find_expectation(const struct nf_conntrack_tuple *tuple)
-{
-       struct nf_conntrack_expect *i;
-
-       list_for_each_entry(i, &nf_conntrack_expect_list, list) {
-       /* If master is not in hash table yet (ie. packet hasn't left
-          this machine yet), how can other end know about expected?
-          Hence these are not the droids you are looking for (if
-          master ct never got confirmed, we'd hold a reference to it
-          and weird things would happen to future packets). */
-               if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)
-                   && nf_ct_is_confirmed(i->master)) {
-                       if (i->flags & NF_CT_EXPECT_PERMANENT) {
-                               atomic_inc(&i->use);
-                               return i;
-                       } else if (del_timer(&i->timeout)) {
-                               nf_ct_unlink_expect(i);
-                               return i;
-                       }
-               }
-       }
-       return NULL;
-}
-
-/* delete all expectations for this conntrack */
-void nf_ct_remove_expectations(struct nf_conn *ct)
-{
-       struct nf_conntrack_expect *i, *tmp;
-
-       /* Optimization: most connection never expect any others. */
-       if (ct->expecting == 0)
-               return;
-
-       list_for_each_entry_safe(i, tmp, &nf_conntrack_expect_list, list) {
-               if (i->master == ct && del_timer(&i->timeout)) {
-                       nf_ct_unlink_expect(i);
-                       nf_conntrack_expect_put(i);
-               }
-       }
+       return l4proto->invert_tuple(inverse, orig);
 }
+EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
 
 static void
 clean_from_lists(struct nf_conn *ct)
 {
-       unsigned int ho, hr;
-       
        DEBUGP("clean_from_lists(%p)\n", ct);
-       ASSERT_WRITE_LOCK(&nf_conntrack_lock);
-
-       ho = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-       hr = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-       LIST_DELETE(&nf_conntrack_hash[ho], &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
-       LIST_DELETE(&nf_conntrack_hash[hr], &ct->tuplehash[IP_CT_DIR_REPLY]);
+       list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
+       list_del(&ct->tuplehash[IP_CT_DIR_REPLY].list);
 
        /* Destroy all pending expectations */
        nf_ct_remove_expectations(ct);
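
All the expectation machinery deleted above moves out of the core, matching the
nf_conntrack_expect.h include added at the top of the patch. The discipline it
encodes still matters when reading the rest of the diff: only the path that
wins del_timer() may unlink and drop references, so timer expiry and explicit
removal cannot double-free. Sketch, using names from the deleted code:

	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);	/* drops the list's reference */
		nf_conntrack_expect_put(exp);	/* drops the timer's reference */
	}
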
@@ -542,8 +315,9 @@ static void
 destroy_conntrack(struct nf_conntrack *nfct)
 {
        struct nf_conn *ct = (struct nf_conn *)nfct;
+       struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_l3proto *l3proto;
-       struct nf_conntrack_protocol *proto;
+       struct nf_conntrack_l4proto *l4proto;
 
        DEBUGP("destroy_conntrack(%p)\n", ct);
        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
@@ -552,6 +326,9 @@ destroy_conntrack(struct nf_conntrack *nfct)
        nf_conntrack_event(IPCT_DESTROY, ct);
        set_bit(IPS_DYING_BIT, &ct->status);
 
+       if (help && help->helper && help->helper->destroy)
+               help->helper->destroy(ct);
+
        /* To make sure we don't get any weird locking issues here:
         * destroy_conntrack() MUST NOT be called with a write lock
         * to nf_conntrack_lock!!! -HW */
@@ -559,9 +336,9 @@ destroy_conntrack(struct nf_conntrack *nfct)
        if (l3proto && l3proto->destroy)
                l3proto->destroy(ct);
 
-       proto = __nf_ct_proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
-       if (proto && proto->destroy)
-               proto->destroy(ct);
+       l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
+       if (l4proto && l4proto->destroy)
+               l4proto->destroy(ct);
 
        if (nf_conntrack_destroyed)
                nf_conntrack_destroyed(ct);
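
destroy_conntrack() now lets the attached helper release per-connection state
through an optional destroy callback. A hedged sketch of a helper supplying one
(field names as used above; the full struct is declared in
nf_conntrack_helper.h):

	static void example_helper_destroy(struct nf_conn *ct)
	{
		/* free whatever per-connection state the helper attached */
	}

	static struct nf_conntrack_helper example_helper = {
		.name    = "example",
		.destroy = example_helper_destroy,
	};
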
@@ -602,16 +379,6 @@ static void death_by_timeout(unsigned long ul_conntrack)
        nf_ct_put(ct);
 }
 
-static inline int
-conntrack_tuple_cmp(const struct nf_conntrack_tuple_hash *i,
-                   const struct nf_conntrack_tuple *tuple,
-                   const struct nf_conn *ignored_conntrack)
-{
-       ASSERT_READ_LOCK(&nf_conntrack_lock);
-       return nf_ct_tuplehash_to_ctrack(i) != ignored_conntrack
-               && nf_ct_tuple_equal(tuple, &i->tuple);
-}
-
 struct nf_conntrack_tuple_hash *
 __nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
                    const struct nf_conn *ignored_conntrack)
@@ -619,9 +386,9 @@ __nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
        struct nf_conntrack_tuple_hash *h;
        unsigned int hash = hash_conntrack(tuple);
 
-       ASSERT_READ_LOCK(&nf_conntrack_lock);
        list_for_each_entry(h, &nf_conntrack_hash[hash], list) {
-               if (conntrack_tuple_cmp(h, tuple, ignored_conntrack)) {
+               if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
+                   nf_ct_tuple_equal(tuple, &h->tuple)) {
                        NF_CT_STAT_INC(found);
                        return h;
                }
@@ -630,6 +397,7 @@ __nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
 
        return NULL;
 }
+EXPORT_SYMBOL_GPL(__nf_conntrack_find);
 
 /* Find a connection corresponding to a tuple. */
 struct nf_conntrack_tuple_hash *
@@ -646,16 +414,17 @@ nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple,
 
        return h;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
 
 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int repl_hash) 
 {
        ct->id = ++nf_conntrack_next_id;
-       list_prepend(&nf_conntrack_hash[hash],
-                    &ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
-       list_prepend(&nf_conntrack_hash[repl_hash],
-                    &ct->tuplehash[IP_CT_DIR_REPLY].list);
+       list_add(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list,
+                &nf_conntrack_hash[hash]);
+       list_add(&ct->tuplehash[IP_CT_DIR_REPLY].list,
+                &nf_conntrack_hash[repl_hash]);
 }
 
 void nf_conntrack_hash_insert(struct nf_conn *ct)
@@ -669,13 +438,16 @@ void nf_conntrack_hash_insert(struct nf_conn *ct)
        __nf_conntrack_hash_insert(ct, hash, repl_hash);
        write_unlock_bh(&nf_conntrack_lock);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);
 
 /* Confirm a connection given skb; places it in hash table */
 int
 __nf_conntrack_confirm(struct sk_buff **pskb)
 {
        unsigned int hash, repl_hash;
+       struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
+       struct nf_conn_help *help;
        enum ip_conntrack_info ctinfo;
 
        ct = nf_ct_get(*pskb, &ctinfo);
@@ -705,43 +477,46 @@ __nf_conntrack_confirm(struct sk_buff **pskb)
        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost the race. */
-       if (!LIST_FIND(&nf_conntrack_hash[hash],
-                      conntrack_tuple_cmp,
-                      struct nf_conntrack_tuple_hash *,
-                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, NULL)
-           && !LIST_FIND(&nf_conntrack_hash[repl_hash],
-                         conntrack_tuple_cmp,
-                         struct nf_conntrack_tuple_hash *,
-                         &ct->tuplehash[IP_CT_DIR_REPLY].tuple, NULL)) {
-               /* Remove from unconfirmed list */
-               list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
+       list_for_each_entry(h, &nf_conntrack_hash[hash], list)
+               if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+                                     &h->tuple))
+                       goto out;
+       list_for_each_entry(h, &nf_conntrack_hash[repl_hash], list)
+               if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+                                     &h->tuple))
+                       goto out;
 
-               __nf_conntrack_hash_insert(ct, hash, repl_hash);
-               /* Timer relative to confirmation time, not original
-                  setting time, otherwise we'd get timer wrap in
-                  weird delay cases. */
-               ct->timeout.expires += jiffies;
-               add_timer(&ct->timeout);
-               atomic_inc(&ct->ct_general.use);
-               set_bit(IPS_CONFIRMED_BIT, &ct->status);
-               NF_CT_STAT_INC(insert);
-               write_unlock_bh(&nf_conntrack_lock);
-               if (ct->helper)
-                       nf_conntrack_event_cache(IPCT_HELPER, *pskb);
+       /* Remove from unconfirmed list */
+       list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
+
+       __nf_conntrack_hash_insert(ct, hash, repl_hash);
+       /* Timer relative to confirmation time, not original
+          setting time, otherwise we'd get timer wrap in
+          weird delay cases. */
+       ct->timeout.expires += jiffies;
+       add_timer(&ct->timeout);
+       atomic_inc(&ct->ct_general.use);
+       set_bit(IPS_CONFIRMED_BIT, &ct->status);
+       NF_CT_STAT_INC(insert);
+       write_unlock_bh(&nf_conntrack_lock);
+       help = nfct_help(ct);
+       if (help && help->helper)
+               nf_conntrack_event_cache(IPCT_HELPER, *pskb);
 #ifdef CONFIG_NF_NAT_NEEDED
-               if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
-                   test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
-                       nf_conntrack_event_cache(IPCT_NATINFO, *pskb);
+       if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
+           test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
+               nf_conntrack_event_cache(IPCT_NATINFO, *pskb);
 #endif
-               nf_conntrack_event_cache(master_ct(ct) ?
-                                        IPCT_RELATED : IPCT_NEW, *pskb);
-               return NF_ACCEPT;
-       }
+       nf_conntrack_event_cache(master_ct(ct) ?
+                                IPCT_RELATED : IPCT_NEW, *pskb);
+       return NF_ACCEPT;
 
+out:
        NF_CT_STAT_INC(insert_failed);
        write_unlock_bh(&nf_conntrack_lock);
        return NF_DROP;
 }
+EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
 
 /* Returns true if a connection corresponds to the tuple (required
    for NAT). */
@@ -757,27 +532,25 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 
        return h != NULL;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
 
 /* There's a small race here where we may free a just-assured
    connection.  Too bad: we're in trouble anyway. */
-static inline int unreplied(const struct nf_conntrack_tuple_hash *i)
-{
-       return !(test_bit(IPS_ASSURED_BIT,
-                         &nf_ct_tuplehash_to_ctrack(i)->status));
-}
-
 static int early_drop(struct list_head *chain)
 {
        /* Traverse backwards: gives us oldest, which is roughly LRU */
        struct nf_conntrack_tuple_hash *h;
-       struct nf_conn *ct = NULL;
+       struct nf_conn *ct = NULL, *tmp;
        int dropped = 0;
 
        read_lock_bh(&nf_conntrack_lock);
-       h = LIST_FIND_B(chain, unreplied, struct nf_conntrack_tuple_hash *);
-       if (h) {
-               ct = nf_ct_tuplehash_to_ctrack(h);
-               atomic_inc(&ct->ct_general.use);
+       list_for_each_entry_reverse(h, chain, list) {
+               tmp = nf_ct_tuplehash_to_ctrack(h);
+               if (!test_bit(IPS_ASSURED_BIT, &tmp->status)) {
+                       ct = tmp;
+                       atomic_inc(&ct->ct_general.use);
+                       break;
+               }
        }
        read_unlock_bh(&nf_conntrack_lock);
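
The rewritten early_drop() keeps the old semantics with plain list primitives:
walk the chain backwards (list_add() prepends, so the tail is roughly the
oldest entry), pick the first conntrack that never reached ASSURED, and pin it
with a reference before the lock is released. The actual kill, not shown in
these hunks, then runs lockless; a sketch of that overall shape with an
illustrative search helper:

	read_lock_bh(&nf_conntrack_lock);
	ct = example_find_unassured(chain);	/* hypothetical lookup */
	if (ct)
		atomic_inc(&ct->ct_general.use);	/* pin before unlocking */
	read_unlock_bh(&nf_conntrack_lock);

	if (ct && del_timer(&ct->timeout))
		death_by_timeout((unsigned long)ct);	/* unlink and put */
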
 
@@ -793,66 +566,29 @@ static int early_drop(struct list_head *chain)
        return dropped;
 }
 
-static inline int helper_cmp(const struct nf_conntrack_helper *i,
-                            const struct nf_conntrack_tuple *rtuple)
-{
-       return nf_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask);
-}
-
-static struct nf_conntrack_helper *
-__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
-{
-       return LIST_FIND(&helpers, helper_cmp,
-                        struct nf_conntrack_helper *,
-                        tuple);
-}
-
-struct nf_conntrack_helper *
-nf_ct_helper_find_get( const struct nf_conntrack_tuple *tuple)
-{
-       struct nf_conntrack_helper *helper;
-
-       /* need nf_conntrack_lock to assure that helper exists until
-        * try_module_get() is called */
-       read_lock_bh(&nf_conntrack_lock);
-
-       helper = __nf_ct_helper_find(tuple);
-       if (helper) {
-               /* need to increase module usage count to assure helper will
-                * not go away while the caller is e.g. busy putting a
-                * conntrack in the hash that uses the helper */
-               if (!try_module_get(helper->me))
-                       helper = NULL;
-       }
-
-       read_unlock_bh(&nf_conntrack_lock);
-
-       return helper;
-}
-
-void nf_ct_helper_put(struct nf_conntrack_helper *helper)
-{
-       module_put(helper->me);
-}
-
 static struct nf_conn *
 __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
                     const struct nf_conntrack_tuple *repl,
-                    const struct nf_conntrack_l3proto *l3proto)
+                    const struct nf_conntrack_l3proto *l3proto,
+                    u_int32_t features)
 {
        struct nf_conn *conntrack = NULL;
-       u_int32_t features = 0;
+       struct nf_conntrack_helper *helper;
 
-       if (!nf_conntrack_hash_rnd_initted) {
+       if (unlikely(!nf_conntrack_hash_rnd_initted)) {
                get_random_bytes(&nf_conntrack_hash_rnd, 4);
                nf_conntrack_hash_rnd_initted = 1;
        }
 
+       /* We don't want any race condition at the early drop stage */
+       atomic_inc(&nf_conntrack_count);
+
        if (nf_conntrack_max
-           && atomic_read(&nf_conntrack_count) >= nf_conntrack_max) {
+           && atomic_read(&nf_conntrack_count) > nf_conntrack_max) {
                unsigned int hash = hash_conntrack(orig);
                /* Try dropping from this hash chain. */
                if (!early_drop(&nf_conntrack_hash[hash])) {
+                       atomic_dec(&nf_conntrack_count);
                        if (net_ratelimit())
                                printk(KERN_WARNING
                                       "nf_conntrack: table full, dropping"
@@ -862,9 +598,13 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
        }
 
        /*  find features needed by this conntrack. */
-       features = l3proto->get_features(orig);
+       features |= l3proto->get_features(orig);
+
+       /* FIXME: protect helper list per RCU */
        read_lock_bh(&nf_conntrack_lock);
-       if (__nf_ct_helper_find(repl) != NULL)
+       helper = __nf_ct_helper_find(repl);
+       /* NAT might want to assign a helper later */
+       if (helper || features & NF_CT_F_NAT)
                features |= NF_CT_F_HELP;
        read_unlock_bh(&nf_conntrack_lock);
 
@@ -872,7 +612,7 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
 
        read_lock_bh(&nf_ct_cache_lock);
 
-       if (!nf_ct_cache[features].use) {
+       if (unlikely(!nf_ct_cache[features].use)) {
                DEBUGP("nf_conntrack_alloc: not supported features = 0x%x\n",
                        features);
                goto out;
@@ -886,14 +626,6 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
 
        memset(conntrack, 0, nf_ct_cache[features].size);
        conntrack->features = features;
-       if (nf_ct_cache[features].init_conntrack &&
-           nf_ct_cache[features].init_conntrack(conntrack, features) < 0) {
-               DEBUGP("nf_conntrack_alloc: failed to init\n");
-               kmem_cache_free(nf_ct_cache[features].cachep, conntrack);
-               conntrack = NULL;
-               goto out;
-       }
-
        atomic_set(&conntrack->ct_general.use, 1);
        conntrack->ct_general.destroy = destroy_conntrack;
        conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
@@ -902,10 +634,12 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
        init_timer(&conntrack->timeout);
        conntrack->timeout.data = (unsigned long)conntrack;
        conntrack->timeout.function = death_by_timeout;
+       read_unlock_bh(&nf_ct_cache_lock);
 
-       atomic_inc(&nf_conntrack_count);
+       return conntrack;
 out:
        read_unlock_bh(&nf_ct_cache_lock);
+       atomic_dec(&nf_conntrack_count);
        return conntrack;
 }
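
Note the accounting change running through __nf_conntrack_alloc():
nf_conntrack_count is now incremented before the limit check and decremented on
every failure path, so two CPUs racing past the old `count >= max` test can no
longer both allocate. The pattern in isolation, with illustrative names:

	atomic_inc(&example_count);		/* reserve a slot up front */
	if (example_max && atomic_read(&example_count) > example_max) {
		if (!example_evict_one()) {	/* hypothetical eviction */
			atomic_dec(&example_count);	/* roll the slot back */
			return NULL;
		}
	}
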
 
@@ -915,8 +649,9 @@ struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
        struct nf_conntrack_l3proto *l3proto;
 
        l3proto = __nf_ct_l3proto_find(orig->src.l3num);
-       return __nf_conntrack_alloc(orig, repl, l3proto);
+       return __nf_conntrack_alloc(orig, repl, l3proto, 0);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
 
 void nf_conntrack_free(struct nf_conn *conntrack)
 {
@@ -927,32 +662,40 @@ void nf_conntrack_free(struct nf_conn *conntrack)
        kmem_cache_free(nf_ct_cache[features].cachep, conntrack);
        atomic_dec(&nf_conntrack_count);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_free);
 
 /* Allocate a new conntrack: we return -ENOMEM if classification
    failed due to stress.  Otherwise it really is unclassifiable. */
 static struct nf_conntrack_tuple_hash *
 init_conntrack(const struct nf_conntrack_tuple *tuple,
               struct nf_conntrack_l3proto *l3proto,
-              struct nf_conntrack_protocol *protocol,
+              struct nf_conntrack_l4proto *l4proto,
               struct sk_buff *skb,
               unsigned int dataoff)
 {
        struct nf_conn *conntrack;
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_expect *exp;
+       u_int32_t features = 0;
 
-       if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, protocol)) {
+       if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
                DEBUGP("Can't invert tuple.\n");
                return NULL;
        }
 
-       conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto);
+       read_lock_bh(&nf_conntrack_lock);
+       exp = __nf_conntrack_expect_find(tuple);
+       if (exp && exp->helper)
+               features = NF_CT_F_HELP;
+       read_unlock_bh(&nf_conntrack_lock);
+
+       conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto, features);
        if (conntrack == NULL || IS_ERR(conntrack)) {
                DEBUGP("Can't allocate conntrack.\n");
                return (struct nf_conntrack_tuple_hash *)conntrack;
        }
 
-       if (!protocol->new(conntrack, skb, dataoff)) {
+       if (!l4proto->new(conntrack, skb, dataoff)) {
                nf_conntrack_free(conntrack);
                DEBUGP("init conntrack: can't track with proto module\n");
                return NULL;
@@ -967,16 +710,23 @@ init_conntrack(const struct nf_conntrack_tuple *tuple,
                /* Welcome, Mr. Bond.  We've been expecting you... */
                __set_bit(IPS_EXPECTED_BIT, &conntrack->status);
                conntrack->master = exp->master;
+               if (exp->helper)
+                       nfct_help(conntrack)->helper = exp->helper;
 #ifdef CONFIG_NF_CONNTRACK_MARK
                conntrack->mark = exp->master->mark;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+               conntrack->secmark = exp->master->secmark;
 #endif
                nf_conntrack_get(&conntrack->master->ct_general);
                NF_CT_STAT_INC(expect_new);
        } else {
-               conntrack->helper = __nf_ct_helper_find(&repl_tuple);
+               struct nf_conn_help *help = nfct_help(conntrack);
 
+               if (help)
+                       help->helper = __nf_ct_helper_find(&repl_tuple);
                NF_CT_STAT_INC(new);
-        }
+       }
 
        /* Overload tuple linked list to put us in unconfirmed list. */
        list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed);
@@ -999,7 +749,7 @@ resolve_normal_ct(struct sk_buff *skb,
                  u_int16_t l3num,
                  u_int8_t protonum,
                  struct nf_conntrack_l3proto *l3proto,
-                 struct nf_conntrack_protocol *proto,
+                 struct nf_conntrack_l4proto *l4proto,
                  int *set_reply,
                  enum ip_conntrack_info *ctinfo)
 {
@@ -1009,7 +759,7 @@ resolve_normal_ct(struct sk_buff *skb,
 
        if (!nf_ct_get_tuple(skb, (unsigned int)(skb->nh.raw - skb->data),
                             dataoff, l3num, protonum, &tuple, l3proto,
-                            proto)) {
+                            l4proto)) {
                DEBUGP("resolve_normal_ct: Can't get tuple\n");
                return NULL;
        }
@@ -1017,7 +767,7 @@ resolve_normal_ct(struct sk_buff *skb,
        /* look for tuple match */
        h = nf_conntrack_find_get(&tuple, NULL);
        if (!h) {
-               h = init_conntrack(&tuple, l3proto, proto, skb, dataoff);
+               h = init_conntrack(&tuple, l3proto, l4proto, skb, dataoff);
                if (!h)
                        return NULL;
                if (IS_ERR(h))
@@ -1055,7 +805,7 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_l3proto *l3proto;
-       struct nf_conntrack_protocol *proto;
+       struct nf_conntrack_l4proto *l4proto;
        unsigned int dataoff;
        u_int8_t protonum;
        int set_reply = 0;
@@ -1073,19 +823,19 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
                return -ret;
        }
 
-       proto = __nf_ct_proto_find((u_int16_t)pf, protonum);
+       l4proto = __nf_ct_l4proto_find((u_int16_t)pf, protonum);
 
        /* It may be a special packet, error, unclean...
         * inverse of the return code tells to the netfilter
         * core what to do with the packet. */
-       if (proto->error != NULL &&
-           (ret = proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
+       if (l4proto->error != NULL &&
+           (ret = l4proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
                NF_CT_STAT_INC(error);
                NF_CT_STAT_INC(invalid);
                return -ret;
        }
 
-       ct = resolve_normal_ct(*pskb, dataoff, pf, protonum, l3proto, proto,
+       ct = resolve_normal_ct(*pskb, dataoff, pf, protonum, l3proto, l4proto,
                               &set_reply, &ctinfo);
        if (!ct) {
                /* Not valid part of a connection */
@@ -1101,7 +851,7 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
 
        NF_CT_ASSERT((*pskb)->nfct);
 
-       ret = proto->packet(ct, *pskb, dataoff, ctinfo, pf, hooknum);
+       ret = l4proto->packet(ct, *pskb, dataoff, ctinfo, pf, hooknum);
        if (ret < 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
@@ -1117,261 +867,38 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_in);
 
 int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
                         const struct nf_conntrack_tuple *orig)
 {
        return nf_ct_invert_tuple(inverse, orig,
                                  __nf_ct_l3proto_find(orig->src.l3num),
-                                 __nf_ct_proto_find(orig->src.l3num,
+                                 __nf_ct_l4proto_find(orig->src.l3num,
                                                     orig->dst.protonum));
 }
-
-/* Would two expected things clash? */
-static inline int expect_clash(const struct nf_conntrack_expect *a,
-                              const struct nf_conntrack_expect *b)
-{
-       /* Part covered by intersection of masks must be unequal,
-          otherwise they clash */
-       struct nf_conntrack_tuple intersect_mask;
-       int count;
-
-       intersect_mask.src.l3num = a->mask.src.l3num & b->mask.src.l3num;
-       intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
-       intersect_mask.dst.u.all = a->mask.dst.u.all & b->mask.dst.u.all;
-       intersect_mask.dst.protonum = a->mask.dst.protonum
-                                       & b->mask.dst.protonum;
-
-       for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
-               intersect_mask.src.u3.all[count] =
-                       a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
-       }
-
-       for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
-               intersect_mask.dst.u3.all[count] =
-                       a->mask.dst.u3.all[count] & b->mask.dst.u3.all[count];
-       }
-
-       return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
-}
-
-static inline int expect_matches(const struct nf_conntrack_expect *a,
-                                const struct nf_conntrack_expect *b)
-{
-       return a->master == b->master
-               && nf_ct_tuple_equal(&a->tuple, &b->tuple)
-               && nf_ct_tuple_equal(&a->mask, &b->mask);
-}
-
-/* Generally a bad idea to call this: could have matched already. */
-void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp)
-{
-       struct nf_conntrack_expect *i;
-
-       write_lock_bh(&nf_conntrack_lock);
-       /* choose the the oldest expectation to evict */
-       list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
-               if (expect_matches(i, exp) && del_timer(&i->timeout)) {
-                       nf_ct_unlink_expect(i);
-                       write_unlock_bh(&nf_conntrack_lock);
-                       nf_conntrack_expect_put(i);
-                       return;
-               }
-       }
-       write_unlock_bh(&nf_conntrack_lock);
-}
-
-/* We don't increase the master conntrack refcount for non-fulfilled
- * conntracks. During the conntrack destruction, the expectations are
- * always killed before the conntrack itself */
-struct nf_conntrack_expect *nf_conntrack_expect_alloc(struct nf_conn *me)
-{
-       struct nf_conntrack_expect *new;
-
-       new = kmem_cache_alloc(nf_conntrack_expect_cachep, GFP_ATOMIC);
-       if (!new) {
-               DEBUGP("expect_related: OOM allocating expect\n");
-               return NULL;
-       }
-       new->master = me;
-       atomic_set(&new->use, 1);
-       return new;
-}
-
-void nf_conntrack_expect_put(struct nf_conntrack_expect *exp)
-{
-       if (atomic_dec_and_test(&exp->use))
-               kmem_cache_free(nf_conntrack_expect_cachep, exp);
-}
-
-static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp)
-{
-       atomic_inc(&exp->use);
-       exp->master->expecting++;
-       list_add(&exp->list, &nf_conntrack_expect_list);
-
-       init_timer(&exp->timeout);
-       exp->timeout.data = (unsigned long)exp;
-       exp->timeout.function = expectation_timed_out;
-       exp->timeout.expires = jiffies + exp->master->helper->timeout * HZ;
-       add_timer(&exp->timeout);
-
-       exp->id = ++nf_conntrack_expect_next_id;
-       atomic_inc(&exp->use);
-       NF_CT_STAT_INC(expect_create);
-}
-
-/* Race with expectations being used means we could have none to find; OK. */
-static void evict_oldest_expect(struct nf_conn *master)
-{
-       struct nf_conntrack_expect *i;
-
-       list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
-               if (i->master == master) {
-                       if (del_timer(&i->timeout)) {
-                               nf_ct_unlink_expect(i);
-                               nf_conntrack_expect_put(i);
-                       }
-                       break;
-               }
-       }
-}
-
-static inline int refresh_timer(struct nf_conntrack_expect *i)
-{
-       if (!del_timer(&i->timeout))
-               return 0;
-
-       i->timeout.expires = jiffies + i->master->helper->timeout*HZ;
-       add_timer(&i->timeout);
-       return 1;
-}
-
-int nf_conntrack_expect_related(struct nf_conntrack_expect *expect)
-{
-       struct nf_conntrack_expect *i;
-       struct nf_conn *master = expect->master;
-       int ret;
-
-       DEBUGP("nf_conntrack_expect_related %p\n", related_to);
-       DEBUGP("tuple: "); NF_CT_DUMP_TUPLE(&expect->tuple);
-       DEBUGP("mask:  "); NF_CT_DUMP_TUPLE(&expect->mask);
-
-       write_lock_bh(&nf_conntrack_lock);
-       list_for_each_entry(i, &nf_conntrack_expect_list, list) {
-               if (expect_matches(i, expect)) {
-                       /* Refresh timer: if it's dying, ignore.. */
-                       if (refresh_timer(i)) {
-                               ret = 0;
-                               goto out;
-                       }
-               } else if (expect_clash(i, expect)) {
-                       ret = -EBUSY;
-                       goto out;
-               }
-       }
-       /* Will be over limit? */
-       if (master->helper->max_expected && 
-           master->expecting >= master->helper->max_expected)
-               evict_oldest_expect(master);
-
-       nf_conntrack_expect_insert(expect);
-       nf_conntrack_expect_event(IPEXP_NEW, expect);
-       ret = 0;
-out:
-       write_unlock_bh(&nf_conntrack_lock);
-       return ret;
-}
+EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
 
 /* Alter reply tuple (maybe alter helper).  This is for NAT, and is
    implicitly racy: see __nf_conntrack_confirm */
-void nf_conntrack_alter_reply(struct nf_conn *conntrack,
+void nf_conntrack_alter_reply(struct nf_conn *ct,
                              const struct nf_conntrack_tuple *newreply)
 {
+       struct nf_conn_help *help = nfct_help(ct);
+
        write_lock_bh(&nf_conntrack_lock);
        /* Should be unconfirmed, so not in hash table yet */
-       NF_CT_ASSERT(!nf_ct_is_confirmed(conntrack));
+       NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
 
-       DEBUGP("Altering reply tuple of %p to ", conntrack);
+       DEBUGP("Altering reply tuple of %p to ", ct);
        NF_CT_DUMP_TUPLE(newreply);
 
-       conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
-       if (!conntrack->master && conntrack->expecting == 0)
-               conntrack->helper = __nf_ct_helper_find(newreply);
+       ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
+       if (!ct->master && help && help->expecting == 0)
+               help->helper = __nf_ct_helper_find(newreply);
        write_unlock_bh(&nf_conntrack_lock);
 }
-
-int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
-{
-       int ret;
-       BUG_ON(me->timeout == 0);
-
-       ret = nf_conntrack_register_cache(NF_CT_F_HELP, "nf_conntrack:help",
-                                         sizeof(struct nf_conn)
-                                         + sizeof(union nf_conntrack_help)
-                                         + __alignof__(union nf_conntrack_help),
-                                         init_conntrack_for_helper);
-       if (ret < 0) {
-               printk(KERN_ERR "nf_conntrack_helper_reigster: Unable to create slab cache for conntracks\n");
-               return ret;
-       }
-       write_lock_bh(&nf_conntrack_lock);
-       list_prepend(&helpers, me);
-       write_unlock_bh(&nf_conntrack_lock);
-
-       return 0;
-}
-
-struct nf_conntrack_helper *
-__nf_conntrack_helper_find_byname(const char *name)
-{
-       struct nf_conntrack_helper *h;
-
-       list_for_each_entry(h, &helpers, list) {
-               if (!strcmp(h->name, name))
-                       return h;
-       }
-
-       return NULL;
-}
-
-static inline int unhelp(struct nf_conntrack_tuple_hash *i,
-                        const struct nf_conntrack_helper *me)
-{
-       if (nf_ct_tuplehash_to_ctrack(i)->helper == me) {
-               nf_conntrack_event(IPCT_HELPER, nf_ct_tuplehash_to_ctrack(i));
-               nf_ct_tuplehash_to_ctrack(i)->helper = NULL;
-       }
-       return 0;
-}
-
-void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
-{
-       unsigned int i;
-       struct nf_conntrack_expect *exp, *tmp;
-
-       /* Need write lock here, to delete helper. */
-       write_lock_bh(&nf_conntrack_lock);
-       LIST_DELETE(&helpers, me);
-
-       /* Get rid of expectations */
-       list_for_each_entry_safe(exp, tmp, &nf_conntrack_expect_list, list) {
-               if (exp->master->helper == me && del_timer(&exp->timeout)) {
-                       nf_ct_unlink_expect(exp);
-                       nf_conntrack_expect_put(exp);
-               }
-       }
-
-       /* Get rid of expecteds, set helpers to NULL. */
-       LIST_FIND_W(&unconfirmed, unhelp, struct nf_conntrack_tuple_hash*, me);
-       for (i = 0; i < nf_conntrack_htable_size; i++)
-               LIST_FIND_W(&nf_conntrack_hash[i], unhelp,
-                           struct nf_conntrack_tuple_hash *, me);
-       write_unlock_bh(&nf_conntrack_lock);
-
-       /* Someone could be still looking at the helper in a bh. */
-       synchronize_net();
-}
+EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
 
 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
 void __nf_ct_refresh_acct(struct nf_conn *ct,
@@ -1387,14 +914,25 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
 
        write_lock_bh(&nf_conntrack_lock);
 
+       /* Only update if this is not a fixed timeout */
+       if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
+               write_unlock_bh(&nf_conntrack_lock);
+               return;
+       }
+
        /* If not in hash table, timer will not be active yet */
        if (!nf_ct_is_confirmed(ct)) {
                ct->timeout.expires = extra_jiffies;
                event = IPCT_REFRESH;
        } else {
-               /* Need del_timer for race avoidance (may already be dying). */
-               if (del_timer(&ct->timeout)) {
-                       ct->timeout.expires = jiffies + extra_jiffies;
+               unsigned long newtime = jiffies + extra_jiffies;
+
+               /* Only update the timeout if the new timeout is at least
+                  HZ jiffies from the old timeout. Need del_timer for race
+                  avoidance (may already be dying). */
+               if (newtime - ct->timeout.expires >= HZ
+                   && del_timer(&ct->timeout)) {
+                       ct->timeout.expires = newtime;
                        add_timer(&ct->timeout);
                        event = IPCT_REFRESH;
                }
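
__nf_ct_refresh_acct() gains two refinements above: a connection carrying the
new fixed-timeout bit is never re-armed at all, and otherwise the timer is only
touched when the expiry moves forward by at least HZ, saving a
del_timer()/add_timer() pair per packet on busy flows. Usage sketch for the
first part, assuming a protocol or helper wants to pin a timeout it has
already set:

	set_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status);	/* refreshes become no-ops */
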
@@ -1405,9 +943,10 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
                ct->counters[CTINFO2DIR(ctinfo)].packets++;
                ct->counters[CTINFO2DIR(ctinfo)].bytes +=
                        skb->len - (unsigned int)(skb->nh.raw - skb->data);
-       if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
-           || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
-               event |= IPCT_COUNTER_FILLING;
+
+               if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
+                   || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
+                       event |= IPCT_COUNTER_FILLING;
        }
 #endif
 
@@ -1417,12 +956,15 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
        if (event)
                nf_conntrack_event_cache(event, skb);
 }
+EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
 
 #if defined(CONFIG_NF_CT_NETLINK) || \
     defined(CONFIG_NF_CT_NETLINK_MODULE)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
+#include <linux/mutex.h>
+
 
 /* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
  * in ip_conntrack_core, since we don't want the protocols to autoload
@@ -1439,6 +981,7 @@ int nf_ct_port_tuple_to_nfattr(struct sk_buff *skb,
 nfattr_failure:
        return -1;
 }
+EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nfattr);
 
 static const size_t cta_min_proto[CTA_PROTO_MAX] = {
        [CTA_PROTO_SRC_PORT-1]  = sizeof(u_int16_t),
@@ -1454,13 +997,12 @@ int nf_ct_port_nfattr_to_tuple(struct nfattr *tb[],
        if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
                return -EINVAL;
 
-       t->src.u.tcp.port =
-               *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]);
-       t->dst.u.tcp.port =
-               *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]);
+       t->src.u.tcp.port = *(__be16 *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]);
+       t->dst.u.tcp.port = *(__be16 *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]);
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(nf_ct_port_nfattr_to_tuple);
 #endif
 
 /* Used by ipt_REJECT and ip6t_REJECT. */
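
The casts above switch from plain u_int16_t to __be16, documenting that the
nfattr payload stays in network byte order; sparse can then warn when a
byte-order conversion is missing. Illustrative use of such a field in host
order:

	u_int16_t host_port = ntohs(t->src.u.tcp.port);	/* __be16 to host order */
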
@@ -1481,6 +1023,7 @@ void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
        nskb->nfctinfo = ctinfo;
        nf_conntrack_get(nskb->nfct);
 }
+EXPORT_SYMBOL_GPL(__nf_conntrack_attach);
 
 static inline int
 do_iter(const struct nf_conntrack_tuple_hash *i,
@@ -1491,37 +1034,41 @@ do_iter(const struct nf_conntrack_tuple_hash *i,
 }
 
 /* Bring out ya dead! */
-static struct nf_conntrack_tuple_hash *
+static struct nf_conn *
 get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
                void *data, unsigned int *bucket)
 {
-       struct nf_conntrack_tuple_hash *h = NULL;
+       struct nf_conntrack_tuple_hash *h;
+       struct nf_conn *ct;
 
        write_lock_bh(&nf_conntrack_lock);
        for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
-               h = LIST_FIND_W(&nf_conntrack_hash[*bucket], do_iter,
-                               struct nf_conntrack_tuple_hash *, iter, data);
-               if (h)
-                       break;
+               list_for_each_entry(h, &nf_conntrack_hash[*bucket], list) {
+                       ct = nf_ct_tuplehash_to_ctrack(h);
+                       if (iter(ct, data))
+                               goto found;
+               }
        }
-       if (!h)
-               h = LIST_FIND_W(&unconfirmed, do_iter,
-                               struct nf_conntrack_tuple_hash *, iter, data);
-       if (h)
-               atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
+       list_for_each_entry(h, &unconfirmed, list) {
+               ct = nf_ct_tuplehash_to_ctrack(h);
+               if (iter(ct, data))
+                       set_bit(IPS_DYING_BIT, &ct->status);
+       }
        write_unlock_bh(&nf_conntrack_lock);
-
-       return h;
+       return NULL;
+found:
+       atomic_inc(&ct->ct_general.use);
+       write_unlock_bh(&nf_conntrack_lock);
+       return ct;
 }
 
 void
 nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
 {
-       struct nf_conntrack_tuple_hash *h;
+       struct nf_conn *ct;
        unsigned int bucket = 0;
 
-       while ((h = get_next_corpse(iter, data, &bucket)) != NULL) {
-               struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+       while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
                /* Time to push up daises... */
                if (del_timer(&ct->timeout))
                        death_by_timeout((unsigned long)ct);
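
get_next_corpse() now returns the conntrack itself, and treats the unconfirmed
list differently: unconfirmed entries cannot be reaped through the hash walk,
so matching ones are only flagged IPS_DYING. A usage sketch of the iterator
with a hypothetical predicate (kill_all() below is the real in-tree caller):

	static int example_kill_unassured(struct nf_conn *ct, void *data)
	{
		/* reap every connection that never saw a reply */
		return !test_bit(IPS_ASSURED_BIT, &ct->status);
	}

	nf_ct_iterate_cleanup(example_kill_unassured, NULL);
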
@@ -1530,6 +1077,7 @@ nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
                nf_ct_put(ct);
        }
 }
+EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
 
 static int kill_all(struct nf_conn *i, void *data)
 {
@@ -1545,10 +1093,11 @@ static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size)
                           get_order(sizeof(struct list_head) * size));
 }
 
-void nf_conntrack_flush()
+void nf_conntrack_flush(void)
 {
        nf_ct_iterate_cleanup(kill_all, NULL);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_flush);
 
 /* Mishearing the voices in his head, our hero wonders how he's
    supposed to kill the mall. */
@@ -1586,6 +1135,8 @@ void nf_conntrack_cleanup(void)
        free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
                            nf_conntrack_htable_size);
 
+       nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_generic);
+
        /* free l3proto protocol tables */
        for (i = 0; i < PF_MAX; i++)
                if (nf_ct_protos[i]) {
@@ -1697,7 +1248,7 @@ int __init nf_conntrack_init(void)
        }
 
        ret = nf_conntrack_register_cache(NF_CT_F_BASIC, "nf_conntrack:basic",
-                                         sizeof(struct nf_conn), NULL);
+                                         sizeof(struct nf_conn));
        if (ret < 0) {
                printk(KERN_ERR "Unable to create nf_conn slab cache\n");
                goto err_free_hash;
@@ -1711,10 +1262,14 @@ int __init nf_conntrack_init(void)
                goto err_free_conntrack_slab;
        }
 
+       ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_generic);
+       if (ret < 0)
+               goto out_free_expect_slab;
+
        /* Don't NEED lock here, but good form anyway. */
        write_lock_bh(&nf_conntrack_lock);
-        for (i = 0; i < PF_MAX; i++)
-               nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto;
+        for (i = 0; i < AF_MAX; i++)
+               nf_ct_l3protos[i] = &nf_conntrack_l3proto_generic;
         write_unlock_bh(&nf_conntrack_lock);
 
        /* For use by REJECT target */
@@ -1728,6 +1283,8 @@ int __init nf_conntrack_init(void)
 
        return ret;
 
+out_free_expect_slab:
+       kmem_cache_destroy(nf_conntrack_expect_cachep);
 err_free_conntrack_slab:
        nf_conntrack_unregister_cache(NF_CT_F_BASIC);
 err_free_hash:
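
The new out_free_expect_slab label slots into the usual kernel unwind idiom:
each init step that can fail jumps to a label that tears down exactly the steps
that already succeeded, in reverse order. The general shape, with hypothetical
step names:

	static int __init example_init(void)
	{
		int ret;

		ret = example_setup_a();
		if (ret < 0)
			goto err;
		ret = example_setup_b();
		if (ret < 0)
			goto err_undo_a;	/* a succeeded, b did not */
		return 0;

	err_undo_a:
		example_teardown_a();
	err:
		return ret;
	}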