extern atomic_t ip_conntrack_count;
DECLARE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
-static int kill_proto(const struct ip_conntrack *i, void *data)
+static int kill_proto(struct ip_conntrack *i, void *data)
{
return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum ==
*((u_int8_t *) data));
#ifdef CONFIG_IP_NF_CT_ACCT
static unsigned int
-seq_print_counters(struct seq_file *s, struct ip_conntrack_counter *counter)
+seq_print_counters(struct seq_file *s,
+ const struct ip_conntrack_counter *counter)
{
return seq_printf(s, "packets=%llu bytes=%llu ",
(unsigned long long)counter->packets,
static int ct_seq_real_show(const struct ip_conntrack_tuple_hash *hash,
struct seq_file *s)
{
- struct ip_conntrack *conntrack = hash->ctrack;
+ const struct ip_conntrack *conntrack = tuplehash_to_ctrack(hash);
struct ip_conntrack_protocol *proto;
MUST_BE_READ_LOCKED(&ip_conntrack_lock);
if (seq_printf(s, "[ASSURED] "))
return 1;
+#if defined(CONFIG_IP_NF_CONNTRACK_MARK)
+ if (seq_printf(s, "mark=%ld ", conntrack->mark))
+ return 1;
+#endif
+
if (seq_printf(s, "use=%u\n", atomic_read(&conntrack->ct_general.use)))
return 1;
/* strange seq_file API: ->stop() is called even if ->start() fails,
 * so we must take the lock here, since ->stop() unlocks it */
READ_LOCK(&ip_conntrack_lock);
- READ_LOCK(&ip_conntrack_expect_tuple_lock);
if (list_empty(e))
return NULL;
static void exp_seq_stop(struct seq_file *s, void *v)
{
- READ_UNLOCK(&ip_conntrack_expect_tuple_lock);
READ_UNLOCK(&ip_conntrack_lock);
}
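/*
 * Illustrative sketch, not part of the patch: with the separate
 * ip_conntrack_expect_tuple_lock gone, the expectation list is walked
 * under ip_conntrack_lock alone.  Because seq_file invokes ->stop()
 * even when ->start() fails, the lock taken in start must be the one
 * released in exp_seq_stop() above; a condensed start callback
 * (assuming the list/lock names used in this file) looks like:
 */
static void *exp_seq_start(struct seq_file *s, loff_t *pos)
{
	struct list_head *e = &ip_conntrack_expect_list;
	loff_t i;

	/* released in exp_seq_stop(), even on early failure */
	READ_LOCK(&ip_conntrack_lock);

	if (list_empty(e))
		return NULL;

	/* advance to the *pos'th expectation, or bail at end of list */
	for (i = 0; i <= *pos; i++) {
		e = e->next;
		if (e == &ip_conntrack_expect_list)
			return NULL;
	}
	return e;
}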
{
struct ip_conntrack_expect *expect = v;
- if (expect->expectant->helper->timeout)
+ if (expect->timeout.function)
seq_printf(s, "%lu ", timer_pending(&expect->timeout)
? (expect->timeout.expires - jiffies)/HZ : 0);
else
seq_printf(s, "- ");
- seq_printf(s, "use=%u proto=%u ", atomic_read(&expect->use),
- expect->tuple.dst.protonum);
+ seq_printf(s, "proto=%u ", expect->tuple.dst.protonum);
print_tuple(s, &expect->tuple,
ip_ct_find_proto(expect->tuple.dst.protonum));
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
+ struct ip_conntrack *ct;
+ enum ip_conntrack_info ctinfo;
+
+ /* This is where we call the helper: as the packet goes out. */
+ ct = ip_conntrack_get(*pskb, &ctinfo);
+ if (ct && ct->helper) {
+ unsigned int ret;
+ ret = ct->helper->help(pskb, ct, ctinfo);
+ if (ret != NF_ACCEPT)
+ return ret;
+ }
+
/* We've seen it coming out the other side: confirm it */
- return ip_conntrack_confirm(*pskb);
+ return ip_conntrack_confirm(pskb);
}
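/*
 * Illustrative sketch, an assumption rather than part of the patch:
 * the hunk above calls ct->helper->help() right before confirmation,
 * with the ->help signature implied by that call.  A minimal helper
 * would look roughly like this (the example_* names are hypothetical):
 */
static int example_help(struct sk_buff **pskb,
			struct ip_conntrack *ct,
			enum ip_conntrack_info ctinfo)
{
	/* inspect or mangle the outgoing packet here; any return value
	 * other than NF_ACCEPT aborts confirmation of the connection */
	return NF_ACCEPT;
}

static struct ip_conntrack_helper example_helper = {
	.name	= "example",
	.help	= example_help,
};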
static unsigned int ip_conntrack_defrag(unsigned int hooknum,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
+#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
/* Previously seen (loopback)? Ignore. Do this before
fragment check. */
if ((*pskb)->nfct)
return NF_ACCEPT;
+#endif
/* Gather fragments. */
if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
- *pskb = ip_ct_gather_frags(*pskb);
+ *pskb = ip_ct_gather_frags(*pskb,
+ hooknum == NF_IP_PRE_ROUTING ?
+ IP_DEFRAG_CONNTRACK_IN :
+ IP_DEFRAG_CONNTRACK_OUT);
if (!*pskb)
return NF_STOLEN;
}
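/*
 * Illustrative note, not part of the patch: the new second argument
 * tags the reassembly queue per caller, so fragments seen at
 * PRE_ROUTING (IP_DEFRAG_CONNTRACK_IN) and at LOCAL_OUT
 * (IP_DEFRAG_CONNTRACK_OUT) are never mixed in one queue.  The
 * prototype assumed by the call above:
 */
struct sk_buff *ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user);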
goto cleanup_nothing;
#ifdef CONFIG_PROC_FS
+ ret = -ENOMEM;
proc = proc_net_fops_create("ip_conntrack", 0440, &ct_file_ops);
if (!proc)
	goto cleanup_init;
ip_ct_sysctl_header = register_sysctl_table(ip_ct_net_table, 0);
if (ip_ct_sysctl_header == NULL) {
printk("ip_conntrack: can't register to sysctl.\n");
- goto cleanup;
+ ret = -ENOMEM;
+ goto cleanup_localinops;
}
#endif
cleanup:
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(ip_ct_sysctl_header);
+ cleanup_localinops:
#endif
nf_unregister_hook(&ip_conntrack_local_in_ops);
cleanup_inoutandlocalops:
cleanup_defraglocalops:
nf_unregister_hook(&ip_conntrack_defrag_local_out_ops);
cleanup_defragops:
- /* Frag queues may hold fragments with skb->dst == NULL */
- ip_ct_no_defrag = 1;
- synchronize_net();
- local_bh_disable();
- ipfrag_flush();
- local_bh_enable();
nf_unregister_hook(&ip_conntrack_defrag_ops);
cleanup_proc_stat:
#ifdef CONFIG_PROC_FS
- proc_net_remove("ip_conntrack_stat");
-cleanup_proc_exp:
- proc_net_remove("ip_conntrack_exp");
+ remove_proc_entry("ip_conntrack", proc_net_stat);
+ cleanup_proc_exp:
+ proc_net_remove("ip_conntrack_expect");
cleanup_proc:
proc_net_remove("ip_conntrack");
cleanup_init:
synchronize_net();
/* Remove all conntrack entries for this protocol */
- ip_ct_selective_cleanup(kill_proto, &proto->proto);
+ ip_ct_iterate_cleanup(kill_proto, &proto->proto);
}
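/*
 * Illustrative sketch, not part of the patch: ip_ct_iterate_cleanup()
 * (the renamed ip_ct_selective_cleanup) walks every conntrack entry
 * and removes those for which the predicate returns non-zero; the
 * call above uses kill_proto() from the top of this file.  A caller
 * flushing one protocol would do (hypothetical wrapper):
 */
static void example_flush_proto(u_int8_t protonum)
{
	/* delete every entry whose ORIGINAL-direction protocol matches */
	ip_ct_iterate_cleanup(kill_proto, &protonum);
}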
static int __init init(void)
EXPORT_SYMBOL(ip_conntrack_protocol_register);
EXPORT_SYMBOL(ip_conntrack_protocol_unregister);
+EXPORT_SYMBOL(ip_ct_get_tuple);
EXPORT_SYMBOL(invert_tuplepr);
EXPORT_SYMBOL(ip_conntrack_alter_reply);
EXPORT_SYMBOL(ip_conntrack_destroyed);
EXPORT_SYMBOL(need_ip_conntrack);
EXPORT_SYMBOL(ip_conntrack_helper_register);
EXPORT_SYMBOL(ip_conntrack_helper_unregister);
-EXPORT_SYMBOL(ip_ct_selective_cleanup);
+EXPORT_SYMBOL(ip_ct_iterate_cleanup);
EXPORT_SYMBOL(ip_ct_refresh_acct);
EXPORT_SYMBOL(ip_ct_protos);
EXPORT_SYMBOL(ip_ct_find_proto);
-EXPORT_SYMBOL(ip_ct_find_helper);
EXPORT_SYMBOL(ip_conntrack_expect_alloc);
+EXPORT_SYMBOL(ip_conntrack_expect_free);
EXPORT_SYMBOL(ip_conntrack_expect_related);
-EXPORT_SYMBOL(ip_conntrack_change_expect);
EXPORT_SYMBOL(ip_conntrack_unexpect_related);
-EXPORT_SYMBOL_GPL(ip_conntrack_expect_find_get);
-EXPORT_SYMBOL_GPL(ip_conntrack_expect_put);
EXPORT_SYMBOL(ip_conntrack_tuple_taken);
EXPORT_SYMBOL(ip_ct_gather_frags);
EXPORT_SYMBOL(ip_conntrack_htable_size);
-EXPORT_SYMBOL(ip_conntrack_expect_list);
EXPORT_SYMBOL(ip_conntrack_lock);
EXPORT_SYMBOL(ip_conntrack_hash);
EXPORT_SYMBOL(ip_conntrack_untracked);
EXPORT_SYMBOL_GPL(ip_conntrack_find_get);
EXPORT_SYMBOL_GPL(ip_conntrack_put);
+#ifdef CONFIG_IP_NF_NAT_NEEDED
+EXPORT_SYMBOL(ip_conntrack_tcp_update);
+#endif