fedora core 6 1.2949 + vserver 2.2.0
[linux-2.6.git] / net / ipv4 / tcp_ipv4.c
index daedaa3..4d3232d 100644
  *                                     ACK bit.
  *             Andi Kleen :            Implemented fast path mtu discovery.
  *                                     Fixed many serious bugs in the
- *                                     open_request handling and moved
+ *                                     request_sock handling and moved
  *                                     most of it into the af independent code.
  *                                     Added tail drop and some other bugfixes.
- *                                     Added new listen sematics.
+ *                                     Added new listen semantics.
  *             Mike McLagan    :       Routing by source
  *     Juan Jose Ciarlante:            ip_dynaddr bits
  *             Andi Kleen:             various fixes.
@@ -52,7 +52,6 @@
  *                                     a single port at the same time.
  */
 
-#include <linux/config.h>
 
 #include <linux/types.h>
 #include <linux/fcntl.h>
 #include <linux/times.h>
 
 #include <net/icmp.h>
+#include <net/inet_hashtables.h>
 #include <net/tcp.h>
+#include <net/transp_v6.h>
 #include <net/ipv6.h>
 #include <net/inet_common.h>
+#include <net/timewait_sock.h>
 #include <net/xfrm.h>
+#include <net/netdma.h>
 
 #include <linux/inet.h>
 #include <linux/ipv6.h>
 #include <linux/stddef.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
-#include <linux/vserver/debug.h>
 
-extern int sysctl_ip_dynaddr;
-int sysctl_tcp_tw_reuse;
-int sysctl_tcp_low_latency;
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+
+int sysctl_tcp_tw_reuse __read_mostly;
+int sysctl_tcp_low_latency __read_mostly;
 
 /* Check TCP sequence numbers in ICMP packets. */
 #define ICMP_MIN_LENGTH 8
@@ -86,479 +90,40 @@ int sysctl_tcp_low_latency;
 /* Socket used for sending RSTs */
 static struct socket *tcp_socket;
 
-void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
-                      struct sk_buff *skb);
-
-struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
-       .__tcp_lhash_lock       =       RW_LOCK_UNLOCKED,
-       .__tcp_lhash_users      =       ATOMIC_INIT(0),
-       .__tcp_lhash_wait
-         = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
-       .__tcp_portalloc_lock   =       SPIN_LOCK_UNLOCKED
-};
-
-/*
- * This array holds the first and last local port number.
- * For high-usage systems, use sysctl to change this to
- * 32768-61000
- */
-int sysctl_local_port_range[2] = { 1024, 4999 };
-int tcp_port_rover = 1024 - 1;
-
-static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
-                                __u32 faddr, __u16 fport)
-{
-       int h = (laddr ^ lport) ^ (faddr ^ fport);
-       h ^= h >> 16;
-       h ^= h >> 8;
-       return h & (tcp_ehash_size - 1);
-}
-
-static __inline__ int tcp_sk_hashfn(struct sock *sk)
-{
-       struct inet_sock *inet = inet_sk(sk);
-       __u32 laddr = inet->rcv_saddr;
-       __u16 lport = inet->num;
-       __u32 faddr = inet->daddr;
-       __u16 fport = inet->dport;
-
-       return tcp_hashfn(laddr, lport, faddr, fport);
-}
+void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
 
-/* Allocate and initialize a new TCP local port bind bucket.
- * The bindhash mutex for snum's hash chain must be held here.
- */
-struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
-                                         unsigned short snum)
-{
-       struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
-                                                     SLAB_ATOMIC);
-       if (tb) {
-               tb->port = snum;
-               tb->fastreuse = 0;
-               INIT_HLIST_HEAD(&tb->owners);
-               hlist_add_head(&tb->node, &head->chain);
-       }
-       return tb;
-}
-
-/* Caller must hold hashbucket lock for this tb with local BH disabled */
-void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
-{
-       if (hlist_empty(&tb->owners)) {
-               __hlist_del(&tb->node);
-               kmem_cache_free(tcp_bucket_cachep, tb);
-       }
-}
-
-/* Caller must disable local BH processing. */
-static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
-{
-       struct tcp_bind_hashbucket *head =
-                               &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
-       struct tcp_bind_bucket *tb;
-
-       spin_lock(&head->lock);
-       tb = tcp_sk(sk)->bind_hash;
-       sk_add_bind_node(child, &tb->owners);
-       tcp_sk(child)->bind_hash = tb;
-       spin_unlock(&head->lock);
-}
-
-inline void tcp_inherit_port(struct sock *sk, struct sock *child)
-{
-       local_bh_disable();
-       __tcp_inherit_port(sk, child);
-       local_bh_enable();
-}
-
-void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
-                  unsigned short snum)
-{
-       inet_sk(sk)->num = snum;
-       sk_add_bind_node(sk, &tb->owners);
-       tcp_sk(sk)->bind_hash = tb;
-}
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
+                                                  __be32 addr);
+static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+                                  __be32 saddr, __be32 daddr,
+                                  struct tcphdr *th, int protocol,
+                                  int tcplen);
+#endif
 
-static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
-{
-       struct sock *sk2;
-       struct hlist_node *node;
-       int reuse = sk->sk_reuse;
-
-       sk_for_each_bound(sk2, node, &tb->owners) {
-               if (sk != sk2 &&
-                   !tcp_v6_ipv6only(sk2) &&
-                   (!sk->sk_bound_dev_if ||
-                    !sk2->sk_bound_dev_if ||
-                    sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
-                       if (!reuse || !sk2->sk_reuse ||
-                           sk2->sk_state == TCP_LISTEN) {
-                               if (nx_addr_conflict(sk->sk_nx_info,
-                                       tcp_v4_rcv_saddr(sk), sk2))
-                                       break;
-                       }
-               }
-       }
-       return node != NULL;
-}
+struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
+       .lhash_lock  = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
+       .lhash_users = ATOMIC_INIT(0),
+       .lhash_wait  = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
+};
 
-/* Obtain a reference to a local port for the given sock,
- * if snum is zero it means select any available local port.
- */
 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 {
-       struct tcp_bind_hashbucket *head;
-       struct hlist_node *node;
-       struct tcp_bind_bucket *tb;
-       int ret;
-
-       local_bh_disable();
-       if (!snum) {
-               int low = sysctl_local_port_range[0];
-               int high = sysctl_local_port_range[1];
-               int remaining = (high - low) + 1;
-               int rover;
-
-               spin_lock(&tcp_portalloc_lock);
-               if (tcp_port_rover < low)
-                       rover = low;
-               else
-                       rover = tcp_port_rover;
-               do {
-                       rover++;
-                       if (rover > high)
-                               rover = low;
-                       head = &tcp_bhash[tcp_bhashfn(rover)];
-                       spin_lock(&head->lock);
-                       tb_for_each(tb, node, &head->chain)
-                               if (tb->port == rover)
-                                       goto next;
-                       break;
-               next:
-                       spin_unlock(&head->lock);
-               } while (--remaining > 0);
-               tcp_port_rover = rover;
-               spin_unlock(&tcp_portalloc_lock);
-
-               /* Exhausted local port range during search? */
-               ret = 1;
-               if (remaining <= 0)
-                       goto fail;
-
-               /* OK, here is the one we will use.  HEAD is
-                * non-NULL and we hold it's mutex.
-                */
-               snum = rover;
-       } else {
-               head = &tcp_bhash[tcp_bhashfn(snum)];
-               spin_lock(&head->lock);
-               tb_for_each(tb, node, &head->chain)
-                       if (tb->port == snum)
-                               goto tb_found;
-       }
-       tb = NULL;
-       goto tb_not_found;
-tb_found:
-       if (!hlist_empty(&tb->owners)) {
-               if (sk->sk_reuse > 1)
-                       goto success;
-               if (tb->fastreuse > 0 &&
-                   sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
-                       goto success;
-               } else {
-                       ret = 1;
-                       if (tcp_bind_conflict(sk, tb))
-                               goto fail_unlock;
-               }
-       }
-tb_not_found:
-       ret = 1;
-       if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
-               goto fail_unlock;
-       if (hlist_empty(&tb->owners)) {
-               if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
-                       tb->fastreuse = 1;
-               else
-                       tb->fastreuse = 0;
-       } else if (tb->fastreuse &&
-                  (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
-               tb->fastreuse = 0;
-success:
-       if (!tcp_sk(sk)->bind_hash)
-               tcp_bind_hash(sk, tb, snum);
-       BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
-       ret = 0;
-
-fail_unlock:
-       spin_unlock(&head->lock);
-fail:
-       local_bh_enable();
-       return ret;
-}
-
-/* Get rid of any references to a local port held by the
- * given sock.
- */
-static void __tcp_put_port(struct sock *sk)
-{
-       struct inet_sock *inet = inet_sk(sk);
-       struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
-       struct tcp_bind_bucket *tb;
-
-       spin_lock(&head->lock);
-       tb = tcp_sk(sk)->bind_hash;
-       __sk_del_bind_node(sk);
-       tcp_sk(sk)->bind_hash = NULL;
-       inet->num = 0;
-       tcp_bucket_destroy(tb);
-       spin_unlock(&head->lock);
-}
-
-void tcp_put_port(struct sock *sk)
-{
-       local_bh_disable();
-       __tcp_put_port(sk);
-       local_bh_enable();
-}
-
-/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
- * Look, when several writers sleep and reader wakes them up, all but one
- * immediately hit write lock and grab all the cpus. Exclusive sleep solves
- * this, _but_ remember, it adds useless work on UP machines (wake up each
- * exclusive lock release). It should be ifdefed really.
- */
-
-void tcp_listen_wlock(void)
-{
-       write_lock(&tcp_lhash_lock);
-
-       if (atomic_read(&tcp_lhash_users)) {
-               DEFINE_WAIT(wait);
-
-               for (;;) {
-                       prepare_to_wait_exclusive(&tcp_lhash_wait,
-                                               &wait, TASK_UNINTERRUPTIBLE);
-                       if (!atomic_read(&tcp_lhash_users))
-                               break;
-                       write_unlock_bh(&tcp_lhash_lock);
-                       schedule();
-                       write_lock_bh(&tcp_lhash_lock);
-               }
-
-               finish_wait(&tcp_lhash_wait, &wait);
-       }
-}
-
-static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
-{
-       struct hlist_head *list;
-       rwlock_t *lock;
-
-       BUG_TRAP(sk_unhashed(sk));
-       if (listen_possible && sk->sk_state == TCP_LISTEN) {
-               list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
-               lock = &tcp_lhash_lock;
-               tcp_listen_wlock();
-       } else {
-               list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
-               lock = &tcp_ehash[sk->sk_hashent].lock;
-               write_lock(lock);
-       }
-       __sk_add_node(sk, list);
-       sock_prot_inc_use(sk->sk_prot);
-       write_unlock(lock);
-       if (listen_possible && sk->sk_state == TCP_LISTEN)
-               wake_up(&tcp_lhash_wait);
+       return inet_csk_get_port(&tcp_hashinfo, sk, snum,
+                                inet_csk_bind_conflict);
 }
 
 static void tcp_v4_hash(struct sock *sk)
 {
-       if (sk->sk_state != TCP_CLOSE) {
-               local_bh_disable();
-               __tcp_v4_hash(sk, 1);
-               local_bh_enable();
-       }
+       inet_hash(&tcp_hashinfo, sk);
 }
 
 void tcp_unhash(struct sock *sk)
 {
-       rwlock_t *lock;
-
-       if (sk_unhashed(sk))
-               goto ende;
-
-       if (sk->sk_state == TCP_LISTEN) {
-               local_bh_disable();
-               tcp_listen_wlock();
-               lock = &tcp_lhash_lock;
-       } else {
-               struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
-               lock = &head->lock;
-               write_lock_bh(&head->lock);
-       }
-
-       if (__sk_del_node_init(sk))
-               sock_prot_dec_use(sk->sk_prot);
-       write_unlock_bh(lock);
-
- ende:
-       if (sk->sk_state == TCP_LISTEN)
-               wake_up(&tcp_lhash_wait);
-}
-
-
-/*
- *      Check if a given address matches for a tcp socket
- *
- *      nxi:   the socket's nx_info if any
- *      addr:  to be verified address
- *      saddr: socket addresses
- */
-static inline int tcp_addr_match (
-       struct nx_info *nxi,
-       uint32_t addr,
-       uint32_t saddr)
-{
-       if (addr && (saddr == addr))
-               return 1;
-       if (!saddr)
-               return addr_in_nx_info(nxi, addr);
-       return 0;
-}
-
-/* Don't inline this cruft.  Here are some nice properties to
- * exploit here.  The BSD API does not allow a listening TCP
- * to specify the remote port nor the remote address for the
- * connection.  So always assume those are both wildcarded
- * during the search since they can never be otherwise.
- */
-static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
-                                            unsigned short hnum, int dif)
-{
-       struct sock *result = NULL, *sk;
-       struct hlist_node *node;
-       int score, hiscore;
-
-       hiscore=-1;
-       sk_for_each(sk, node, head) {
-               struct inet_sock *inet = inet_sk(sk);
-
-               if (inet->num == hnum && !ipv6_only_sock(sk)) {
-                       __u32 rcv_saddr = inet->rcv_saddr;
-
-                       score = (sk->sk_family == PF_INET ? 1 : 0);
-                       if (tcp_addr_match(sk->sk_nx_info, daddr, rcv_saddr))
-                               score+=2;
-                       else
-                               continue;
-                       if (sk->sk_bound_dev_if) {
-                               if (sk->sk_bound_dev_if != dif)
-                                       continue;
-                               score+=2;
-                       }
-                       if (score == 5)
-                               return sk;
-                       if (score > hiscore) {
-                               hiscore = score;
-                               result = sk;
-                       }
-               }
-       }
-       return result;
-}
-
-/* Optimize the common listener case. */
-static inline struct sock *tcp_v4_lookup_listener(u32 daddr,
-               unsigned short hnum, int dif)
-{
-       struct sock *sk = NULL;
-       struct hlist_head *head;
-
-       read_lock(&tcp_lhash_lock);
-       head = &tcp_listening_hash[tcp_lhashfn(hnum)];
-       if (!hlist_empty(head)) {
-               struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
-
-               if (inet->num == hnum && !sk->sk_node.next &&
-                   (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
-                   tcp_addr_match(sk->sk_nx_info, daddr, inet->rcv_saddr) &&
-                   !sk->sk_bound_dev_if)
-                       goto sherry_cache;
-               sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
-       }
-       if (sk) {
-sherry_cache:
-               sock_hold(sk);
-       }
-       read_unlock(&tcp_lhash_lock);
-       return sk;
-}
-
-/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
- * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
- *
- * Local BH must be disabled here.
- */
-
-static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
-                                                      u32 daddr, u16 hnum,
-                                                      int dif)
-{
-       struct tcp_ehash_bucket *head;
-       TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
-       __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
-       struct sock *sk;
-       struct hlist_node *node;
-       /* Optimize here for direct hit, only listening connections can
-        * have wildcards anyways.
-        */
-       int hash = tcp_hashfn(daddr, hnum, saddr, sport);
-       head = &tcp_ehash[hash];
-       read_lock(&head->lock);
-       sk_for_each(sk, node, &head->chain) {
-               if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
-                       goto hit; /* You sunk my battleship! */
-       }
-
-       /* Must check for a TIME_WAIT'er before going to listener hash. */
-       sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
-               if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
-                       goto hit;
-       }
-       sk = NULL;
-out:
-       read_unlock(&head->lock);
-       return sk;
-hit:
-       sock_hold(sk);
-       goto out;
-}
-
-static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
-                                          u32 daddr, u16 hnum, int dif)
-{
-       struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
-                                                     daddr, hnum, dif);
-
-       return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
+       inet_unhash(&tcp_hashinfo, sk);
 }
 
-inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
-                                 u16 dport, int dif)
-{
-       struct sock *sk;
-
-       local_bh_disable();
-       sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
-       local_bh_enable();
-
-       return sk;
-}
-
-EXPORT_SYMBOL_GPL(tcp_v4_lookup);
-
-static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
+static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
 {
        return secure_tcp_sequence_number(skb->nh.iph->daddr,
                                          skb->nh.iph->saddr,
@@ -566,200 +131,38 @@ static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
                                          skb->h.th->source);
 }
 
-/* called with local bh disabled */
-static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
-                                     struct tcp_tw_bucket **twp)
+int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 {
-       struct inet_sock *inet = inet_sk(sk);
-       u32 daddr = inet->rcv_saddr;
-       u32 saddr = inet->daddr;
-       int dif = sk->sk_bound_dev_if;
-       TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
-       __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
-       int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
-       struct tcp_ehash_bucket *head = &tcp_ehash[hash];
-       struct sock *sk2;
-       struct hlist_node *node;
-       struct tcp_tw_bucket *tw;
-
-       write_lock(&head->lock);
-
-       /* Check TIME-WAIT sockets first. */
-       sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
-               tw = (struct tcp_tw_bucket *)sk2;
-
-               if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
-                       struct tcp_sock *tp = tcp_sk(sk);
-
-                       /* With PAWS, it is safe from the viewpoint
-                          of data integrity. Even without PAWS it
-                          is safe provided sequence spaces do not
-                          overlap i.e. at data rates <= 80Mbit/sec.
-
-                          Actually, the idea is close to VJ's one,
-                          only timestamp cache is held not per host,
-                          but per port pair and TW bucket is used
-                          as state holder.
-
-                          If TW bucket has been already destroyed we
-                          fall back to VJ's scheme and use initial
-                          timestamp retrieved from peer table.
-                        */
-                       if (tw->tw_ts_recent_stamp &&
-                           (!twp || (sysctl_tcp_tw_reuse &&
-                                     xtime.tv_sec -
-                                     tw->tw_ts_recent_stamp > 1))) {
-                               if ((tp->write_seq =
-                                               tw->tw_snd_nxt + 65535 + 2) == 0)
-                                       tp->write_seq = 1;
-                               tp->rx_opt.ts_recent       = tw->tw_ts_recent;
-                               tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
-                               sock_hold(sk2);
-                               goto unique;
-                       } else
-                               goto not_unique;
-               }
-       }
-       tw = NULL;
-
-       /* And established part... */
-       sk_for_each(sk2, node, &head->chain) {
-               if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
-                       goto not_unique;
-       }
+       const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
+       struct tcp_sock *tp = tcp_sk(sk);
 
-unique:
-       /* Must record num and sport now. Otherwise we will see
-        * in hash table socket with a funny identity. */
-       inet->num = lport;
-       inet->sport = htons(lport);
-       sk->sk_hashent = hash;
-       BUG_TRAP(sk_unhashed(sk));
-       __sk_add_node(sk, &head->chain);
-       sock_prot_inc_use(sk->sk_prot);
-       write_unlock(&head->lock);
+       /* With PAWS, it is safe from the viewpoint
+          of data integrity. Even without PAWS it is safe provided sequence
+          spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.
 
-       if (twp) {
-               *twp = tw;
-               NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
-       } else if (tw) {
-               /* Silly. Should hash-dance instead... */
-               tcp_tw_deschedule(tw);
-               NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
+          Actually, the idea is close to VJ's one, only timestamp cache is
+          held not per host, but per port pair and TW bucket is used as state
+          holder.
 
-               tcp_tw_put(tw);
+          If the TW bucket has already been destroyed, we fall back to
+          VJ's scheme and use the initial timestamp retrieved from the
+          peer table.
+        */
+       if (tcptw->tw_ts_recent_stamp &&
+           (twp == NULL || (sysctl_tcp_tw_reuse &&
+                            xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
+               tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
+               if (tp->write_seq == 0)
+                       tp->write_seq = 1;
+               tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
+               tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+               sock_hold(sktw);
+               return 1;
        }
 
        return 0;
-
-not_unique:
-       write_unlock(&head->lock);
-       return -EADDRNOTAVAIL;
 }
 
-static inline u32 connect_port_offset(const struct sock *sk)
-{
-       const struct inet_sock *inet = inet_sk(sk);
-
-       return secure_tcp_port_ephemeral(inet->rcv_saddr, inet->daddr, 
-                                        inet->dport);
-}
-
-/*
- * Bind a port for a connect operation and hash it.
- */
-static inline int tcp_v4_hash_connect(struct sock *sk)
-{
-       unsigned short snum = inet_sk(sk)->num;
-       struct tcp_bind_hashbucket *head;
-       struct tcp_bind_bucket *tb;
-       int ret;
-
-       if (!snum) {
-               int low = sysctl_local_port_range[0];
-               int high = sysctl_local_port_range[1];
-               int range = high - low;
-               int i;
-               int port;
-               static u32 hint;
-               u32 offset = hint + connect_port_offset(sk);
-               struct hlist_node *node;
-               struct tcp_tw_bucket *tw = NULL;
-
-               local_bh_disable();
-               for (i = 1; i <= range; i++) {
-                       port = low + (i + offset) % range;
-                       head = &tcp_bhash[tcp_bhashfn(port)];
-                       spin_lock(&head->lock);
-
-                       /* Does not bother with rcv_saddr checks,
-                        * because the established check is already
-                        * unique enough.
-                        */
-                       tb_for_each(tb, node, &head->chain) {
-                               if (tb->port == port) {
-                                       BUG_TRAP(!hlist_empty(&tb->owners));
-                                       if (tb->fastreuse >= 0)
-                                               goto next_port;
-                                       if (!__tcp_v4_check_established(sk,
-                                                                       port,
-                                                                       &tw))
-                                               goto ok;
-                                       goto next_port;
-                               }
-                       }
-
-                       tb = tcp_bucket_create(head, port);
-                       if (!tb) {
-                               spin_unlock(&head->lock);
-                               break;
-                       }
-                       tb->fastreuse = -1;
-                       goto ok;
-
-               next_port:
-                       spin_unlock(&head->lock);
-               }
-               local_bh_enable();
-
-               return -EADDRNOTAVAIL;
-
-ok:
-               hint += i;
-
-               /* Head lock still held and bh's disabled */
-               tcp_bind_hash(sk, tb, port);
-               if (sk_unhashed(sk)) {
-                       inet_sk(sk)->sport = htons(port);
-                       __tcp_v4_hash(sk, 0);
-               }
-               spin_unlock(&head->lock);
-
-               if (tw) {
-                       tcp_tw_deschedule(tw);
-                       tcp_tw_put(tw);
-               }
-
-               ret = 0;
-               goto out;
-       }
-
-       head  = &tcp_bhash[tcp_bhashfn(snum)];
-       tb  = tcp_sk(sk)->bind_hash;
-       spin_lock_bh(&head->lock);
-       if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
-               __tcp_v4_hash(sk, 0);
-               spin_unlock_bh(&head->lock);
-               return 0;
-       } else {
-               spin_unlock(&head->lock);
-               /* No definite answer... Walk to established hash table */
-               ret = __tcp_v4_check_established(sk, snum, NULL);
-out:
-               local_bh_enable();
-               return ret;
-       }
-}
+EXPORT_SYMBOL_GPL(tcp_twsk_unique);
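
The arithmetic above is worth seeing in isolation: the recycled connection's
write_seq starts 65535 + 2 past tw_snd_nxt, so the new sequence space stays
clear of the old one even without PAWS. A minimal user-space sketch of just
that step, assuming nothing beyond the C library; the demo_* name is
hypothetical:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t demo_reuse_write_seq(uint32_t tw_snd_nxt)
	{
		/* Same offset as tcp_twsk_unique(): one full 64K window
		 * plus 2 keeps the new sequence space clear of the old. */
		uint32_t write_seq = tw_snd_nxt + 65535 + 2;

		if (write_seq == 0)	/* 0 is avoided, exactly as above */
			write_seq = 1;
		return write_seq;
	}

	int main(void)
	{
		printf("%u\n", demo_reuse_write_seq(1000u));       /* 66537 */
		printf("%u\n", demo_reuse_write_seq(4294901759u)); /* wraps: 1 */
		return 0;
	}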
 
 /* This will initiate an outgoing connection. */
 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
@@ -768,7 +171,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        struct tcp_sock *tp = tcp_sk(sk);
        struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
        struct rtable *rt;
-       u32 daddr, nexthop;
+       __be32 daddr, nexthop;
        int tmp;
        int err;
 
@@ -811,16 +214,17 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                tp->write_seq              = 0;
        }
 
-       if (sysctl_tcp_tw_recycle &&
+       if (tcp_death_row.sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
                struct inet_peer *peer = rt_get_peer(rt);
-
-               /* VJ's idea. We save last timestamp seen from
-                * the destination in peer table, when entering state TIME-WAIT
-                * and initialize rx_opt.ts_recent from it, when trying new connection.
+               /*
+                * VJ's idea. We save the last timestamp seen from
+                * the destination in the peer table when entering state
+                * TIME-WAIT, and initialize rx_opt.ts_recent from it
+                * when trying a new connection.
                 */
-
-               if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
+               if (peer != NULL &&
+                   peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
                        tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
                        tp->rx_opt.ts_recent = peer->tcp_ts;
                }
@@ -829,9 +233,9 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        inet->dport = usin->sin_port;
        inet->daddr = daddr;
 
-       tp->ext_header_len = 0;
+       inet_csk(sk)->icsk_ext_hdr_len = 0;
        if (inet->opt)
-               tp->ext_header_len = inet->opt->optlen;
+               inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
 
        tp->rx_opt.mss_clamp = 536;
 
@@ -841,17 +245,18 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
         * complete initialization after this.
         */
        tcp_set_state(sk, TCP_SYN_SENT);
-       err = tcp_v4_hash_connect(sk);
+       err = inet_hash_connect(&tcp_death_row, sk);
        if (err)
                goto failure;
 
-       err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
+       err = ip_route_newports(&rt, IPPROTO_TCP,
+                               inet->sport, inet->dport, sk);
        if (err)
                goto failure;
 
        /* OK, now commit destination to socket.  */
-       __sk_dst_set(sk, &rt->u.dst);
-       tcp_v4_setup_caps(sk, &rt->u.dst);
+       sk->sk_gso_type = SKB_GSO_TCPV4;
+       sk_setup_caps(sk, &rt->u.dst);
 
        if (!tp->write_seq)
                tp->write_seq = secure_tcp_sequence_number(inet->saddr,
@@ -869,7 +274,10 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        return 0;
 
 failure:
-       /* This unhashes the socket and releases the local port, if necessary. */
+       /*
+        * This unhashes the socket and releases the local port,
+        * if necessary.
+        */
        tcp_set_state(sk, TCP_CLOSE);
        ip_rt_put(rt);
        sk->sk_route_caps = 0;
@@ -877,68 +285,13 @@ failure:
        return err;
 }
 
-static __inline__ int tcp_v4_iif(struct sk_buff *skb)
-{
-       return ((struct rtable *)skb->dst)->rt_iif;
-}
-
-static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
-{
-       return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
-}
-
-static struct open_request *tcp_v4_search_req(struct tcp_sock *tp,
-                                             struct open_request ***prevp,
-                                             __u16 rport,
-                                             __u32 raddr, __u32 laddr)
-{
-       struct tcp_listen_opt *lopt = tp->listen_opt;
-       struct open_request *req, **prev;
-
-       for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
-            (req = *prev) != NULL;
-            prev = &req->dl_next) {
-               if (req->rmt_port == rport &&
-                   req->af.v4_req.rmt_addr == raddr &&
-                   req->af.v4_req.loc_addr == laddr &&
-                   TCP_INET_FAMILY(req->class->family)) {
-                       BUG_TRAP(!req->sk);
-                       *prevp = prev;
-                       break;
-               }
-       }
-
-       return req;
-}
-
-static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       struct tcp_listen_opt *lopt = tp->listen_opt;
-       u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
-
-       req->expires = jiffies + TCP_TIMEOUT_INIT;
-       req->retrans = 0;
-       req->sk = NULL;
-       req->dl_next = lopt->syn_table[h];
-
-       write_lock(&tp->syn_wait_lock);
-       lopt->syn_table[h] = req;
-       write_unlock(&tp->syn_wait_lock);
-
-       tcp_synq_added(sk);
-}
-
-
 /*
  * This routine does path mtu discovery as defined in RFC1191.
  */
-static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
-                                    u32 mtu)
+static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
 {
        struct dst_entry *dst;
        struct inet_sock *inet = inet_sk(sk);
-       struct tcp_sock *tp = tcp_sk(sk);
 
        /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
         * sent out by Linux are always < 576 bytes, so they should go through
@@ -967,7 +320,7 @@ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
        mtu = dst_mtu(dst);
 
        if (inet->pmtudisc != IP_PMTUDISC_DONT &&
-           tp->pmtu_cookie > mtu) {
+           inet_csk(sk)->icsk_pmtu_cookie > mtu) {
                tcp_sync_mss(sk, mtu);
 
                /* Resend the TCP packet because it's
@@ -1012,14 +365,14 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
                return;
        }
 
-       sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
-                          th->source, tcp_v4_iif(skb));
+       sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr,
+                        th->source, inet_iif(skb));
        if (!sk) {
                ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
                return;
        }
        if (sk->sk_state == TCP_TIME_WAIT) {
-               tcp_tw_put((struct tcp_tw_bucket *)sk);
+               inet_twsk_put(inet_twsk(sk));
                return;
        }
 
@@ -1037,7 +390,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
        seq = ntohl(th->seq);
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, tp->snd_una, tp->snd_nxt)) {
-               NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
+               NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }
 
@@ -1068,13 +421,13 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
        }
 
        switch (sk->sk_state) {
-               struct open_request *req, **prev;
+               struct request_sock *req, **prev;
        case TCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;
 
-               req = tcp_v4_search_req(tp, &prev, th->dest,
-                                       iph->daddr, iph->saddr);
+               req = inet_csk_search_req(sk, &prev, th->dest,
+                                         iph->daddr, iph->saddr);
                if (!req)
                        goto out;
 
@@ -1083,7 +436,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
                 */
                BUG_TRAP(!req->sk);
 
-               if (seq != req->snt_isn) {
+               if (seq != tcp_rsk(req)->snt_isn) {
                        NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }
@@ -1094,7 +447,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
                 * created socket, and POSIX does not want network
                 * errors returned from accept().
                 */
-               tcp_synq_drop(sk, req, prev);
+               inet_csk_reqsk_queue_drop(sk, req, prev);
                goto out;
 
        case TCP_SYN_SENT:
@@ -1102,7 +455,6 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
                               It can happen, e.g., if SYNs crossed.
                             */
                if (!sock_owned_by_user(sk)) {
-                       TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
                        sk->sk_err = err;
 
                        sk->sk_error_report(sk);
@@ -1144,14 +496,15 @@ out:
 }
 
 /* This routine computes an IPv4 TCP checksum. */
-void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
-                      struct sk_buff *skb)
+void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
 {
        struct inet_sock *inet = inet_sk(sk);
+       struct tcphdr *th = skb->h.th;
 
-       if (skb->ip_summed == CHECKSUM_HW) {
-               th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
-               skb->csum = offsetof(struct tcphdr, check);
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               th->check = ~tcp_v4_check(th, len,
+                                         inet->saddr, inet->daddr, 0);
+               skb->csum_offset = offsetof(struct tcphdr, check);
        } else {
                th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
                                         csum_partial((char *)th,
@@ -1160,6 +513,24 @@ void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
        }
 }
 
+int tcp_v4_gso_send_check(struct sk_buff *skb)
+{
+       struct iphdr *iph;
+       struct tcphdr *th;
+
+       if (!pskb_may_pull(skb, sizeof(*th)))
+               return -EINVAL;
+
+       iph = skb->nh.iph;
+       th = skb->h.th;
+
+       th->check = 0;
+       th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
+       skb->csum_offset = offsetof(struct tcphdr, check);
+       skb->ip_summed = CHECKSUM_PARTIAL;
+       return 0;
+}
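
Both checksum helpers above stop at the pseudo-header: under
CHECKSUM_PARTIAL the uninverted pseudo-header sum is stored in th->check
and csum_offset records where the device (or the GSO splitter) must write
the final value once it has summed the rest of the segment. A user-space
sketch of that partial sum, host byte order for clarity;
demo_pseudo_hdr_csum() is a hypothetical stand-in for the
csum_tcpudp_nofold() fold:

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint16_t demo_pseudo_hdr_csum(uint32_t saddr, uint32_t daddr,
					     uint32_t tcp_len)
	{
		uint32_t sum = 0;

		sum += (saddr >> 16) + (saddr & 0xffff);
		sum += (daddr >> 16) + (daddr & 0xffff);
		sum += 6;		/* IPPROTO_TCP, zero-padded */
		sum += tcp_len;		/* TCP header + payload length */

		while (sum >> 16)	/* fold the carries back in */
			sum = (sum & 0xffff) + (sum >> 16);
		/* Not inverted: the hardware finishes and inverts it. */
		return (uint16_t)sum;
	}

	int main(void)
	{
		uint32_t s = ntohl(inet_addr("192.0.2.1"));
		uint32_t d = ntohl(inet_addr("198.51.100.2"));

		printf("0x%04x\n", demo_pseudo_hdr_csum(s, d, 40));
		return 0;
	}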
+
 /*
  *     This routine will send an RST to the other tcp.
  *
@@ -1173,11 +544,19 @@ void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
  *     Exception: precedence violation. We do not implement it in any case.
  */
 
-static void tcp_v4_send_reset(struct sk_buff *skb)
+static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 {
        struct tcphdr *th = skb->h.th;
-       struct tcphdr rth;
+       struct {
+               struct tcphdr th;
+#ifdef CONFIG_TCP_MD5SIG
+               __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
+#endif
+       } rep;
        struct ip_reply_arg arg;
+#ifdef CONFIG_TCP_MD5SIG
+       struct tcp_md5sig_key *key;
+#endif
 
        /* Never send a reset in response to a reset. */
        if (th->rst)
@@ -1187,29 +566,49 @@ static void tcp_v4_send_reset(struct sk_buff *skb)
                return;
 
        /* Swap the send and the receive. */
-       memset(&rth, 0, sizeof(struct tcphdr));
-       rth.dest   = th->source;
-       rth.source = th->dest;
-       rth.doff   = sizeof(struct tcphdr) / 4;
-       rth.rst    = 1;
+       memset(&rep, 0, sizeof(rep));
+       rep.th.dest   = th->source;
+       rep.th.source = th->dest;
+       rep.th.doff   = sizeof(struct tcphdr) / 4;
+       rep.th.rst    = 1;
 
        if (th->ack) {
-               rth.seq = th->ack_seq;
+               rep.th.seq = th->ack_seq;
        } else {
-               rth.ack = 1;
-               rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
-                                   skb->len - (th->doff << 2));
+               rep.th.ack = 1;
+               rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
+                                      skb->len - (th->doff << 2));
        }
 
-       memset(&arg, 0, sizeof arg);
-       arg.iov[0].iov_base = (unsigned char *)&rth;
-       arg.iov[0].iov_len  = sizeof rth;
+       memset(&arg, 0, sizeof(arg));
+       arg.iov[0].iov_base = (unsigned char *)&rep;
+       arg.iov[0].iov_len  = sizeof(rep.th);
+
+#ifdef CONFIG_TCP_MD5SIG
+       key = sk ? tcp_v4_md5_do_lookup(sk, skb->nh.iph->daddr) : NULL;
+       if (key) {
+               rep.opt[0] = htonl((TCPOPT_NOP << 24) |
+                                  (TCPOPT_NOP << 16) |
+                                  (TCPOPT_MD5SIG << 8) |
+                                  TCPOLEN_MD5SIG);
+               /* Update length and the length the header thinks exists */
+               arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
+               rep.th.doff = arg.iov[0].iov_len / 4;
+
+               tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1],
+                                       key,
+                                       skb->nh.iph->daddr,
+                                       skb->nh.iph->saddr,
+                                       &rep.th, IPPROTO_TCP,
+                                       arg.iov[0].iov_len);
+       }
+#endif
        arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
-                                     skb->nh.iph->saddr, /*XXX*/
+                                     skb->nh.iph->saddr, /* XXX */
                                      sizeof(struct tcphdr), IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 
-       ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
+       ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
 
        TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
        TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
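
When the offending segment carried no ACK, the reset built above
acknowledges SEG.SEQ + SEG.LEN, with SYN and FIN each occupying one unit
of sequence space (RFC 793). The rule in isolation, host byte order, with
a hypothetical demo_ helper:

	#include <stdint.h>

	/* Mirrors rep.th.ack_seq above: seq + syn + fin + payload length,
	 * so e.g. a bare SYN with seq 1000 is reset with ack_seq 1001. */
	static uint32_t demo_rst_ack_seq(uint32_t seq, int syn, int fin,
					 uint32_t payload_len)
	{
		return seq + syn + fin + payload_len;
	}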
@@ -1219,28 +618,37 @@ static void tcp_v4_send_reset(struct sk_buff *skb)
    outside socket context is ugly, certainly. What can I do?
  */
 
-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
+                           struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 ts)
 {
        struct tcphdr *th = skb->h.th;
        struct {
                struct tcphdr th;
-               u32 tsopt[3];
+               __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
+#ifdef CONFIG_TCP_MD5SIG
+                          + (TCPOLEN_MD5SIG_ALIGNED >> 2)
+#endif
+                       ];
        } rep;
        struct ip_reply_arg arg;
+#ifdef CONFIG_TCP_MD5SIG
+       struct tcp_md5sig_key *key;
+       struct tcp_md5sig_key tw_key;
+#endif
 
        memset(&rep.th, 0, sizeof(struct tcphdr));
-       memset(&arg, 0, sizeof arg);
+       memset(&arg, 0, sizeof(arg));
 
        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);
        if (ts) {
-               rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
-                                    (TCPOPT_TIMESTAMP << 8) |
-                                    TCPOLEN_TIMESTAMP);
-               rep.tsopt[1] = htonl(tcp_time_stamp);
-               rep.tsopt[2] = htonl(ts);
-               arg.iov[0].iov_len = sizeof(rep);
+               rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+                                  (TCPOPT_TIMESTAMP << 8) |
+                                  TCPOLEN_TIMESTAMP);
+               rep.opt[1] = htonl(tcp_time_stamp);
+               rep.opt[2] = htonl(ts);
+               arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
        }
 
        /* Swap the send and the receive. */
@@ -1252,8 +660,44 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
        rep.th.ack     = 1;
        rep.th.window  = htons(win);
 
+#ifdef CONFIG_TCP_MD5SIG
+       /*
+        * The SKB holds an incoming packet, but may not have a valid ->sk
+        * pointer. This is especially the case when we're dealing with a
+        * TIME_WAIT ack, because the sk structure is long gone, and only
+        * the tcp_timewait_sock remains. So the md5 key is stashed in that
+        * structure, and we use it in preference.  I believe that (twsk ||
+        * skb->sk) holds true, but we program defensively.
+        */
+       if (!twsk && skb->sk) {
+               key = tcp_v4_md5_do_lookup(skb->sk, skb->nh.iph->daddr);
+       } else if (twsk && twsk->tw_md5_keylen) {
+               tw_key.key = twsk->tw_md5_key;
+               tw_key.keylen = twsk->tw_md5_keylen;
+               key = &tw_key;
+       } else
+               key = NULL;
+
+       if (key) {
+               int offset = (ts) ? 3 : 0;
+
+               rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
+                                         (TCPOPT_NOP << 16) |
+                                         (TCPOPT_MD5SIG << 8) |
+                                         TCPOLEN_MD5SIG);
+               arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
+               rep.th.doff = arg.iov[0].iov_len/4;
+
+               tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset],
+                                       key,
+                                       skb->nh.iph->daddr,
+                                       skb->nh.iph->saddr,
+                                       &rep.th, IPPROTO_TCP,
+                                       arg.iov[0].iov_len);
+       }
+#endif
        arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
-                                     skb->nh.iph->saddr, /*XXX*/
+                                     skb->nh.iph->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 
@@ -1264,62 +708,38 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 
 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
+       struct inet_timewait_sock *tw = inet_twsk(sk);
+       struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-       tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
-                       tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
+       tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+                       tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+                       tcptw->tw_ts_recent);
 
-       tcp_tw_put(tw);
+       inet_twsk_put(tw);
 }
 
-static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
+static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
+                                 struct request_sock *req)
 {
-       tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
+       tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1,
+                       tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
                        req->ts_recent);
 }
 
-static struct dst_entry* tcp_v4_route_req(struct sock *sk,
-                                         struct open_request *req)
-{
-       struct rtable *rt;
-       struct ip_options *opt = req->af.v4_req.opt;
-       struct flowi fl = { .oif = sk->sk_bound_dev_if,
-                           .nl_u = { .ip4_u =
-                                     { .daddr = ((opt && opt->srr) ?
-                                                 opt->faddr :
-                                                 req->af.v4_req.rmt_addr),
-                                       .saddr = req->af.v4_req.loc_addr,
-                                       .tos = RT_CONN_FLAGS(sk) } },
-                           .proto = IPPROTO_TCP,
-                           .uli_u = { .ports =
-                                      { .sport = inet_sk(sk)->sport,
-                                        .dport = req->rmt_port } } };
-
-       if (ip_route_output_flow(&rt, &fl, sk, 0)) {
-               IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
-               return NULL;
-       }
-       if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
-               ip_rt_put(rt);
-               IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
-               return NULL;
-       }
-       return &rt->u.dst;
-}
-
 /*
  *     Send a SYN-ACK after having received an ACK.
- *     This still operates on a open_request only, not on a big
+ *     This still operates on a request_sock only, not on a big
  *     socket.
  */
-static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
+static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
                              struct dst_entry *dst)
 {
+       const struct inet_request_sock *ireq = inet_rsk(req);
        int err = -1;
        struct sk_buff * skb;
 
        /* First, grab a route. */
-       if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
+       if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
                goto out;
 
        skb = tcp_make_synack(sk, dst, req);
@@ -1328,16 +748,15 @@ static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
                struct tcphdr *th = skb->h.th;
 
                th->check = tcp_v4_check(th, skb->len,
-                                        req->af.v4_req.loc_addr,
-                                        req->af.v4_req.rmt_addr,
+                                        ireq->loc_addr,
+                                        ireq->rmt_addr,
                                         csum_partial((char *)th, skb->len,
                                                      skb->csum));
 
-               err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
-                                           req->af.v4_req.rmt_addr,
-                                           req->af.v4_req.opt);
-               if (err == NET_XMIT_CN)
-                       err = 0;
+               err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
+                                           ireq->rmt_addr,
+                                           ireq->opt);
+               err = net_xmit_eval(err);
        }
 
 out:
@@ -1346,15 +765,15 @@ out:
 }
 
 /*
- *     IPv4 open_request destructor.
+ *     IPv4 request_sock destructor.
  */
-static void tcp_v4_or_free(struct open_request *req)
+static void tcp_v4_reqsk_destructor(struct request_sock *req)
 {
-       if (req->af.v4_req.opt)
-               kfree(req->af.v4_req.opt);
+       kfree(inet_rsk(req)->opt);
 }
 
-static inline void syn_flood_warning(struct sk_buff *skb)
+#ifdef CONFIG_SYN_COOKIES
+static void syn_flood_warning(struct sk_buff *skb)
 {
        static unsigned long warntime;
 
@@ -1365,12 +784,13 @@ static inline void syn_flood_warning(struct sk_buff *skb)
                       ntohs(skb->h.th->dest));
        }
 }
+#endif
 
 /*
- * Save and compile IPv4 options into the open_request if needed.
+ * Save and compile IPv4 options into the request_sock if needed.
  */
-static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
-                                                    struct sk_buff *skb)
+static struct ip_options *tcp_v4_save_options(struct sock *sk,
+                                             struct sk_buff *skb)
 {
        struct ip_options *opt = &(IPCB(skb)->opt);
        struct ip_options *dopt = NULL;
@@ -1388,35 +808,451 @@ static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
        return dopt;
 }
 
+#ifdef CONFIG_TCP_MD5SIG
 /*
- * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
- * One SYN_RECV socket costs about 80bytes on a 32bit machine.
- * It would be better to replace it with a global counter for all sockets
- * but then some measure against one socket starving all other sockets
- * would be needed.
- *
- * It was 128 by default. Experiments with real servers show, that
- * it is absolutely not enough even at 100conn/sec. 256 cures most
- * of problems. This value is adjusted to 128 for very small machines
- * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
- * Further increasing requires to change hash table size.
+ * RFC2385 MD5 checksumming requires a mapping of
+ * IP address->MD5 Key.
+ * We need to maintain these in the sk structure.
  */
-int sysctl_max_syn_backlog = 256;
 
-struct or_calltable or_ipv4 = {
+/* Find the Key structure for an address.  */
+static struct tcp_md5sig_key *
+                       tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       int i;
+
+       if (!tp->md5sig_info || !tp->md5sig_info->entries4)
+               return NULL;
+       for (i = 0; i < tp->md5sig_info->entries4; i++) {
+               if (tp->md5sig_info->keys4[i].addr == addr)
+                       return (struct tcp_md5sig_key *)
+                                               &tp->md5sig_info->keys4[i];
+       }
+       return NULL;
+}
+
+struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
+                                        struct sock *addr_sk)
+{
+       return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr);
+}
+
+EXPORT_SYMBOL(tcp_v4_md5_lookup);
+
+static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
+                                                     struct request_sock *req)
+{
+       return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
+}
+
+/* This can be called on a newly created socket, from other files */
+int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
+                     u8 *newkey, u8 newkeylen)
+{
+       /* Add Key to the list */
+       struct tcp4_md5sig_key *key;
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct tcp4_md5sig_key *keys;
+
+       key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr);
+       if (key) {
+               /* Pre-existing entry - just update that one. */
+               kfree(key->key);
+               key->key = newkey;
+               key->keylen = newkeylen;
+       } else {
+               struct tcp_md5sig_info *md5sig;
+
+               if (!tp->md5sig_info) {
+                       tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
+                                                 GFP_ATOMIC);
+                       if (!tp->md5sig_info) {
+                               kfree(newkey);
+                               return -ENOMEM;
+                       }
+               }
+               if (tcp_alloc_md5sig_pool() == NULL) {
+                       kfree(newkey);
+                       return -ENOMEM;
+               }
+               md5sig = tp->md5sig_info;
+
+               if (md5sig->alloced4 == md5sig->entries4) {
+                       keys = kmalloc((sizeof(*keys) *
+                                       (md5sig->entries4 + 1)), GFP_ATOMIC);
+                       if (!keys) {
+                               kfree(newkey);
+                               tcp_free_md5sig_pool();
+                               return -ENOMEM;
+                       }
+
+                       if (md5sig->entries4)
+                               memcpy(keys, md5sig->keys4,
+                                      sizeof(*keys) * md5sig->entries4);
+
+                       /* Free old key list, and reference new one */
+                       if (md5sig->keys4)
+                               kfree(md5sig->keys4);
+                       md5sig->keys4 = keys;
+                       md5sig->alloced4++;
+               }
+               md5sig->entries4++;
+               md5sig->keys4[md5sig->entries4 - 1].addr   = addr;
+               md5sig->keys4[md5sig->entries4 - 1].key    = newkey;
+               md5sig->keys4[md5sig->entries4 - 1].keylen = newkeylen;
+       }
+       return 0;
+}
+
+EXPORT_SYMBOL(tcp_v4_md5_do_add);
+
+static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
+                              u8 *newkey, u8 newkeylen)
+{
+       return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr,
+                                newkey, newkeylen);
+}
+
+int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       int i;
+
+       for (i = 0; i < tp->md5sig_info->entries4; i++) {
+               if (tp->md5sig_info->keys4[i].addr == addr) {
+                       /* Free the key */
+                       kfree(tp->md5sig_info->keys4[i].key);
+                       tp->md5sig_info->entries4--;
+
+                       if (tp->md5sig_info->entries4 == 0) {
+                               kfree(tp->md5sig_info->keys4);
+                               tp->md5sig_info->keys4 = NULL;
+                               tp->md5sig_info->alloced4 = 0;
+                       } else if (tp->md5sig_info->entries4 != i) {
+                               /* Need to do some manipulation */
+                               memcpy(&tp->md5sig_info->keys4[i],
+                                      &tp->md5sig_info->keys4[i+1],
+                                      (tp->md5sig_info->entries4 - i) *
+                                       sizeof(struct tcp4_md5sig_key));
+                       }
+                       tcp_free_md5sig_pool();
+                       return 0;
+               }
+       }
+       return -ENOENT;
+}
+
+EXPORT_SYMBOL(tcp_v4_md5_do_del);
+
+static void tcp_v4_clear_md5_list(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       /* Free each key, then the key array itself,
+        * and finally drop our hold on the shared
+        * crypto pool.
+        */
+       if (tp->md5sig_info->entries4) {
+               int i;
+               for (i = 0; i < tp->md5sig_info->entries4; i++)
+                       kfree(tp->md5sig_info->keys4[i].key);
+               tp->md5sig_info->entries4 = 0;
+               tcp_free_md5sig_pool();
+       }
+       if (tp->md5sig_info->keys4) {
+               kfree(tp->md5sig_info->keys4);
+               tp->md5sig_info->keys4 = NULL;
+               tp->md5sig_info->alloced4  = 0;
+       }
+}
+
+static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
+                                int optlen)
+{
+       struct tcp_md5sig cmd;
+       struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
+       u8 *newkey;
+
+       if (optlen < sizeof(cmd))
+               return -EINVAL;
+
+       if (copy_from_user(&cmd, optval, sizeof(cmd)))
+               return -EFAULT;
+
+       if (sin->sin_family != AF_INET)
+               return -EINVAL;
+
+       /* tcpm_key is an array, so only a zero length can request
+        * deletion of the key.
+        */
+       if (!cmd.tcpm_keylen) {
+               if (!tcp_sk(sk)->md5sig_info)
+                       return -ENOENT;
+               return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
+       }
+
+       if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
+               return -EINVAL;
+
+       if (!tcp_sk(sk)->md5sig_info) {
+               struct tcp_sock *tp = tcp_sk(sk);
+               struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL);
+
+               if (!p)
+                       return -ENOMEM;
+
+               tp->md5sig_info = p;
+       }
+
+       newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
+       if (!newkey)
+               return -ENOMEM;
+       return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
+                                newkey, cmd.tcpm_keylen);
+}
+
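/* From userspace, the parser above is reached through the TCP_MD5SIG
 * socket option.  A minimal sketch, assuming the struct tcp_md5sig and
 * TCP_MD5SIG definitions from <netinet/tcp.h>; set_md5_key() is a
 * hypothetical helper, and a zero keylen would request deletion:
 */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int set_md5_key(int fd, struct in_addr peer,
		       const void *key, unsigned short keylen)
{
	struct tcp_md5sig md5;
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

	memset(&md5, 0, sizeof(md5));
	sin->sin_family = AF_INET;
	sin->sin_addr = peer;
	md5.tcpm_keylen = keylen;	/* <= TCP_MD5SIG_MAXKEYLEN */
	memcpy(md5.tcpm_key, key, keylen);

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}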
+static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+                                  __be32 saddr, __be32 daddr,
+                                  struct tcphdr *th, int protocol,
+                                  int tcplen)
+{
+       struct scatterlist sg[4];
+       __u16 data_len;
+       int block = 0;
+       __sum16 old_checksum;
+       struct tcp_md5sig_pool *hp;
+       struct tcp4_pseudohdr *bp;
+       struct hash_desc *desc;
+       int err;
+       unsigned int nbytes = 0;
+
+       /*
+        * Okay, so RFC2385 is turned on for this connection,
+        * so we need to generate the MD5 hash for the packet now.
+        */
+
+       hp = tcp_get_md5sig_pool();
+       if (!hp)
+               goto clear_hash_noput;
+
+       bp = &hp->md5_blk.ip4;
+       desc = &hp->md5_desc;
+
+       /*
+        * 1. the TCP pseudo-header (in the order: source IP address,
+        * destination IP address, zero-padded protocol number, and
+        * segment length)
+        */
+       bp->saddr = saddr;
+       bp->daddr = daddr;
+       bp->pad = 0;
+       bp->protocol = protocol;
+       bp->len = htons(tcplen);
+       sg_set_buf(&sg[block++], bp, sizeof(*bp));
+       nbytes += sizeof(*bp);
+
+       /* 2. the TCP header, excluding options, and assuming a
+        * checksum of zero.
+        */
+       old_checksum = th->check;
+       th->check = 0;
+       sg_set_buf(&sg[block++], th, sizeof(struct tcphdr));
+       nbytes += sizeof(struct tcphdr);
+
+       /* 3. the TCP segment data (if any) */
+       data_len = tcplen - (th->doff << 2);
+       if (data_len > 0) {
+               unsigned char *data = (unsigned char *)th + (th->doff << 2);
+               sg_set_buf(&sg[block++], data, data_len);
+               nbytes += data_len;
+       }
+
+       /* 4. an independently-specified key or password, known to both
+        * TCPs and presumably connection-specific
+        */
+       sg_set_buf(&sg[block++], key->key, key->keylen);
+       nbytes += key->keylen;
+
+       /* Now store the Hash into the packet */
+       err = crypto_hash_init(desc);
+       if (err)
+               goto clear_hash;
+       err = crypto_hash_update(desc, sg, nbytes);
+       if (err)
+               goto clear_hash;
+       err = crypto_hash_final(desc, md5_hash);
+       if (err)
+               goto clear_hash;
+
+       /* Reset header, and free up the crypto */
+       tcp_put_md5sig_pool();
+       th->check = old_checksum;
+
+out:
+       return 0;
+clear_hash:
+       tcp_put_md5sig_pool();
+clear_hash_noput:
+       memset(md5_hash, 0, 16);
+       goto out;
+}
+
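/* The digest above follows the RFC 2385 input order: pseudo-header,
 * fixed TCP header with a zeroed checksum (options excluded), segment
 * data, then the shared key.  A userspace sketch of the same order,
 * assuming OpenSSL's incremental MD5_* API; rfc2385_digest() is a
 * hypothetical helper:
 */
#include <stdint.h>
#include <stddef.h>
#include <openssl/md5.h>

struct pseudohdr {			/* mirrors tcp4_pseudohdr above */
	uint32_t saddr, daddr;		/* network byte order */
	uint8_t  pad, protocol;
	uint16_t len;			/* TCP segment length, htons() */
};

static void rfc2385_digest(uint8_t out[16], const struct pseudohdr *ph,
			   const void *th, size_t thlen,
			   const void *data, size_t dlen,
			   const void *key, size_t klen)
{
	MD5_CTX ctx;

	MD5_Init(&ctx);
	MD5_Update(&ctx, ph, sizeof(*ph));	/* 1. pseudo-header */
	MD5_Update(&ctx, th, thlen);		/* 2. header, check == 0 */
	if (dlen)
		MD5_Update(&ctx, data, dlen);	/* 3. segment data */
	MD5_Update(&ctx, key, klen);		/* 4. connection key */
	MD5_Final(out, &ctx);
}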
+int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+                        struct sock *sk,
+                        struct dst_entry *dst,
+                        struct request_sock *req,
+                        struct tcphdr *th, int protocol,
+                        int tcplen)
+{
+       __be32 saddr, daddr;
+
+       if (sk) {
+               saddr = inet_sk(sk)->saddr;
+               daddr = inet_sk(sk)->daddr;
+       } else {
+               struct rtable *rt = (struct rtable *)dst;
+               BUG_ON(!rt);
+               saddr = rt->rt_src;
+               daddr = rt->rt_dst;
+       }
+       return tcp_v4_do_calc_md5_hash(md5_hash, key,
+                                      saddr, daddr,
+                                      th, protocol, tcplen);
+}
+
+EXPORT_SYMBOL(tcp_v4_calc_md5_hash);
+
+static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
+{
+       /*
+        * This gets called for each TCP segment that arrives
+        * so we want to be efficient.
+        * We have 3 drop cases:
+        * o No MD5 hash and one was expected.
+        * o An MD5 hash where none was expected.
+        * o An MD5 hash and it's wrong.
+        */
+       __u8 *hash_location = NULL;
+       struct tcp_md5sig_key *hash_expected;
+       struct iphdr *iph = skb->nh.iph;
+       struct tcphdr *th = skb->h.th;
+       int length = (th->doff << 2) - sizeof(struct tcphdr);
+       int genhash;
+       unsigned char *ptr;
+       unsigned char newhash[16];
+
+       hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
+
+       /*
+        * If the TCP option length is less than the TCP_MD5SIG
+        * option length, then we can shortcut.
+        */
+       if (length < TCPOLEN_MD5SIG) {
+               if (hash_expected)
+                       return 1;
+               else
+                       return 0;
+       }
+
+       /* Okay, we can't shortcut - we have to grub through the options */
+       ptr = (unsigned char *)(th + 1);
+       while (length > 0) {
+               int opcode = *ptr++;
+               int opsize;
+
+               switch (opcode) {
+               case TCPOPT_EOL:
+                       goto done_opts;
+               case TCPOPT_NOP:
+                       length--;
+                       continue;
+               default:
+                       opsize = *ptr++;
+                       if (opsize < 2)
+                               goto done_opts;
+                       if (opsize > length)
+                               goto done_opts;
+
+                       if (opcode == TCPOPT_MD5SIG) {
+                               hash_location = ptr;
+                               goto done_opts;
+                       }
+               }
+               ptr += opsize - 2;
+               length -= opsize;
+       }
+done_opts:
+       /* We've parsed the options - do we have a hash? */
+       if (!hash_expected && !hash_location)
+               return 0;
+
+       if (hash_expected && !hash_location) {
+               LIMIT_NETDEBUG(KERN_INFO "MD5 Hash expected but NOT found "
+                              "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
+                              NIPQUAD(iph->saddr), ntohs(th->source),
+                              NIPQUAD(iph->daddr), ntohs(th->dest));
+               return 1;
+       }
+
+       if (!hash_expected && hash_location) {
+               LIMIT_NETDEBUG(KERN_INFO "MD5 Hash NOT expected but found "
+                              "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
+                              NIPQUAD(iph->saddr), ntohs(th->source),
+                              NIPQUAD(iph->daddr), ntohs(th->dest));
+               return 1;
+       }
+
+       /* Both hash_expected and hash_location are set, so we
+        * need to compute and compare the hash.
+        */
+       genhash = tcp_v4_do_calc_md5_hash(newhash,
+                                         hash_expected,
+                                         iph->saddr, iph->daddr,
+                                         th, sk->sk_protocol,
+                                         skb->len);
+
+       if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+               if (net_ratelimit()) {
+                       printk(KERN_INFO "MD5 Hash failed for "
+                              "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n",
+                              NIPQUAD(iph->saddr), ntohs(th->source),
+                              NIPQUAD(iph->daddr), ntohs(th->dest),
+                              genhash ? " tcp_v4_calc_md5_hash failed" : "");
+               }
+               return 1;
+       }
+       return 0;
+}
+
+#endif
+
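/* The option walk in tcp_v4_inbound_md5_hash() generalizes to any
 * option kind.  A standalone sketch (tcp_find_option() is a
 * hypothetical helper; ptr points just past the fixed header and
 * length is (doff << 2) - sizeof(struct tcphdr)):
 */
#include <stdint.h>
#include <stddef.h>
#include <netinet/tcp.h>	/* TCPOPT_EOL, TCPOPT_NOP */

static const uint8_t *tcp_find_option(const uint8_t *ptr, int length,
				      int kind)
{
	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		if (opcode == TCPOPT_EOL)
			break;
		if (opcode == TCPOPT_NOP) {	/* one byte of padding */
			length--;
			continue;
		}
		opsize = *ptr++;
		if (opsize < 2 || opsize > length)
			break;			/* malformed option list */
		if (opcode == kind)
			return ptr;		/* start of option payload */
		ptr += opsize - 2;
		length -= opsize;
	}
	return NULL;
}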
+struct request_sock_ops tcp_request_sock_ops __read_mostly = {
        .family         =       PF_INET,
+       .obj_size       =       sizeof(struct tcp_request_sock),
        .rtx_syn_ack    =       tcp_v4_send_synack,
-       .send_ack       =       tcp_v4_or_send_ack,
-       .destructor     =       tcp_v4_or_free,
+       .send_ack       =       tcp_v4_reqsk_send_ack,
+       .destructor     =       tcp_v4_reqsk_destructor,
        .send_reset     =       tcp_v4_send_reset,
 };
 
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+       .md5_lookup     =       tcp_v4_reqsk_md5_lookup,
+};
+#endif
+
+static struct timewait_sock_ops tcp_timewait_sock_ops = {
+       .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
+       .twsk_unique    = tcp_twsk_unique,
+       .twsk_destructor = tcp_twsk_destructor,
+};
+
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
+       struct inet_request_sock *ireq;
        struct tcp_options_received tmp_opt;
-       struct open_request *req;
-       __u32 saddr = skb->nh.iph->saddr;
-       __u32 daddr = skb->nh.iph->daddr;
+       struct request_sock *req;
+       __be32 saddr = skb->nh.iph->saddr;
+       __be32 daddr = skb->nh.iph->daddr;
        __u32 isn = TCP_SKB_CB(skb)->when;
        struct dst_entry *dst = NULL;
 #ifdef CONFIG_SYN_COOKIES
@@ -1434,7 +1270,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         * limitations, they conserve resources and peer is
         * evidently real one.
         */
-       if (tcp_synq_is_full(sk) && !isn) {
+       if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
 #ifdef CONFIG_SYN_COOKIES
                if (sysctl_tcp_syncookies) {
                        want_cookie = 1;
@@ -1448,13 +1284,17 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         * clogging syn queue with openreqs with exponentially increasing
         * timeout.
         */
-       if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
+       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;
 
-       req = tcp_openreq_alloc();
+       req = reqsk_alloc(&tcp_request_sock_ops);
        if (!req)
                goto drop;
 
+#ifdef CONFIG_TCP_MD5SIG
+       tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
+#endif
+
        tcp_clear_options(&tmp_opt);
        tmp_opt.mss_clamp = 536;
        tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;
@@ -1479,10 +1319,13 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 
        tcp_openreq_init(req, &tmp_opt, skb);
 
-       req->af.v4_req.loc_addr = daddr;
-       req->af.v4_req.rmt_addr = saddr;
-       req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
-       req->class = &or_ipv4;
+       if (security_inet_conn_request(sk, skb, req))
+               goto drop_and_free;
+
+       ireq = inet_rsk(req);
+       ireq->loc_addr = daddr;
+       ireq->rmt_addr = saddr;
+       ireq->opt = tcp_v4_save_options(sk, skb);
        if (!want_cookie)
                TCP_ECN_create_request(req, skb->h.th);
 
@@ -1504,8 +1347,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                 * are made in the function processing timewait state.
                 */
                if (tmp_opt.saw_tstamp &&
-                   sysctl_tcp_tw_recycle &&
-                   (dst = tcp_v4_route_req(sk, req)) != NULL &&
+                   tcp_death_row.sysctl_tw_recycle &&
+                   (dst = inet_csk_route_req(sk, req)) != NULL &&
                    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
                    peer->v4daddr == saddr) {
                        if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
@@ -1518,7 +1361,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                }
                /* Kill the following clause, if you dislike this way. */
                else if (!sysctl_tcp_syncookies &&
-                        (sysctl_max_syn_backlog - tcp_synq_len(sk) <
+                        (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
                          (sysctl_max_syn_backlog >> 2)) &&
                         (!peer || !peer->tcp_ts_stamp) &&
                         (!dst || !dst_metric(dst, RTAX_RTT))) {
@@ -1529,34 +1372,31 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                         * to destinations, already remembered
                         * to the moment of synflood.
                         */
-                       NETDEBUG(if (net_ratelimit()) \
-                                       printk(KERN_DEBUG "TCP: drop open "
-                                                         "request from %u.%u."
-                                                         "%u.%u/%u\n", \
-                                              NIPQUAD(saddr),
-                                              ntohs(skb->h.th->source)));
+                       LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
+                                      "request from %u.%u.%u.%u/%u\n",
+                                      NIPQUAD(saddr),
+                                      ntohs(skb->h.th->source));
                        dst_release(dst);
                        goto drop_and_free;
                }
 
-               isn = tcp_v4_init_sequence(sk, skb);
+               isn = tcp_v4_init_sequence(skb);
        }
-       req->snt_isn = isn;
+       tcp_rsk(req)->snt_isn = isn;
 
        if (tcp_v4_send_synack(sk, req, dst))
                goto drop_and_free;
 
        if (want_cookie) {
-               tcp_openreq_free(req);
+               reqsk_free(req);
        } else {
-               tcp_v4_synq_add(sk, req);
+               inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
        }
        return 0;
 
 drop_and_free:
-       tcp_openreq_free(req);
+       reqsk_free(req);
 drop:
-       TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
        return 0;
 }
 
@@ -1566,46 +1406,68 @@ drop:
  * now create the new socket.
  */
 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
-                                 struct open_request *req,
+                                 struct request_sock *req,
                                  struct dst_entry *dst)
 {
+       struct inet_request_sock *ireq;
        struct inet_sock *newinet;
        struct tcp_sock *newtp;
        struct sock *newsk;
+#ifdef CONFIG_TCP_MD5SIG
+       struct tcp_md5sig_key *key;
+#endif
 
        if (sk_acceptq_is_full(sk))
                goto exit_overflow;
 
-       if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
+       if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
                goto exit;
 
        newsk = tcp_create_openreq_child(sk, req, skb);
        if (!newsk)
                goto exit;
 
-       newsk->sk_dst_cache = dst;
-       tcp_v4_setup_caps(newsk, dst);
+       newsk->sk_gso_type = SKB_GSO_TCPV4;
+       sk_setup_caps(newsk, dst);
 
        newtp                 = tcp_sk(newsk);
        newinet               = inet_sk(newsk);
-       newinet->daddr        = req->af.v4_req.rmt_addr;
-       newinet->rcv_saddr    = req->af.v4_req.loc_addr;
-       newinet->saddr        = req->af.v4_req.loc_addr;
-       newinet->opt          = req->af.v4_req.opt;
-       req->af.v4_req.opt    = NULL;
-       newinet->mc_index     = tcp_v4_iif(skb);
+       ireq                  = inet_rsk(req);
+       newinet->daddr        = ireq->rmt_addr;
+       newinet->rcv_saddr    = ireq->loc_addr;
+       newinet->saddr        = ireq->loc_addr;
+       newinet->opt          = ireq->opt;
+       ireq->opt             = NULL;
+       newinet->mc_index     = inet_iif(skb);
        newinet->mc_ttl       = skb->nh.iph->ttl;
-       newtp->ext_header_len = 0;
+       inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (newinet->opt)
-               newtp->ext_header_len = newinet->opt->optlen;
+               inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
        newinet->id = newtp->write_seq ^ jiffies;
 
+       tcp_mtup_init(newsk);
        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
        tcp_initialize_rcv_mss(newsk);
 
-       __tcp_v4_hash(newsk, 0);
-       __tcp_inherit_port(sk, newsk);
+#ifdef CONFIG_TCP_MD5SIG
+       /* Copy over the MD5 key from the original socket */
+       if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
+               /*
+                * We're using one, so create a matching key
+                * on the newsk structure. If we fail to get
+                * memory, then we end up not copying the key
+                * across. Shucks.
+                */
+               char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
+               if (newkey != NULL)
+                       tcp_v4_md5_do_add(newsk, newinet->daddr,
+                                         newkey, key->keylen);
+       }
+#endif
+
+       __inet_hash(&tcp_hashinfo, newsk, 0);
+       __inet_inherit_port(&tcp_hashinfo, sk, newsk);
 
        return newsk;
 
@@ -1621,27 +1483,24 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 {
        struct tcphdr *th = skb->h.th;
        struct iphdr *iph = skb->nh.iph;
-       struct tcp_sock *tp = tcp_sk(sk);
        struct sock *nsk;
-       struct open_request **prev;
+       struct request_sock **prev;
        /* Find possible connection requests. */
-       struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
-                                                    iph->saddr, iph->daddr);
+       struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
+                                                      iph->saddr, iph->daddr);
        if (req)
                return tcp_check_req(sk, skb, req, prev);
 
-       nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
-                                         th->source,
-                                         skb->nh.iph->daddr,
-                                         ntohs(th->dest),
-                                         tcp_v4_iif(skb));
+       nsk = inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr,
+                                     th->source, skb->nh.iph->daddr,
+                                     th->dest, inet_iif(skb));
 
        if (nsk) {
                if (nsk->sk_state != TCP_TIME_WAIT) {
                        bh_lock_sock(nsk);
                        return nsk;
                }
-               tcp_tw_put((struct tcp_tw_bucket *)nsk);
+               inet_twsk_put(inet_twsk(nsk));
                return NULL;
        }
 
@@ -1652,28 +1511,21 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
        return sk;
 }
 
-static int tcp_v4_checksum_init(struct sk_buff *skb)
+static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
 {
-       if (skb->ip_summed == CHECKSUM_HW) {
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       if (skb->ip_summed == CHECKSUM_COMPLETE) {
                if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
-                                 skb->nh.iph->daddr, skb->csum))
+                                 skb->nh.iph->daddr, skb->csum)) {
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
                        return 0;
-
-               NETDEBUG(if (net_ratelimit())
-                               printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
-               skb->ip_summed = CHECKSUM_NONE;
+               }
        }
+
+       skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, skb->nh.iph->daddr,
+                                      skb->len, IPPROTO_TCP, 0);
+
        if (skb->len <= 76) {
-               if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
-                                skb->nh.iph->daddr,
-                                skb_checksum(skb, 0, skb->len, 0)))
-                       return -1;
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-       } else {
-               skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
-                                         skb->nh.iph->saddr,
-                                         skb->nh.iph->daddr, 0);
+               return __skb_checksum_complete(skb);
        }
        return 0;
 }
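/* What the checks above ultimately verify: a 16-bit ones'-complement
 * sum over the IPv4 pseudo-header plus the whole segment folds to zero
 * for an intact packet.  A sketch (tcp_csum_ok() is a hypothetical
 * helper; saddr/daddr are taken in host byte order, i.e. after ntohl()):
 */
#include <stdint.h>
#include <stddef.h>
#include <netinet/in.h>		/* IPPROTO_TCP */

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)	/* fold carries back into the low word */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static int tcp_csum_ok(uint32_t saddr, uint32_t daddr,
		       const uint8_t *seg, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	sum += (saddr >> 16) + (saddr & 0xffff);	/* pseudo-header */
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += IPPROTO_TCP + (uint32_t)len;

	for (i = 0; i + 1 < len; i += 2)		/* big-endian words */
		sum += (uint32_t)((seg[i] << 8) | seg[i + 1]);
	if (len & 1)					/* odd trailing byte */
		sum += (uint32_t)(seg[len - 1] << 8);

	return csum_fold(sum) == 0;
}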
@@ -1689,10 +1541,24 @@ static int tcp_v4_checksum_init(struct sk_buff *skb)
  */
 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 {
+       struct sock *rsk;
+#ifdef CONFIG_TCP_MD5SIG
+       /*
+        * We really want to reject the packet as early as possible
+        * if:
+        *  o We're expecting an MD5'd packet and there is no MD5 TCP option
+        *  o There is an MD5 option and we're not expecting one
+        */
+       if (tcp_v4_inbound_md5_hash(sk, skb))
+               goto discard;
+#endif
+
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                TCP_CHECK_TIMER(sk);
-               if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
+               if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) {
+                       rsk = sk;
                        goto reset;
+               }
                TCP_CHECK_TIMER(sk);
                return 0;
        }
@@ -1706,20 +1572,24 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
                        goto discard;
 
                if (nsk != sk) {
-                       if (tcp_child_process(sk, nsk, skb))
+                       if (tcp_child_process(sk, nsk, skb)) {
+                               rsk = nsk;
                                goto reset;
+                       }
                        return 0;
                }
        }
 
        TCP_CHECK_TIMER(sk);
-       if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
+       if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) {
+               rsk = sk;
                goto reset;
+       }
        TCP_CHECK_TIMER(sk);
        return 0;
 
 reset:
-       tcp_v4_send_reset(skb);
+       tcp_v4_send_reset(rsk, skb);
 discard:
        kfree_skb(skb);
        /* Be careful here. If this function gets more complicated and
@@ -1762,10 +1632,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
 
        /* An explanation is required here, I think.
         * Packet length and doff are validated by header prediction,
-        * provided case of th->doff==0 is elimineted.
+        * provided case of th->doff==0 is eliminated.
         * So, we defer the checks. */
        if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
-            tcp_v4_checksum_init(skb) < 0))
+            tcp_v4_checksum_init(skb)))
                goto bad_packet;
 
        th = skb->h.th;
@@ -1777,9 +1647,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
        TCP_SKB_CB(skb)->flags   = skb->nh.iph->tos;
        TCP_SKB_CB(skb)->sacked  = 0;
 
-       sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
-                            skb->nh.iph->daddr, ntohs(th->dest),
-                            tcp_v4_iif(skb));
+       sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source,
+                          skb->nh.iph->daddr, th->dest,
+                          inet_iif(skb));
 
        if (!sk)
                goto no_tcp_socket;
@@ -1790,17 +1660,28 @@ process:
 
        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
+       nf_reset(skb);
 
-       if (sk_filter(sk, skb, 0))
+       if (sk_filter(sk, skb))
                goto discard_and_relse;
 
        skb->dev = NULL;
 
-       bh_lock_sock(sk);
+       bh_lock_sock_nested(sk);
        ret = 0;
        if (!sock_owned_by_user(sk)) {
-               if (!tcp_prequeue(sk, skb))
+#ifdef CONFIG_NET_DMA
+               struct tcp_sock *tp = tcp_sk(sk);
+               if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
+                       tp->ucopy.dma_chan = get_softnet_dma();
+               if (tp->ucopy.dma_chan)
                        ret = tcp_v4_do_rcv(sk, skb);
+               else
+#endif
+               {
+                       if (!tcp_prequeue(sk, skb))
+                               ret = tcp_v4_do_rcv(sk, skb);
+               }
        } else
                sk_add_backlog(sk, skb);
        bh_unlock_sock(sk);
@@ -1817,7 +1698,7 @@ no_tcp_socket:
 bad_packet:
                TCP_INC_STATS_BH(TCP_MIB_INERRS);
        } else {
-               tcp_v4_send_reset(skb);
+               tcp_v4_send_reset(NULL, skb);
        }
 
 discard_it:
@@ -1831,24 +1712,24 @@ discard_and_relse:
 
 do_time_wait:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-               tcp_tw_put((struct tcp_tw_bucket *) sk);
+               inet_twsk_put(inet_twsk(sk));
                goto discard_it;
        }
 
        if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
                TCP_INC_STATS_BH(TCP_MIB_INERRS);
-               tcp_tw_put((struct tcp_tw_bucket *) sk);
+               inet_twsk_put(inet_twsk(sk));
                goto discard_it;
        }
-       switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
-                                          skb, th, skb->len)) {
+       switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
        case TCP_TW_SYN: {
-               struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
-                                                         ntohs(th->dest),
-                                                         tcp_v4_iif(skb));
+               struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
+                                                       skb->nh.iph->daddr,
+                                                       th->dest,
+                                                       inet_iif(skb));
                if (sk2) {
-                       tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
-                       tcp_tw_put((struct tcp_tw_bucket *)sk);
+                       inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
+                       inet_twsk_put(inet_twsk(sk));
                        sk = sk2;
                        goto process;
                }
@@ -1864,122 +1745,6 @@ do_time_wait:
        goto discard_it;
 }
 
-/* With per-bucket locks this operation is not-atomic, so that
- * this version is not worse.
- */
-static void __tcp_v4_rehash(struct sock *sk)
-{
-       sk->sk_prot->unhash(sk);
-       sk->sk_prot->hash(sk);
-}
-
-static int tcp_v4_reselect_saddr(struct sock *sk)
-{
-       struct inet_sock *inet = inet_sk(sk);
-       int err;
-       struct rtable *rt;
-       __u32 old_saddr = inet->saddr;
-       __u32 new_saddr;
-       __u32 daddr = inet->daddr;
-
-       if (inet->opt && inet->opt->srr)
-               daddr = inet->opt->faddr;
-
-       /* Query new route. */
-       err = ip_route_connect(&rt, daddr, 0,
-                              RT_CONN_FLAGS(sk),
-                              sk->sk_bound_dev_if,
-                              IPPROTO_TCP,
-                              inet->sport, inet->dport, sk);
-       if (err)
-               return err;
-
-       __sk_dst_set(sk, &rt->u.dst);
-       tcp_v4_setup_caps(sk, &rt->u.dst);
-
-       new_saddr = rt->rt_src;
-
-       if (new_saddr == old_saddr)
-               return 0;
-
-       if (sysctl_ip_dynaddr > 1) {
-               printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
-                                "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
-                      NIPQUAD(old_saddr),
-                      NIPQUAD(new_saddr));
-       }
-
-       inet->saddr = new_saddr;
-       inet->rcv_saddr = new_saddr;
-
-       /* XXX The only one ugly spot where we need to
-        * XXX really change the sockets identity after
-        * XXX it has entered the hashes. -DaveM
-        *
-        * Besides that, it does not check for connection
-        * uniqueness. Wait for troubles.
-        */
-       __tcp_v4_rehash(sk);
-       return 0;
-}
-
-int tcp_v4_rebuild_header(struct sock *sk)
-{
-       struct inet_sock *inet = inet_sk(sk);
-       struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
-       u32 daddr;
-       int err;
-
-       /* Route is OK, nothing to do. */
-       if (rt)
-               return 0;
-
-       /* Reroute. */
-       daddr = inet->daddr;
-       if (inet->opt && inet->opt->srr)
-               daddr = inet->opt->faddr;
-
-       {
-               struct flowi fl = { .oif = sk->sk_bound_dev_if,
-                                   .nl_u = { .ip4_u =
-                                             { .daddr = daddr,
-                                               .saddr = inet->saddr,
-                                               .tos = RT_CONN_FLAGS(sk) } },
-                                   .proto = IPPROTO_TCP,
-                                   .uli_u = { .ports =
-                                              { .sport = inet->sport,
-                                                .dport = inet->dport } } };
-                                               
-               err = ip_route_output_flow(&rt, &fl, sk, 0);
-       }
-       if (!err) {
-               __sk_dst_set(sk, &rt->u.dst);
-               tcp_v4_setup_caps(sk, &rt->u.dst);
-               return 0;
-       }
-
-       /* Routing failed... */
-       sk->sk_route_caps = 0;
-
-       if (!sysctl_ip_dynaddr ||
-           sk->sk_state != TCP_SYN_SENT ||
-           (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
-           (err = tcp_v4_reselect_saddr(sk)) != 0)
-               sk->sk_err_soft = -err;
-
-       return err;
-}
-
-static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
-{
-       struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
-       struct inet_sock *inet = inet_sk(sk);
-
-       sin->sin_family         = AF_INET;
-       sin->sin_addr.s_addr    = inet->daddr;
-       sin->sin_port           = inet->dport;
-}
-
 /* VJ's idea. Save last timestamp seen from this destination
  * and hold it at least for normal timewait interval to use for duplicate
  * segment detection in subsequent connections, before they enter synchronized
@@ -2018,18 +1783,18 @@ int tcp_v4_remember_stamp(struct sock *sk)
        return 0;
 }
 
-int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
+int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
 {
-       struct inet_peer *peer = NULL;
-
-       peer = inet_getpeer(tw->tw_daddr, 1);
+       struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
 
        if (peer) {
-               if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
+               const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
+
+               if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
                    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
-                    peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
-                       peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
-                       peer->tcp_ts = tw->tw_ts_recent;
+                    peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
+                       peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
+                       peer->tcp_ts       = tcptw->tw_ts_recent;
                }
                inet_putpeer(peer);
                return 1;
@@ -2038,32 +1803,46 @@ int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
        return 0;
 }
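/* The (s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 test above is
 * serial-number arithmetic: TCP timestamps are 32 bits and wrap, so
 * "not newer" must be decided through a signed difference.  In
 * isolation:
 */
#include <stdint.h>

static inline int ts_not_newer(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;	/* a at or before b, mod 2^32 */
}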
 
-struct tcp_func ipv4_specific = {
-       .queue_xmit     =       ip_queue_xmit,
-       .send_check     =       tcp_v4_send_check,
-       .rebuild_header =       tcp_v4_rebuild_header,
-       .conn_request   =       tcp_v4_conn_request,
-       .syn_recv_sock  =       tcp_v4_syn_recv_sock,
-       .remember_stamp =       tcp_v4_remember_stamp,
-       .net_header_len =       sizeof(struct iphdr),
-       .setsockopt     =       ip_setsockopt,
-       .getsockopt     =       ip_getsockopt,
-       .addr2sockaddr  =       v4_addr2sockaddr,
-       .sockaddr_len   =       sizeof(struct sockaddr_in),
+struct inet_connection_sock_af_ops ipv4_specific = {
+       .queue_xmit        = ip_queue_xmit,
+       .send_check        = tcp_v4_send_check,
+       .rebuild_header    = inet_sk_rebuild_header,
+       .conn_request      = tcp_v4_conn_request,
+       .syn_recv_sock     = tcp_v4_syn_recv_sock,
+       .remember_stamp    = tcp_v4_remember_stamp,
+       .net_header_len    = sizeof(struct iphdr),
+       .setsockopt        = ip_setsockopt,
+       .getsockopt        = ip_getsockopt,
+       .addr2sockaddr     = inet_csk_addr2sockaddr,
+       .sockaddr_len      = sizeof(struct sockaddr_in),
+#ifdef CONFIG_COMPAT
+       .compat_setsockopt = compat_ip_setsockopt,
+       .compat_getsockopt = compat_ip_getsockopt,
+#endif
 };
 
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
+       .md5_lookup             = tcp_v4_md5_lookup,
+       .calc_md5_hash          = tcp_v4_calc_md5_hash,
+       .md5_add                = tcp_v4_md5_add_func,
+       .md5_parse              = tcp_v4_parse_md5_keys,
+};
+#endif
+
 /* NOTE: A lot of things set to zero explicitly by call to
  *       sk_alloc() so need not be done here.
  */
 static int tcp_v4_init_sock(struct sock *sk)
 {
+       struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 
        skb_queue_head_init(&tp->out_of_order_queue);
        tcp_init_xmit_timers(sk);
        tcp_prequeue_init(tp);
 
-       tp->rto  = TCP_TIMEOUT_INIT;
+       icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->mdev = TCP_TIMEOUT_INIT;
 
        /* So many TCP implementations out there (incorrectly) count the
@@ -2078,16 +1857,21 @@ static int tcp_v4_init_sock(struct sock *sk)
         */
        tp->snd_ssthresh = 0x7fffffff;  /* Infinity */
        tp->snd_cwnd_clamp = ~0;
-       tp->mss_cache_std = tp->mss_cache = 536;
+       tp->mss_cache = 536;
 
        tp->reordering = sysctl_tcp_reordering;
+       icsk->icsk_ca_ops = &tcp_init_congestion_ops;
 
        sk->sk_state = TCP_CLOSE;
 
        sk->sk_write_space = sk_stream_write_space;
        sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
-       tp->af_specific = &ipv4_specific;
+       icsk->icsk_af_ops = &ipv4_specific;
+       icsk->icsk_sync_mss = tcp_sync_mss;
+#ifdef CONFIG_TCP_MD5SIG
+       tp->af_specific = &tcp_sock_ipv4_specific;
+#endif
 
        sk->sk_sndbuf = sysctl_tcp_wmem[1];
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];
@@ -2103,18 +1887,34 @@ int tcp_v4_destroy_sock(struct sock *sk)
 
        tcp_clear_xmit_timers(sk);
 
+       tcp_cleanup_congestion_control(sk);
+
        /* Cleanup up the write buffer. */
        sk_stream_writequeue_purge(sk);
 
        /* Cleans up our, hopefully empty, out_of_order_queue. */
        __skb_queue_purge(&tp->out_of_order_queue);
 
+#ifdef CONFIG_TCP_MD5SIG
+       /* Clean up the MD5 key list, if any */
+       if (tp->md5sig_info) {
+               tcp_v4_clear_md5_list(sk);
+               kfree(tp->md5sig_info);
+               tp->md5sig_info = NULL;
+       }
+#endif
+
+#ifdef CONFIG_NET_DMA
+       /* Cleans up our sk_async_wait_queue */
+       __skb_queue_purge(&sk->sk_async_wait_queue);
+#endif
+
        /* Clean prequeue, it must be empty really */
        __skb_queue_purge(&tp->ucopy.prequeue);
 
        /* Clean up a referenced TCP bind bucket. */
-       if (tp->bind_hash)
-               tcp_put_port(sk);
+       if (inet_csk(sk)->icsk_bind_hash)
+               inet_put_port(&tcp_hashinfo, sk);
 
        /*
         * If sendmsg cached page exists, toss it.
@@ -2134,13 +1934,13 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock);
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCP sock list dumping. */
 
-static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
+static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
 {
        return hlist_empty(head) ? NULL :
-               list_entry(head->first, struct tcp_tw_bucket, tw_node);
+               list_entry(head->first, struct inet_timewait_sock, tw_node);
 }
 
-static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
+static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
 {
        return tw->tw_node.next ?
                hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
@@ -2148,67 +1948,67 @@ static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
 
 static void *listening_get_next(struct seq_file *seq, void *cur)
 {
-       struct tcp_sock *tp;
+       struct inet_connection_sock *icsk;
        struct hlist_node *node;
        struct sock *sk = cur;
        struct tcp_iter_state* st = seq->private;
 
        if (!sk) {
                st->bucket = 0;
-               sk = sk_head(&tcp_listening_hash[0]);
+               sk = sk_head(&tcp_hashinfo.listening_hash[0]);
                goto get_sk;
        }
 
        ++st->num;
 
        if (st->state == TCP_SEQ_STATE_OPENREQ) {
-               struct open_request *req = cur;
+               struct request_sock *req = cur;
 
-               tp = tcp_sk(st->syn_wait_sk);
+               icsk = inet_csk(st->syn_wait_sk);
                req = req->dl_next;
                while (1) {
                        while (req) {
                                vxdprintk(VXD_CBIT(net, 6),
                                        "sk,req: %p [#%d] (from %d)", req->sk,
-                                       (req->sk)?req->sk->sk_xid:0, vx_current_xid());
+                                       (req->sk)?req->sk->sk_nid:0, nx_current_nid());
                                if (req->sk &&
-                                       !vx_check(req->sk->sk_xid, VX_IDENT|VX_WATCH))
+                                       !nx_check(req->sk->sk_nid, VS_WATCH_P|VS_IDENT))
                                        continue;
-                               if (req->class->family == st->family) {
+                               if (req->rsk_ops->family == st->family) {
                                        cur = req;
                                        goto out;
                                }
                                req = req->dl_next;
                        }
-                       if (++st->sbucket >= TCP_SYNQ_HSIZE)
+                       if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
                                break;
 get_req:
-                       req = tp->listen_opt->syn_table[st->sbucket];
+                       req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
                }
                sk        = sk_next(st->syn_wait_sk);
                st->state = TCP_SEQ_STATE_LISTENING;
-               read_unlock_bh(&tp->syn_wait_lock);
+               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        } else {
-               tp = tcp_sk(sk);
-               read_lock_bh(&tp->syn_wait_lock);
-               if (tp->listen_opt && tp->listen_opt->qlen)
+               icsk = inet_csk(sk);
+               read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               if (reqsk_queue_len(&icsk->icsk_accept_queue))
                        goto start_req;
-               read_unlock_bh(&tp->syn_wait_lock);
+               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                sk = sk_next(sk);
        }
 get_sk:
        sk_for_each_from(sk, node) {
                vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
-                       sk, sk->sk_xid, vx_current_xid());
-               if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
+                       sk, sk->sk_nid, nx_current_nid());
+               if (!nx_check(sk->sk_nid, VS_WATCH_P|VS_IDENT))
                        continue;
                if (sk->sk_family == st->family) {
                        cur = sk;
                        goto out;
                }
-               tp = tcp_sk(sk);
-               read_lock_bh(&tp->syn_wait_lock);
-               if (tp->listen_opt && tp->listen_opt->qlen) {
+               icsk = inet_csk(sk);
+               read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
 start_req:
                        st->uid         = sock_i_uid(sk);
                        st->syn_wait_sk = sk;
@@ -2216,10 +2016,10 @@ start_req:
                        st->sbucket     = 0;
                        goto get_req;
                }
-               read_unlock_bh(&tp->syn_wait_lock);
+               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        }
-       if (++st->bucket < TCP_LHTABLE_SIZE) {
-               sk = sk_head(&tcp_listening_hash[st->bucket]);
+       if (++st->bucket < INET_LHTABLE_SIZE) {
+               sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
                goto get_sk;
        }
        cur = NULL;
@@ -2243,20 +2043,20 @@ static void *established_get_first(struct seq_file *seq)
        struct tcp_iter_state* st = seq->private;
        void *rc = NULL;
 
-       for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
+       for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
                struct sock *sk;
                struct hlist_node *node;
-               struct tcp_tw_bucket *tw;
+               struct inet_timewait_sock *tw;
 
                /* We can reschedule _before_ having picked the target: */
                cond_resched_softirq();
 
-               read_lock(&tcp_ehash[st->bucket].lock);
-               sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
+               read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+               sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
                        vxdprintk(VXD_CBIT(net, 6),
                                "sk,egf: %p [#%d] (from %d)",
-                               sk, sk->sk_xid, vx_current_xid());
-                       if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
+                               sk, sk->sk_nid, nx_current_nid());
+                       if (!nx_check(sk->sk_nid, VS_WATCH_P|VS_IDENT))
                                continue;
                        if (sk->sk_family != st->family)
                                continue;
@@ -2264,19 +2064,19 @@ static void *established_get_first(struct seq_file *seq)
                        goto out;
                }
                st->state = TCP_SEQ_STATE_TIME_WAIT;
-               tw_for_each(tw, node,
-                           &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
+               inet_twsk_for_each(tw, node,
+                                  &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
                        vxdprintk(VXD_CBIT(net, 6),
                                "tw: %p [#%d] (from %d)",
-                               tw, tw->tw_xid, vx_current_xid());
-                       if (!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))
+                               tw, tw->tw_nid, nx_current_nid());
+                       if (!nx_check(tw->tw_nid, VS_WATCH_P|VS_IDENT))
                                continue;
                        if (tw->tw_family != st->family)
                                continue;
                        rc = tw;
                        goto out;
                }
-               read_unlock(&tcp_ehash[st->bucket].lock);
+               read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
                st->state = TCP_SEQ_STATE_ESTABLISHED;
        }
 out:
@@ -2286,7 +2086,7 @@ out:
 static void *established_get_next(struct seq_file *seq, void *cur)
 {
        struct sock *sk = cur;
-       struct tcp_tw_bucket *tw;
+       struct inet_timewait_sock *tw;
        struct hlist_node *node;
        struct tcp_iter_state* st = seq->private;
 
@@ -2297,22 +2097,22 @@ static void *established_get_next(struct seq_file *seq, void *cur)
                tw = tw_next(tw);
 get_tw:
                while (tw && (tw->tw_family != st->family ||
-                       !vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))) {
+                       !nx_check(tw->tw_nid, VS_WATCH_P|VS_IDENT))) {
                        tw = tw_next(tw);
                }
                if (tw) {
                        cur = tw;
                        goto out;
                }
-               read_unlock(&tcp_ehash[st->bucket].lock);
+               read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
                st->state = TCP_SEQ_STATE_ESTABLISHED;
 
                /* We can reschedule between buckets: */
                cond_resched_softirq();
 
-               if (++st->bucket < tcp_ehash_size) {
-                       read_lock(&tcp_ehash[st->bucket].lock);
-                       sk = sk_head(&tcp_ehash[st->bucket].chain);
+               if (++st->bucket < tcp_hashinfo.ehash_size) {
+                       read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+                       sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
                } else {
                        cur = NULL;
                        goto out;
@@ -2323,15 +2123,15 @@ get_tw:
        sk_for_each_from(sk, node) {
                vxdprintk(VXD_CBIT(net, 6),
                        "sk,egn: %p [#%d] (from %d)",
-                       sk, sk->sk_xid, vx_current_xid());
-               if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
+                       sk, sk->sk_nid, nx_current_nid());
+               if (!nx_check(sk->sk_nid, VS_WATCH_P|VS_IDENT))
                        continue;
                if (sk->sk_family == st->family)
                        goto found;
        }
 
        st->state = TCP_SEQ_STATE_TIME_WAIT;
-       tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
+       tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
        goto get_tw;
 found:
        cur = sk;
@@ -2346,7 +2146,7 @@ static void *established_get_idx(struct seq_file *seq, loff_t pos)
        while (rc && pos) {
                rc = established_get_next(seq, rc);
                --pos;
-       }               
+       }
        return rc;
 }
 
@@ -2355,12 +2155,12 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
        void *rc;
        struct tcp_iter_state* st = seq->private;
 
-       tcp_listen_lock();
+       inet_listen_lock(&tcp_hashinfo);
        st->state = TCP_SEQ_STATE_LISTENING;
        rc        = listening_get_idx(seq, &pos);
 
        if (!rc) {
-               tcp_listen_unlock();
+               inet_listen_unlock(&tcp_hashinfo);
                local_bh_disable();
                st->state = TCP_SEQ_STATE_ESTABLISHED;
                rc        = established_get_idx(seq, pos);
@@ -2393,7 +2193,7 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        case TCP_SEQ_STATE_LISTENING:
                rc = listening_get_next(seq, v);
                if (!rc) {
-                       tcp_listen_unlock();
+                       inet_listen_unlock(&tcp_hashinfo);
                        local_bh_disable();
                        st->state = TCP_SEQ_STATE_ESTABLISHED;
                        rc        = established_get_first(seq);
@@ -2416,17 +2216,17 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
        switch (st->state) {
        case TCP_SEQ_STATE_OPENREQ:
                if (v) {
-                       struct tcp_sock *tp = tcp_sk(st->syn_wait_sk);
-                       read_unlock_bh(&tp->syn_wait_lock);
+                       struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
+                       read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                }
        case TCP_SEQ_STATE_LISTENING:
                if (v != SEQ_START_TOKEN)
-                       tcp_listen_unlock();
+                       inet_listen_unlock(&tcp_hashinfo);
                break;
        case TCP_SEQ_STATE_TIME_WAIT:
        case TCP_SEQ_STATE_ESTABLISHED:
                if (v)
-                       read_unlock(&tcp_ehash[st->bucket].lock);
+                       read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
                local_bh_enable();
                break;
        }
@@ -2442,10 +2242,9 @@ static int tcp_seq_open(struct inode *inode, struct file *file)
        if (unlikely(afinfo == NULL))
                return -EINVAL;
 
-       s = kmalloc(sizeof(*s), GFP_KERNEL);
+       s = kzalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;
-       memset(s, 0, sizeof(*s));
        s->family               = afinfo->family;
        s->seq_ops.start        = tcp_seq_start;
        s->seq_ops.next         = tcp_seq_next;
@@ -2476,7 +2275,7 @@ int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
        afinfo->seq_fops->read          = seq_read;
        afinfo->seq_fops->llseek        = seq_lseek;
        afinfo->seq_fops->release       = seq_release_private;
-       
+
        p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
        if (p)
                p->data = afinfo;
@@ -2490,21 +2289,22 @@ void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
        if (!afinfo)
                return;
        proc_net_remove(afinfo->name);
-       memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); 
+       memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
 }
 
-static void get_openreq4(struct sock *sk, struct open_request *req,
+static void get_openreq4(struct sock *sk, struct request_sock *req,
                         char *tmpbuf, int i, int uid)
 {
+       const struct inet_request_sock *ireq = inet_rsk(req);
        int ttd = req->expires - jiffies;
 
        sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
                i,
-               req->af.v4_req.loc_addr,
+               ireq->loc_addr,
                ntohs(inet_sk(sk)->sport),
-               req->af.v4_req.rmt_addr,
-               ntohs(req->rmt_port),
+               ireq->rmt_addr,
+               ntohs(ireq->rmt_port),
                TCP_SYN_RECV,
                0, 0, /* could print option size, but that is af dependent. */
                1,    /* timers active (only the expire timer) */
@@ -2522,18 +2322,19 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
        int timer_active;
        unsigned long timer_expires;
        struct tcp_sock *tp = tcp_sk(sp);
+       const struct inet_connection_sock *icsk = inet_csk(sp);
        struct inet_sock *inet = inet_sk(sp);
-       unsigned int dest = inet->daddr;
-       unsigned int src = inet->rcv_saddr;
+       __be32 dest = inet->daddr;
+       __be32 src = inet->rcv_saddr;
        __u16 destp = ntohs(inet->dport);
        __u16 srcp = ntohs(inet->sport);
 
-       if (tp->pending == TCP_TIME_RETRANS) {
+       if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
                timer_active    = 1;
-               timer_expires   = tp->timeout;
-       } else if (tp->pending == TCP_TIME_PROBE0) {
+               timer_expires   = icsk->icsk_timeout;
+       } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active    = 4;
-               timer_expires   = tp->timeout;
+               timer_expires   = icsk->icsk_timeout;
        } else if (timer_pending(&sp->sk_timer)) {
                timer_active    = 2;
                timer_expires   = sp->sk_timer.expires;
@@ -2545,22 +2346,27 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
        sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
                        "%08X %5d %8d %lu %d %p %u %u %u %u %d",
                i, src, srcp, dest, destp, sp->sk_state,
-               tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
+               tp->write_seq - tp->snd_una,
+               sp->sk_state == TCP_LISTEN ? sp->sk_ack_backlog :
+                                            (tp->rcv_nxt - tp->copied_seq),
                timer_active,
                jiffies_to_clock_t(timer_expires - jiffies),
-               tp->retransmits,
+               icsk->icsk_retransmits,
                sock_i_uid(sp),
-               tp->probes_out,
+               icsk->icsk_probes_out,
                sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp,
-               tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
+               icsk->icsk_rto,
+               icsk->icsk_ack.ato,
+               (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                tp->snd_cwnd,
                tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
 }
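/* The fields built above surface as one row of /proc/net/tcp.  A
 * hypothetical reader for the leading columns; the hex addresses are
 * in the kernel's native byte order, so s_addr can take the value
 * as-is on the same machine:
 */
#include <stdio.h>
#include <arpa/inet.h>

static void parse_row(const char *line)
{
	unsigned int slot, laddr, lport, raddr, rport, state;
	struct in_addr a;

	if (sscanf(line, "%u: %X:%X %X:%X %X",
		   &slot, &laddr, &lport, &raddr, &rport, &state) != 6)
		return;

	a.s_addr = laddr;
	printf("%s:%u (state %u)\n", inet_ntoa(a), lport, state);
}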
 
-static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
+static void get_timewait4_sock(struct inet_timewait_sock *tw,
+                              char *tmpbuf, int i)
 {
-       unsigned int dest, src;
+       __be32 dest, src;
        __u16 destp, srcp;
        int ttd = tw->tw_ttd - jiffies;
 
@@ -2638,7 +2444,7 @@ struct proto tcp_prot = {
        .close                  = tcp_close,
        .connect                = tcp_v4_connect,
        .disconnect             = tcp_disconnect,
-       .accept                 = tcp_accept,
+       .accept                 = inet_csk_accept,
        .ioctl                  = tcp_ioctl,
        .init                   = tcp_v4_init_sock,
        .destroy                = tcp_v4_destroy_sock,
@@ -2653,6 +2459,7 @@ struct proto tcp_prot = {
        .get_port               = tcp_v4_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
        .sockets_allocated      = &tcp_sockets_allocated,
+       .orphan_count           = &tcp_orphan_count,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .sysctl_mem             = sysctl_tcp_mem,
@@ -2660,39 +2467,28 @@ struct proto tcp_prot = {
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp_sock),
+       .twsk_prot              = &tcp_timewait_sock_ops,
+       .rsk_prot               = &tcp_request_sock_ops,
+#ifdef CONFIG_COMPAT
+       .compat_setsockopt      = compat_tcp_setsockopt,
+       .compat_getsockopt      = compat_tcp_getsockopt,
+#endif
 };
 
-
-
 void __init tcp_v4_init(struct net_proto_family *ops)
 {
-       int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
-       if (err < 0)
+       if (inet_csk_ctl_sock_create(&tcp_socket, PF_INET, SOCK_RAW,
+                                    IPPROTO_TCP) < 0)
                panic("Failed to create the TCP control socket.\n");
-       tcp_socket->sk->sk_allocation   = GFP_ATOMIC;
-       inet_sk(tcp_socket->sk)->uc_ttl = -1;
-
-       /* Unhash it so that IP input processing does not even
-        * see it, we do not wish this socket to see incoming
-        * packets.
-        */
-       tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
 }
 
 EXPORT_SYMBOL(ipv4_specific);
-EXPORT_SYMBOL(tcp_bind_hash);
-EXPORT_SYMBOL(tcp_bucket_create);
 EXPORT_SYMBOL(tcp_hashinfo);
-EXPORT_SYMBOL(tcp_inherit_port);
-EXPORT_SYMBOL(tcp_listen_wlock);
-EXPORT_SYMBOL(tcp_port_rover);
 EXPORT_SYMBOL(tcp_prot);
-EXPORT_SYMBOL(tcp_put_port);
 EXPORT_SYMBOL(tcp_unhash);
 EXPORT_SYMBOL(tcp_v4_conn_request);
 EXPORT_SYMBOL(tcp_v4_connect);
 EXPORT_SYMBOL(tcp_v4_do_rcv);
-EXPORT_SYMBOL(tcp_v4_rebuild_header);
 EXPORT_SYMBOL(tcp_v4_remember_stamp);
 EXPORT_SYMBOL(tcp_v4_send_check);
 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
@@ -2702,7 +2498,5 @@ EXPORT_SYMBOL(tcp_proc_register);
 EXPORT_SYMBOL(tcp_proc_unregister);
 #endif
 EXPORT_SYMBOL(sysctl_local_port_range);
-EXPORT_SYMBOL(sysctl_max_syn_backlog);
 EXPORT_SYMBOL(sysctl_tcp_low_latency);
-EXPORT_SYMBOL(sysctl_tcp_tw_reuse);