linux 2.6.16.38 w/ vs2.0.3-rc1
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 05b61dd..dd0dbd8 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -52,6 +52,7 @@
  *                                     a single port at the same time.
  */
 
+#include <linux/config.h>
 
 #include <linux/types.h>
 #include <linux/fcntl.h>
@@ -70,7 +71,6 @@
 #include <net/inet_common.h>
 #include <net/timewait_sock.h>
 #include <net/xfrm.h>
-#include <net/netdma.h>
 
 #include <linux/inet.h>
 #include <linux/ipv6.h>
@@ -91,7 +91,7 @@ static struct socket *tcp_socket;
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
 
 struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
-       .lhash_lock     = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
+       .lhash_lock     = RW_LOCK_UNLOCKED,
        .lhash_users    = ATOMIC_INIT(0),
        .lhash_wait     = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
 };
@@ -242,7 +242,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                goto failure;
 
        /* OK, now commit destination to socket.  */
-       sk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(sk, &rt->u.dst);
 
        if (!tp->write_seq)
@@ -439,6 +438,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
                               It can f.e. if SYNs crossed.
                             */
                if (!sock_owned_by_user(sk)) {
+                       TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
                        sk->sk_err = err;
 
                        sk->sk_error_report(sk);
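Note: TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS) bumps the SNMP tcpAttemptFails counter, which the kernel exposes on the "Tcp:" lines of /proc/net/snmp; the hunk above makes the ICMP error path in tcp_v4_err() account a connection attempt that is aborted while still being established. A minimal user-space check of that counter could look like the sketch below; it only assumes the usual header-line/value-line layout of /proc/net/snmp.

/* Hedged sketch: print tcpAttemptFails from /proc/net/snmp.  Assumes the
 * usual layout: a "Tcp:" line naming the fields followed by a "Tcp:" line
 * carrying the values in the same order.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/net/snmp", "r");
	char hdr[1024], val[1024];

	if (!f) {
		perror("/proc/net/snmp");
		return 1;
	}
	while (fgets(hdr, sizeof(hdr), f)) {
		if (strncmp(hdr, "Tcp:", 4) != 0)
			continue;
		if (!fgets(val, sizeof(val), f))
			break;
		char *hs, *vs;
		char *h = strtok_r(hdr, " \n", &hs);
		char *v = strtok_r(val, " \n", &vs);
		while (h && v) {		/* walk both lines in lock step */
			if (strcmp(h, "AttemptFails") == 0) {
				printf("tcpAttemptFails = %s\n", v);
				break;
			}
			h = strtok_r(NULL, " \n", &hs);
			v = strtok_r(NULL, " \n", &vs);
		}
		break;
	}
	fclose(f);
	return 0;
}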
@@ -496,24 +496,6 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
        }
 }
 
-int tcp_v4_gso_send_check(struct sk_buff *skb)
-{
-       struct iphdr *iph;
-       struct tcphdr *th;
-
-       if (!pskb_may_pull(skb, sizeof(*th)))
-               return -EINVAL;
-
-       iph = skb->nh.iph;
-       th = skb->h.th;
-
-       th->check = 0;
-       th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
-       skb->csum = offsetof(struct tcphdr, check);
-       skb->ip_summed = CHECKSUM_HW;
-       return 0;
-}
-
 /*
  *     This routine will send an RST to the other tcp.
  *
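For reference on the deleted helper: tcp_v4_check(th, len, saddr, daddr, 0) covers only the IPv4 pseudo-header (source, destination, a zero byte, protocol 6 and the TCP length), and the ~ seeds th->check with that unfinished sum so the device signalled by CHECKSUM_HW can finish it, skb->csum recording where the check field sits. A self-contained user-space sketch of the full computation (RFC 1071 ones'-complement arithmetic; the addresses and sample segment are made up for illustration):

/* Hedged sketch: ones'-complement TCP checksum over the IPv4 pseudo-header
 * plus the TCP segment.  Illustrative only; not kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static uint32_t sum16(const uint8_t *p, size_t len, uint32_t acc)
{
	while (len > 1) {
		acc += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte, padded with zero */
		acc += (uint32_t)p[0] << 8;
	return acc;
}

static uint16_t tcp_checksum(uint32_t saddr, uint32_t daddr,
			     const uint8_t *seg, size_t len)
{
	uint8_t ph[12];
	uint32_t acc;

	/* Pseudo-header: source, destination, zero byte, protocol 6, length. */
	memcpy(ph, &saddr, 4);
	memcpy(ph + 4, &daddr, 4);
	ph[8]  = 0;
	ph[9]  = 6;
	ph[10] = len >> 8;
	ph[11] = len & 0xff;

	acc = sum16(ph, sizeof(ph), 0);
	acc = sum16(seg, len, acc);
	while (acc >> 16)		/* fold carries back into 16 bits */
		acc = (acc & 0xffff) + (acc >> 16);
	return (uint16_t)~acc;
}

int main(void)
{
	/* 20-byte TCP header with its checksum field zeroed (sample values). */
	uint8_t seg[20] = { 0x30, 0x39, 0x01, 0xbb };	/* sport 12345, dport 443 */

	printf("checksum = 0x%04x\n",
	       tcp_checksum(inet_addr("192.0.2.1"), inet_addr("192.0.2.2"),
			    seg, sizeof(seg)));
	return 0;
}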
@@ -874,6 +856,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 drop_and_free:
        reqsk_free(req);
 drop:
+       TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
        return 0;
 }
 
@@ -901,7 +884,6 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        if (!newsk)
                goto exit;
 
-       newsk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(newsk, dst);
 
        newtp                 = tcp_sk(newsk);
@@ -919,7 +901,6 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
        newinet->id = newtp->write_seq ^ jiffies;
 
-       tcp_mtup_init(newsk);
        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
        tcp_initialize_rcv_mss(newsk);
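tcp_mtup_init() belongs to the newer tree's TCP MTU-probing support, which the 2.6.16 base lacks; tcp_sync_mss() and the RTAX_ADVMSS metric remain, and for the common case they reduce to subtracting the fixed IPv4 and TCP header sizes from the path MTU (options shrink the value further). The basic arithmetic, shown for a 1500-byte Ethernet MTU:

/* Hedged sketch: the MSS arithmetic behind tcp_sync_mss() in the common
 * case -- advertised MSS is roughly path MTU minus the fixed IPv4 and TCP
 * headers (20 bytes each); TCP/IP options reduce it further.
 */
#include <stdio.h>

int main(void)
{
	const int mtu    = 1500;	/* typical Ethernet path MTU */
	const int iphdr  = 20;		/* struct iphdr without options */
	const int tcphdr = 20;		/* struct tcphdr without options */

	printf("advertised MSS for MTU %d: %d\n", mtu, mtu - iphdr - tcphdr);
	return 0;			/* prints 1460 */
}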
@@ -1107,21 +1088,11 @@ process:
 
        skb->dev = NULL;
 
-       bh_lock_sock_nested(sk);
+       bh_lock_sock(sk);
        ret = 0;
        if (!sock_owned_by_user(sk)) {
-#ifdef CONFIG_NET_DMA
-               struct tcp_sock *tp = tcp_sk(sk);
-               if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-                       tp->ucopy.dma_chan = get_softnet_dma();
-               if (tp->ucopy.dma_chan)
+               if (!tcp_prequeue(sk, skb))
                        ret = tcp_v4_do_rcv(sk, skb);
-               else
-#endif
-               {
-                       if (!tcp_prequeue(sk, skb))
-                       ret = tcp_v4_do_rcv(sk, skb);
-               }
        } else
                sk_add_backlog(sk, skb);
        bh_unlock_sock(sk);
@@ -1246,21 +1217,17 @@ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
 }
 
 struct inet_connection_sock_af_ops ipv4_specific = {
-       .queue_xmit        = ip_queue_xmit,
-       .send_check        = tcp_v4_send_check,
-       .rebuild_header    = inet_sk_rebuild_header,
-       .conn_request      = tcp_v4_conn_request,
-       .syn_recv_sock     = tcp_v4_syn_recv_sock,
-       .remember_stamp    = tcp_v4_remember_stamp,
-       .net_header_len    = sizeof(struct iphdr),
-       .setsockopt        = ip_setsockopt,
-       .getsockopt        = ip_getsockopt,
-       .addr2sockaddr     = inet_csk_addr2sockaddr,
-       .sockaddr_len      = sizeof(struct sockaddr_in),
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_ip_setsockopt,
-       .compat_getsockopt = compat_ip_getsockopt,
-#endif
+       .queue_xmit     =       ip_queue_xmit,
+       .send_check     =       tcp_v4_send_check,
+       .rebuild_header =       inet_sk_rebuild_header,
+       .conn_request   =       tcp_v4_conn_request,
+       .syn_recv_sock  =       tcp_v4_syn_recv_sock,
+       .remember_stamp =       tcp_v4_remember_stamp,
+       .net_header_len =       sizeof(struct iphdr),
+       .setsockopt     =       ip_setsockopt,
+       .getsockopt     =       ip_getsockopt,
+       .addr2sockaddr  =       inet_csk_addr2sockaddr,
+       .sockaddr_len   =       sizeof(struct sockaddr_in),
 };
 
 /* NOTE: A lot of things set to zero explicitly by call to
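Regarding the ipv4_specific hunk above: it is the inet_connection_sock_af_ops table through which the address-family-independent connection code reaches these IPv4 routines; apart from whitespace, only the CONFIG_COMPAT get/setsockopt entries differ between the two sides. The function-pointer "ops table" pattern itself, reduced to a standalone C illustration (the struct and functions below are invented for the example, not kernel API):

/* Hedged sketch of the ops-table pattern: generic code dispatches through a
 * table of function pointers without knowing the address family.  Everything
 * here is an illustrative stand-in, not kernel code.
 */
#include <stdio.h>

struct af_ops {
	int (*queue_xmit)(const char *payload);
	int header_len;
};

static int ipv4_queue_xmit(const char *payload)
{
	printf("ipv4: sending \"%s\"\n", payload);
	return 0;
}

static const struct af_ops demo_ipv4_ops = {
	.queue_xmit	= ipv4_queue_xmit,
	.header_len	= 20,
};

/* "Generic" code: works for any address family that fills in af_ops. */
static int send_segment(const struct af_ops *ops, const char *payload)
{
	printf("reserving %d bytes of network header\n", ops->header_len);
	return ops->queue_xmit(payload);
}

int main(void)
{
	return send_segment(&demo_ipv4_ops, "SYN");
}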
@@ -1325,11 +1292,6 @@ int tcp_v4_destroy_sock(struct sock *sk)
        /* Cleans up our, hopefully empty, out_of_order_queue. */
        __skb_queue_purge(&tp->out_of_order_queue);
 
-#ifdef CONFIG_NET_DMA
-       /* Cleans up our sk_async_wait_queue */
-       __skb_queue_purge(&sk->sk_async_wait_queue);
-#endif
-
        /* Clean prequeue, it must be empty really */
        __skb_queue_purge(&tp->ucopy.prequeue);
 
@@ -1663,9 +1625,10 @@ static int tcp_seq_open(struct inode *inode, struct file *file)
        if (unlikely(afinfo == NULL))
                return -EINVAL;
 
-       s = kzalloc(sizeof(*s), GFP_KERNEL);
+       s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;
+       memset(s, 0, sizeof(*s));
        s->family               = afinfo->family;
        s->seq_ops.start        = tcp_seq_start;
        s->seq_ops.next         = tcp_seq_next;
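kmalloc() followed by memset() and kzalloc() produce the same zeroed allocation; the 2.6.16 side simply open-codes it. The same relationship in user-space terms, with calloc() standing in for kzalloc() (the struct is an illustrative stand-in):

/* Hedged sketch: zeroed allocation written both ways -- explicit
 * malloc()+memset() (the style in the hunk above) and calloc() as the
 * kzalloc-like shorthand.  Both yield fully zeroed storage.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct seq_state { int family; void *ops; };

int main(void)
{
	struct seq_state *a = malloc(sizeof(*a));
	struct seq_state *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	memset(a, 0, sizeof(*a));	/* explicit zeroing */

	printf("a->family=%d b->family=%d\n", a->family, b->family);
	free(a);
	free(b);
	return 0;
}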
@@ -1767,8 +1730,7 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
        sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
                        "%08X %5d %8d %lu %d %p %u %u %u %u %d",
                i, src, srcp, dest, destp, sp->sk_state,
-               tp->write_seq - tp->snd_una,
-               (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
+               tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
                timer_active,
                jiffies_to_clock_t(timer_expires - jiffies),
                icsk->icsk_retransmits,
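After this hunk the rx_queue column of /proc/net/tcp always shows rcv_nxt - copied_seq (bytes received but not yet read), whereas the newer code substitutes sk_ack_backlog (pending accepts) for sockets in TCP_LISTEN. A small reader for the leading columns produced by the sprintf() format above (error handling kept minimal; addresses are printed as the raw hex the kernel emits):

/* Hedged sketch: parse the leading columns of /proc/net/tcp -- slot, local
 * and remote endpoints, state, and the tx/rx queue counters whose meaning
 * the hunk above changes for listening sockets.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/net/tcp", "r");
	char line[512];

	if (!f) {
		perror("/proc/net/tcp");
		return 1;
	}
	if (!fgets(line, sizeof(line), f)) {	/* skip the column header */
		fclose(f);
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		unsigned int sl, lip, lport, rip, rport, st, txq, rxq;

		if (sscanf(line, " %u: %8X:%4X %8X:%4X %2X %8X:%8X",
			   &sl, &lip, &lport, &rip, &rport, &st,
			   &txq, &rxq) != 8)
			continue;
		printf("%3u: %08X:%u -> %08X:%u  state %02X  tx %u  rx %u\n",
		       sl, lip, lport, rip, rport, st, txq, rxq);
	}
	fclose(f);
	return 0;
}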
@@ -1888,16 +1850,23 @@ struct proto tcp_prot = {
        .obj_size               = sizeof(struct tcp_sock),
        .twsk_prot              = &tcp_timewait_sock_ops,
        .rsk_prot               = &tcp_request_sock_ops,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt      = compat_tcp_setsockopt,
-       .compat_getsockopt      = compat_tcp_getsockopt,
-#endif
 };
 
+
+
 void __init tcp_v4_init(struct net_proto_family *ops)
 {
-       if (inet_csk_ctl_sock_create(&tcp_socket, PF_INET, SOCK_RAW, IPPROTO_TCP) < 0)
+       int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
+       if (err < 0)
                panic("Failed to create the TCP control socket.\n");
+       tcp_socket->sk->sk_allocation   = GFP_ATOMIC;
+       inet_sk(tcp_socket->sk)->uc_ttl = -1;
+
+       /* Unhash it so that IP input processing does not even
+        * see it, we do not wish this socket to see incoming
+        * packets.
+        */
+       tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
 }
 
 EXPORT_SYMBOL(ipv4_specific);
@@ -1917,4 +1886,5 @@ EXPORT_SYMBOL(tcp_proc_unregister);
 #endif
 EXPORT_SYMBOL(sysctl_local_port_range);
 EXPORT_SYMBOL(sysctl_tcp_low_latency);
+EXPORT_SYMBOL(sysctl_tcp_tw_reuse);
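The added export makes sysctl_tcp_tw_reuse, the variable behind the net.ipv4.tcp_tw_reuse sysctl, visible to modules. For completeness, a user-space peek at the current setting (assumes procfs mounted at /proc):

/* Hedged sketch: print the current net.ipv4.tcp_tw_reuse value, i.e. the
 * tunable backed by the newly exported sysctl_tcp_tw_reuse.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "r");
	int val = -1;

	if (!f) {
		perror("tcp_tw_reuse");
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("net.ipv4.tcp_tw_reuse = %d\n", val);
	fclose(f);
	return val >= 0 ? 0 : 1;
}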