X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=net%2Fipv4%2Ftcp.c;h=01fda573983fcd63641f341428db6cd2ace2c98c;hb=refs%2Fheads%2Fvserver;hp=260cb40321561990a73c39606b98c74afe4a5f97;hpb=9213980e6a70d8473e0ffd4b39ab5b6caaba9ff5;p=linux-2.6.git diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 260cb4032..01fda5739 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -7,7 +7,7 @@ * * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $ * - * Authors: Ross Biro, + * Authors: Ross Biro * Fred N. van Kempen, * Mark Evans, * Corey Minyard @@ -247,7 +247,6 @@ * TCP_CLOSE socket is finished */ -#include #include #include #include @@ -256,126 +255,62 @@ #include #include #include +#include +#include +#include +#include +#include #include #include #include #include - +#include #include #include -int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT; - -DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics); +int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT; -kmem_cache_t *tcp_openreq_cachep; -kmem_cache_t *tcp_bucket_cachep; -kmem_cache_t *tcp_timewait_cachep; +DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly; atomic_t tcp_orphan_count = ATOMIC_INIT(0); -int sysctl_tcp_default_win_scale; +EXPORT_SYMBOL_GPL(tcp_orphan_count); -int sysctl_tcp_mem[3]; -int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 }; -int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 }; +int sysctl_tcp_mem[3] __read_mostly; +int sysctl_tcp_wmem[3] __read_mostly; +int sysctl_tcp_rmem[3] __read_mostly; + +EXPORT_SYMBOL(sysctl_tcp_mem); +EXPORT_SYMBOL(sysctl_tcp_rmem); +EXPORT_SYMBOL(sysctl_tcp_wmem); atomic_t tcp_memory_allocated; /* Current allocated memory. */ atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */ -/* Pressure flag: try to collapse. +EXPORT_SYMBOL(tcp_memory_allocated); +EXPORT_SYMBOL(tcp_sockets_allocated); + +/* + * Pressure flag: try to collapse. * Technical note: it is used by multiple contexts non atomically. - * All the tcp_mem_schedule() is of this nature: accounting - * is strict, actions are advisory and have some latency. */ + * All the sk_stream_mem_schedule() is of this nature: accounting + * is strict, actions are advisory and have some latency. + */ int tcp_memory_pressure; -#define TCP_PAGES(amt) (((amt) + TCP_MEM_QUANTUM - 1) / TCP_MEM_QUANTUM) +EXPORT_SYMBOL(tcp_memory_pressure); -int tcp_mem_schedule(struct sock *sk, int size, int kind) +void tcp_enter_memory_pressure(void) { - int amt = TCP_PAGES(size); - - sk->sk_forward_alloc += amt * TCP_MEM_QUANTUM; - atomic_add(amt, &tcp_memory_allocated); - - /* Under limit. */ - if (atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) { - if (tcp_memory_pressure) - tcp_memory_pressure = 0; - return 1; + if (!tcp_memory_pressure) { + NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES); + tcp_memory_pressure = 1; } - - /* Over hard limit. */ - if (atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) { - tcp_enter_memory_pressure(); - goto suppress_allocation; - } - - /* Under pressure. 
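 *
 * (Annotation, not part of the patch: sysctl_tcp_mem[] acts as three
 * watermarks on tcp_memory_allocated, as the branches above and below
 * show:
 *
 *	mem[0]: below this, no pressure; allocations always succeed
 *	mem[1]: above this, enter memory pressure, try to collapse queues
 *	mem[2]: hard limit; above this the charge is suppressed outright
 *
 * The generic sk_stream_mem_schedule() that replaces this helper keeps
 * the same three-watermark scheme.)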
*/ - if (atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[1]) - tcp_enter_memory_pressure(); - - if (kind) { - if (atomic_read(&sk->sk_rmem_alloc) < sysctl_tcp_rmem[0]) - return 1; - } else if (sk->sk_wmem_queued < sysctl_tcp_wmem[0]) - return 1; - - if (!tcp_memory_pressure || - sysctl_tcp_mem[2] > atomic_read(&tcp_sockets_allocated) * - TCP_PAGES(sk->sk_wmem_queued + - atomic_read(&sk->sk_rmem_alloc) + - sk->sk_forward_alloc)) - return 1; - -suppress_allocation: - - if (!kind) { - tcp_moderate_sndbuf(sk); - - /* Fail only if socket is _under_ its sndbuf. - * In this case we cannot block, so that we have to fail. - */ - if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) - return 1; - } - - /* Alas. Undo changes. */ - sk->sk_forward_alloc -= amt * TCP_MEM_QUANTUM; - atomic_sub(amt, &tcp_memory_allocated); - return 0; } -void __tcp_mem_reclaim(struct sock *sk) -{ - if (sk->sk_forward_alloc >= TCP_MEM_QUANTUM) { - atomic_sub(sk->sk_forward_alloc / TCP_MEM_QUANTUM, - &tcp_memory_allocated); - sk->sk_forward_alloc &= TCP_MEM_QUANTUM - 1; - if (tcp_memory_pressure && - atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) - tcp_memory_pressure = 0; - } -} - -void tcp_rfree(struct sk_buff *skb) -{ - struct sock *sk = skb->sk; - - atomic_sub(skb->truesize, &sk->sk_rmem_alloc); - sk->sk_forward_alloc += skb->truesize; -} - -/* - * LISTEN is a special case for poll.. - */ -static __inline__ unsigned int tcp_listen_poll(struct sock *sk, - poll_table *wait) -{ - return tcp_sk(sk)->accept_queue ? (POLLIN | POLLRDNORM) : 0; -} +EXPORT_SYMBOL(tcp_enter_memory_pressure); /* * Wait for a TCP event. @@ -388,11 +323,11 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) { unsigned int mask; struct sock *sk = sock->sk; - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); poll_wait(file, sk->sk_sleep, wait); if (sk->sk_state == TCP_LISTEN) - return tcp_listen_poll(sk, wait); + return inet_csk_listen_poll(sk); /* Socket is not locked. We are protected from async events by poll logic and correct handling of state changes @@ -433,7 +368,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) mask |= POLLHUP; if (sk->sk_shutdown & RCV_SHUTDOWN) - mask |= POLLIN | POLLRDNORM; + mask |= POLLIN | POLLRDNORM | POLLRDHUP; /* Connected? */ if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) { @@ -447,7 +382,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) mask |= POLLIN | POLLRDNORM; if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { - if (tcp_wspace(sk) >= tcp_min_write_space(sk)) { + if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { mask |= POLLOUT | POLLWRNORM; } else { /* send SIGIO later */ set_bit(SOCK_ASYNC_NOSPACE, @@ -458,7 +393,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) * wspace test but before the flags are set, * IO signal will be lost. */ - if (tcp_wspace(sk) >= tcp_min_write_space(sk)) + if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) mask |= POLLOUT | POLLWRNORM; } } @@ -469,27 +404,9 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) return mask; } -/* - * TCP socket write_space callback. 
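 *
 * (Annotation, not from the patch: the callback is not lost; its logic
 * survives as the generic sk_stream_write_space() in net/core/stream.c,
 * which does essentially what the removed body below does:
 *
 *	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock)
 *		clear_bit(SOCK_NOSPACE, &sock->flags);
 *
 * then wakes sleepers and signals SIGIO, so all stream sockets share
 * one implementation.)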
- */ -void tcp_write_space(struct sock *sk) -{ - struct socket *sock = sk->sk_socket; - - if (tcp_wspace(sk) >= tcp_min_write_space(sk) && sock) { - clear_bit(SOCK_NOSPACE, &sock->flags); - - if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) - wake_up_interruptible(sk->sk_sleep); - - if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) - sock_wake_async(sock, 2, POLL_OUT); - } -} - int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int answ; switch (cmd) { @@ -533,277 +450,36 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) return put_user(answ, (int __user *)arg); } - -int tcp_listen_start(struct sock *sk) -{ - struct inet_opt *inet = inet_sk(sk); - struct tcp_opt *tp = tcp_sk(sk); - struct tcp_listen_opt *lopt; - - sk->sk_max_ack_backlog = 0; - sk->sk_ack_backlog = 0; - tp->accept_queue = tp->accept_queue_tail = NULL; - tp->syn_wait_lock = RW_LOCK_UNLOCKED; - tcp_delack_init(tp); - - lopt = kmalloc(sizeof(struct tcp_listen_opt), GFP_KERNEL); - if (!lopt) - return -ENOMEM; - - memset(lopt, 0, sizeof(struct tcp_listen_opt)); - for (lopt->max_qlen_log = 6; ; lopt->max_qlen_log++) - if ((1 << lopt->max_qlen_log) >= sysctl_max_syn_backlog) - break; - get_random_bytes(&lopt->hash_rnd, 4); - - write_lock_bh(&tp->syn_wait_lock); - tp->listen_opt = lopt; - write_unlock_bh(&tp->syn_wait_lock); - - /* There is race window here: we announce ourselves listening, - * but this transition is still not validated by get_port(). - * It is OK, because this socket enters to hash table only - * after validation is complete. - */ - sk->sk_state = TCP_LISTEN; - if (!sk->sk_prot->get_port(sk, inet->num)) { - inet->sport = htons(inet->num); - - sk_dst_reset(sk); - sk->sk_prot->hash(sk); - - return 0; - } - - sk->sk_state = TCP_CLOSE; - write_lock_bh(&tp->syn_wait_lock); - tp->listen_opt = NULL; - write_unlock_bh(&tp->syn_wait_lock); - kfree(lopt); - return -EADDRINUSE; -} - -/* - * This routine closes sockets which have been at least partially - * opened, but not yet accepted. - */ - -static void tcp_listen_stop (struct sock *sk) -{ - struct tcp_opt *tp = tcp_sk(sk); - struct tcp_listen_opt *lopt = tp->listen_opt; - struct open_request *acc_req = tp->accept_queue; - struct open_request *req; - int i; - - tcp_delete_keepalive_timer(sk); - - /* make all the listen_opt local to us */ - write_lock_bh(&tp->syn_wait_lock); - tp->listen_opt = NULL; - write_unlock_bh(&tp->syn_wait_lock); - tp->accept_queue = tp->accept_queue_tail = NULL; - - if (lopt->qlen) { - for (i = 0; i < TCP_SYNQ_HSIZE; i++) { - while ((req = lopt->syn_table[i]) != NULL) { - lopt->syn_table[i] = req->dl_next; - lopt->qlen--; - tcp_openreq_free(req); - - /* Following specs, it would be better either to send FIN - * (and enter FIN-WAIT-1, it is normal close) - * or to send active reset (abort). - * Certainly, it is pretty dangerous while synflood, but it is - * bad justification for our negligence 8) - * To be honest, we are not able to make either - * of the variants now. 
--ANK - */ - } - } - } - BUG_TRAP(!lopt->qlen); - - kfree(lopt); - - while ((req = acc_req) != NULL) { - struct sock *child = req->sk; - - acc_req = req->dl_next; - - local_bh_disable(); - bh_lock_sock(child); - BUG_TRAP(!sock_owned_by_user(child)); - sock_hold(child); - - tcp_disconnect(child, O_NONBLOCK); - - sock_orphan(child); - - atomic_inc(&tcp_orphan_count); - - tcp_destroy_sock(child); - - bh_unlock_sock(child); - local_bh_enable(); - sock_put(child); - - sk_acceptq_removed(sk); - tcp_openreq_fastfree(req); - } - BUG_TRAP(!sk->sk_ack_backlog); -} - -/* - * Wait for a socket to get into the connected state - * - * Note: Must be called with the socket locked. - */ -static int wait_for_tcp_connect(struct sock *sk, int flags, long *timeo_p) -{ - struct tcp_opt *tp = tcp_sk(sk); - struct task_struct *tsk = current; - DEFINE_WAIT(wait); - - while ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { - if (sk->sk_err) - return sock_error(sk); - if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) - return -EPIPE; - if (!*timeo_p) - return -EAGAIN; - if (signal_pending(tsk)) - return sock_intr_errno(*timeo_p); - - prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); - tp->write_pending++; - - release_sock(sk); - *timeo_p = schedule_timeout(*timeo_p); - lock_sock(sk); - - finish_wait(sk->sk_sleep, &wait); - tp->write_pending--; - } - return 0; -} - -static inline int tcp_memory_free(struct sock *sk) -{ - return sk->sk_wmem_queued < sk->sk_sndbuf; -} - -/* - * Wait for more memory for a socket - */ -static int wait_for_tcp_memory(struct sock *sk, long *timeo) -{ - struct tcp_opt *tp = tcp_sk(sk); - int err = 0; - long vm_wait = 0; - long current_timeo = *timeo; - DEFINE_WAIT(wait); - - if (tcp_memory_free(sk)) - current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2; - - for (;;) { - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); - - prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); - - if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) - goto do_error; - if (!*timeo) - goto do_nonblock; - if (signal_pending(current)) - goto do_interrupted; - clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); - if (tcp_memory_free(sk) && !vm_wait) - break; - - set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); - tp->write_pending++; - release_sock(sk); - if (!tcp_memory_free(sk) || vm_wait) - current_timeo = schedule_timeout(current_timeo); - lock_sock(sk); - tp->write_pending--; - - if (vm_wait) { - vm_wait -= current_timeo; - current_timeo = *timeo; - if (current_timeo != MAX_SCHEDULE_TIMEOUT && - (current_timeo -= vm_wait) < 0) - current_timeo = 0; - vm_wait = 0; - } - *timeo = current_timeo; - } -out: - finish_wait(sk->sk_sleep, &wait); - return err; - -do_error: - err = -EPIPE; - goto out; -do_nonblock: - err = -EAGAIN; - goto out; -do_interrupted: - err = sock_intr_errno(*timeo); - goto out; -} - -static inline int can_coalesce(struct sk_buff *skb, int i, struct page *page, - int off) -{ - if (i) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; - return page == frag->page && - off == frag->page_offset + frag->size; - } - return 0; -} - -static inline void fill_page_desc(struct sk_buff *skb, int i, - struct page *page, int off, int size) -{ - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - frag->page = page; - frag->page_offset = off; - frag->size = size; - skb_shinfo(skb)->nr_frags = i + 1; -} - -static inline void tcp_mark_push(struct tcp_opt *tp, struct sk_buff *skb) +static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) { 
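	/* (Annotation, not from the patch: recording pushed_seq here is
	 * what forced_push() below tests; a push is forced once write_seq
	 * runs more than half the peer's largest advertised window past
	 * the last pushed byte:
	 *
	 *	after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1))
	 * )
	 */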
TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; tp->pushed_seq = tp->write_seq; } -static inline int forced_push(struct tcp_opt *tp) +static inline int forced_push(struct tcp_sock *tp) { return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); } -static inline void skb_entail(struct sock *sk, struct tcp_opt *tp, +static inline void skb_entail(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) { - skb->csum = 0; - TCP_SKB_CB(skb)->seq = tp->write_seq; - TCP_SKB_CB(skb)->end_seq = tp->write_seq; - TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK; - TCP_SKB_CB(skb)->sacked = 0; + struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); + + skb->csum = 0; + tcb->seq = tcb->end_seq = tp->write_seq; + tcb->flags = TCPCB_FLAG_ACK; + tcb->sacked = 0; + skb_header_release(skb); __skb_queue_tail(&sk->sk_write_queue, skb); - tcp_charge_skb(sk, skb); - if (!tp->send_head) - tp->send_head = skb; - else if (tp->nonagle&TCP_NAGLE_PUSH) + sk_charge_skb(sk, skb); + if (!sk->sk_send_head) + sk->sk_send_head = skb; + if (tp->nonagle & TCP_NAGLE_PUSH) tp->nonagle &= ~TCP_NAGLE_PUSH; } -static inline void tcp_mark_urg(struct tcp_opt *tp, int flags, +static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, struct sk_buff *skb) { if (flags & MSG_OOB) { @@ -813,10 +489,10 @@ static inline void tcp_mark_urg(struct tcp_opt *tp, int flags, } } -static inline void tcp_push(struct sock *sk, struct tcp_opt *tp, int flags, +static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags, int mss_now, int nonagle) { - if (tp->send_head) { + if (sk->sk_send_head) { struct sk_buff *skb = sk->sk_write_queue.prev; if (!(flags & MSG_MORE) || forced_push(tp)) tcp_mark_push(tp, skb); @@ -826,32 +502,24 @@ static inline void tcp_push(struct sock *sk, struct tcp_opt *tp, int flags, } } -static int tcp_error(struct sock *sk, int flags, int err) -{ - if (err == -EPIPE) - err = sock_error(sk) ? : -EPIPE; - if (err == -EPIPE && !(flags & MSG_NOSIGNAL)) - send_sig(SIGPIPE, current, 0); - return err; -} - static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags) { - struct tcp_opt *tp = tcp_sk(sk); - int mss_now; + struct tcp_sock *tp = tcp_sk(sk); + int mss_now, size_goal; int err; ssize_t copied; long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); /* Wait for a connection to finish. 
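 *
 * (Annotation, not from the patch: sk_stream_wait_connect() is the
 * generic replacement for the removed wait_for_tcp_connect() above;
 * the contract is unchanged, roughly:
 *
 *	returns 0 once sk_state reaches ESTABLISHED or CLOSE_WAIT,
 *	-EPIPE on error or shutdown, -EAGAIN when the timeout expires,
 *	or sock_intr_errno(timeo) if interrupted by a signal.)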
*/ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) - if ((err = wait_for_tcp_connect(sk, 0, &timeo)) != 0) + if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) goto out_err; clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); + size_goal = tp->xmit_size_goal; copied = 0; err = -EPIPE; @@ -861,43 +529,52 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse while (psize > 0) { struct sk_buff *skb = sk->sk_write_queue.prev; struct page *page = pages[poffset / PAGE_SIZE]; - int copy, i; + int copy, i, can_coalesce; int offset = poffset % PAGE_SIZE; int size = min_t(size_t, psize, PAGE_SIZE - offset); - if (!tp->send_head || (copy = mss_now - skb->len) <= 0) { + if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) { new_segment: - if (!tcp_memory_free(sk)) + if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf; - skb = tcp_alloc_pskb(sk, 0, tp->mss_cache, - sk->sk_allocation); + skb = sk_stream_alloc_pskb(sk, 0, 0, + sk->sk_allocation); if (!skb) goto wait_for_memory; skb_entail(sk, tp, skb); - copy = mss_now; + copy = size_goal; } if (copy > size) copy = size; i = skb_shinfo(skb)->nr_frags; - if (can_coalesce(skb, i, page, offset)) { - skb_shinfo(skb)->frags[i - 1].size += copy; - } else if (i < MAX_SKB_FRAGS) { - get_page(page); - fill_page_desc(skb, i, page, offset, copy); - } else { + can_coalesce = skb_can_coalesce(skb, i, page, offset); + if (!can_coalesce && i >= MAX_SKB_FRAGS) { tcp_mark_push(tp, skb); goto new_segment; } + if (!sk_stream_wmem_schedule(sk, copy)) + goto wait_for_memory; + + if (can_coalesce) { + skb_shinfo(skb)->frags[i - 1].size += copy; + } else { + get_page(page); + skb_fill_page_desc(skb, i, page, offset, copy); + } skb->len += copy; skb->data_len += copy; - skb->ip_summed = CHECKSUM_HW; + skb->truesize += copy; + sk->sk_wmem_queued += copy; + sk->sk_forward_alloc -= copy; + skb->ip_summed = CHECKSUM_PARTIAL; tp->write_seq += copy; TCP_SKB_CB(skb)->end_seq += copy; + skb_shinfo(skb)->gso_segs = 0; if (!copied) TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH; @@ -907,13 +584,13 @@ new_segment: if (!(psize -= copy)) goto out; - if (skb->len != mss_now || (flags & MSG_OOB)) + if (skb->len < mss_now || (flags & MSG_OOB)) continue; if (forced_push(tp)) { tcp_mark_push(tp, skb); __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH); - } else if (skb == tp->send_head) + } else if (skb == sk->sk_send_head) tcp_push_one(sk, mss_now); continue; @@ -923,10 +600,11 @@ wait_for_memory: if (copied) tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); - if ((err = wait_for_tcp_memory(sk, &timeo)) != 0) + if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) goto do_error; mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); + size_goal = tp->xmit_size_goal; } out: @@ -938,7 +616,7 @@ do_error: if (copied) goto out; out_err: - return tcp_error(sk, flags, err); + return sk_stream_error(sk, flags, err); } ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, @@ -947,14 +625,10 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, ssize_t res; struct sock *sk = sock->sk; -#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) - if (!(sk->sk_route_caps & NETIF_F_SG) || - !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS)) + !(sk->sk_route_caps & NETIF_F_ALL_CSUM)) return sock_no_sendpage(sock, page, offset, size, flags); -#undef TCP_ZC_CSUM_FLAGS - lock_sock(sk); TCP_CHECK_TIMER(sk); res = do_tcp_sendpages(sk, &page, 
offset, size, flags); @@ -963,67 +637,25 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, return res; } -#define TCP_PAGE(sk) (inet_sk(sk)->sndmsg_page) -#define TCP_OFF(sk) (inet_sk(sk)->sndmsg_off) +#define TCP_PAGE(sk) (sk->sk_sndmsg_page) +#define TCP_OFF(sk) (sk->sk_sndmsg_off) -static inline int tcp_copy_to_page(struct sock *sk, char __user *from, - struct sk_buff *skb, struct page *page, - int off, int copy) +static inline int select_size(struct sock *sk, struct tcp_sock *tp) { - int err = 0; - unsigned int csum; - - if (skb->ip_summed == CHECKSUM_NONE) { - csum = csum_and_copy_from_user(from, page_address(page) + off, - copy, 0, &err); - if (err) return err; - skb->csum = csum_block_add(skb->csum, csum, skb->len); - } else { - if (copy_from_user(page_address(page) + off, from, copy)) - return -EFAULT; - } + int tmp = tp->mss_cache; - skb->len += copy; - skb->data_len += copy; - skb->truesize += copy; - sk->sk_wmem_queued += copy; - sk->sk_forward_alloc -= copy; - return 0; -} + if (sk->sk_route_caps & NETIF_F_SG) { + if (sk_can_gso(sk)) + tmp = 0; + else { + int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER); -static inline int skb_add_data(struct sk_buff *skb, char __user *from, int copy) -{ - int err = 0; - unsigned int csum; - int off = skb->len; - - if (skb->ip_summed == CHECKSUM_NONE) { - csum = csum_and_copy_from_user(from, skb_put(skb, copy), - copy, 0, &err); - if (!err) { - skb->csum = csum_block_add(skb->csum, csum, off); - return 0; + if (tmp >= pgbreak && + tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE) + tmp = pgbreak; } - } else { - if (!copy_from_user(skb_put(skb, copy), from, copy)) - return 0; } - __skb_trim(skb, off); - return -EFAULT; -} - -static inline int select_size(struct sock *sk, struct tcp_opt *tp) -{ - int tmp = tp->mss_cache_std; - - if (sk->sk_route_caps & NETIF_F_SG) { - int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER); - - if (tmp >= pgbreak && - tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE) - tmp = pgbreak; - } return tmp; } @@ -1031,10 +663,10 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t size) { struct iovec *iov; - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int iovlen, flags; - int mss_now; + int mss_now, size_goal; int err, copied; long timeo; @@ -1046,13 +678,14 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, /* Wait for a connection to finish. */ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) - if ((err = wait_for_tcp_connect(sk, flags, &timeo)) != 0) + if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) goto out_err; /* This should be in poll */ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); + size_goal = tp->xmit_size_goal; /* Ok commence sending. */ iovlen = msg->msg_iovlen; @@ -1074,31 +707,29 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, skb = sk->sk_write_queue.prev; - if (!tp->send_head || - (copy = mss_now - skb->len) <= 0) { + if (!sk->sk_send_head || + (copy = size_goal - skb->len) <= 0) { new_segment: /* Allocate new segment. If the interface is SG, * allocate skb fitting to single page. */ - if (!tcp_memory_free(sk)) + if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf; - skb = tcp_alloc_pskb(sk, select_size(sk, tp), - 0, sk->sk_allocation); + skb = sk_stream_alloc_pskb(sk, select_size(sk, tp), + 0, sk->sk_allocation); if (!skb) goto wait_for_memory; /* * Check whether we can use HW checksum. 
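 *
 * (Annotation, not from the patch: in this kernel generation
 * NETIF_F_ALL_CSUM is simply the union of the three flags the old
 * test spelled out,
 *
 *	NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM
 *
 * and CHECKSUM_PARTIAL is the new transmit-side name for what this
 * file previously called CHECKSUM_HW.)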
*/ - if (sk->sk_route_caps & - (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | - NETIF_F_HW_CSUM)) - skb->ip_summed = CHECKSUM_HW; + if (sk->sk_route_caps & NETIF_F_ALL_CSUM) + skb->ip_summed = CHECKSUM_PARTIAL; skb_entail(sk, tp, skb); - copy = mss_now; + copy = size_goal; } /* Try to append data to the end of skb. */ @@ -1118,7 +749,7 @@ new_segment: struct page *page = TCP_PAGE(sk); int off = TCP_OFF(sk); - if (can_coalesce(skb, i, page, off) && + if (skb_can_coalesce(skb, i, page, off) && off != PAGE_SIZE) { /* We can extend the last page * fragment. */ @@ -1133,30 +764,29 @@ new_segment: tcp_mark_push(tp, skb); goto new_segment; } else if (page) { - /* If page is cached, align - * offset to L1 cache boundary - */ - off = (off + L1_CACHE_BYTES - 1) & - ~(L1_CACHE_BYTES - 1); if (off == PAGE_SIZE) { put_page(page); TCP_PAGE(sk) = page = NULL; + off = 0; } - } + } else + off = 0; + + if (copy > PAGE_SIZE - off) + copy = PAGE_SIZE - off; + + if (!sk_stream_wmem_schedule(sk, copy)) + goto wait_for_memory; if (!page) { /* Allocate new cache page. */ - if (!(page = tcp_alloc_page(sk))) + if (!(page = sk_stream_alloc_page(sk))) goto wait_for_memory; - off = 0; } - if (copy > PAGE_SIZE - off) - copy = PAGE_SIZE - off; - /* Time to copy data. We are close to * the end! */ - err = tcp_copy_to_page(sk, from, skb, page, + err = skb_copy_to_page(sk, from, skb, page, off, copy); if (err) { /* If this page was new, give it to the @@ -1174,7 +804,7 @@ new_segment: skb_shinfo(skb)->frags[i - 1].size += copy; } else { - fill_page_desc(skb, i, page, off, copy); + skb_fill_page_desc(skb, i, page, off, copy); if (TCP_PAGE(sk)) { get_page(page); } else if (off + copy < PAGE_SIZE) { @@ -1191,19 +821,20 @@ new_segment: tp->write_seq += copy; TCP_SKB_CB(skb)->end_seq += copy; + skb_shinfo(skb)->gso_segs = 0; from += copy; copied += copy; if ((seglen -= copy) == 0 && iovlen == 0) goto out; - if (skb->len != mss_now || (flags & MSG_OOB)) + if (skb->len < mss_now || (flags & MSG_OOB)) continue; if (forced_push(tp)) { tcp_mark_push(tp, skb); __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH); - } else if (skb == tp->send_head) + } else if (skb == sk->sk_send_head) tcp_push_one(sk, mss_now); continue; @@ -1213,10 +844,11 @@ wait_for_memory: if (copied) tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); - if ((err = wait_for_tcp_memory(sk, &timeo)) != 0) + if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) goto do_error; mss_now = tcp_current_mss(sk, !(flags&MSG_OOB)); + size_goal = tp->xmit_size_goal; } } @@ -1229,17 +861,17 @@ out: do_fault: if (!skb->len) { - if (tp->send_head == skb) - tp->send_head = NULL; - __skb_unlink(skb, skb->list); - tcp_free_skb(sk, skb); + if (sk->sk_send_head == skb) + sk->sk_send_head = NULL; + __skb_unlink(skb, &sk->sk_write_queue); + sk_stream_free_skb(sk, skb); } do_error: if (copied) goto out; out_err: - err = tcp_error(sk, flags, err); + err = sk_stream_error(sk, flags, err); TCP_CHECK_TIMER(sk); release_sock(sk); return err; @@ -1254,7 +886,7 @@ static int tcp_recv_urg(struct sock *sk, long timeo, struct msghdr *msg, int len, int flags, int *addr_len) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); /* No URG data to read. */ if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || @@ -1302,9 +934,9 @@ static int tcp_recv_urg(struct sock *sk, long timeo, * calculation of whether or not we must ACK for the sake of * a window update. 
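 *
 * (Annotation, not from the patch: the renamed tcp_cleanup_rbuf()
 * keeps the old policy; after the reader drains data it asks, in
 * order, whether a delayed ACK is already blocked, whether more than
 * one full segment arrived unacknowledged,
 *
 *	tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss
 *
 * or whether the emptied receive buffer opens the window enough that
 * the peer should hear about it now rather than at the delack timer.)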
*/ -static void cleanup_rbuf(struct sock *sk, int copied) +void tcp_cleanup_rbuf(struct sock *sk, int copied) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int time_to_ack = 0; #if TCP_DEBUG @@ -1313,20 +945,24 @@ static void cleanup_rbuf(struct sock *sk, int copied) BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)); #endif - if (tcp_ack_scheduled(tp)) { + if (inet_csk_ack_scheduled(sk)) { + const struct inet_connection_sock *icsk = inet_csk(sk); /* Delayed ACKs frequently hit locked sockets during bulk * receive. */ - if (tp->ack.blocked || + if (icsk->icsk_ack.blocked || /* Once-per-two-segments ACK was not sent by tcp_input.c */ - tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss || + tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || /* * If this read emptied read buffer, we send ACK, if * connection is not bidirectional, user drained * receive buffer and there was a small segment * in queue. */ - (copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) && - !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc))) + (copied > 0 && + ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || + ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && + !icsk->icsk_ack.pingpong)) && + !atomic_read(&sk->sk_rmem_alloc))) time_to_ack = 1; } @@ -1359,9 +995,9 @@ static void cleanup_rbuf(struct sock *sk, int copied) static void tcp_prequeue_process(struct sock *sk) { struct sk_buff *skb; - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); - NET_ADD_STATS_USER(TCPPrequeued, skb_queue_len(&tp->ucopy.prequeue)); + NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED); /* RX process wants to run with disabled BHs, though it is not * necessary */ @@ -1406,7 +1042,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, sk_read_actor_t recv_actor) { struct sk_buff *skb; - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); u32 seq = tp->copied_seq; u32 offset; int copied = 0; @@ -1436,11 +1072,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, break; } if (skb->h.th->fin) { - sk_eat_skb(sk, skb); + sk_eat_skb(sk, skb, 0); ++seq; break; } - sk_eat_skb(sk, skb); + sk_eat_skb(sk, skb, 0); if (!desc->count) break; } @@ -1450,7 +1086,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, /* Clean up data we have read: This will do ACK frames. 
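 *
 * (Annotation, not from the patch: tcp_read_sock() above feeds each
 * in-order segment to a caller-supplied actor of type
 *
 *	int (*sk_read_actor_t)(read_descriptor_t *desc,
 *			       struct sk_buff *skb,
 *			       unsigned int offset, size_t len);
 *
 * which returns how many bytes it consumed; callers use it to eat
 * data without an extra copy, and the ACKing is done here once.)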
*/ if (copied) - cleanup_rbuf(sk, copied); + tcp_cleanup_rbuf(sk, copied); return copied; } @@ -1465,7 +1101,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int copied = 0; u32 peek_seq; u32 *seq; @@ -1474,6 +1110,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, int target; /* Read at least this many bytes */ long timeo; struct task_struct *user_recv = NULL; + int copied_early = 0; lock_sock(sk); @@ -1497,6 +1134,17 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); +#ifdef CONFIG_NET_DMA + tp->ucopy.dma_chan = NULL; + preempt_disable(); + if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && + !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) { + preempt_enable_no_resched(); + tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len); + } else + preempt_enable_no_resched(); +#endif + do { struct sk_buff *skb; u32 offset; @@ -1584,9 +1232,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, } } - cleanup_rbuf(sk, copied); + tcp_cleanup_rbuf(sk, copied); - if (tp->ucopy.task == user_recv) { + if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) { /* Install new reader */ if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) { user_recv = current; @@ -1625,7 +1273,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, * is not empty. It is more elegant, but eats cycles, * unfortunately. */ - if (skb_queue_len(&tp->ucopy.prequeue)) + if (!skb_queue_empty(&tp->ucopy.prequeue)) goto do_prequeue; /* __ Set realtime policy in scheduler __ */ @@ -1638,24 +1286,28 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, } else sk_wait_data(sk, &timeo); +#ifdef CONFIG_NET_DMA + tp->ucopy.wakeup = 0; +#endif + if (user_recv) { int chunk; /* __ Restore normal policy in scheduler __ */ if ((chunk = len - tp->ucopy.len) != 0) { - NET_ADD_STATS_USER(TCPDirectCopyFromBacklog, chunk); + NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); len -= chunk; copied += chunk; } if (tp->rcv_nxt == tp->copied_seq && - skb_queue_len(&tp->ucopy.prequeue)) { + !skb_queue_empty(&tp->ucopy.prequeue)) { do_prequeue: tcp_prequeue_process(sk); if ((chunk = len - tp->ucopy.len) != 0) { - NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk); + NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); len -= chunk; copied += chunk; } @@ -1693,13 +1345,39 @@ do_prequeue: } if (!(flags & MSG_TRUNC)) { - err = skb_copy_datagram_iovec(skb, offset, - msg->msg_iov, used); - if (err) { - /* Exception. Bailout! */ - if (!copied) - copied = -EFAULT; - break; +#ifdef CONFIG_NET_DMA + if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) + tp->ucopy.dma_chan = get_softnet_dma(); + + if (tp->ucopy.dma_chan) { + tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( + tp->ucopy.dma_chan, skb, offset, + msg->msg_iov, used, + tp->ucopy.pinned_list); + + if (tp->ucopy.dma_cookie < 0) { + + printk(KERN_ALERT "dma_cookie < 0\n"); + + /* Exception. Bailout! */ + if (!copied) + copied = -EFAULT; + break; + } + if ((offset + used) == skb->len) + copied_early = 1; + + } else +#endif + { + err = skb_copy_datagram_iovec(skb, offset, + msg->msg_iov, used); + if (err) { + /* Exception. Bailout! 
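 *
 * (Annotation, not from the patch: in this NET_DMA branch a negative
 * dma_cookie means the asynchronous copy could not be queued, so the
 * code fails the read exactly like the synchronous
 * skb_copy_datagram_iovec() fallback below does on -EFAULT.)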
*/ + if (!copied) + copied = -EFAULT; + break; + } } } @@ -1719,20 +1397,24 @@ skip_copy: if (skb->h.th->fin) goto found_fin_ok; - if (!(flags & MSG_PEEK)) - sk_eat_skb(sk, skb); + if (!(flags & MSG_PEEK)) { + sk_eat_skb(sk, skb, copied_early); + copied_early = 0; + } continue; found_fin_ok: /* Process the FIN. */ ++*seq; - if (!(flags & MSG_PEEK)) - sk_eat_skb(sk, skb); + if (!(flags & MSG_PEEK)) { + sk_eat_skb(sk, skb, copied_early); + copied_early = 0; + } break; } while (len > 0); if (user_recv) { - if (skb_queue_len(&tp->ucopy.prequeue)) { + if (!skb_queue_empty(&tp->ucopy.prequeue)) { int chunk; tp->ucopy.len = copied > 0 ? len : 0; @@ -1740,7 +1422,7 @@ skip_copy: tcp_prequeue_process(sk); if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) { - NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk); + NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); len -= chunk; copied += chunk; } @@ -1750,12 +1432,42 @@ skip_copy: tp->ucopy.len = 0; } +#ifdef CONFIG_NET_DMA + if (tp->ucopy.dma_chan) { + struct sk_buff *skb; + dma_cookie_t done, used; + + dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); + + while (dma_async_memcpy_complete(tp->ucopy.dma_chan, + tp->ucopy.dma_cookie, &done, + &used) == DMA_IN_PROGRESS) { + /* do partial cleanup of sk_async_wait_queue */ + while ((skb = skb_peek(&sk->sk_async_wait_queue)) && + (dma_async_is_complete(skb->dma_cookie, done, + used) == DMA_SUCCESS)) { + __skb_dequeue(&sk->sk_async_wait_queue); + kfree_skb(skb); + } + } + + /* Safe to free early-copied skbs now */ + __skb_queue_purge(&sk->sk_async_wait_queue); + dma_chan_put(tp->ucopy.dma_chan); + tp->ucopy.dma_chan = NULL; + } + if (tp->ucopy.pinned_list) { + dma_unpin_iovec_pages(tp->ucopy.pinned_list); + tp->ucopy.pinned_list = NULL; + } +#endif + /* According to UNIX98, msg_name/msg_namelen are ignored * on connected socket. I was just happy when found this 8) --ANK */ /* Clean up data we have read: This will do ACK frames. */ - cleanup_rbuf(sk, copied); + tcp_cleanup_rbuf(sk, copied); TCP_CHECK_TIMER(sk); release_sock(sk); @@ -1778,7 +1490,7 @@ recv_urg: * closed. */ -static unsigned char new_state[16] = { +static const unsigned char new_state[16] = { /* current state: new state: action: */ /* (Invalid) */ TCP_CLOSE, /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, @@ -1828,86 +1540,11 @@ void tcp_shutdown(struct sock *sk, int how) } } - -/* - * Return 1 if we still have things to send in our buffers. - */ - -static inline int closing(struct sock *sk) -{ - return (1 << sk->sk_state) & - (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK); -} - -static __inline__ void tcp_kill_sk_queues(struct sock *sk) -{ - /* First the read buffer. */ - __skb_queue_purge(&sk->sk_receive_queue); - - /* Next, the error queue. */ - __skb_queue_purge(&sk->sk_error_queue); - - /* Next, the write queue. */ - BUG_TRAP(skb_queue_empty(&sk->sk_write_queue)); - - /* Account for returned memory. */ - tcp_mem_reclaim(sk); - - BUG_TRAP(!sk->sk_wmem_queued); - BUG_TRAP(!sk->sk_forward_alloc); - - /* It is _impossible_ for the backlog to contain anything - * when we get here. All user references to this socket - * have gone away, only the net layer knows can touch it. - */ -} - -/* - * At this point, there should be no process reference to this - * socket, and thus no user references at all. Therefore we - * can assume the socket waitqueue is inactive and nobody will - * try to jump onto it. 
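 *
 * (Annotation, not from the patch: this destructor is deleted because
 * the series generalizes it; the close path now ends in
 * inet_csk_destroy_sock(), which does the same unhash, queue purge
 * and orphan-count accounting for any connection-oriented socket,
 * as the '+' lines further down in tcp_close() show.)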
- */ -void tcp_destroy_sock(struct sock *sk) -{ - BUG_TRAP(sk->sk_state == TCP_CLOSE); - BUG_TRAP(sock_flag(sk, SOCK_DEAD)); - - /* It cannot be in hash table! */ - BUG_TRAP(sk_unhashed(sk)); - - /* If it has not 0 inet_sk(sk)->num, it must be bound */ - BUG_TRAP(!inet_sk(sk)->num || tcp_sk(sk)->bind_hash); - -#ifdef TCP_DEBUG - if (sk->sk_zapped) { - printk(KERN_DEBUG "TCP: double destroy sk=%p\n", sk); - sock_hold(sk); - } - sk->sk_zapped = 1; -#endif - - sk->sk_prot->destroy(sk); - - tcp_kill_sk_queues(sk); - - xfrm_sk_free_policy(sk); - -#ifdef INET_REFCNT_DEBUG - if (atomic_read(&sk->sk_refcnt) != 1) { - printk(KERN_DEBUG "Destruction TCP %p delayed, c=%d\n", - sk, atomic_read(&sk->sk_refcnt)); - } -#endif - - atomic_dec(&tcp_orphan_count); - sock_put(sk); -} - void tcp_close(struct sock *sk, long timeout) { struct sk_buff *skb; int data_was_unread = 0; + int state; lock_sock(sk); sk->sk_shutdown = SHUTDOWN_MASK; @@ -1916,7 +1553,7 @@ void tcp_close(struct sock *sk, long timeout) tcp_set_state(sk, TCP_CLOSE); /* Special case. */ - tcp_listen_stop(sk); + inet_csk_listen_stop(sk); goto adjudge_to_death; } @@ -1932,7 +1569,7 @@ void tcp_close(struct sock *sk, long timeout) __kfree_skb(skb); } - tcp_mem_reclaim(sk); + sk_stream_mem_reclaim(sk); /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section * 3.10, we send a RST here because data was lost. To @@ -1945,13 +1582,13 @@ void tcp_close(struct sock *sk, long timeout) */ if (data_was_unread) { /* Unread data was tossed, zap the connection. */ - NET_INC_STATS_USER(TCPAbortOnClose); + NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE); tcp_set_state(sk, TCP_CLOSE); tcp_send_active_reset(sk, GFP_KERNEL); } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { /* Check zero linger _after_ checking for unread data. */ sk->sk_prot->disconnect(sk, 0); - NET_INC_STATS_USER(TCPAbortOnData); + NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA); } else if (tcp_close_state(sk)) { /* We FIN if the application ate all the data before * zapping the connection. @@ -1981,24 +1618,14 @@ void tcp_close(struct sock *sk, long timeout) tcp_send_fin(sk); } - if (timeout) { - struct task_struct *tsk = current; - DEFINE_WAIT(wait); - - do { - prepare_to_wait(sk->sk_sleep, &wait, - TASK_INTERRUPTIBLE); - if (!closing(sk)) - break; - release_sock(sk); - timeout = schedule_timeout(timeout); - lock_sock(sk); - } while (!signal_pending(tsk) && timeout); - - finish_wait(sk->sk_sleep, &wait); - } + sk_stream_wait_close(sk, timeout); adjudge_to_death: + state = sk->sk_state; + sock_hold(sk); + sock_orphan(sk); + atomic_inc(sk->sk_prot->orphan_count); + /* It is the last release_sock in its life. It will remove backlog. */ release_sock(sk); @@ -2010,8 +1637,9 @@ adjudge_to_death: bh_lock_sock(sk); BUG_TRAP(!sock_owned_by_user(sk)); - sock_hold(sk); - sock_orphan(sk); + /* Have we already been destroyed by a softirq or backlog? */ + if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) + goto out; /* This is a (useful) BSD violating of the RFC. 
There is a * problem with TCP as specified in that the other end could @@ -2028,26 +1656,26 @@ adjudge_to_death: */ if (sk->sk_state == TCP_FIN_WAIT2) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); if (tp->linger2 < 0) { tcp_set_state(sk, TCP_CLOSE); tcp_send_active_reset(sk, GFP_ATOMIC); - NET_INC_STATS_BH(TCPAbortOnLinger); + NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER); } else { - int tmo = tcp_fin_time(tp); + const int tmo = tcp_fin_time(sk); if (tmo > TCP_TIMEWAIT_LEN) { - tcp_reset_keepalive_timer(sk, tcp_fin_time(tp)); + inet_csk_reset_keepalive_timer(sk, + tmo - TCP_TIMEWAIT_LEN); } else { - atomic_inc(&tcp_orphan_count); tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); goto out; } } } if (sk->sk_state != TCP_CLOSE) { - tcp_mem_reclaim(sk); - if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans || + sk_stream_mem_reclaim(sk); + if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans || (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) { if (net_ratelimit()) @@ -2055,13 +1683,12 @@ adjudge_to_death: "sockets\n"); tcp_set_state(sk, TCP_CLOSE); tcp_send_active_reset(sk, GFP_ATOMIC); - NET_INC_STATS_BH(TCPAbortOnMemory); + NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY); } } - atomic_inc(&tcp_orphan_count); if (sk->sk_state == TCP_CLOSE) - tcp_destroy_sock(sk); + inet_csk_destroy_sock(sk); /* Otherwise, socket is reprieved until protocol close. */ out: @@ -2081,8 +1708,9 @@ static inline int tcp_need_reset(int state) int tcp_disconnect(struct sock *sk, int flags) { - struct inet_opt *inet = inet_sk(sk); - struct tcp_opt *tp = tcp_sk(sk); + struct inet_sock *inet = inet_sk(sk); + struct inet_connection_sock *icsk = inet_csk(sk); + struct tcp_sock *tp = tcp_sk(sk); int err = 0; int old_state = sk->sk_state; @@ -2091,11 +1719,11 @@ int tcp_disconnect(struct sock *sk, int flags) /* ABORT function of RFC793 */ if (old_state == TCP_LISTEN) { - tcp_listen_stop(sk); + inet_csk_listen_stop(sk); } else if (tcp_need_reset(old_state) || (tp->snd_nxt != tp->write_seq && (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { - /* The last check adjusts for discrepance of Linux wrt. RFC + /* The last check adjusts for discrepancy of Linux wrt. RFC * states */ tcp_send_active_reset(sk, gfp_any()); @@ -2105,8 +1733,11 @@ int tcp_disconnect(struct sock *sk, int flags) tcp_clear_xmit_timers(sk); __skb_queue_purge(&sk->sk_receive_queue); - tcp_writequeue_purge(sk); + sk_stream_writequeue_purge(sk); __skb_queue_purge(&tp->out_of_order_queue); +#ifdef CONFIG_NET_DMA + __skb_queue_purge(&sk->sk_async_wait_queue); +#endif inet->dport = 0; @@ -2118,139 +1749,57 @@ int tcp_disconnect(struct sock *sk, int flags) tp->srtt = 0; if ((tp->write_seq += tp->max_window + 2) == 0) tp->write_seq = 1; - tp->backoff = 0; + icsk->icsk_backoff = 0; tp->snd_cwnd = 2; - tp->probes_out = 0; + icsk->icsk_probes_out = 0; tp->packets_out = 0; tp->snd_ssthresh = 0x7fffffff; tp->snd_cwnd_cnt = 0; - tcp_set_ca_state(tp, TCP_CA_Open); + tp->bytes_acked = 0; + tcp_set_ca_state(sk, TCP_CA_Open); tcp_clear_retrans(tp); - tcp_delack_init(tp); - tp->send_head = NULL; - tp->saw_tstamp = 0; - tcp_sack_reset(tp); + inet_csk_delack_init(sk); + sk->sk_send_head = NULL; + tp->rx_opt.saw_tstamp = 0; + tcp_sack_reset(&tp->rx_opt); __sk_dst_reset(sk); - BUG_TRAP(!inet->num || tp->bind_hash); + BUG_TRAP(!inet->num || icsk->icsk_bind_hash); sk->sk_error_report(sk); return err; } /* - * Wait for an incoming connection, avoid race - * conditions. 
This must be called with the socket locked. - */ -static int wait_for_connect(struct sock *sk, long timeo) -{ - struct tcp_opt *tp = tcp_sk(sk); - DEFINE_WAIT(wait); - int err; - - /* - * True wake-one mechanism for incoming connections: only - * one process gets woken up, not the 'whole herd'. - * Since we do not 'race & poll' for established sockets - * anymore, the common case will execute the loop only once. - * - * Subtle issue: "add_wait_queue_exclusive()" will be added - * after any current non-exclusive waiters, and we know that - * it will always _stay_ after any new non-exclusive waiters - * because all non-exclusive waiters are added at the - * beginning of the wait-queue. As such, it's ok to "drop" - * our exclusiveness temporarily when we get woken up without - * having to remove and re-insert us on the wait queue. - */ - for (;;) { - prepare_to_wait_exclusive(sk->sk_sleep, &wait, - TASK_INTERRUPTIBLE); - release_sock(sk); - if (!tp->accept_queue) - timeo = schedule_timeout(timeo); - lock_sock(sk); - err = 0; - if (tp->accept_queue) - break; - err = -EINVAL; - if (sk->sk_state != TCP_LISTEN) - break; - err = sock_intr_errno(timeo); - if (signal_pending(current)) - break; - err = -EAGAIN; - if (!timeo) - break; - } - finish_wait(sk->sk_sleep, &wait); - return err; -} - -/* - * This will accept the next outstanding connection. + * Socket option code for TCP. */ - -struct sock *tcp_accept(struct sock *sk, int flags, int *err) +static int do_tcp_setsockopt(struct sock *sk, int level, + int optname, char __user *optval, int optlen) { - struct tcp_opt *tp = tcp_sk(sk); - struct open_request *req; - struct sock *newsk; - int error; - - lock_sock(sk); + struct tcp_sock *tp = tcp_sk(sk); + struct inet_connection_sock *icsk = inet_csk(sk); + int val; + int err = 0; - /* We need to make sure that this socket is listening, - * and that it has something pending. - */ - error = -EINVAL; - if (sk->sk_state != TCP_LISTEN) - goto out; + /* This is a string value all the others are int's */ + if (optname == TCP_CONGESTION) { + char name[TCP_CA_NAME_MAX]; - /* Find already established connection */ - if (!tp->accept_queue) { - long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); + if (optlen < 1) + return -EINVAL; - /* If this is a non blocking socket don't sleep */ - error = -EAGAIN; - if (!timeo) - goto out; + val = strncpy_from_user(name, optval, + min(TCP_CA_NAME_MAX-1, optlen)); + if (val < 0) + return -EFAULT; + name[val] = 0; - error = wait_for_connect(sk, timeo); - if (error) - goto out; + lock_sock(sk); + err = tcp_set_congestion_control(sk, name); + release_sock(sk); + return err; } - req = tp->accept_queue; - if ((tp->accept_queue = req->dl_next) == NULL) - tp->accept_queue_tail = NULL; - - newsk = req->sk; - sk_acceptq_removed(sk); - tcp_openreq_fastfree(req); - BUG_TRAP(newsk->sk_state != TCP_SYN_RECV); - release_sock(sk); - return newsk; - -out: - release_sock(sk); - *err = error; - return NULL; -} - -/* - * Socket option code for TCP. 
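 *
 * (Annotation, not from the patch: the body that lived here moves
 * into do_tcp_setsockopt(); tcp_setsockopt() shrinks to a dispatcher,
 *
 *	if (level != SOL_TCP)
 *		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
 *						     optval, optlen);
 *	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
 *
 * and compat_tcp_setsockopt() gives 32-bit tasks the same split path.)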
- */ -int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, - int optlen) -{ - struct tcp_opt *tp = tcp_sk(sk); - int val; - int err = 0; - - if (level != SOL_TCP) - return tp->af_specific->setsockopt(sk, level, optname, - optval, optlen); - if (optlen < sizeof(int)) return -EINVAL; @@ -2268,7 +1817,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, err = -EINVAL; break; } - tp->user_mss = val; + tp->rx_opt.user_mss = val; break; case TCP_NODELAY: @@ -2323,7 +1872,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, elapsed = tp->keepalive_time - elapsed; else elapsed = 0; - tcp_reset_keepalive_timer(sk, elapsed); + inet_csk_reset_keepalive_timer(sk, elapsed); } } break; @@ -2343,7 +1892,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, if (val < 1 || val > MAX_TCP_SYNCNT) err = -EINVAL; else - tp->syn_retries = val; + icsk->icsk_syn_retries = val; break; case TCP_LINGER2: @@ -2356,15 +1905,15 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, break; case TCP_DEFER_ACCEPT: - tp->defer_accept = 0; + icsk->icsk_accept_queue.rskq_defer_accept = 0; if (val > 0) { /* Translate value in seconds to number of * retransmits */ - while (tp->defer_accept < 32 && + while (icsk->icsk_accept_queue.rskq_defer_accept < 32 && val > ((TCP_TIMEOUT_INIT / HZ) << - tp->defer_accept)) - tp->defer_accept++; - tp->defer_accept++; + icsk->icsk_accept_queue.rskq_defer_accept)) + icsk->icsk_accept_queue.rskq_defer_accept++; + icsk->icsk_accept_queue.rskq_defer_accept++; } break; @@ -2382,20 +1931,27 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, case TCP_QUICKACK: if (!val) { - tp->ack.pingpong = 1; + icsk->icsk_ack.pingpong = 1; } else { - tp->ack.pingpong = 0; + icsk->icsk_ack.pingpong = 0; if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && - tcp_ack_scheduled(tp)) { - tp->ack.pending |= TCP_ACK_PUSHED; - cleanup_rbuf(sk, 1); + inet_csk_ack_scheduled(sk)) { + icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; + tcp_cleanup_rbuf(sk, 1); if (!(val & 1)) - tp->ack.pingpong = 1; + icsk->icsk_ack.pingpong = 1; } } break; +#ifdef CONFIG_TCP_MD5SIG + case TCP_MD5SIG: + /* Read the IP->Key mappings from userspace */ + err = tp->af_specific->md5_parse(sk, optval, optlen); + break; +#endif + default: err = -ENOPROTOOPT; break; @@ -2404,15 +1960,96 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, return err; } -int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, - int __user *optlen) +int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, + int optlen) { - struct tcp_opt *tp = tcp_sk(sk); - int val, len; + struct inet_connection_sock *icsk = inet_csk(sk); if (level != SOL_TCP) - return tp->af_specific->getsockopt(sk, level, optname, - optval, optlen); + return icsk->icsk_af_ops->setsockopt(sk, level, optname, + optval, optlen); + return do_tcp_setsockopt(sk, level, optname, optval, optlen); +} + +#ifdef CONFIG_COMPAT +int compat_tcp_setsockopt(struct sock *sk, int level, int optname, + char __user *optval, int optlen) +{ + if (level != SOL_TCP) + return inet_csk_compat_setsockopt(sk, level, optname, + optval, optlen); + return do_tcp_setsockopt(sk, level, optname, optval, optlen); +} + +EXPORT_SYMBOL(compat_tcp_setsockopt); +#endif + +/* Return information about state of tcp endpoint in API format. 
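 *
 * (Annotation, not from the patch: user space reads this structure
 * through the TCP_INFO socket option, along the lines of
 *
 *	struct tcp_info info;
 *	socklen_t len = sizeof(info);
 *	getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len);
 *
 * which is how diagnostic tools obtain rtt, cwnd and retransmit
 * counters for an open connection.)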
*/ +void tcp_get_info(struct sock *sk, struct tcp_info *info) +{ + struct tcp_sock *tp = tcp_sk(sk); + const struct inet_connection_sock *icsk = inet_csk(sk); + u32 now = tcp_time_stamp; + + memset(info, 0, sizeof(*info)); + + info->tcpi_state = sk->sk_state; + info->tcpi_ca_state = icsk->icsk_ca_state; + info->tcpi_retransmits = icsk->icsk_retransmits; + info->tcpi_probes = icsk->icsk_probes_out; + info->tcpi_backoff = icsk->icsk_backoff; + + if (tp->rx_opt.tstamp_ok) + info->tcpi_options |= TCPI_OPT_TIMESTAMPS; + if (tp->rx_opt.sack_ok) + info->tcpi_options |= TCPI_OPT_SACK; + if (tp->rx_opt.wscale_ok) { + info->tcpi_options |= TCPI_OPT_WSCALE; + info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; + info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; + } + + if (tp->ecn_flags&TCP_ECN_OK) + info->tcpi_options |= TCPI_OPT_ECN; + + info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); + info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); + info->tcpi_snd_mss = tp->mss_cache; + info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; + + info->tcpi_unacked = tp->packets_out; + info->tcpi_sacked = tp->sacked_out; + info->tcpi_lost = tp->lost_out; + info->tcpi_retrans = tp->retrans_out; + info->tcpi_fackets = tp->fackets_out; + + info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); + info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); + info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); + + info->tcpi_pmtu = icsk->icsk_pmtu_cookie; + info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; + info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3; + info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2; + info->tcpi_snd_ssthresh = tp->snd_ssthresh; + info->tcpi_snd_cwnd = tp->snd_cwnd; + info->tcpi_advmss = tp->advmss; + info->tcpi_reordering = tp->reordering; + + info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3; + info->tcpi_rcv_space = tp->rcvq_space.space; + + info->tcpi_total_retrans = tp->total_retrans; +} + +EXPORT_SYMBOL_GPL(tcp_get_info); + +static int do_tcp_getsockopt(struct sock *sk, int level, + int optname, char __user *optval, int __user *optlen) +{ + struct inet_connection_sock *icsk = inet_csk(sk); + struct tcp_sock *tp = tcp_sk(sk); + int val, len; if (get_user(len, optlen)) return -EFAULT; @@ -2424,9 +2061,9 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, switch (optname) { case TCP_MAXSEG: - val = tp->mss_cache_std; + val = tp->mss_cache; if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) - val = tp->user_mss; + val = tp->rx_opt.user_mss; break; case TCP_NODELAY: val = !!(tp->nonagle&TCP_NAGLE_OFF); @@ -2444,7 +2081,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes; break; case TCP_SYNCNT: - val = tp->syn_retries ? : sysctl_tcp_syn_retries; + val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; break; case TCP_LINGER2: val = tp->linger2; @@ -2452,8 +2089,8 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, val = (val ? : sysctl_tcp_fin_timeout) / HZ; break; case TCP_DEFER_ACCEPT: - val = !tp->defer_accept ? 0 : ((TCP_TIMEOUT_INIT / HZ) << - (tp->defer_accept - 1)); + val = !icsk->icsk_accept_queue.rskq_defer_accept ? 
0 : + ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1)); break; case TCP_WINDOW_CLAMP: val = tp->window_clamp; @@ -2474,8 +2111,18 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, return 0; } case TCP_QUICKACK: - val = !tp->ack.pingpong; + val = !icsk->icsk_ack.pingpong; break; + + case TCP_CONGESTION: + if (get_user(len, optlen)) + return -EFAULT; + len = min_t(unsigned int, len, TCP_CA_NAME_MAX); + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) + return -EFAULT; + return 0; default: return -ENOPROTOOPT; }; @@ -2487,9 +2134,247 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, return 0; } +int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, + int __user *optlen) +{ + struct inet_connection_sock *icsk = inet_csk(sk); + + if (level != SOL_TCP) + return icsk->icsk_af_ops->getsockopt(sk, level, optname, + optval, optlen); + return do_tcp_getsockopt(sk, level, optname, optval, optlen); +} + +#ifdef CONFIG_COMPAT +int compat_tcp_getsockopt(struct sock *sk, int level, int optname, + char __user *optval, int __user *optlen) +{ + if (level != SOL_TCP) + return inet_csk_compat_getsockopt(sk, level, optname, + optval, optlen); + return do_tcp_getsockopt(sk, level, optname, optval, optlen); +} + +EXPORT_SYMBOL(compat_tcp_getsockopt); +#endif + +struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) +{ + struct sk_buff *segs = ERR_PTR(-EINVAL); + struct tcphdr *th; + unsigned thlen; + unsigned int seq; + __be32 delta; + unsigned int oldlen; + unsigned int len; + + if (!pskb_may_pull(skb, sizeof(*th))) + goto out; + + th = skb->h.th; + thlen = th->doff * 4; + if (thlen < sizeof(*th)) + goto out; + + if (!pskb_may_pull(skb, thlen)) + goto out; + + oldlen = (u16)~skb->len; + __skb_pull(skb, thlen); + + if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { + /* Packet is from an untrusted source, reset gso_segs. 
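 *
 * (Annotation, not from the patch: for an untrusted (SKB_GSO_DODGY)
 * source only the segment count is recomputed as a ceiling division,
 *
 *	skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
 *
 * and the skb is passed through unsplit (segs = NULL); the actual
 * segmentation is left to skb_segment() or to the hardware.)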
*/ + int type = skb_shinfo(skb)->gso_type; + int mss; + + if (unlikely(type & + ~(SKB_GSO_TCPV4 | + SKB_GSO_DODGY | + SKB_GSO_TCP_ECN | + SKB_GSO_TCPV6 | + 0) || + !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) + goto out; + + mss = skb_shinfo(skb)->gso_size; + skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss; + + segs = NULL; + goto out; + } + + segs = skb_segment(skb, features); + if (IS_ERR(segs)) + goto out; + + len = skb_shinfo(skb)->gso_size; + delta = htonl(oldlen + (thlen + len)); + + skb = segs; + th = skb->h.th; + seq = ntohl(th->seq); + + do { + th->fin = th->psh = 0; + + th->check = ~csum_fold((__force __wsum)((__force u32)th->check + + (__force u32)delta)); + if (skb->ip_summed != CHECKSUM_PARTIAL) + th->check = csum_fold(csum_partial(skb->h.raw, thlen, + skb->csum)); + + seq += len; + skb = skb->next; + th = skb->h.th; + + th->seq = htonl(seq); + th->cwr = 0; + } while (skb->next); + + delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len); + th->check = ~csum_fold((__force __wsum)((__force u32)th->check + + (__force u32)delta)); + if (skb->ip_summed != CHECKSUM_PARTIAL) + th->check = csum_fold(csum_partial(skb->h.raw, thlen, + skb->csum)); + +out: + return segs; +} +EXPORT_SYMBOL(tcp_tso_segment); + +#ifdef CONFIG_TCP_MD5SIG +static unsigned long tcp_md5sig_users; +static struct tcp_md5sig_pool **tcp_md5sig_pool; +static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); + +static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool) +{ + int cpu; + for_each_possible_cpu(cpu) { + struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu); + if (p) { + if (p->md5_desc.tfm) + crypto_free_hash(p->md5_desc.tfm); + kfree(p); + p = NULL; + } + } + free_percpu(pool); +} + +void tcp_free_md5sig_pool(void) +{ + struct tcp_md5sig_pool **pool = NULL; + + spin_lock_bh(&tcp_md5sig_pool_lock); + if (--tcp_md5sig_users == 0) { + pool = tcp_md5sig_pool; + tcp_md5sig_pool = NULL; + } + spin_unlock_bh(&tcp_md5sig_pool_lock); + if (pool) + __tcp_free_md5sig_pool(pool); +} + +EXPORT_SYMBOL(tcp_free_md5sig_pool); + +static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void) +{ + int cpu; + struct tcp_md5sig_pool **pool; + + pool = alloc_percpu(struct tcp_md5sig_pool *); + if (!pool) + return NULL; + + for_each_possible_cpu(cpu) { + struct tcp_md5sig_pool *p; + struct crypto_hash *hash; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + goto out_free; + *per_cpu_ptr(pool, cpu) = p; + + hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); + if (!hash || IS_ERR(hash)) + goto out_free; + + p->md5_desc.tfm = hash; + } + return pool; +out_free: + __tcp_free_md5sig_pool(pool); + return NULL; +} + +struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void) +{ + struct tcp_md5sig_pool **pool; + int alloc = 0; + +retry: + spin_lock_bh(&tcp_md5sig_pool_lock); + pool = tcp_md5sig_pool; + if (tcp_md5sig_users++ == 0) { + alloc = 1; + spin_unlock_bh(&tcp_md5sig_pool_lock); + } else if (!pool) { + tcp_md5sig_users--; + spin_unlock_bh(&tcp_md5sig_pool_lock); + cpu_relax(); + goto retry; + } else + spin_unlock_bh(&tcp_md5sig_pool_lock); + + if (alloc) { + /* we cannot hold spinlock here because this may sleep. */ + struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(); + spin_lock_bh(&tcp_md5sig_pool_lock); + if (!p) { + tcp_md5sig_users--; + spin_unlock_bh(&tcp_md5sig_pool_lock); + return NULL; + } + pool = tcp_md5sig_pool; + if (pool) { + /* oops, it has already been assigned. 
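 *
 * (Annotation, not from the patch: this is the classic optimistic
 * allocation pattern; tcp_md5sig_pool_lock was dropped while
 * __tcp_alloc_md5sig_pool() slept, so another CPU may have installed
 * a pool meanwhile. If so, keep the winner and free our copy with
 * __tcp_free_md5sig_pool() outside the lock.)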
+
+#ifdef CONFIG_TCP_MD5SIG
+static unsigned long tcp_md5sig_users;
+static struct tcp_md5sig_pool **tcp_md5sig_pool;
+static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
+
+static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
+{
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
+		if (p) {
+			if (p->md5_desc.tfm)
+				crypto_free_hash(p->md5_desc.tfm);
+			kfree(p);
+			p = NULL;
+		}
+	}
+	free_percpu(pool);
+}
+
+void tcp_free_md5sig_pool(void)
+{
+	struct tcp_md5sig_pool **pool = NULL;
+
+	spin_lock_bh(&tcp_md5sig_pool_lock);
+	if (--tcp_md5sig_users == 0) {
+		pool = tcp_md5sig_pool;
+		tcp_md5sig_pool = NULL;
+	}
+	spin_unlock_bh(&tcp_md5sig_pool_lock);
+	if (pool)
+		__tcp_free_md5sig_pool(pool);
+}
+
+EXPORT_SYMBOL(tcp_free_md5sig_pool);
+
+static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
+{
+	int cpu;
+	struct tcp_md5sig_pool **pool;
+
+	pool = alloc_percpu(struct tcp_md5sig_pool *);
+	if (!pool)
+		return NULL;
+
+	for_each_possible_cpu(cpu) {
+		struct tcp_md5sig_pool *p;
+		struct crypto_hash *hash;
+
+		p = kzalloc(sizeof(*p), GFP_KERNEL);
+		if (!p)
+			goto out_free;
+		*per_cpu_ptr(pool, cpu) = p;
+
+		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+		if (!hash || IS_ERR(hash))
+			goto out_free;
+
+		p->md5_desc.tfm = hash;
+	}
+	return pool;
+out_free:
+	__tcp_free_md5sig_pool(pool);
+	return NULL;
+}
+
+struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
+{
+	struct tcp_md5sig_pool **pool;
+	int alloc = 0;
+
+retry:
+	spin_lock_bh(&tcp_md5sig_pool_lock);
+	pool = tcp_md5sig_pool;
+	if (tcp_md5sig_users++ == 0) {
+		alloc = 1;
+		spin_unlock_bh(&tcp_md5sig_pool_lock);
+	} else if (!pool) {
+		tcp_md5sig_users--;
+		spin_unlock_bh(&tcp_md5sig_pool_lock);
+		cpu_relax();
+		goto retry;
+	} else
+		spin_unlock_bh(&tcp_md5sig_pool_lock);
+
+	if (alloc) {
+		/* we cannot hold spinlock here because this may sleep. */
+		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
+		spin_lock_bh(&tcp_md5sig_pool_lock);
+		if (!p) {
+			tcp_md5sig_users--;
+			spin_unlock_bh(&tcp_md5sig_pool_lock);
+			return NULL;
+		}
+		pool = tcp_md5sig_pool;
+		if (pool) {
+			/* oops, it has already been assigned. */
+			spin_unlock_bh(&tcp_md5sig_pool_lock);
+			__tcp_free_md5sig_pool(p);
+		} else {
+			tcp_md5sig_pool = pool = p;
+			spin_unlock_bh(&tcp_md5sig_pool_lock);
+		}
+	}
+	return pool;
+}
+
+EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
+
+struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
+{
+	struct tcp_md5sig_pool **p;
+	spin_lock_bh(&tcp_md5sig_pool_lock);
+	p = tcp_md5sig_pool;
+	if (p)
+		tcp_md5sig_users++;
+	spin_unlock_bh(&tcp_md5sig_pool_lock);
+	return (p ? *per_cpu_ptr(p, cpu) : NULL);
+}
+
+EXPORT_SYMBOL(__tcp_get_md5sig_pool);
+
+void __tcp_put_md5sig_pool(void)
+{
+	tcp_free_md5sig_pool();
+}
+
+EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+#endif
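
tcp_alloc_md5sig_pool() above is a refcounted, lazily created singleton: the spinlock protects only the user count and the pool pointer, the allocation itself (which may sleep) runs unlocked, racing callers back off and retry until the pointer appears, and a loser of the install race frees its duplicate. A userspace sketch of the same pattern, with a mutex standing in for the spinlock and malloc() for the per-cpu crypto state; all names here are illustrative, not kernel API:

	#include <pthread.h>
	#include <sched.h>
	#include <stdlib.h>

	#define POOL_SIZE 4096	/* stand-in for the real per-cpu pool */

	static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned long pool_users;
	static void *pool;

	void *pool_get(void)
	{
		void *p;

	retry:
		pthread_mutex_lock(&pool_lock);
		if (pool_users++ == 0) {
			/* First user: allocate with the lock dropped, may sleep. */
			pthread_mutex_unlock(&pool_lock);
			p = malloc(POOL_SIZE);
			pthread_mutex_lock(&pool_lock);
			if (!p) {
				pool_users--;
				pthread_mutex_unlock(&pool_lock);
				return NULL;
			}
			if (pool)
				free(p);	/* defensive: someone already installed one */
			else
				pool = p;
		} else if (!pool) {
			/* Allocation in flight on another thread: back off, retry. */
			pool_users--;
			pthread_mutex_unlock(&pool_lock);
			sched_yield();
			goto retry;
		}
		p = pool;
		pthread_mutex_unlock(&pool_lock);
		return p;
	}

	void pool_put(void)
	{
		pthread_mutex_lock(&pool_lock);
		if (--pool_users == 0) {	/* last user tears the pool down */
			free(pool);
			pool = NULL;
		}
		pthread_mutex_unlock(&pool_lock);
	}

	int main(void)
	{
		void *p = pool_get();
		if (p)
			pool_put();
		return 0;
	}
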
 
 extern void __skb_cb_too_small_for_tcp(int, int);
-extern void tcpdiag_init(void);
+extern struct tcp_congestion_ops tcp_reno;
 
 static __initdata unsigned long thash_entries;
 static int __init set_thash_entries(char *str)
@@ -2504,127 +2389,109 @@ __setup("thash_entries=", set_thash_entries);
 void __init tcp_init(void)
 {
 	struct sk_buff *skb = NULL;
-	unsigned long goal;
-	int order, i;
+	unsigned long limit;
+	int order, i, max_share;
 
 	if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
 		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
 					   sizeof(skb->cb));
 
-	tcp_openreq_cachep = kmem_cache_create("tcp_open_request",
-					       sizeof(struct open_request),
-					       0, SLAB_HWCACHE_ALIGN,
-					       NULL, NULL);
-	if (!tcp_openreq_cachep)
-		panic("tcp_init: Cannot alloc open_request cache.");
-
-	tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
-					      sizeof(struct tcp_bind_bucket),
-					      0, SLAB_HWCACHE_ALIGN,
-					      NULL, NULL);
-	if (!tcp_bucket_cachep)
-		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
-
-	tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
-						sizeof(struct tcp_tw_bucket),
-						0, SLAB_HWCACHE_ALIGN,
-						NULL, NULL);
-	if (!tcp_timewait_cachep)
-		panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");
+	tcp_hashinfo.bind_bucket_cachep =
+		kmem_cache_create("tcp_bind_bucket",
+				  sizeof(struct inet_bind_bucket), 0,
+				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 
 	/* Size and allocate the main established and bind bucket
 	 * hash tables.
 	 *
 	 * The methodology is similar to that of the buffer cache.
 	 */
-	if (num_physpages >= (128 * 1024))
-		goal = num_physpages >> (21 - PAGE_SHIFT);
-	else
-		goal = num_physpages >> (23 - PAGE_SHIFT);
-
-	if (thash_entries)
-		goal = (thash_entries * sizeof(struct tcp_ehash_bucket)) >> PAGE_SHIFT;
-	for (order = 0; (1UL << order) < goal; order++)
-		;
-	do {
-		tcp_ehash_size = (1UL << order) * PAGE_SIZE /
-			sizeof(struct tcp_ehash_bucket);
-		tcp_ehash_size >>= 1;
-		while (tcp_ehash_size & (tcp_ehash_size - 1))
-			tcp_ehash_size--;
-		tcp_ehash = (struct tcp_ehash_bucket *)
-			__get_free_pages(GFP_ATOMIC, order);
-	} while (!tcp_ehash && --order > 0);
-
-	if (!tcp_ehash)
-		panic("Failed to allocate TCP established hash table\n");
-	for (i = 0; i < (tcp_ehash_size << 1); i++) {
-		tcp_ehash[i].lock = RW_LOCK_UNLOCKED;
-		INIT_HLIST_HEAD(&tcp_ehash[i].chain);
+	tcp_hashinfo.ehash =
+		alloc_large_system_hash("TCP established",
+					sizeof(struct inet_ehash_bucket),
+					thash_entries,
+					(num_physpages >= 128 * 1024) ?
+					13 : 15,
+					0,
+					&tcp_hashinfo.ehash_size,
+					NULL,
+					0);
+	tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
+	for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
+		rwlock_init(&tcp_hashinfo.ehash[i].lock);
+		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
 	}
 
-	do {
-		tcp_bhash_size = (1UL << order) * PAGE_SIZE /
-			sizeof(struct tcp_bind_hashbucket);
-		if ((tcp_bhash_size > (64 * 1024)) && order > 0)
-			continue;
-		tcp_bhash = (struct tcp_bind_hashbucket *)
-			__get_free_pages(GFP_ATOMIC, order);
-	} while (!tcp_bhash && --order >= 0);
-
-	if (!tcp_bhash)
-		panic("Failed to allocate TCP bind hash table\n");
-	for (i = 0; i < tcp_bhash_size; i++) {
-		tcp_bhash[i].lock = SPIN_LOCK_UNLOCKED;
-		INIT_HLIST_HEAD(&tcp_bhash[i].chain);
+	tcp_hashinfo.bhash =
+		alloc_large_system_hash("TCP bind",
+					sizeof(struct inet_bind_hashbucket),
+					tcp_hashinfo.ehash_size,
+					(num_physpages >= 128 * 1024) ?
+					13 : 15,
+					0,
+					&tcp_hashinfo.bhash_size,
+					NULL,
+					64 * 1024);
+	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
+	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
+		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
+		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
 	}
 
 	/* Try to be a bit smarter and adjust defaults depending
 	 * on available memory.
 	 */
-	if (order > 4) {
+	for (order = 0; ((1 << order) << PAGE_SHIFT) <
+			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
+			order++)
+		;
+	if (order >= 4) {
 		sysctl_local_port_range[0] = 32768;
 		sysctl_local_port_range[1] = 61000;
-		sysctl_tcp_max_tw_buckets = 180000;
+		tcp_death_row.sysctl_max_tw_buckets = 180000;
 		sysctl_tcp_max_orphans = 4096 << (order - 4);
 		sysctl_max_syn_backlog = 1024;
 	} else if (order < 3) {
 		sysctl_local_port_range[0] = 1024 * (3 - order);
-		sysctl_tcp_max_tw_buckets >>= (3 - order);
+		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
 		sysctl_tcp_max_orphans >>= (3 - order);
 		sysctl_max_syn_backlog = 128;
 	}
-	tcp_port_rover = sysctl_local_port_range[0] - 1;
 
-	sysctl_tcp_mem[0] = 768 << order;
-	sysctl_tcp_mem[1] = 1024 << order;
-	sysctl_tcp_mem[2] = 1536 << order;
+	/* Set the pressure threshold to be a fraction of global memory that
+	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
+	 * memory, with a floor of 128 pages.
+	 */
+	limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
+	limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+	limit = max(limit, 128UL);
+	sysctl_tcp_mem[0] = limit / 4 * 3;
+	sysctl_tcp_mem[1] = limit;
+	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
 
-	if (order < 3) {
-		sysctl_tcp_wmem[2] = 64 * 1024;
-		sysctl_tcp_rmem[0] = PAGE_SIZE;
-		sysctl_tcp_rmem[1] = 43689;
-		sysctl_tcp_rmem[2] = 2 * 43689;
-	}
+	/* Set per-socket limits to no more than 1/128 the pressure threshold */
+	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
+	max_share = min(4UL*1024*1024, limit);
+
+	sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
+	sysctl_tcp_wmem[1] = 16*1024;
+	sysctl_tcp_wmem[2] = max(64*1024, max_share);
+
+	sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
+	sysctl_tcp_rmem[1] = 87380;
+	sysctl_tcp_rmem[2] = max(87380, max_share);
 
 	printk(KERN_INFO "TCP: Hash tables configured "
 	       "(established %d bind %d)\n",
-	       tcp_ehash_size << 1, tcp_bhash_size);
+	       tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
 
-	tcpdiag_init();
+	tcp_register_congestion_control(&tcp_reno);
 }
 
-EXPORT_SYMBOL(__tcp_mem_reclaim);
-EXPORT_SYMBOL(sysctl_tcp_rmem);
-EXPORT_SYMBOL(sysctl_tcp_wmem);
-EXPORT_SYMBOL(tcp_accept);
 EXPORT_SYMBOL(tcp_close);
-EXPORT_SYMBOL(tcp_close_state);
-EXPORT_SYMBOL(tcp_destroy_sock);
 EXPORT_SYMBOL(tcp_disconnect);
 EXPORT_SYMBOL(tcp_getsockopt);
 EXPORT_SYMBOL(tcp_ioctl);
-EXPORT_SYMBOL(tcp_openreq_cachep);
 EXPORT_SYMBOL(tcp_poll);
 EXPORT_SYMBOL(tcp_read_sock);
 EXPORT_SYMBOL(tcp_recvmsg);
@@ -2632,7 +2499,4 @@ EXPORT_SYMBOL(tcp_sendmsg);
 EXPORT_SYMBOL(tcp_sendpage);
 EXPORT_SYMBOL(tcp_setsockopt);
 EXPORT_SYMBOL(tcp_shutdown);
-EXPORT_SYMBOL(tcp_sockets_allocated);
 EXPORT_SYMBOL(tcp_statistics);
-EXPORT_SYMBOL(tcp_timewait_cachep);
-EXPORT_SYMBOL(tcp_write_space);
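
To make the new sysctl_tcp_mem and per-socket arithmetic concrete, the same expressions can be evaluated in userspace for a hypothetical machine with 1 GB of RAM and 4 KB pages (PAGE_SHIFT = 12, nr_all_pages = 262144); the sketch below prints tcp_mem = { 98304, 131072, 196608 } pages and a 4 MB cap for both wmem[2] and rmem[2]:

	/* Worked example of the tcp_init() sizing above, outside the
	 * kernel. The machine parameters are assumptions for the sake
	 * of the example; the expressions mirror the patch verbatim.
	 */
	#include <stdio.h>

	#define PAGE_SHIFT 12UL

	static unsigned long min_ul(unsigned long a, unsigned long b)
	{
		return a < b ? a : b;
	}

	static unsigned long max_ul(unsigned long a, unsigned long b)
	{
		return a > b ? a : b;
	}

	int main(void)
	{
		unsigned long nr_all_pages = 262144;	/* 1 GB / 4 KB pages */
		unsigned long tcp_mem[3], limit, max_share, wmem2, rmem2;

		/* pressure threshold: up to 1/2 of memory, capped at 256 MB input */
		limit = min_ul(nr_all_pages, 1UL << (28 - PAGE_SHIFT)) >> (20 - PAGE_SHIFT);
		limit = (limit * (nr_all_pages >> (20 - PAGE_SHIFT))) >> (PAGE_SHIFT - 11);
		limit = max_ul(limit, 128UL);

		tcp_mem[0] = limit / 4 * 3;
		tcp_mem[1] = limit;
		tcp_mem[2] = tcp_mem[0] * 2;

		/* per-socket caps: 1/128 of the threshold, at most 4 MB */
		limit = tcp_mem[1] << (PAGE_SHIFT - 7);
		max_share = min_ul(4UL * 1024 * 1024, limit);
		wmem2 = max_ul(64 * 1024, max_share);
		rmem2 = max_ul(87380, max_share);

		printf("tcp_mem = { %lu, %lu, %lu } pages\n",
		       tcp_mem[0], tcp_mem[1], tcp_mem[2]);
		printf("wmem[2] = %lu, rmem[2] = %lu bytes\n", wmem2, rmem2);
		return 0;
	}
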