*
* Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
*
- * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Mark Evans, <evansmp@uhura.aston.ac.uk>
* Corey Minyard <wf-rch!minyard@relay.EU.net>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/random.h>
+#include <linux/bootmem.h>
+#include <linux/in.h>
#include <net/icmp.h>
#include <net/tcp.h>
int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
-DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
-
-kmem_cache_t *tcp_openreq_cachep;
-kmem_cache_t *tcp_bucket_cachep;
-kmem_cache_t *tcp_timewait_cachep;
+DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
atomic_t tcp_orphan_count = ATOMIC_INIT(0);
-int sysctl_tcp_default_win_scale;
+EXPORT_SYMBOL_GPL(tcp_orphan_count);
int sysctl_tcp_mem[3];
int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
+EXPORT_SYMBOL(sysctl_tcp_mem);
+EXPORT_SYMBOL(sysctl_tcp_rmem);
+EXPORT_SYMBOL(sysctl_tcp_wmem);
+
atomic_t tcp_memory_allocated; /* Current allocated memory. */
atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
-/* Pressure flag: try to collapse.
+EXPORT_SYMBOL(tcp_memory_allocated);
+EXPORT_SYMBOL(tcp_sockets_allocated);
+
+/*
+ * Pressure flag: try to collapse.
* Technical note: it is used by multiple contexts non-atomically.
- * All the tcp_mem_schedule() is of this nature: accounting
- * is strict, actions are advisory and have some latency. */
+ * All of the sk_stream_mem_schedule() path is of this nature: accounting
+ * is strict, actions are advisory and have some latency.
+ */
int tcp_memory_pressure;
-#define TCP_PAGES(amt) (((amt) + TCP_MEM_QUANTUM - 1) / TCP_MEM_QUANTUM)
-
-int tcp_mem_schedule(struct sock *sk, int size, int kind)
-{
- int amt = TCP_PAGES(size);
-
- sk->sk_forward_alloc += amt * TCP_MEM_QUANTUM;
- atomic_add(amt, &tcp_memory_allocated);
-
- /* Under limit. */
- if (atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
- if (tcp_memory_pressure)
- tcp_memory_pressure = 0;
- return 1;
- }
-
- /* Over hard limit. */
- if (atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) {
- tcp_enter_memory_pressure();
- goto suppress_allocation;
- }
-
- /* Under pressure. */
- if (atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[1])
- tcp_enter_memory_pressure();
-
- if (kind) {
- if (atomic_read(&sk->sk_rmem_alloc) < sysctl_tcp_rmem[0])
- return 1;
- } else if (sk->sk_wmem_queued < sysctl_tcp_wmem[0])
- return 1;
-
- if (!tcp_memory_pressure ||
- sysctl_tcp_mem[2] > atomic_read(&tcp_sockets_allocated) *
- TCP_PAGES(sk->sk_wmem_queued +
- atomic_read(&sk->sk_rmem_alloc) +
- sk->sk_forward_alloc))
- return 1;
-
-suppress_allocation:
+EXPORT_SYMBOL(tcp_memory_pressure);
- if (!kind) {
- tcp_moderate_sndbuf(sk);
-
- /* Fail only if socket is _under_ its sndbuf.
- * In this case we cannot block, so that we have to fail.
- */
- if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
- return 1;
- }
-
- /* Alas. Undo changes. */
- sk->sk_forward_alloc -= amt * TCP_MEM_QUANTUM;
- atomic_sub(amt, &tcp_memory_allocated);
- return 0;
-}
-
-void __tcp_mem_reclaim(struct sock *sk)
+void tcp_enter_memory_pressure(void)
{
- if (sk->sk_forward_alloc >= TCP_MEM_QUANTUM) {
- atomic_sub(sk->sk_forward_alloc / TCP_MEM_QUANTUM,
- &tcp_memory_allocated);
- sk->sk_forward_alloc &= TCP_MEM_QUANTUM - 1;
- if (tcp_memory_pressure &&
- atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
- tcp_memory_pressure = 0;
+ if (!tcp_memory_pressure) {
+ NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
+ tcp_memory_pressure = 1;
}
}
-void tcp_rfree(struct sk_buff *skb)
-{
- struct sock *sk = skb->sk;
-
- atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
- sk->sk_forward_alloc += skb->truesize;
-}
-
-/*
- * LISTEN is a special case for poll..
- */
-static __inline__ unsigned int tcp_listen_poll(struct sock *sk,
- poll_table *wait)
-{
- return tcp_sk(sk)->accept_queue ? (POLLIN | POLLRDNORM) : 0;
-}
+EXPORT_SYMBOL(tcp_enter_memory_pressure);
/*
* Wait for a TCP event.
{
unsigned int mask;
struct sock *sk = sock->sk;
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
poll_wait(file, sk->sk_sleep, wait);
if (sk->sk_state == TCP_LISTEN)
- return tcp_listen_poll(sk, wait);
+ return inet_csk_listen_poll(sk);
/* Socket is not locked. We are protected from async events
by poll logic and correct handling of state changes
mask |= POLLIN | POLLRDNORM;
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
- if (tcp_wspace(sk) >= tcp_min_write_space(sk)) {
+ if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
set_bit(SOCK_ASYNC_NOSPACE,
* wspace test but before the flags are set,
* IO signal will be lost.
*/
- if (tcp_wspace(sk) >= tcp_min_write_space(sk))
+ if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
mask |= POLLOUT | POLLWRNORM;
}
}
return mask;
}
-/*
- * TCP socket write_space callback.
- */
-void tcp_write_space(struct sock *sk)
-{
- struct socket *sock = sk->sk_socket;
-
- if (tcp_wspace(sk) >= tcp_min_write_space(sk) && sock) {
- clear_bit(SOCK_NOSPACE, &sock->flags);
-
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
-
- if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
- sock_wake_async(sock, 2, POLL_OUT);
- }
-}
-
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
int answ;
switch (cmd) {
return put_user(answ, (int __user *)arg);
}
-
-int tcp_listen_start(struct sock *sk)
-{
- struct inet_opt *inet = inet_sk(sk);
- struct tcp_opt *tp = tcp_sk(sk);
- struct tcp_listen_opt *lopt;
-
- sk->sk_max_ack_backlog = 0;
- sk->sk_ack_backlog = 0;
- tp->accept_queue = tp->accept_queue_tail = NULL;
- tp->syn_wait_lock = RW_LOCK_UNLOCKED;
- tcp_delack_init(tp);
-
- lopt = kmalloc(sizeof(struct tcp_listen_opt), GFP_KERNEL);
- if (!lopt)
- return -ENOMEM;
-
- memset(lopt, 0, sizeof(struct tcp_listen_opt));
- for (lopt->max_qlen_log = 6; ; lopt->max_qlen_log++)
- if ((1 << lopt->max_qlen_log) >= sysctl_max_syn_backlog)
- break;
- get_random_bytes(&lopt->hash_rnd, 4);
-
- write_lock_bh(&tp->syn_wait_lock);
- tp->listen_opt = lopt;
- write_unlock_bh(&tp->syn_wait_lock);
-
- /* There is race window here: we announce ourselves listening,
- * but this transition is still not validated by get_port().
- * It is OK, because this socket enters to hash table only
- * after validation is complete.
- */
- sk->sk_state = TCP_LISTEN;
- if (!sk->sk_prot->get_port(sk, inet->num)) {
- inet->sport = htons(inet->num);
-
- sk_dst_reset(sk);
- sk->sk_prot->hash(sk);
-
- return 0;
- }
-
- sk->sk_state = TCP_CLOSE;
- write_lock_bh(&tp->syn_wait_lock);
- tp->listen_opt = NULL;
- write_unlock_bh(&tp->syn_wait_lock);
- kfree(lopt);
- return -EADDRINUSE;
-}
-
-/*
- * This routine closes sockets which have been at least partially
- * opened, but not yet accepted.
- */
-
-static void tcp_listen_stop (struct sock *sk)
-{
- struct tcp_opt *tp = tcp_sk(sk);
- struct tcp_listen_opt *lopt = tp->listen_opt;
- struct open_request *acc_req = tp->accept_queue;
- struct open_request *req;
- int i;
-
- tcp_delete_keepalive_timer(sk);
-
- /* make all the listen_opt local to us */
- write_lock_bh(&tp->syn_wait_lock);
- tp->listen_opt = NULL;
- write_unlock_bh(&tp->syn_wait_lock);
- tp->accept_queue = tp->accept_queue_tail = NULL;
-
- if (lopt->qlen) {
- for (i = 0; i < TCP_SYNQ_HSIZE; i++) {
- while ((req = lopt->syn_table[i]) != NULL) {
- lopt->syn_table[i] = req->dl_next;
- lopt->qlen--;
- tcp_openreq_free(req);
-
- /* Following specs, it would be better either to send FIN
- * (and enter FIN-WAIT-1, it is normal close)
- * or to send active reset (abort).
- * Certainly, it is pretty dangerous while synflood, but it is
- * bad justification for our negligence 8)
- * To be honest, we are not able to make either
- * of the variants now. --ANK
- */
- }
- }
- }
- BUG_TRAP(!lopt->qlen);
-
- kfree(lopt);
-
- while ((req = acc_req) != NULL) {
- struct sock *child = req->sk;
-
- acc_req = req->dl_next;
-
- local_bh_disable();
- bh_lock_sock(child);
- BUG_TRAP(!sock_owned_by_user(child));
- sock_hold(child);
-
- tcp_disconnect(child, O_NONBLOCK);
-
- sock_orphan(child);
-
- atomic_inc(&tcp_orphan_count);
-
- tcp_destroy_sock(child);
-
- bh_unlock_sock(child);
- local_bh_enable();
- sock_put(child);
-
- tcp_acceptq_removed(sk);
- tcp_openreq_fastfree(req);
- }
- BUG_TRAP(!sk->sk_ack_backlog);
-}
-
-/*
- * Wait for a socket to get into the connected state
- *
- * Note: Must be called with the socket locked.
- */
-static int wait_for_tcp_connect(struct sock *sk, int flags, long *timeo_p)
-{
- struct tcp_opt *tp = tcp_sk(sk);
- struct task_struct *tsk = current;
- DEFINE_WAIT(wait);
-
- while ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
- if (sk->sk_err)
- return sock_error(sk);
- if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
- return -EPIPE;
- if (!*timeo_p)
- return -EAGAIN;
- if (signal_pending(tsk))
- return sock_intr_errno(*timeo_p);
-
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
- tp->write_pending++;
-
- release_sock(sk);
- *timeo_p = schedule_timeout(*timeo_p);
- lock_sock(sk);
-
- finish_wait(sk->sk_sleep, &wait);
- tp->write_pending--;
- }
- return 0;
-}
-
-static inline int tcp_memory_free(struct sock *sk)
-{
- return sk->sk_wmem_queued < sk->sk_sndbuf;
-}
-
-/*
- * Wait for more memory for a socket
- */
-static int wait_for_tcp_memory(struct sock *sk, long *timeo)
-{
- struct tcp_opt *tp = tcp_sk(sk);
- int err = 0;
- long vm_wait = 0;
- long current_timeo = *timeo;
- DEFINE_WAIT(wait);
-
- if (tcp_memory_free(sk))
- current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2;
-
- for (;;) {
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
-
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
-
- if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
- goto do_error;
- if (!*timeo)
- goto do_nonblock;
- if (signal_pending(current))
- goto do_interrupted;
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
- if (tcp_memory_free(sk) && !vm_wait)
- break;
-
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- tp->write_pending++;
- release_sock(sk);
- if (!tcp_memory_free(sk) || vm_wait)
- current_timeo = schedule_timeout(current_timeo);
- lock_sock(sk);
- tp->write_pending--;
-
- if (vm_wait) {
- vm_wait -= current_timeo;
- current_timeo = *timeo;
- if (current_timeo != MAX_SCHEDULE_TIMEOUT &&
- (current_timeo -= vm_wait) < 0)
- current_timeo = 0;
- vm_wait = 0;
- }
- *timeo = current_timeo;
- }
-out:
- finish_wait(sk->sk_sleep, &wait);
- return err;
-
-do_error:
- err = -EPIPE;
- goto out;
-do_nonblock:
- err = -EAGAIN;
- goto out;
-do_interrupted:
- err = sock_intr_errno(*timeo);
- goto out;
-}
-
-static inline int can_coalesce(struct sk_buff *skb, int i, struct page *page,
- int off)
-{
- if (i) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
- return page == frag->page &&
- off == frag->page_offset + frag->size;
- }
- return 0;
-}
-
-static inline void fill_page_desc(struct sk_buff *skb, int i,
- struct page *page, int off, int size)
-{
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- frag->page = page;
- frag->page_offset = off;
- frag->size = size;
- skb_shinfo(skb)->nr_frags = i + 1;
-}
-
-static inline void tcp_mark_push(struct tcp_opt *tp, struct sk_buff *skb)
+static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
tp->pushed_seq = tp->write_seq;
}
-static inline int forced_push(struct tcp_opt *tp)
+static inline int forced_push(struct tcp_sock *tp)
{
return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
-static inline void skb_entail(struct sock *sk, struct tcp_opt *tp,
+static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
struct sk_buff *skb)
{
skb->csum = 0;
TCP_SKB_CB(skb)->end_seq = tp->write_seq;
TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
TCP_SKB_CB(skb)->sacked = 0;
+ skb_header_release(skb);
__skb_queue_tail(&sk->sk_write_queue, skb);
- tcp_charge_skb(sk, skb);
- if (!tp->send_head)
- tp->send_head = skb;
- else if (tp->nonagle&TCP_NAGLE_PUSH)
+ sk_charge_skb(sk, skb);
+ if (!sk->sk_send_head)
+ sk->sk_send_head = skb;
+ if (tp->nonagle & TCP_NAGLE_PUSH)
tp->nonagle &= ~TCP_NAGLE_PUSH;
}
-static inline void tcp_mark_urg(struct tcp_opt *tp, int flags,
+static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
struct sk_buff *skb)
{
if (flags & MSG_OOB) {
}
}
-static inline void tcp_push(struct sock *sk, struct tcp_opt *tp, int flags,
+static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
int mss_now, int nonagle)
{
- if (tp->send_head) {
+ if (sk->sk_send_head) {
struct sk_buff *skb = sk->sk_write_queue.prev;
if (!(flags & MSG_MORE) || forced_push(tp))
tcp_mark_push(tp, skb);
}
}
-static int tcp_error(struct sock *sk, int flags, int err)
-{
- if (err == -EPIPE)
- err = sock_error(sk) ? : -EPIPE;
- if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
- send_sig(SIGPIPE, current, 0);
- return err;
-}
-
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
size_t psize, int flags)
{
- struct tcp_opt *tp = tcp_sk(sk);
- int mss_now;
+ struct tcp_sock *tp = tcp_sk(sk);
+ int mss_now, size_goal;
int err;
ssize_t copied;
long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
/* Wait for a connection to finish. */
if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
- if ((err = wait_for_tcp_connect(sk, 0, &timeo)) != 0)
+ if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
goto out_err;
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
+ size_goal = tp->xmit_size_goal;
copied = 0;
err = -EPIPE;
while (psize > 0) {
struct sk_buff *skb = sk->sk_write_queue.prev;
struct page *page = pages[poffset / PAGE_SIZE];
- int copy, i;
+ int copy, i, can_coalesce;
int offset = poffset % PAGE_SIZE;
int size = min_t(size_t, psize, PAGE_SIZE - offset);
- if (!tp->send_head || (copy = mss_now - skb->len) <= 0) {
+ if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
new_segment:
- if (!tcp_memory_free(sk))
+ if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
- skb = tcp_alloc_pskb(sk, 0, tp->mss_cache,
- sk->sk_allocation);
+ skb = sk_stream_alloc_pskb(sk, 0, 0,
+ sk->sk_allocation);
if (!skb)
goto wait_for_memory;
skb_entail(sk, tp, skb);
- copy = mss_now;
+ copy = size_goal;
}
if (copy > size)
copy = size;
i = skb_shinfo(skb)->nr_frags;
- if (can_coalesce(skb, i, page, offset)) {
- skb_shinfo(skb)->frags[i - 1].size += copy;
- } else if (i < MAX_SKB_FRAGS) {
- get_page(page);
- fill_page_desc(skb, i, page, offset, copy);
- } else {
+ can_coalesce = skb_can_coalesce(skb, i, page, offset);
+ if (!can_coalesce && i >= MAX_SKB_FRAGS) {
tcp_mark_push(tp, skb);
goto new_segment;
}
+ if (!sk_stream_wmem_schedule(sk, copy))
+ goto wait_for_memory;
+
+ if (can_coalesce) {
+ skb_shinfo(skb)->frags[i - 1].size += copy;
+ } else {
+ get_page(page);
+ skb_fill_page_desc(skb, i, page, offset, copy);
+ }
skb->len += copy;
skb->data_len += copy;
+ skb->truesize += copy;
+ sk->sk_wmem_queued += copy;
+ sk->sk_forward_alloc -= copy;
skb->ip_summed = CHECKSUM_HW;
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
+ skb_shinfo(skb)->tso_segs = 0;
if (!copied)
TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
if (!(psize -= copy))
goto out;
- if (skb->len != mss_now || (flags & MSG_OOB))
+ if (skb->len < mss_now || (flags & MSG_OOB))
continue;
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
- } else if (skb == tp->send_head)
+ } else if (skb == sk->sk_send_head)
tcp_push_one(sk, mss_now);
continue;
if (copied)
tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
- if ((err = wait_for_tcp_memory(sk, &timeo)) != 0)
+ if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
+ size_goal = tp->xmit_size_goal;
}
out:
if (copied)
goto out;
out_err:
- return tcp_error(sk, flags, err);
+ return sk_stream_error(sk, flags, err);
}
ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
return res;
}
-#define TCP_PAGE(sk) (inet_sk(sk)->sndmsg_page)
-#define TCP_OFF(sk) (inet_sk(sk)->sndmsg_off)
+#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
+#define TCP_OFF(sk) (sk->sk_sndmsg_off)
-static inline int tcp_copy_to_page(struct sock *sk, char __user *from,
- struct sk_buff *skb, struct page *page,
- int off, int copy)
+static inline int select_size(struct sock *sk, struct tcp_sock *tp)
{
- int err = 0;
- unsigned int csum;
-
- if (skb->ip_summed == CHECKSUM_NONE) {
- csum = csum_and_copy_from_user(from, page_address(page) + off,
- copy, 0, &err);
- if (err) return err;
- skb->csum = csum_block_add(skb->csum, csum, skb->len);
- } else {
- if (copy_from_user(page_address(page) + off, from, copy))
- return -EFAULT;
- }
+ int tmp = tp->mss_cache;
- skb->len += copy;
- skb->data_len += copy;
- skb->truesize += copy;
- sk->sk_wmem_queued += copy;
- sk->sk_forward_alloc -= copy;
- return 0;
-}
+ if (sk->sk_route_caps & NETIF_F_SG) {
+ if (sk->sk_route_caps & NETIF_F_TSO)
+ tmp = 0;
+ else {
+ int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
-static inline int skb_add_data(struct sk_buff *skb, char __user *from, int copy)
-{
- int err = 0;
- unsigned int csum;
- int off = skb->len;
-
- if (skb->ip_summed == CHECKSUM_NONE) {
- csum = csum_and_copy_from_user(from, skb_put(skb, copy),
- copy, 0, &err);
- if (!err) {
- skb->csum = csum_block_add(skb->csum, csum, off);
- return 0;
+ if (tmp >= pgbreak &&
+ tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
+ tmp = pgbreak;
}
- } else {
- if (!copy_from_user(skb_put(skb, copy), from, copy))
- return 0;
}
- __skb_trim(skb, off);
- return -EFAULT;
-}
-
-static inline int select_size(struct sock *sk, struct tcp_opt *tp)
-{
- int tmp = tp->mss_cache_std;
-
- if (sk->sk_route_caps & NETIF_F_SG) {
- int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
-
- if (tmp >= pgbreak &&
- tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
- tmp = pgbreak;
- }
return tmp;
}
size_t size)
{
struct iovec *iov;
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
int iovlen, flags;
- int mss_now;
+ int mss_now, size_goal;
int err, copied;
long timeo;
/* Wait for a connection to finish. */
if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
- if ((err = wait_for_tcp_connect(sk, flags, &timeo)) != 0)
+ if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
goto out_err;
/* This should be in poll */
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
+ size_goal = tp->xmit_size_goal;
/* Ok commence sending. */
iovlen = msg->msg_iovlen;
skb = sk->sk_write_queue.prev;
- if (!tp->send_head ||
- (copy = mss_now - skb->len) <= 0) {
+ if (!sk->sk_send_head ||
+ (copy = size_goal - skb->len) <= 0) {
new_segment:
/* Allocate new segment. If the interface is SG,
* allocate an skb that fits into a single page.
*/
- if (!tcp_memory_free(sk))
+ if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
- skb = tcp_alloc_pskb(sk, select_size(sk, tp),
- 0, sk->sk_allocation);
+ skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
+ 0, sk->sk_allocation);
if (!skb)
goto wait_for_memory;
skb->ip_summed = CHECKSUM_HW;
skb_entail(sk, tp, skb);
- copy = mss_now;
+ copy = size_goal;
}
/* Try to append data to the end of skb. */
struct page *page = TCP_PAGE(sk);
int off = TCP_OFF(sk);
- if (can_coalesce(skb, i, page, off) &&
+ if (skb_can_coalesce(skb, i, page, off) &&
off != PAGE_SIZE) {
/* We can extend the last page
* fragment. */
tcp_mark_push(tp, skb);
goto new_segment;
} else if (page) {
- /* If page is cached, align
- * offset to L1 cache boundary
- */
- off = (off + L1_CACHE_BYTES - 1) &
- ~(L1_CACHE_BYTES - 1);
if (off == PAGE_SIZE) {
put_page(page);
TCP_PAGE(sk) = page = NULL;
+ off = 0;
}
- }
+ } else
+ off = 0;
+
+ if (copy > PAGE_SIZE - off)
+ copy = PAGE_SIZE - off;
+
+ if (!sk_stream_wmem_schedule(sk, copy))
+ goto wait_for_memory;
if (!page) {
/* Allocate new cache page. */
- if (!(page = tcp_alloc_page(sk)))
+ if (!(page = sk_stream_alloc_page(sk)))
goto wait_for_memory;
- off = 0;
}
- if (copy > PAGE_SIZE - off)
- copy = PAGE_SIZE - off;
-
/* Time to copy data. We are close to
* the end! */
- err = tcp_copy_to_page(sk, from, skb, page,
+ err = skb_copy_to_page(sk, from, skb, page,
off, copy);
if (err) {
/* If this page was new, give it to the
skb_shinfo(skb)->frags[i - 1].size +=
copy;
} else {
- fill_page_desc(skb, i, page, off, copy);
+ skb_fill_page_desc(skb, i, page, off, copy);
if (TCP_PAGE(sk)) {
get_page(page);
} else if (off + copy < PAGE_SIZE) {
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
+ skb_shinfo(skb)->tso_segs = 0;
from += copy;
copied += copy;
if ((seglen -= copy) == 0 && iovlen == 0)
goto out;
- if (skb->len != mss_now || (flags & MSG_OOB))
+ if (skb->len < mss_now || (flags & MSG_OOB))
continue;
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
- } else if (skb == tp->send_head)
+ } else if (skb == sk->sk_send_head)
tcp_push_one(sk, mss_now);
continue;
if (copied)
tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
- if ((err = wait_for_tcp_memory(sk, &timeo)) != 0)
+ if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
+ size_goal = tp->xmit_size_goal;
}
}
do_fault:
if (!skb->len) {
- if (tp->send_head == skb)
- tp->send_head = NULL;
- __skb_unlink(skb, skb->list);
- tcp_free_skb(sk, skb);
+ if (sk->sk_send_head == skb)
+ sk->sk_send_head = NULL;
+ __skb_unlink(skb, &sk->sk_write_queue);
+ sk_stream_free_skb(sk, skb);
}
do_error:
if (copied)
goto out;
out_err:
- err = tcp_error(sk, flags, err);
+ err = sk_stream_error(sk, flags, err);
TCP_CHECK_TIMER(sk);
release_sock(sk);
return err;
struct msghdr *msg, int len, int flags,
int *addr_len)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
/* No URG data to read. */
if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
return -EAGAIN;
}
-/*
- * Release a skb if it is no longer needed. This routine
- * must be called with interrupts disabled or with the
- * socket locked so that the sk_buff queue operation is ok.
- */
-
-static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
-{
- __skb_unlink(skb, &sk->sk_receive_queue);
- __kfree_skb(skb);
-}
-
/* Clean up the receive buffer for full frames taken by the user,
* then send an ACK if necessary. COPIED is the number of bytes
* tcp_recvmsg has given to the user so far; it speeds up the
* calculation of whether or not we must ACK for the sake of
* a window update.
*/
-void cleanup_rbuf(struct sock *sk, int copied)
+static void cleanup_rbuf(struct sock *sk, int copied)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
int time_to_ack = 0;
#if TCP_DEBUG
BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif
- if (tcp_ack_scheduled(tp)) {
+ if (inet_csk_ack_scheduled(sk)) {
+ const struct inet_connection_sock *icsk = inet_csk(sk);
/* Delayed ACKs frequently hit locked sockets during bulk
* receive. */
- if (tp->ack.blocked ||
+ if (icsk->icsk_ack.blocked ||
/* Once-per-two-segments ACK was not sent by tcp_input.c */
- tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss ||
+ tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
/*
* If this read emptied the read buffer, we send an ACK if the
* connection is not bidirectional, the user drained the
* receive buffer, and there was a small segment
* in the queue.
*/
- (copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) &&
- !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
+ (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
+ !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
time_to_ack = 1;
}
tcp_send_ack(sk);
}
-/* Now socket state including sk->sk_err is changed only under lock,
- * hence we may omit checks after joining wait queue.
- * We check receive queue before schedule() only as optimization;
- * it is very likely that release_sock() added new data.
- */
-
-static long tcp_data_wait(struct sock *sk, long timeo)
-{
- DEFINE_WAIT(wait);
-
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
-
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
- release_sock(sk);
-
- if (skb_queue_empty(&sk->sk_receive_queue))
- timeo = schedule_timeout(timeo);
-
- lock_sock(sk);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-
- finish_wait(sk->sk_sleep, &wait);
- return timeo;
-}
-
static void tcp_prequeue_process(struct sock *sk)
{
struct sk_buff *skb;
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
- NET_ADD_STATS_USER(TCPPrequeued, skb_queue_len(&tp->ucopy.prequeue));
+ NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
/* RX process wants to run with disabled BHs, though it is not
* necessary */
sk_read_actor_t recv_actor)
{
struct sk_buff *skb;
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
u32 seq = tp->copied_seq;
u32 offset;
int copied = 0;
break;
}
if (skb->h.th->fin) {
- tcp_eat_skb(sk, skb);
+ sk_eat_skb(sk, skb);
++seq;
break;
}
- tcp_eat_skb(sk, skb);
+ sk_eat_skb(sk, skb);
if (!desc->count)
break;
}
int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int nonblock, int flags, int *addr_len)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
int copied = 0;
u32 peek_seq;
u32 *seq;
cleanup_rbuf(sk, copied);
- if (tp->ucopy.task == user_recv) {
+ if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
/* Install new reader */
if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
user_recv = current;
* is not empty. It is more elegant, but eats cycles,
* unfortunately.
*/
- if (skb_queue_len(&tp->ucopy.prequeue))
+ if (!skb_queue_empty(&tp->ucopy.prequeue))
goto do_prequeue;
/* __ Set realtime policy in scheduler __ */
/* Do not sleep, just process backlog. */
release_sock(sk);
lock_sock(sk);
- } else {
- timeo = tcp_data_wait(sk, timeo);
- }
+ } else
+ sk_wait_data(sk, &timeo);
if (user_recv) {
int chunk;
/* __ Restore normal policy in scheduler __ */
if ((chunk = len - tp->ucopy.len) != 0) {
- NET_ADD_STATS_USER(TCPDirectCopyFromBacklog, chunk);
+ NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
len -= chunk;
copied += chunk;
}
if (tp->rcv_nxt == tp->copied_seq &&
- skb_queue_len(&tp->ucopy.prequeue)) {
+ !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
tcp_prequeue_process(sk);
if ((chunk = len - tp->ucopy.len) != 0) {
- NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
+ NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
len -= chunk;
copied += chunk;
}
if (skb->h.th->fin)
goto found_fin_ok;
if (!(flags & MSG_PEEK))
- tcp_eat_skb(sk, skb);
+ sk_eat_skb(sk, skb);
continue;
found_fin_ok:
/* Process the FIN. */
++*seq;
if (!(flags & MSG_PEEK))
- tcp_eat_skb(sk, skb);
+ sk_eat_skb(sk, skb);
break;
} while (len > 0);
if (user_recv) {
- if (skb_queue_len(&tp->ucopy.prequeue)) {
+ if (!skb_queue_empty(&tp->ucopy.prequeue)) {
int chunk;
tp->ucopy.len = copied > 0 ? len : 0;
tcp_prequeue_process(sk);
if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
- NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
+ NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
len -= chunk;
copied += chunk;
}
* closed.
*/
-static unsigned char new_state[16] = {
+static const unsigned char new_state[16] = {
/* current state: new state: action: */
/* (Invalid) */ TCP_CLOSE,
/* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
}
}
-
-/*
- * Return 1 if we still have things to send in our buffers.
- */
-
-static inline int closing(struct sock *sk)
-{
- return (1 << sk->sk_state) &
- (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK);
-}
-
-static __inline__ void tcp_kill_sk_queues(struct sock *sk)
-{
- /* First the read buffer. */
- __skb_queue_purge(&sk->sk_receive_queue);
-
- /* Next, the error queue. */
- __skb_queue_purge(&sk->sk_error_queue);
-
- /* Next, the write queue. */
- BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));
-
- /* Account for returned memory. */
- tcp_mem_reclaim(sk);
-
- BUG_TRAP(!sk->sk_wmem_queued);
- BUG_TRAP(!sk->sk_forward_alloc);
-
- /* It is _impossible_ for the backlog to contain anything
- * when we get here. All user references to this socket
- * have gone away, only the net layer knows can touch it.
- */
-}
-
-/*
- * At this point, there should be no process reference to this
- * socket, and thus no user references at all. Therefore we
- * can assume the socket waitqueue is inactive and nobody will
- * try to jump onto it.
- */
-void tcp_destroy_sock(struct sock *sk)
-{
- BUG_TRAP(sk->sk_state == TCP_CLOSE);
- BUG_TRAP(sock_flag(sk, SOCK_DEAD));
-
- /* It cannot be in hash table! */
- BUG_TRAP(sk_unhashed(sk));
-
- /* If it has not 0 inet_sk(sk)->num, it must be bound */
- BUG_TRAP(!inet_sk(sk)->num || tcp_sk(sk)->bind_hash);
-
-#ifdef TCP_DEBUG
- if (sk->sk_zapped) {
- printk(KERN_DEBUG "TCP: double destroy sk=%p\n", sk);
- sock_hold(sk);
- }
- sk->sk_zapped = 1;
-#endif
-
- sk->sk_prot->destroy(sk);
-
- tcp_kill_sk_queues(sk);
-
- xfrm_sk_free_policy(sk);
-
-#ifdef INET_REFCNT_DEBUG
- if (atomic_read(&sk->sk_refcnt) != 1) {
- printk(KERN_DEBUG "Destruction TCP %p delayed, c=%d\n",
- sk, atomic_read(&sk->sk_refcnt));
- }
-#endif
-
- atomic_dec(&tcp_orphan_count);
- sock_put(sk);
-}
-
void tcp_close(struct sock *sk, long timeout)
{
struct sk_buff *skb;
tcp_set_state(sk, TCP_CLOSE);
/* Special case. */
- tcp_listen_stop(sk);
+ inet_csk_listen_stop(sk);
goto adjudge_to_death;
}
__kfree_skb(skb);
}
- tcp_mem_reclaim(sk);
+ sk_stream_mem_reclaim(sk);
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
*/
if (data_was_unread) {
/* Unread data was tossed, zap the connection. */
- NET_INC_STATS_USER(TCPAbortOnClose);
+ NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_KERNEL);
} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
/* Check zero linger _after_ checking for unread data. */
sk->sk_prot->disconnect(sk, 0);
- NET_INC_STATS_USER(TCPAbortOnData);
+ NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
} else if (tcp_close_state(sk)) {
/* We FIN if the application ate all the data before
* zapping the connection.
tcp_send_fin(sk);
}
- if (timeout) {
- struct task_struct *tsk = current;
- DEFINE_WAIT(wait);
-
- do {
- prepare_to_wait(sk->sk_sleep, &wait,
- TASK_INTERRUPTIBLE);
- if (!closing(sk))
- break;
- release_sock(sk);
- timeout = schedule_timeout(timeout);
- lock_sock(sk);
- } while (!signal_pending(tsk) && timeout);
-
- finish_wait(sk->sk_sleep, &wait);
- }
+ sk_stream_wait_close(sk, timeout);
adjudge_to_death:
/* It is the last release_sock in its life. It will remove backlog. */
*/
if (sk->sk_state == TCP_FIN_WAIT2) {
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
if (tp->linger2 < 0) {
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC);
- NET_INC_STATS_BH(TCPAbortOnLinger);
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
} else {
- int tmo = tcp_fin_time(tp);
+ const int tmo = tcp_fin_time(sk);
if (tmo > TCP_TIMEWAIT_LEN) {
- tcp_reset_keepalive_timer(sk, tcp_fin_time(tp));
+ inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
} else {
- atomic_inc(&tcp_orphan_count);
+ atomic_inc(sk->sk_prot->orphan_count);
tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
goto out;
}
}
}
if (sk->sk_state != TCP_CLOSE) {
- tcp_mem_reclaim(sk);
- if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
+ sk_stream_mem_reclaim(sk);
+ if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
(sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
if (net_ratelimit())
"sockets\n");
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC);
- NET_INC_STATS_BH(TCPAbortOnMemory);
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
}
}
- atomic_inc(&tcp_orphan_count);
+ atomic_inc(sk->sk_prot->orphan_count);
if (sk->sk_state == TCP_CLOSE)
- tcp_destroy_sock(sk);
+ inet_csk_destroy_sock(sk);
/* Otherwise, socket is reprieved until protocol close. */
out:
int tcp_disconnect(struct sock *sk, int flags)
{
- struct inet_opt *inet = inet_sk(sk);
- struct tcp_opt *tp = tcp_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
int err = 0;
int old_state = sk->sk_state;
/* ABORT function of RFC793 */
if (old_state == TCP_LISTEN) {
- tcp_listen_stop(sk);
+ inet_csk_listen_stop(sk);
} else if (tcp_need_reset(old_state) ||
(tp->snd_nxt != tp->write_seq &&
(1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
- /* The last check adjusts for discrepance of Linux wrt. RFC
+ /* The last check adjusts for discrepancy of Linux wrt. RFC
* states
*/
tcp_send_active_reset(sk, gfp_any());
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
- tcp_writequeue_purge(sk);
+ sk_stream_writequeue_purge(sk);
__skb_queue_purge(&tp->out_of_order_queue);
inet->dport = 0;
tp->srtt = 0;
if ((tp->write_seq += tp->max_window + 2) == 0)
tp->write_seq = 1;
- tp->backoff = 0;
+ icsk->icsk_backoff = 0;
tp->snd_cwnd = 2;
- tp->probes_out = 0;
+ icsk->icsk_probes_out = 0;
tp->packets_out = 0;
tp->snd_ssthresh = 0x7fffffff;
tp->snd_cwnd_cnt = 0;
- tcp_set_ca_state(tp, TCP_CA_Open);
+ tp->bytes_acked = 0;
+ tcp_set_ca_state(sk, TCP_CA_Open);
tcp_clear_retrans(tp);
- tcp_delack_init(tp);
- tp->send_head = NULL;
- tp->saw_tstamp = 0;
- tcp_sack_reset(tp);
+ inet_csk_delack_init(sk);
+ sk->sk_send_head = NULL;
+ tp->rx_opt.saw_tstamp = 0;
+ tcp_sack_reset(&tp->rx_opt);
__sk_dst_reset(sk);
- BUG_TRAP(!inet->num || tp->bind_hash);
+ BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
sk->sk_error_report(sk);
return err;
}
-/*
- * Wait for an incoming connection, avoid race
- * conditions. This must be called with the socket locked.
- */
-static int wait_for_connect(struct sock *sk, long timeo)
-{
- struct tcp_opt *tp = tcp_sk(sk);
- DEFINE_WAIT(wait);
- int err;
-
- /*
- * True wake-one mechanism for incoming connections: only
- * one process gets woken up, not the 'whole herd'.
- * Since we do not 'race & poll' for established sockets
- * anymore, the common case will execute the loop only once.
- *
- * Subtle issue: "add_wait_queue_exclusive()" will be added
- * after any current non-exclusive waiters, and we know that
- * it will always _stay_ after any new non-exclusive waiters
- * because all non-exclusive waiters are added at the
- * beginning of the wait-queue. As such, it's ok to "drop"
- * our exclusiveness temporarily when we get woken up without
- * having to remove and re-insert us on the wait queue.
- */
- for (;;) {
- prepare_to_wait_exclusive(sk->sk_sleep, &wait,
- TASK_INTERRUPTIBLE);
- release_sock(sk);
- if (!tp->accept_queue)
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
- err = 0;
- if (tp->accept_queue)
- break;
- err = -EINVAL;
- if (sk->sk_state != TCP_LISTEN)
- break;
- err = sock_intr_errno(timeo);
- if (signal_pending(current))
- break;
- err = -EAGAIN;
- if (!timeo)
- break;
- }
- finish_wait(sk->sk_sleep, &wait);
- return err;
-}
-
-/*
- * This will accept the next outstanding connection.
- */
-
-struct sock *tcp_accept(struct sock *sk, int flags, int *err)
-{
- struct tcp_opt *tp = tcp_sk(sk);
- struct open_request *req;
- struct sock *newsk;
- int error;
-
- lock_sock(sk);
-
- /* We need to make sure that this socket is listening,
- * and that it has something pending.
- */
- error = -EINVAL;
- if (sk->sk_state != TCP_LISTEN)
- goto out;
-
- /* Find already established connection */
- if (!tp->accept_queue) {
- long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
-
- /* If this is a non blocking socket don't sleep */
- error = -EAGAIN;
- if (!timeo)
- goto out;
-
- error = wait_for_connect(sk, timeo);
- if (error)
- goto out;
- }
-
- req = tp->accept_queue;
- if ((tp->accept_queue = req->dl_next) == NULL)
- tp->accept_queue_tail = NULL;
-
- newsk = req->sk;
- tcp_acceptq_removed(sk);
- tcp_openreq_fastfree(req);
- BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
- release_sock(sk);
- return newsk;
-
-out:
- release_sock(sk);
- *err = error;
- return NULL;
-}
-
/*
* Socket option code for TCP.
*/
int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
int optlen)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
int val;
int err = 0;
if (level != SOL_TCP)
- return tp->af_specific->setsockopt(sk, level, optname,
- optval, optlen);
+ return icsk->icsk_af_ops->setsockopt(sk, level, optname,
+ optval, optlen);
+
+ /* This is a string value; all the others are ints */
+ if (optname == TCP_CONGESTION) {
+ char name[TCP_CA_NAME_MAX];
+
+ if (optlen < 1)
+ return -EINVAL;
+
+ val = strncpy_from_user(name, optval,
+ min(TCP_CA_NAME_MAX-1, optlen));
+ if (val < 0)
+ return -EFAULT;
+ name[val] = 0;
+
+ lock_sock(sk);
+ err = tcp_set_congestion_control(sk, name);
+ release_sock(sk);
+ return err;
+ }
if (optlen < sizeof(int))
return -EINVAL;
err = -EINVAL;
break;
}
- tp->user_mss = val;
+ tp->rx_opt.user_mss = val;
break;
case TCP_NODELAY:
elapsed = tp->keepalive_time - elapsed;
else
elapsed = 0;
- tcp_reset_keepalive_timer(sk, elapsed);
+ inet_csk_reset_keepalive_timer(sk, elapsed);
}
}
break;
if (val < 1 || val > MAX_TCP_SYNCNT)
err = -EINVAL;
else
- tp->syn_retries = val;
+ icsk->icsk_syn_retries = val;
break;
case TCP_LINGER2:
break;
case TCP_DEFER_ACCEPT:
- tp->defer_accept = 0;
+ icsk->icsk_accept_queue.rskq_defer_accept = 0;
if (val > 0) {
/* Translate value in seconds to number of
* retransmits */
- while (tp->defer_accept < 32 &&
+ while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
val > ((TCP_TIMEOUT_INIT / HZ) <<
- tp->defer_accept))
- tp->defer_accept++;
- tp->defer_accept++;
+ icsk->icsk_accept_queue.rskq_defer_accept))
+ icsk->icsk_accept_queue.rskq_defer_accept++;
+ icsk->icsk_accept_queue.rskq_defer_accept++;
}
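/*
 * Worked example (illustration only, not part of the patch): assuming
 * TCP_TIMEOUT_INIT is 3*HZ, as in kernels of this era, a request of
 * val = 10 seconds iterates while 10 > 3 and 10 > 6, stops at 10 <= 12,
 * and the final increment leaves rskq_defer_accept = 3.  The option is
 * thus stored as a retransmission count; the getsockopt() case below
 * reports it back rounded up to (3 << (3 - 1)) = 12 seconds.
 */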
break;
case TCP_QUICKACK:
if (!val) {
- tp->ack.pingpong = 1;
+ icsk->icsk_ack.pingpong = 1;
} else {
- tp->ack.pingpong = 0;
+ icsk->icsk_ack.pingpong = 0;
if ((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
- tcp_ack_scheduled(tp)) {
- tp->ack.pending |= TCP_ACK_PUSHED;
+ inet_csk_ack_scheduled(sk)) {
+ icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
cleanup_rbuf(sk, 1);
if (!(val & 1))
- tp->ack.pingpong = 1;
+ icsk->icsk_ack.pingpong = 1;
}
}
break;
return err;
}
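The TCP_CONGESTION handling added above takes the algorithm name as a string rather than an int, validated by tcp_set_congestion_control() under the socket lock. Below is a minimal user-space sketch of both directions of the option (illustration only, not part of the patch; it assumes the libc's <netinet/tcp.h> exposes TCP_CONGESTION, otherwise the definition has to come from the kernel headers):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>		/* TCP_CONGESTION, if the libc is new enough */

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	const char *algo = "reno";	/* must be a registered algorithm */
	char cur[16];			/* TCP_CA_NAME_MAX bytes is enough */
	socklen_t len = sizeof(cur);

	/* select the algorithm for this socket; length is the string length */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, algo, strlen(algo)) < 0)
		perror("setsockopt(TCP_CONGESTION)");

	/* read back the name the kernel actually uses for this socket */
	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, cur, &len) == 0)
		printf("congestion control: %.*s\n", (int)len, cur);

	close(fd);
	return 0;
}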
+/* Return information about state of tcp endpoint in API format. */
+void tcp_get_info(struct sock *sk, struct tcp_info *info)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ u32 now = tcp_time_stamp;
+
+ memset(info, 0, sizeof(*info));
+
+ info->tcpi_state = sk->sk_state;
+ info->tcpi_ca_state = icsk->icsk_ca_state;
+ info->tcpi_retransmits = icsk->icsk_retransmits;
+ info->tcpi_probes = icsk->icsk_probes_out;
+ info->tcpi_backoff = icsk->icsk_backoff;
+
+ if (tp->rx_opt.tstamp_ok)
+ info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
+ if (tp->rx_opt.sack_ok)
+ info->tcpi_options |= TCPI_OPT_SACK;
+ if (tp->rx_opt.wscale_ok) {
+ info->tcpi_options |= TCPI_OPT_WSCALE;
+ info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
+ info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
+ }
+
+ if (tp->ecn_flags&TCP_ECN_OK)
+ info->tcpi_options |= TCPI_OPT_ECN;
+
+ info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
+ info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
+ info->tcpi_snd_mss = tp->mss_cache;
+ info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
+
+ info->tcpi_unacked = tp->packets_out;
+ info->tcpi_sacked = tp->sacked_out;
+ info->tcpi_lost = tp->lost_out;
+ info->tcpi_retrans = tp->retrans_out;
+ info->tcpi_fackets = tp->fackets_out;
+
+ info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
+ info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
+ info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
+
+ info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
+ info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
+ info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
+ info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
+ info->tcpi_snd_ssthresh = tp->snd_ssthresh;
+ info->tcpi_snd_cwnd = tp->snd_cwnd;
+ info->tcpi_advmss = tp->advmss;
+ info->tcpi_reordering = tp->reordering;
+
+ info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
+ info->tcpi_rcv_space = tp->rcvq_space.space;
+
+ info->tcpi_total_retrans = tp->total_retrans;
+}
+
+EXPORT_SYMBOL_GPL(tcp_get_info);
+
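tcp_get_info() gathers in one place what tcp_getsockopt() used to fill in by hand (see the TCP_INFO case further down); user space still reaches the same data through getsockopt(). A minimal sketch for an already-connected socket (illustration only; dump_tcp_info() is a hypothetical helper, and the struct tcp_info layout is taken from the libc/kernel headers):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>	/* struct tcp_info, TCP_INFO */

/* Hypothetical helper: print a few of the counters tcp_get_info() fills in. */
static void dump_tcp_info(int fd)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) < 0) {
		perror("getsockopt(TCP_INFO)");
		return;
	}
	/* rto/rtt are in microseconds, cwnd in segments (see conversions above) */
	printf("state %u rto %u rtt %u snd_cwnd %u total_retrans %u\n",
	       ti.tcpi_state, ti.tcpi_rto, ti.tcpi_rtt,
	       ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
}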
int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
int __user *optlen)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
int val, len;
if (level != SOL_TCP)
- return tp->af_specific->getsockopt(sk, level, optname,
- optval, optlen);
+ return icsk->icsk_af_ops->getsockopt(sk, level, optname,
+ optval, optlen);
if (get_user(len, optlen))
return -EFAULT;
switch (optname) {
case TCP_MAXSEG:
- val = tp->mss_cache_std;
+ val = tp->mss_cache;
if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
- val = tp->user_mss;
+ val = tp->rx_opt.user_mss;
break;
case TCP_NODELAY:
val = !!(tp->nonagle&TCP_NAGLE_OFF);
val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
break;
case TCP_SYNCNT:
- val = tp->syn_retries ? : sysctl_tcp_syn_retries;
+ val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
break;
case TCP_LINGER2:
val = tp->linger2;
val = (val ? : sysctl_tcp_fin_timeout) / HZ;
break;
case TCP_DEFER_ACCEPT:
- val = !tp->defer_accept ? 0 : ((TCP_TIMEOUT_INIT / HZ) <<
- (tp->defer_accept - 1));
+ val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
+ ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
break;
case TCP_WINDOW_CLAMP:
val = tp->window_clamp;
break;
case TCP_INFO: {
struct tcp_info info;
- u32 now = tcp_time_stamp;
if (get_user(len, optlen))
return -EFAULT;
- info.tcpi_state = sk->sk_state;
- info.tcpi_ca_state = tp->ca_state;
- info.tcpi_retransmits = tp->retransmits;
- info.tcpi_probes = tp->probes_out;
- info.tcpi_backoff = tp->backoff;
- info.tcpi_options = 0;
- if (tp->tstamp_ok)
- info.tcpi_options |= TCPI_OPT_TIMESTAMPS;
- if (tp->sack_ok)
- info.tcpi_options |= TCPI_OPT_SACK;
- if (tp->wscale_ok) {
- info.tcpi_options |= TCPI_OPT_WSCALE;
- info.tcpi_snd_wscale = tp->snd_wscale;
- info.tcpi_rcv_wscale = tp->rcv_wscale;
- } else {
- info.tcpi_snd_wscale = 0;
- info.tcpi_rcv_wscale = 0;
- }
- if (tp->ecn_flags & TCP_ECN_OK)
- info.tcpi_options |= TCPI_OPT_ECN;
-
- info.tcpi_rto = (1000000 * tp->rto) / HZ;
- info.tcpi_ato = (1000000 * tp->ack.ato) / HZ;
- info.tcpi_snd_mss = tp->mss_cache_std;
- info.tcpi_rcv_mss = tp->ack.rcv_mss;
-
- info.tcpi_unacked = tp->packets_out;
- info.tcpi_sacked = tp->sacked_out;
- info.tcpi_lost = tp->lost_out;
- info.tcpi_retrans = tp->retrans_out;
- info.tcpi_fackets = tp->fackets_out;
-
- info.tcpi_last_data_sent = ((now - tp->lsndtime) * 1000) / HZ;
- info.tcpi_last_ack_sent = 0;
- info.tcpi_last_data_recv = ((now -
- tp->ack.lrcvtime) * 1000) / HZ;
- info.tcpi_last_ack_recv = ((now - tp->rcv_tstamp) * 1000) / HZ;
-
- info.tcpi_pmtu = tp->pmtu_cookie;
- info.tcpi_rcv_ssthresh = tp->rcv_ssthresh;
- info.tcpi_rtt = ((1000000 * tp->srtt) / HZ) >> 3;
- info.tcpi_rttvar = ((1000000 * tp->mdev) / HZ) >> 2;
- info.tcpi_snd_ssthresh = tp->snd_ssthresh;
- info.tcpi_snd_cwnd = tp->snd_cwnd;
- info.tcpi_advmss = tp->advmss;
- info.tcpi_reordering = tp->reordering;
+
+ tcp_get_info(sk, &info);
len = min_t(unsigned int, len, sizeof(info));
if (put_user(len, optlen))
return 0;
}
case TCP_QUICKACK:
- val = !tp->ack.pingpong;
+ val = !icsk->icsk_ack.pingpong;
break;
+
+ case TCP_CONGESTION:
+ if (get_user(len, optlen))
+ return -EFAULT;
+ len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
+ return -EFAULT;
+ return 0;
default:
return -ENOPROTOOPT;
};
extern void __skb_cb_too_small_for_tcp(int, int);
-extern void tcpdiag_init(void);
+extern struct tcp_congestion_ops tcp_reno;
static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
void __init tcp_init(void)
{
struct sk_buff *skb = NULL;
- unsigned long goal;
int order, i;
if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
sizeof(skb->cb));
- tcp_openreq_cachep = kmem_cache_create("tcp_open_request",
- sizeof(struct open_request),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (!tcp_openreq_cachep)
- panic("tcp_init: Cannot alloc open_request cache.");
-
- tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
- sizeof(struct tcp_bind_bucket),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (!tcp_bucket_cachep)
+ tcp_hashinfo.bind_bucket_cachep =
+ kmem_cache_create("tcp_bind_bucket",
+ sizeof(struct inet_bind_bucket), 0,
+ SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if (!tcp_hashinfo.bind_bucket_cachep)
panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
- tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
- sizeof(struct tcp_tw_bucket),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (!tcp_timewait_cachep)
- panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");
-
/* Size and allocate the main established and bind bucket
* hash tables.
*
* The methodology is similar to that of the buffer cache.
*/
- if (num_physpages >= (128 * 1024))
- goal = num_physpages >> (21 - PAGE_SHIFT);
- else
- goal = num_physpages >> (23 - PAGE_SHIFT);
-
- if (thash_entries)
- goal = (thash_entries * sizeof(struct tcp_ehash_bucket)) >> PAGE_SHIFT;
- for (order = 0; (1UL << order) < goal; order++)
- ;
- do {
- tcp_ehash_size = (1UL << order) * PAGE_SIZE /
- sizeof(struct tcp_ehash_bucket);
- tcp_ehash_size >>= 1;
- while (tcp_ehash_size & (tcp_ehash_size - 1))
- tcp_ehash_size--;
- tcp_ehash = (struct tcp_ehash_bucket *)
- __get_free_pages(GFP_ATOMIC, order);
- } while (!tcp_ehash && --order > 0);
-
- if (!tcp_ehash)
- panic("Failed to allocate TCP established hash table\n");
- for (i = 0; i < (tcp_ehash_size << 1); i++) {
- tcp_ehash[i].lock = RW_LOCK_UNLOCKED;
- INIT_HLIST_HEAD(&tcp_ehash[i].chain);
+ tcp_hashinfo.ehash =
+ alloc_large_system_hash("TCP established",
+ sizeof(struct inet_ehash_bucket),
+ thash_entries,
+ (num_physpages >= 128 * 1024) ?
+ 13 : 15,
+ 0,
+ &tcp_hashinfo.ehash_size,
+ NULL,
+ 0);
+ tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
+ for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
+ rwlock_init(&tcp_hashinfo.ehash[i].lock);
+ INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
}
- do {
- tcp_bhash_size = (1UL << order) * PAGE_SIZE /
- sizeof(struct tcp_bind_hashbucket);
- if ((tcp_bhash_size > (64 * 1024)) && order > 0)
- continue;
- tcp_bhash = (struct tcp_bind_hashbucket *)
- __get_free_pages(GFP_ATOMIC, order);
- } while (!tcp_bhash && --order >= 0);
-
- if (!tcp_bhash)
- panic("Failed to allocate TCP bind hash table\n");
- for (i = 0; i < tcp_bhash_size; i++) {
- tcp_bhash[i].lock = SPIN_LOCK_UNLOCKED;
- INIT_HLIST_HEAD(&tcp_bhash[i].chain);
+ tcp_hashinfo.bhash =
+ alloc_large_system_hash("TCP bind",
+ sizeof(struct inet_bind_hashbucket),
+ tcp_hashinfo.ehash_size,
+ (num_physpages >= 128 * 1024) ?
+ 13 : 15,
+ 0,
+ &tcp_hashinfo.bhash_size,
+ NULL,
+ 64 * 1024);
+ tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
+ for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
+ spin_lock_init(&tcp_hashinfo.bhash[i].lock);
+ INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
}
/* Try to be a bit smarter and adjust defaults depending
* on available memory.
*/
- if (order > 4) {
+ for (order = 0; ((1 << order) << PAGE_SHIFT) <
+ (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
+ order++)
+ ;
+ if (order >= 4) {
sysctl_local_port_range[0] = 32768;
sysctl_local_port_range[1] = 61000;
- sysctl_tcp_max_tw_buckets = 180000;
+ tcp_death_row.sysctl_max_tw_buckets = 180000;
sysctl_tcp_max_orphans = 4096 << (order - 4);
sysctl_max_syn_backlog = 1024;
} else if (order < 3) {
sysctl_local_port_range[0] = 1024 * (3 - order);
- sysctl_tcp_max_tw_buckets >>= (3 - order);
+ tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
sysctl_tcp_max_orphans >>= (3 - order);
sysctl_max_syn_backlog = 128;
}
- tcp_port_rover = sysctl_local_port_range[0] - 1;
sysctl_tcp_mem[0] = 768 << order;
sysctl_tcp_mem[1] = 1024 << order;
printk(KERN_INFO "TCP: Hash tables configured "
"(established %d bind %d)\n",
- tcp_ehash_size << 1, tcp_bhash_size);
+ tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
- tcpdiag_init();
+ tcp_register_congestion_control(&tcp_reno);
}
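tcp_init() now ends by registering "reno" with the pluggable congestion-control framework instead of calling tcpdiag_init(). Additional algorithms hook into the same tcp_congestion_ops registration; the sketch below is illustration only, modeled on the in-tree modules of this era, and simply reuses the exported Reno callbacks (a real algorithm would supply its own ssthresh/cong_avoid):

#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops tcp_example = {
	.ssthresh	= tcp_reno_ssthresh,	/* exported Reno helpers */
	.cong_avoid	= tcp_reno_cong_avoid,
	.name		= "example",		/* selectable via TCP_CONGESTION */
	.owner		= THIS_MODULE,
};

static int __init tcp_example_register(void)
{
	return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_register);
module_exit(tcp_example_unregister);
MODULE_LICENSE("GPL");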
-EXPORT_SYMBOL(__tcp_mem_reclaim);
-EXPORT_SYMBOL(sysctl_tcp_rmem);
-EXPORT_SYMBOL(sysctl_tcp_wmem);
-EXPORT_SYMBOL(tcp_accept);
EXPORT_SYMBOL(tcp_close);
-EXPORT_SYMBOL(tcp_close_state);
-EXPORT_SYMBOL(tcp_destroy_sock);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
-EXPORT_SYMBOL(tcp_openreq_cachep);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
-EXPORT_SYMBOL(tcp_sockets_allocated);
EXPORT_SYMBOL(tcp_statistics);
-EXPORT_SYMBOL(tcp_timewait_cachep);
-EXPORT_SYMBOL(tcp_write_space);
-EXPORT_SYMBOL_GPL(cleanup_rbuf);