#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/random.h>
+
+#ifdef CONFIG_CKRM
#include <linux/ckrm.h>
+#endif
#include <net/icmp.h>
#include <net/tcp.h>
atomic_t tcp_orphan_count = ATOMIC_INIT(0);
-int sysctl_tcp_default_win_scale;
+int sysctl_tcp_default_win_scale = 7;
int sysctl_tcp_mem[3];
int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
+EXPORT_SYMBOL(sysctl_tcp_mem);
+EXPORT_SYMBOL(sysctl_tcp_rmem);
+EXPORT_SYMBOL(sysctl_tcp_wmem);
+
atomic_t tcp_memory_allocated; /* Current allocated memory. */
atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
-/* Pressure flag: try to collapse.
+EXPORT_SYMBOL(tcp_memory_allocated);
+EXPORT_SYMBOL(tcp_sockets_allocated);
+
+/*
+ * Pressure flag: try to collapse.
* Technical note: it is used by multiple contexts non atomically.
- * All the tcp_mem_schedule() is of this nature: accounting
- * is strict, actions are advisory and have some latency. */
+ * All of sk_stream_mem_schedule() is of this nature: accounting
+ * is strict; actions are advisory and have some latency.
+ */
int tcp_memory_pressure;
-#define TCP_PAGES(amt) (((amt) + TCP_MEM_QUANTUM - 1) / TCP_MEM_QUANTUM)
-
-int tcp_mem_schedule(struct sock *sk, int size, int kind)
-{
- int amt = TCP_PAGES(size);
-
- sk->sk_forward_alloc += amt * TCP_MEM_QUANTUM;
- atomic_add(amt, &tcp_memory_allocated);
-
- /* Under limit. */
- if (atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
- if (tcp_memory_pressure)
- tcp_memory_pressure = 0;
- return 1;
- }
-
- /* Over hard limit. */
- if (atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) {
- tcp_enter_memory_pressure();
- goto suppress_allocation;
- }
-
- /* Under pressure. */
- if (atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[1])
- tcp_enter_memory_pressure();
-
- if (kind) {
- if (atomic_read(&sk->sk_rmem_alloc) < sysctl_tcp_rmem[0])
- return 1;
- } else if (sk->sk_wmem_queued < sysctl_tcp_wmem[0])
- return 1;
-
- if (!tcp_memory_pressure ||
- sysctl_tcp_mem[2] > atomic_read(&tcp_sockets_allocated) *
- TCP_PAGES(sk->sk_wmem_queued +
- atomic_read(&sk->sk_rmem_alloc) +
- sk->sk_forward_alloc))
- return 1;
-
-suppress_allocation:
-
- if (!kind) {
- tcp_moderate_sndbuf(sk);
-
- /* Fail only if socket is _under_ its sndbuf.
- * In this case we cannot block, so that we have to fail.
- */
- if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
- return 1;
- }
-
- /* Alas. Undo changes. */
- sk->sk_forward_alloc -= amt * TCP_MEM_QUANTUM;
- atomic_sub(amt, &tcp_memory_allocated);
- return 0;
-}
+EXPORT_SYMBOL(tcp_memory_pressure);
-void __tcp_mem_reclaim(struct sock *sk)
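+/* Raise the global memory pressure flag; the SNMP counter counts only the 0 -> 1 transition. */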
+void tcp_enter_memory_pressure(void)
{
- if (sk->sk_forward_alloc >= TCP_MEM_QUANTUM) {
- atomic_sub(sk->sk_forward_alloc / TCP_MEM_QUANTUM,
- &tcp_memory_allocated);
- sk->sk_forward_alloc &= TCP_MEM_QUANTUM - 1;
- if (tcp_memory_pressure &&
- atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
- tcp_memory_pressure = 0;
+ if (!tcp_memory_pressure) {
+ NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
+ tcp_memory_pressure = 1;
}
}
-void tcp_rfree(struct sk_buff *skb)
-{
- struct sock *sk = skb->sk;
-
- atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
- sk->sk_forward_alloc += skb->truesize;
-}
+EXPORT_SYMBOL(tcp_enter_memory_pressure);
/*
* LISTEN is a special case for poll..
mask |= POLLIN | POLLRDNORM;
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
- if (tcp_wspace(sk) >= tcp_min_write_space(sk)) {
+ if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
set_bit(SOCK_ASYNC_NOSPACE,
* wspace test but before the flags are set,
* IO signal will be lost.
*/
- if (tcp_wspace(sk) >= tcp_min_write_space(sk))
+ if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
mask |= POLLOUT | POLLWRNORM;
}
}
return mask;
}
-/*
- * TCP socket write_space callback.
- */
-void tcp_write_space(struct sock *sk)
-{
- struct socket *sock = sk->sk_socket;
-
- if (tcp_wspace(sk) >= tcp_min_write_space(sk) && sock) {
- clear_bit(SOCK_NOSPACE, &sock->flags);
-
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
-
- if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
- sock_wake_async(sock, 2, POLL_OUT);
- }
-}
-
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
struct tcp_opt *tp = tcp_sk(sk);
sk->sk_max_ack_backlog = 0;
sk->sk_ack_backlog = 0;
+#ifdef CONFIG_ACCEPT_QUEUES
tp->accept_queue = NULL;
+#else
+ tp->accept_queue = tp->accept_queue_tail = NULL;
+#endif
+ tp->syn_wait_lock = RW_LOCK_UNLOCKED;
+ tcp_delack_init(tp);
+
+ lopt = kmalloc(sizeof(struct tcp_listen_opt), GFP_KERNEL);
+ if (!lopt)
+ return -ENOMEM;
+
+ memset(lopt, 0, sizeof(struct tcp_listen_opt));
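+ /* max_qlen_log: log2 of the smallest power of two (at least 64)
+ * that covers sysctl_max_syn_backlog. */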
+ for (lopt->max_qlen_log = 6; ; lopt->max_qlen_log++)
+ if ((1 << lopt->max_qlen_log) >= sysctl_max_syn_backlog)
+ break;
+ get_random_bytes(&lopt->hash_rnd, 4);
+
#ifdef CONFIG_ACCEPT_QUEUES
tp->class_index = 0;
for (i=0; i < NUM_ACCEPT_QUEUES; i++) {
tp->acceptq[i].aq_qcount = 0;
tp->acceptq[i].aq_count = 0;
if (i == 0) {
- tp->acceptq[i].aq_valid = 1;
tp->acceptq[i].aq_ratio = 1;
}
else {
- tp->acceptq[i].aq_valid = 0;
tp->acceptq[i].aq_ratio = 0;
}
}
#endif
- tp->syn_wait_lock = RW_LOCK_UNLOCKED;
- tcp_delack_init(tp);
-
- lopt = kmalloc(sizeof(struct tcp_listen_opt), GFP_KERNEL);
- if (!lopt)
- return -ENOMEM;
-
- memset(lopt, 0, sizeof(struct tcp_listen_opt));
- for (lopt->max_qlen_log = 6; ; lopt->max_qlen_log++)
- if ((1 << lopt->max_qlen_log) >= sysctl_max_syn_backlog)
- break;
- get_random_bytes(&lopt->hash_rnd, 4);
write_lock_bh(&tp->syn_wait_lock);
tp->listen_opt = lopt;
sock_put(child);
#ifdef CONFIG_ACCEPT_QUEUES
- tcp_acceptq_removed(sk, req->acceptq_class);
+ sk_acceptq_removed(sk, req->acceptq_class);
#else
- tcp_acceptq_removed(sk);
+ sk_acceptq_removed(sk);
#endif
tcp_openreq_fastfree(req);
}
BUG_TRAP(!sk->sk_ack_backlog);
}
-/*
- * Wait for a socket to get into the connected state
- *
- * Note: Must be called with the socket locked.
- */
-static int wait_for_tcp_connect(struct sock *sk, int flags, long *timeo_p)
-{
- struct tcp_opt *tp = tcp_sk(sk);
- struct task_struct *tsk = current;
- DEFINE_WAIT(wait);
-
- while ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
- if (sk->sk_err)
- return sock_error(sk);
- if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
- return -EPIPE;
- if (!*timeo_p)
- return -EAGAIN;
- if (signal_pending(tsk))
- return sock_intr_errno(*timeo_p);
-
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
- tp->write_pending++;
-
- release_sock(sk);
- *timeo_p = schedule_timeout(*timeo_p);
- lock_sock(sk);
-
- finish_wait(sk->sk_sleep, &wait);
- tp->write_pending--;
- }
- return 0;
-}
-
-static inline int tcp_memory_free(struct sock *sk)
-{
- return sk->sk_wmem_queued < sk->sk_sndbuf;
-}
-
-/*
- * Wait for more memory for a socket
- */
-static int wait_for_tcp_memory(struct sock *sk, long *timeo)
-{
- struct tcp_opt *tp = tcp_sk(sk);
- int err = 0;
- long vm_wait = 0;
- long current_timeo = *timeo;
- DEFINE_WAIT(wait);
-
- if (tcp_memory_free(sk))
- current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2;
-
- for (;;) {
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
-
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
-
- if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
- goto do_error;
- if (!*timeo)
- goto do_nonblock;
- if (signal_pending(current))
- goto do_interrupted;
- clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
- if (tcp_memory_free(sk) && !vm_wait)
- break;
-
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- tp->write_pending++;
- release_sock(sk);
- if (!tcp_memory_free(sk) || vm_wait)
- current_timeo = schedule_timeout(current_timeo);
- lock_sock(sk);
- tp->write_pending--;
-
- if (vm_wait) {
- vm_wait -= current_timeo;
- current_timeo = *timeo;
- if (current_timeo != MAX_SCHEDULE_TIMEOUT &&
- (current_timeo -= vm_wait) < 0)
- current_timeo = 0;
- vm_wait = 0;
- }
- *timeo = current_timeo;
- }
-out:
- finish_wait(sk->sk_sleep, &wait);
- return err;
-
-do_error:
- err = -EPIPE;
- goto out;
-do_nonblock:
- err = -EAGAIN;
- goto out;
-do_interrupted:
- err = sock_intr_errno(*timeo);
- goto out;
-}
-
-static inline int can_coalesce(struct sk_buff *skb, int i, struct page *page,
- int off)
-{
- if (i) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
- return page == frag->page &&
- off == frag->page_offset + frag->size;
- }
- return 0;
-}
-
-static inline void fill_page_desc(struct sk_buff *skb, int i,
- struct page *page, int off, int size)
-{
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- frag->page = page;
- frag->page_offset = off;
- frag->size = size;
- skb_shinfo(skb)->nr_frags = i + 1;
-}
-
static inline void tcp_mark_push(struct tcp_opt *tp, struct sk_buff *skb)
{
TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
TCP_SKB_CB(skb)->sacked = 0;
__skb_queue_tail(&sk->sk_write_queue, skb);
- tcp_charge_skb(sk, skb);
- if (!tp->send_head)
- tp->send_head = skb;
+ sk_charge_skb(sk, skb);
+ if (!sk->sk_send_head)
+ sk->sk_send_head = skb;
else if (tp->nonagle&TCP_NAGLE_PUSH)
tp->nonagle &= ~TCP_NAGLE_PUSH;
}
static inline void tcp_push(struct sock *sk, struct tcp_opt *tp, int flags,
int mss_now, int nonagle)
{
- if (tp->send_head) {
+ if (sk->sk_send_head) {
struct sk_buff *skb = sk->sk_write_queue.prev;
if (!(flags & MSG_MORE) || forced_push(tp))
tcp_mark_push(tp, skb);
}
}
-static int tcp_error(struct sock *sk, int flags, int err)
-{
- if (err == -EPIPE)
- err = sock_error(sk) ? : -EPIPE;
- if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
- send_sig(SIGPIPE, current, 0);
- return err;
-}
-
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
size_t psize, int flags)
{
/* Wait for a connection to finish. */
if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
- if ((err = wait_for_tcp_connect(sk, 0, &timeo)) != 0)
+ if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
goto out_err;
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
int offset = poffset % PAGE_SIZE;
int size = min_t(size_t, psize, PAGE_SIZE - offset);
- if (!tp->send_head || (copy = mss_now - skb->len) <= 0) {
+ if (!sk->sk_send_head || (copy = mss_now - skb->len) <= 0) {
new_segment:
- if (!tcp_memory_free(sk))
+ if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
- skb = tcp_alloc_pskb(sk, 0, tp->mss_cache,
- sk->sk_allocation);
+ skb = sk_stream_alloc_pskb(sk, 0, tp->mss_cache,
+ sk->sk_allocation);
if (!skb)
goto wait_for_memory;
copy = size;
i = skb_shinfo(skb)->nr_frags;
- if (can_coalesce(skb, i, page, offset)) {
+ if (skb_can_coalesce(skb, i, page, offset)) {
skb_shinfo(skb)->frags[i - 1].size += copy;
} else if (i < MAX_SKB_FRAGS) {
get_page(page);
- fill_page_desc(skb, i, page, offset, copy);
+ skb_fill_page_desc(skb, i, page, offset, copy);
} else {
tcp_mark_push(tp, skb);
goto new_segment;
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
- } else if (skb == tp->send_head)
+ } else if (skb == sk->sk_send_head)
tcp_push_one(sk, mss_now);
continue;
if (copied)
tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
- if ((err = wait_for_tcp_memory(sk, &timeo)) != 0)
+ if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
if (copied)
goto out;
out_err:
- return tcp_error(sk, flags, err);
+ return sk_stream_error(sk, flags, err);
}
ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
return res;
}
-#define TCP_PAGE(sk) (inet_sk(sk)->sndmsg_page)
-#define TCP_OFF(sk) (inet_sk(sk)->sndmsg_off)
-
-static inline int tcp_copy_to_page(struct sock *sk, char __user *from,
- struct sk_buff *skb, struct page *page,
- int off, int copy)
-{
- int err = 0;
- unsigned int csum;
-
- if (skb->ip_summed == CHECKSUM_NONE) {
- csum = csum_and_copy_from_user(from, page_address(page) + off,
- copy, 0, &err);
- if (err) return err;
- skb->csum = csum_block_add(skb->csum, csum, skb->len);
- } else {
- if (copy_from_user(page_address(page) + off, from, copy))
- return -EFAULT;
- }
-
- skb->len += copy;
- skb->data_len += copy;
- skb->truesize += copy;
- sk->sk_wmem_queued += copy;
- sk->sk_forward_alloc -= copy;
- return 0;
-}
-
-static inline int skb_add_data(struct sk_buff *skb, char __user *from, int copy)
-{
- int err = 0;
- unsigned int csum;
- int off = skb->len;
-
- if (skb->ip_summed == CHECKSUM_NONE) {
- csum = csum_and_copy_from_user(from, skb_put(skb, copy),
- copy, 0, &err);
- if (!err) {
- skb->csum = csum_block_add(skb->csum, csum, off);
- return 0;
- }
- } else {
- if (!copy_from_user(skb_put(skb, copy), from, copy))
- return 0;
- }
-
- __skb_trim(skb, off);
- return -EFAULT;
-}
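+/* Cached page used to append sendmsg data, and the current offset into it. */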
+#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
+#define TCP_OFF(sk) (sk->sk_sndmsg_off)
static inline int select_size(struct sock *sk, struct tcp_opt *tp)
{
/* Wait for a connection to finish. */
if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
- if ((err = wait_for_tcp_connect(sk, flags, &timeo)) != 0)
+ if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
goto out_err;
/* This should be in poll */
skb = sk->sk_write_queue.prev;
- if (!tp->send_head ||
+ if (!sk->sk_send_head ||
(copy = mss_now - skb->len) <= 0) {
new_segment:
/* Allocate new segment. If the interface is SG,
* allocate skb fitting to single page.
*/
- if (!tcp_memory_free(sk))
+ if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
- skb = tcp_alloc_pskb(sk, select_size(sk, tp),
- 0, sk->sk_allocation);
+ skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
+ 0, sk->sk_allocation);
if (!skb)
goto wait_for_memory;
struct page *page = TCP_PAGE(sk);
int off = TCP_OFF(sk);
- if (can_coalesce(skb, i, page, off) &&
+ if (skb_can_coalesce(skb, i, page, off) &&
off != PAGE_SIZE) {
/* We can extend the last page
* fragment. */
if (!page) {
/* Allocate new cache page. */
- if (!(page = tcp_alloc_page(sk)))
+ if (!(page = sk_stream_alloc_page(sk)))
goto wait_for_memory;
off = 0;
}
/* Time to copy data. We are close to
* the end! */
- err = tcp_copy_to_page(sk, from, skb, page,
+ err = skb_copy_to_page(sk, from, skb, page,
off, copy);
if (err) {
/* If this page was new, give it to the
skb_shinfo(skb)->frags[i - 1].size +=
copy;
} else {
- fill_page_desc(skb, i, page, off, copy);
+ skb_fill_page_desc(skb, i, page, off, copy);
if (TCP_PAGE(sk)) {
get_page(page);
} else if (off + copy < PAGE_SIZE) {
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
- } else if (skb == tp->send_head)
+ } else if (skb == sk->sk_send_head)
tcp_push_one(sk, mss_now);
continue;
if (copied)
tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
- if ((err = wait_for_tcp_memory(sk, &timeo)) != 0)
+ if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
do_fault:
if (!skb->len) {
- if (tp->send_head == skb)
- tp->send_head = NULL;
+ if (sk->sk_send_head == skb)
+ sk->sk_send_head = NULL;
__skb_unlink(skb, skb->list);
- tcp_free_skb(sk, skb);
+ sk_stream_free_skb(sk, skb);
}
do_error:
if (copied)
goto out;
out_err:
- err = tcp_error(sk, flags, err);
+ err = sk_stream_error(sk, flags, err);
TCP_CHECK_TIMER(sk);
release_sock(sk);
return err;
return -EAGAIN;
}
-/*
- * Release a skb if it is no longer needed. This routine
- * must be called with interrupts disabled or with the
- * socket locked so that the sk_buff queue operation is ok.
- */
-
-static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
-{
- __skb_unlink(skb, &sk->sk_receive_queue);
- __kfree_skb(skb);
-}
-
/* Clean up the receive buffer for full frames taken by the user,
* then send an ACK if necessary. COPIED is the number of bytes
* tcp_recvmsg has given to the user so far, it speeds up the
tcp_send_ack(sk);
}
-/* Now socket state including sk->sk_err is changed only under lock,
- * hence we may omit checks after joining wait queue.
- * We check receive queue before schedule() only as optimization;
- * it is very likely that release_sock() added new data.
- */
-
-static long tcp_data_wait(struct sock *sk, long timeo)
-{
- DEFINE_WAIT(wait);
-
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
-
- set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
- release_sock(sk);
-
- if (skb_queue_empty(&sk->sk_receive_queue))
- timeo = schedule_timeout(timeo);
-
- lock_sock(sk);
- clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-
- finish_wait(sk->sk_sleep, &wait);
- return timeo;
-}
-
static void tcp_prequeue_process(struct sock *sk)
{
struct sk_buff *skb;
struct tcp_opt *tp = tcp_sk(sk);
- NET_ADD_STATS_USER(TCPPrequeued, skb_queue_len(&tp->ucopy.prequeue));
+ NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
/* RX process wants to run with disabled BHs, though it is not
* necessary */
break;
}
if (skb->h.th->fin) {
- tcp_eat_skb(sk, skb);
+ sk_eat_skb(sk, skb);
++seq;
break;
}
- tcp_eat_skb(sk, skb);
+ sk_eat_skb(sk, skb);
if (!desc->count)
break;
}
/* Do not sleep, just process backlog. */
release_sock(sk);
lock_sock(sk);
- } else {
- timeo = tcp_data_wait(sk, timeo);
- }
+ } else
+ sk_wait_data(sk, &timeo);
if (user_recv) {
int chunk;
/* __ Restore normal policy in scheduler __ */
if ((chunk = len - tp->ucopy.len) != 0) {
- NET_ADD_STATS_USER(TCPDirectCopyFromBacklog, chunk);
+ NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
len -= chunk;
copied += chunk;
}
tcp_prequeue_process(sk);
if ((chunk = len - tp->ucopy.len) != 0) {
- NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
+ NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
len -= chunk;
copied += chunk;
}
if (skb->h.th->fin)
goto found_fin_ok;
if (!(flags & MSG_PEEK))
- tcp_eat_skb(sk, skb);
+ sk_eat_skb(sk, skb);
continue;
found_fin_ok:
/* Process the FIN. */
++*seq;
if (!(flags & MSG_PEEK))
- tcp_eat_skb(sk, skb);
+ sk_eat_skb(sk, skb);
break;
} while (len > 0);
tcp_prequeue_process(sk);
if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
- NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
+ NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
len -= chunk;
copied += chunk;
}
}
}
-
-/*
- * Return 1 if we still have things to send in our buffers.
- */
-
-static inline int closing(struct sock *sk)
-{
- return (1 << sk->sk_state) &
- (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK);
-}
-
-static __inline__ void tcp_kill_sk_queues(struct sock *sk)
-{
- /* First the read buffer. */
- __skb_queue_purge(&sk->sk_receive_queue);
-
- /* Next, the error queue. */
- __skb_queue_purge(&sk->sk_error_queue);
-
- /* Next, the write queue. */
- BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));
-
- /* Account for returned memory. */
- tcp_mem_reclaim(sk);
-
- BUG_TRAP(!sk->sk_wmem_queued);
- BUG_TRAP(!sk->sk_forward_alloc);
-
- /* It is _impossible_ for the backlog to contain anything
- * when we get here. All user references to this socket
- * have gone away, only the net layer knows can touch it.
- */
-}
-
/*
* At this point, there should be no process reference to this
* socket, and thus no user references at all. Therefore we
sk->sk_prot->destroy(sk);
- tcp_kill_sk_queues(sk);
+ sk_stream_kill_queues(sk);
xfrm_sk_free_policy(sk);
__kfree_skb(skb);
}
- tcp_mem_reclaim(sk);
+ sk_stream_mem_reclaim(sk);
/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
* 3.10, we send a RST here because data was lost. To
*/
if (data_was_unread) {
/* Unread data was tossed, zap the connection. */
- NET_INC_STATS_USER(TCPAbortOnClose);
+ NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_KERNEL);
} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
/* Check zero linger _after_ checking for unread data. */
sk->sk_prot->disconnect(sk, 0);
- NET_INC_STATS_USER(TCPAbortOnData);
+ NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
} else if (tcp_close_state(sk)) {
/* We FIN if the application ate all the data before
* zapping the connection.
tcp_send_fin(sk);
}
- if (timeout) {
- struct task_struct *tsk = current;
- DEFINE_WAIT(wait);
-
- do {
- prepare_to_wait(sk->sk_sleep, &wait,
- TASK_INTERRUPTIBLE);
- if (!closing(sk))
- break;
- release_sock(sk);
- timeout = schedule_timeout(timeout);
- lock_sock(sk);
- } while (!signal_pending(tsk) && timeout);
-
- finish_wait(sk->sk_sleep, &wait);
- }
+ sk_stream_wait_close(sk, timeout);
adjudge_to_death:
/* It is the last release_sock in its life. It will remove backlog. */
if (tp->linger2 < 0) {
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC);
- NET_INC_STATS_BH(TCPAbortOnLinger);
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
} else {
int tmo = tcp_fin_time(tp);
}
}
if (sk->sk_state != TCP_CLOSE) {
- tcp_mem_reclaim(sk);
+ sk_stream_mem_reclaim(sk);
if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
(sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
"sockets\n");
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC);
- NET_INC_STATS_BH(TCPAbortOnMemory);
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
}
}
atomic_inc(&tcp_orphan_count);
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
- tcp_writequeue_purge(sk);
+ sk_stream_writequeue_purge(sk);
__skb_queue_purge(&tp->out_of_order_queue);
inet->dport = 0;
tcp_set_ca_state(tp, TCP_CA_Open);
tcp_clear_retrans(tp);
tcp_delack_init(tp);
- tp->send_head = NULL;
+ sk->sk_send_head = NULL;
tp->saw_tstamp = 0;
tcp_sack_reset(tp);
__sk_dst_reset(sk);
req = tp->accept_queue;
if ((tp->accept_queue = req->dl_next) == NULL)
tp->accept_queue_tail = NULL;
-
- tcp_acceptq_removed(sk);
+ newsk = req->sk;
+ sk_acceptq_removed(sk);
#else
first = tp->class_index;
/* We should always have request queued here. The accept_queue
if((++(tp->acceptq[first].aq_cnt)) >= tp->acceptq[first].aq_ratio){
tp->acceptq[first].aq_cnt = 0;
- tp->class_index = ++first & ~NUM_ACCEPT_QUEUES;
+ tp->class_index = ++first & (NUM_ACCEPT_QUEUES-1);
}
- tcp_acceptq_removed(sk, req->acceptq_class);
-#endif
newsk = req->sk;
+ sk_acceptq_removed(sk, req->acceptq_class);
+#endif
tcp_openreq_fastfree(req);
BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
release_sock(sk);
return NULL;
}
+
/*
* Socket option code for TCP.
*/
#ifdef CONFIG_ACCEPT_QUEUES
case TCP_ACCEPTQ_SHARE:
+#ifdef CONFIG_CKRM
+ /* With CKRM configured, accept-queue shares are set through rcfs,
+ * so setting them here is not supported; getting shares still succeeds. */
+ err = -EOPNOTSUPP;
+ break;
+#else
{
char share_wt[NUM_ACCEPT_QUEUES];
int i,j;
else if (share_wt[i] < j) {
j = share_wt[i];
}
- tp->acceptq[i].aq_valid = 1;
}
else
- tp->acceptq[i].aq_valid = 0;
+ tp->acceptq[i].aq_ratio = 0;
}
if (j == 0) {
* specified set class 0 as 1.
*/
share_wt[0] = 1;
- tp->acceptq[0].aq_valid = 1;
j = 1;
}
for (i=0; i < NUM_ACCEPT_QUEUES; i++) {
}
break;
#endif
-
+#endif
default:
err = -ENOPROTOOPT;
break;
break;
case TCP_INFO: {
struct tcp_info info;
- u32 now = tcp_time_stamp;
if (get_user(len, optlen))
return -EFAULT;
- info.tcpi_state = sk->sk_state;
- info.tcpi_ca_state = tp->ca_state;
- info.tcpi_retransmits = tp->retransmits;
- info.tcpi_probes = tp->probes_out;
- info.tcpi_backoff = tp->backoff;
- info.tcpi_options = 0;
- if (tp->tstamp_ok)
- info.tcpi_options |= TCPI_OPT_TIMESTAMPS;
- if (tp->sack_ok)
- info.tcpi_options |= TCPI_OPT_SACK;
- if (tp->wscale_ok) {
- info.tcpi_options |= TCPI_OPT_WSCALE;
- info.tcpi_snd_wscale = tp->snd_wscale;
- info.tcpi_rcv_wscale = tp->rcv_wscale;
- } else {
- info.tcpi_snd_wscale = 0;
- info.tcpi_rcv_wscale = 0;
- }
- if (tp->ecn_flags & TCP_ECN_OK)
- info.tcpi_options |= TCPI_OPT_ECN;
-
- info.tcpi_rto = (1000000 * tp->rto) / HZ;
- info.tcpi_ato = (1000000 * tp->ack.ato) / HZ;
- info.tcpi_snd_mss = tp->mss_cache_std;
- info.tcpi_rcv_mss = tp->ack.rcv_mss;
-
- info.tcpi_unacked = tp->packets_out;
- info.tcpi_sacked = tp->sacked_out;
- info.tcpi_lost = tp->lost_out;
- info.tcpi_retrans = tp->retrans_out;
- info.tcpi_fackets = tp->fackets_out;
-
- info.tcpi_last_data_sent = ((now - tp->lsndtime) * 1000) / HZ;
- info.tcpi_last_ack_sent = 0;
- info.tcpi_last_data_recv = ((now -
- tp->ack.lrcvtime) * 1000) / HZ;
- info.tcpi_last_ack_recv = ((now - tp->rcv_tstamp) * 1000) / HZ;
-
- info.tcpi_pmtu = tp->pmtu_cookie;
- info.tcpi_rcv_ssthresh = tp->rcv_ssthresh;
- info.tcpi_rtt = ((1000000 * tp->srtt) / HZ) >> 3;
- info.tcpi_rttvar = ((1000000 * tp->mdev) / HZ) >> 2;
- info.tcpi_snd_ssthresh = tp->snd_ssthresh;
- info.tcpi_snd_cwnd = tp->snd_cwnd;
- info.tcpi_advmss = tp->advmss;
- info.tcpi_reordering = tp->reordering;
+
+ tcp_get_info(sk, &info);
len = min_t(unsigned int, len, sizeof(info));
if (put_user(len, optlen))
break;
#ifdef CONFIG_ACCEPT_QUEUES
- case TCP_ACCEPTQ_SHARE: {
+ case TCP_ACCEPTQ_SHARE:
+ {
struct tcp_acceptq_info tinfo[NUM_ACCEPT_QUEUES];
int i;
for(i=0; i < NUM_ACCEPT_QUEUES; i++) {
tinfo[i].acceptq_wait_time =
- tp->acceptq[i].aq_wait_time/(HZ/USER_HZ);
+ jiffies_to_msecs(tp->acceptq[i].aq_wait_time);
tinfo[i].acceptq_qcount = tp->acceptq[i].aq_qcount;
tinfo[i].acceptq_count = tp->acceptq[i].aq_count;
- if (tp->acceptq[i].aq_valid)
- tinfo[i].acceptq_shares=tp->acceptq[i].aq_ratio;
- else
- tinfo[i].acceptq_shares = 0;
+ tinfo[i].acceptq_shares = tp->acceptq[i].aq_ratio;
}
len = min_t(unsigned int, len, sizeof(tinfo));
return 0;
}
+ break;
#endif
default:
return -ENOPROTOOPT;
tcpdiag_init();
}
-EXPORT_SYMBOL(__tcp_mem_reclaim);
-EXPORT_SYMBOL(sysctl_tcp_rmem);
-EXPORT_SYMBOL(sysctl_tcp_wmem);
EXPORT_SYMBOL(tcp_accept);
EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_close_state);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
-EXPORT_SYMBOL(tcp_sockets_allocated);
EXPORT_SYMBOL(tcp_statistics);
EXPORT_SYMBOL(tcp_timewait_cachep);
-EXPORT_SYMBOL(tcp_write_space);
EXPORT_SYMBOL_GPL(cleanup_rbuf);