/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_call().
 *  -	xprt_call transmits the message and installs the caller on the
 *	socket's wait list. At the same time, it installs a timer that
 *	is run after the packet's timeout has expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that socket. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
 *  TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
 *  TCP NFS related read + write fixes
 *   (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 *  Rewrite of large parts of the code in order to stabilize TCP stuff.
 *  Fix behaviour when socket buffer is full.
 *   (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 */
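/*
 * A minimal sketch of the sequence above, as driven from clnt.c (these
 * are the entry points this file actually exports; error handling and
 * retransmission logic omitted):
 *
 *	xprt_reserve(task);		- grab a slot or sleep on the backlog
 *	... marshal the RPC message into task->tk_rqstp ...
 *	if (xprt_prepare_transmit(task) == 0)
 *		xprt_transmit(task);	- send; the reply handler wakes the task
 *	... on wakeup, decode the reply ...
 *	xprt_release(task);		- return the slot, wake backlog waiters
 */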
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/file.h>
#include <linux/workqueue.h>
#include <linux/random.h>

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>
#ifdef RPC_DEBUG
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif
#define XPRT_MAX_BACKOFF	(8)
#define XPRT_IDLE_TIMEOUT	(5*60*HZ)
#define XPRT_MAX_RESVPORT	(800)
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static inline void	do_xprt_reserve(struct rpc_task *);
static void	xprt_disconnect(struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static struct rpc_xprt * xprt_setup(int proto, struct sockaddr_in *ap,
						struct rpc_timeout *to);
static struct socket *xprt_create_socket(struct rpc_xprt *, int, int);
static void	xprt_bind_socket(struct rpc_xprt *, struct socket *);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
#ifdef RPC_DEBUG_DATA
/*
 * Print the buffer contents (first 128 bytes only--just enough for
 * debugging)
 */
static void
xprt_pktdump(char *msg, u32 *packet, unsigned int count)
{
	u8	*buf = (u8 *) packet;
	int	j;

	dprintk("RPC: %s\n", msg);
	for (j = 0; j < count && j < 128; j += 4) {
		if (!(j & 31)) {
			if (j)
				dprintk("\n");
			dprintk("0x%04x ", j);
		}
		dprintk("%02x%02x%02x%02x ",
			buf[j], buf[j+1], buf[j+2], buf[j+3]);
	}
	dprintk("\n");
}
#else
static inline void
xprt_pktdump(char *msg, u32 *packet, unsigned int count)
{
	/* NOP */
}
#endif
/*
 * Look up RPC transport given an INET socket
 */
static inline struct rpc_xprt *
xprt_from_sock(struct sock *sk)
{
	return (struct rpc_xprt *) sk->sk_user_data;
}
/*
 * Serialize write access to sockets, in order to prevent different
 * requests from interfering with each other.
 * Also prevents TCP socket connects from colliding with writes.
 */
static int
__xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (xprt->nocong || __xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return 1;
	}
	smp_mb__before_clear_bit();
	clear_bit(XPRT_LOCKED, &xprt->sockstate);
	smp_mb__after_clear_bit();
out_sleep:
	dprintk("RPC: %4d failed to lock socket %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
	return 0;
}
static inline int
xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->sock_lock);
	retval = __xprt_lock_write(xprt, task);
	spin_unlock_bh(&xprt->sock_lock);
	return retval;
}
static void
__xprt_lock_write_next(struct rpc_xprt *xprt)
{
	struct rpc_task *task;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate))
		return;
	if (!xprt->nocong && RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}
	if (xprt->nocong || __xprt_get_cong(xprt, task)) {
		struct rpc_rqst *req = task->tk_rqstp;
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return;
	}
out_unlock:
	smp_mb__before_clear_bit();
	clear_bit(XPRT_LOCKED, &xprt->sockstate);
	smp_mb__after_clear_bit();
}
/*
 * Releases the socket for use by other requests.
 */
static void
__xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt->snd_task = NULL;
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->sockstate);
		smp_mb__after_clear_bit();
		__xprt_lock_write_next(xprt);
	}
}
static inline void
xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->sock_lock);
	__xprt_release_write(xprt, task);
	spin_unlock_bh(&xprt->sock_lock);
}
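/*
 * Summary of the locking scheme above: the XPRT_LOCKED bit serializes
 * all writers on the socket, and xprt->snd_task records the holder so
 * the owning task may re-enter. On release, __xprt_lock_write_next()
 * hands the lock straight to the next waiter, preferring tasks on the
 * resend queue (retransmissions) over fresh sends on the sending queue.
 */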
/*
 * Write data to socket.
 */
static inline int
xprt_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	struct socket	*sock = xprt->sock;
	struct xdr_buf	*xdr = &req->rq_snd_buf;
	struct sockaddr *addr = NULL;
	int		addrlen = 0;
	unsigned int	skip;
	int		result;

	if (!sock)
		return -ENOTCONN;

	xprt_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	/* For UDP, we need to provide an address */
	if (!xprt->stream) {
		addr = (struct sockaddr *) &xprt->addr;
		addrlen = sizeof(xprt->addr);
	}
	/* Don't repeat bytes */
	skip = req->rq_bytes_sent;

	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
	result = xdr_sendpages(sock, addr, addrlen, xdr, skip, MSG_DONTWAIT);

	dprintk("RPC: xprt_sendmsg(%d) = %d\n", xdr->len - skip, result);

	if (result >= 0)
		return result;

	switch (result) {
	case -ECONNREFUSED:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED.
		 */
	case -EAGAIN:
		break;
	case -ECONNRESET:
	case -ENOTCONN:
	case -EPIPE:
		/* connection broken */
		if (xprt->stream)
			result = -ENOTCONN;
		break;
	default:
		printk(KERN_NOTICE "RPC: sendmsg returned error %d\n", -result);
	}
	return result;
}
/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %4d xprt_cwnd_limited cong = %ld cwnd = %ld\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}
/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next(xprt);
}
/*
 * Adjust RPC congestion window
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 */
static void
xprt_adjust_cwnd(struct rpc_xprt *xprt, int result)
{
	unsigned long	cwnd;

	cwnd = xprt->cwnd;
	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
}
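/*
 * Worked example of the additive increase above, assuming
 * RPC_CWNDSCALE == 256 (its value in the sunrpc headers): with
 * cwnd == 512, a timely reply grows the window by
 *	(256*256 + 512/2) / 512 == 128,
 * i.e. roughly RPC_CWNDSCALE^2/cwnd per reply, so cwnd gains about one
 * full RPC_CWNDSCALE unit per window's worth of replies. A timeout
 * halves cwnd instead, giving the usual AIMD dynamics.
 */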
/*
 * Adjust timeout values etc for next retransmit
 */
int
xprt_adjust_timeout(struct rpc_timeout *to)
{
	if (to->to_retries > 0) {
		if (to->to_exponential)
			to->to_current <<= 1;
		else
			to->to_current += to->to_increment;
		if (to->to_maxval && to->to_current >= to->to_maxval)
			to->to_current = to->to_maxval;
	} else {
		if (to->to_exponential)
			to->to_initval <<= 1;
		else
			to->to_initval += to->to_increment;
		if (to->to_maxval && to->to_initval >= to->to_maxval)
			to->to_initval = to->to_maxval;
		to->to_current = to->to_initval;
	}

	if (!to->to_current) {
		printk(KERN_WARNING "xprt_adjust_timeout: to_current = 0!\n");
		to->to_current = 5 * HZ;
	}
	pprintk("RPC: %lu %s\n", jiffies,
			to->to_retries? "retrans" : "timeout");
	return to->to_retries-- > 0;
}
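/*
 * Example progression, using the linear defaults installed by
 * xprt_set_timeout(to, 5, 5 * HZ) further down: to_current starts at 5s
 * and grows by to_increment on each retransmission, i.e. 5s, 10s, 15s,
 * 20s, 25s, capped at to_maxval (25s). Once to_retries is exhausted the
 * function returns 0, signalling a major timeout, and to_current is
 * restarted from a (possibly bumped) to_initval.
 */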
/*
 * Close down a transport socket
 */
static void
xprt_close(struct rpc_xprt *xprt)
{
	struct socket	*sock = xprt->sock;
	struct sock	*sk = xprt->inet;

	if (!sk)
		return;

	write_lock_bh(&sk->sk_callback_lock);
	xprt->inet = NULL;
	xprt->sock = NULL;

	sk->sk_user_data    = NULL;
	sk->sk_data_ready   = xprt->old_data_ready;
	sk->sk_state_change = xprt->old_state_change;
	sk->sk_write_space  = xprt->old_write_space;
	write_unlock_bh(&sk->sk_callback_lock);

	sk->sk_no_check = 0;

	sock_release(sock);
}
static void
xprt_socket_autoclose(void *args)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)args;

	xprt_disconnect(xprt);
	xprt_close(xprt);
	xprt_release_write(xprt, NULL);
}
/*
 * Mark a transport as disconnected
 */
static void
xprt_disconnect(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->sock_lock);
	xprt_clear_connected(xprt);
	rpc_wake_up_status(&xprt->pending, -ENOTCONN);
	spin_unlock_bh(&xprt->sock_lock);
}
/*
 * Used to allow disconnection when we've been idle
 */
static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->sock_lock);
	if (!list_empty(&xprt->recv) || xprt->shutdown)
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate))
		goto out_abort;
	spin_unlock(&xprt->sock_lock);
	/* Let keventd close the socket */
	if (test_bit(XPRT_CONNECTING, &xprt->sockstate) != 0)
		xprt_release_write(xprt, NULL);
	else
		schedule_work(&xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->sock_lock);
}
static void xprt_socket_connect(void *args)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
	struct socket *sock = xprt->sock;
	int status = -EIO;

	if (xprt->shutdown || xprt->addr.sin_port == 0)
		goto out;

	/*
	 * Start by resetting any existing state
	 */
	xprt_close(xprt);
	sock = xprt_create_socket(xprt, xprt->prot, xprt->resvport);
	if (sock == NULL) {
		/* couldn't create socket or bind to reserved port;
		 * this is likely a permanent error, so cause an abort */
		goto out;
	}
	xprt_bind_socket(xprt, sock);
	xprt_sock_setbufsize(xprt);

	status = 0;
	if (!xprt->stream)
		goto out;

	/*
	 * Tell the socket layer to start connecting...
	 */
	status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
			sizeof(xprt->addr), O_NONBLOCK);
	dprintk("RPC: %p connect status %d connected %d sock state %d\n",
			xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
	if (status < 0) {
		switch (status) {
		case -EINPROGRESS:
		case -EALREADY:
			goto out_clear;
		default:
			break;
		}
	}
out:
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
out_clear:
	smp_mb__before_clear_bit();
	clear_bit(XPRT_CONNECTING, &xprt->sockstate);
	smp_mb__after_clear_bit();
}
/*
 * Attempt to connect a TCP socket.
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	dprintk("RPC: %4d xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (xprt->shutdown) {
		task->tk_status = -EIO;
		return;
	}
	if (!xprt->addr.sin_port) {
		task->tk_status = -EIO;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;
	if (xprt_connected(xprt))
		goto out_write;

	if (task->tk_rqstp)
		task->tk_rqstp->rq_bytes_sent = 0;

	task->tk_timeout = RPC_CONNECT_TIMEOUT;
	rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
	if (!test_and_set_bit(XPRT_CONNECTING, &xprt->sockstate))
		schedule_work(&xprt->sock_connect);
	return;
out_write:
	xprt_release_write(xprt, task);
}
/*
 * We arrive here when awoken from waiting on connection establishment.
 */
static void
xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	if (task->tk_status >= 0) {
		dprintk("RPC: %4d xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	/* if soft mounted, just cause this RPC to fail */
	if (RPC_IS_SOFT(task))
		task->tk_status = -EIO;

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ENOTCONN:
		rpc_delay(task, RPC_REESTABLISH_TIMEOUT);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %4d xprt_connect_status: timed out\n",
				task->tk_pid);
		break;
	default:
		printk(KERN_ERR "RPC: error %d connecting to server %s\n",
				-task->tk_status, task->tk_client->cl_server);
	}
	xprt_release_write(xprt, task);
}
/*
 * Look up the RPC request corresponding to a reply, and then lock it.
 */
static inline struct rpc_rqst *
xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
{
	struct list_head *pos;
	struct rpc_rqst	*req = NULL;

	list_for_each(pos, &xprt->recv) {
		struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list);
		if (entry->rq_xid == xid) {
			req = entry;
			break;
		}
	}
	return req;
}
/*
 * Complete reply received.
 * The TCP code relies on us to remove the request from xprt->pending.
 */
static void
xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied)
{
	struct rpc_task	*task = req->rq_task;
	struct rpc_clnt *clnt = task->tk_client;

	/* Adjust congestion window */
	if (!xprt->nocong) {
		unsigned timer = task->tk_msg.rpc_proc->p_timer;
		xprt_adjust_cwnd(xprt, copied);
		__xprt_put_cong(xprt, req);
		if (timer) {
			if (req->rq_ntrans == 1)
				rpc_update_rtt(clnt->cl_rtt, timer,
						(long)jiffies - req->rq_xtime);
			rpc_set_timeo(clnt->cl_rtt, timer, req->rq_ntrans - 1);
		}
	}

#ifdef RPC_PROFILE
	/* Profile only reads for now */
	if (copied > 1024) {
		static unsigned long	nextstat;
		static unsigned long	pkt_rtt, pkt_len, pkt_cnt;

		pkt_cnt++;
		pkt_len += req->rq_slen + copied;
		pkt_rtt += jiffies - req->rq_xtime;
		if (time_before(nextstat, jiffies)) {
			printk("RPC: %lu %ld cwnd\n", jiffies, xprt->cwnd);
			printk("RPC: %ld %ld %ld %ld stat\n",
					jiffies, pkt_cnt, pkt_len, pkt_rtt);
			pkt_rtt = pkt_len = pkt_cnt = 0;
			nextstat = jiffies + 5 * HZ;
		}
	}
#endif

	dprintk("RPC: %4d has input (%d bytes)\n", task->tk_pid, copied);
	list_del_init(&req->rq_list);
	req->rq_received = req->rq_private_buf.len = copied;

	/* ... and wake up the process. */
	rpc_wake_up_task(task);
}
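/*
 * Note that the RTT sample above is taken only when rq_ntrans == 1: a
 * reply to a retransmitted request cannot be attributed to a particular
 * transmission, so (per Karn's algorithm) such replies only update the
 * backoff state via rpc_set_timeo() and never the smoothed RTT itself.
 */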
static size_t
skb_read_bits(skb_reader_t *desc, void *to, size_t len)
{
	if (len > desc->count)
		len = desc->count;
	if (skb_copy_bits(desc->skb, desc->offset, to, len))
		return 0;
	desc->count -= len;
	desc->offset += len;
	return len;
}

static size_t
skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len)
{
	unsigned int csum2, pos;

	if (len > desc->count)
		len = desc->count;
	pos = desc->offset;
	csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
	desc->csum = csum_block_add(desc->csum, csum2, pos);
	desc->count -= len;
	desc->offset += len;
	return len;
}
/*
 * We have set things up such that we perform the checksum of the UDP
 * packet in parallel with the copies into the RPC client iovec. -DaveM
 */
static int
csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{
	skb_reader_t desc;

	desc.skb = skb;
	desc.offset = sizeof(struct udphdr);
	desc.count = skb->len - desc.offset;

	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		goto no_checksum;

	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
	xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits);
	if (desc.offset != skb->len) {
		unsigned int csum2;
		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
		desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
	}
	if ((unsigned short)csum_fold(desc.csum))
		return -1;
	return 0;
no_checksum:
	xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits);
	return 0;
}
/*
 * Input handler for RPC replies. Called from a bottom half and hence
 * atomic.
 */
static void
udp_data_ready(struct sock *sk, int len)
{
	struct rpc_task	*task;
	struct rpc_xprt	*xprt;
	struct rpc_rqst *rovr;
	struct sk_buff	*skb;
	int err, repsize, copied;
	u32 xid;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC: udp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk))) {
		printk("RPC: udp_data_ready request not found!\n");
		goto out;
	}

	dprintk("RPC: udp_data_ready client %p\n", xprt);

	if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
		goto out;

	if (xprt->shutdown)
		goto dropit;

	repsize = skb->len - sizeof(struct udphdr);
	if (repsize < 4) {
		printk("RPC: impossible RPC reply size %d!\n", repsize);
		goto dropit;
	}

	/* Copy the XID from the skb... */
	if (skb_copy_bits(skb, sizeof(struct udphdr), &xid, sizeof(xid)) < 0)
		goto dropit;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->sock_lock);
	rovr = xprt_lookup_rqst(xprt, xid);
	if (!rovr)
		goto out_unlock;
	task = rovr->rq_task;

	dprintk("RPC: %4d received reply\n", task->tk_pid);

	if ((copied = rovr->rq_private_buf.buflen) > repsize)
		copied = repsize;

	/* Suck it into the iovec, verify checksum if not done by hw. */
	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
		goto out_unlock;

	/* Something worked... */
	dst_confirm(skb->dst);

	xprt_complete_rqst(xprt, rovr, copied);

out_unlock:
	spin_unlock(&xprt->sock_lock);
dropit:
	skb_free_datagram(sk, skb);
out:
	read_unlock(&sk->sk_callback_lock);
}
/*
 * Copy from an skb into memory and shrink the skb.
 */
static inline size_t
tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
{
	if (len > desc->count)
		len = desc->count;
	if (skb_copy_bits(desc->skb, desc->offset, p, len))
		return 0;
	desc->offset += len;
	desc->count -= len;
	return len;
}
/*
 * TCP read fragment marker
 */
static inline void
tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len, used;
	char *p;

	p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
	len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
	used = tcp_copy_data(desc, p, len);
	xprt->tcp_offset += used;
	if (used != len)
		return;
	xprt->tcp_reclen = ntohl(xprt->tcp_recm);
	if (xprt->tcp_reclen & 0x80000000)
		xprt->tcp_flags |= XPRT_LAST_FRAG;
	else
		xprt->tcp_flags &= ~XPRT_LAST_FRAG;
	xprt->tcp_reclen &= 0x7fffffff;
	xprt->tcp_flags &= ~XPRT_COPY_RECM;
	xprt->tcp_offset = 0;
	/* Sanity check of the record length */
	if (xprt->tcp_reclen < 4) {
		printk(KERN_ERR "RPC: Invalid TCP record fragment length\n");
		xprt_disconnect(xprt);
	}
	dprintk("RPC: reading TCP record fragment of length %d\n",
			xprt->tcp_reclen);
}
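/*
 * Record marker example: a marker of 0x80000064 announces the last
 * fragment of a record (top bit set) carrying 0x64 == 100 bytes of
 * data, while 0x00001000 announces a 4096-byte fragment with further
 * fragments of the same record still to come.
 */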
static void
tcp_check_recm(struct rpc_xprt *xprt)
{
	if (xprt->tcp_offset == xprt->tcp_reclen) {
		xprt->tcp_flags |= XPRT_COPY_RECM;
		xprt->tcp_offset = 0;
		if (xprt->tcp_flags & XPRT_LAST_FRAG) {
			xprt->tcp_flags &= ~XPRT_COPY_DATA;
			xprt->tcp_flags |= XPRT_COPY_XID;
			xprt->tcp_copied = 0;
		}
	}
}
/*
 * TCP read xid
 */
static inline void
tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len, used;
	char *p;

	len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
	dprintk("RPC: reading XID (%Zu bytes)\n", len);
	p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
	used = tcp_copy_data(desc, p, len);
	xprt->tcp_offset += used;
	if (used != len)
		return;
	xprt->tcp_flags &= ~XPRT_COPY_XID;
	xprt->tcp_flags |= XPRT_COPY_DATA;
	xprt->tcp_copied = 4;
	dprintk("RPC: reading reply for XID %08x\n", xprt->tcp_xid);
	tcp_check_recm(xprt);
}
/*
 * TCP read and complete request
 */
static inline void
tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	struct rpc_rqst *req;
	struct xdr_buf *rcvbuf;
	size_t len;

	/* Find and lock the request corresponding to this xid */
	spin_lock(&xprt->sock_lock);
	req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
	if (!req) {
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
		dprintk("RPC: XID %08x request not found!\n",
				xprt->tcp_xid);
		spin_unlock(&xprt->sock_lock);
		return;
	}

	rcvbuf = &req->rq_private_buf;
	len = desc->count;
	if (len > xprt->tcp_reclen - xprt->tcp_offset) {
		skb_reader_t my_desc;

		len = xprt->tcp_reclen - xprt->tcp_offset;
		memcpy(&my_desc, desc, sizeof(my_desc));
		my_desc.count = len;
		xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
					  &my_desc, tcp_copy_data);
	} else
		xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
					  desc, tcp_copy_data);
	xprt->tcp_copied += len;
	xprt->tcp_offset += len;

	if (xprt->tcp_copied == req->rq_private_buf.buflen)
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
	else if (xprt->tcp_offset == xprt->tcp_reclen) {
		if (xprt->tcp_flags & XPRT_LAST_FRAG)
			xprt->tcp_flags &= ~XPRT_COPY_DATA;
	}

	if (!(xprt->tcp_flags & XPRT_COPY_DATA)) {
		dprintk("RPC: %4d received reply complete\n",
				req->rq_task->tk_pid);
		xprt_complete_rqst(xprt, req, xprt->tcp_copied);
	}
	spin_unlock(&xprt->sock_lock);
	tcp_check_recm(xprt);
}
/*
 * TCP discard extra bytes from a short read
 */
static inline void
tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len;

	len = xprt->tcp_reclen - xprt->tcp_offset;
	if (len > desc->count)
		len = desc->count;
	desc->count -= len;
	desc->offset += len;
	xprt->tcp_offset += len;
	tcp_check_recm(xprt);
}
/*
 * TCP record receive routine
 * We first have to grab the record marker, then the XID, then the data.
 */
static int
tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
		unsigned int offset, size_t len)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)rd_desc->buf;
	skb_reader_t desc = {
		.skb	= skb,
		.offset	= offset,
		.count	= len,
		.csum	= 0
	};

	dprintk("RPC: tcp_data_recv\n");
	do {
		/* Read in a new fragment marker if necessary */
		/* Can we ever really expect to get completely empty fragments? */
		if (xprt->tcp_flags & XPRT_COPY_RECM) {
			tcp_read_fraghdr(xprt, &desc);
			continue;
		}
		/* Read in the xid if necessary */
		if (xprt->tcp_flags & XPRT_COPY_XID) {
			tcp_read_xid(xprt, &desc);
			continue;
		}
		/* Read in the request data */
		if (xprt->tcp_flags & XPRT_COPY_DATA) {
			tcp_read_request(xprt, &desc);
			continue;
		}
		/* Skip over any trailing bytes on short reads */
		tcp_read_discard(xprt, &desc);
	} while (desc.count);
	dprintk("RPC: tcp_data_recv done\n");
	return len - desc.count;
}
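/*
 * The receive loop above is a small state machine driven by tcp_flags.
 * Tracing one complete reply through it: COPY_RECM consumes the 4-byte
 * record marker (tcp_read_fraghdr), COPY_XID the 4-byte XID
 * (tcp_read_xid), and COPY_DATA copies the payload into the matched
 * request's buffer (tcp_read_request); any record bytes beyond that
 * buffer fall through to tcp_read_discard. tcp_check_recm() rearms
 * COPY_RECM at each fragment boundary, so records may be split across,
 * or span, multiple skbs.
 */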
static void tcp_data_ready(struct sock *sk, int bytes)
{
	struct rpc_xprt *xprt;
	read_descriptor_t rd_desc;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC: tcp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk))) {
		printk("RPC: tcp_data_ready socket info not found!\n");
		goto out;
	}
	if (xprt->shutdown)
		goto out;

	/* We use rd_desc to pass struct xprt to tcp_data_recv */
	rd_desc.buf = (char *)xprt;
	rd_desc.count = 65536;
	tcp_read_sock(sk, &rd_desc, tcp_data_recv);
out:
	read_unlock(&sk->sk_callback_lock);
}
static void
tcp_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	dprintk("RPC: tcp_state_change client %p...\n", xprt);
	dprintk("RPC: state %x conn %d dead %d zapped %d\n",
			sk->sk_state, xprt_connected(xprt),
			sock_flag(sk, SOCK_DEAD), sk->sk_zapped);

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		spin_lock_bh(&xprt->sock_lock);
		if (!xprt_test_and_set_connected(xprt)) {
			/* Reset TCP record info */
			xprt->tcp_offset = 0;
			xprt->tcp_reclen = 0;
			xprt->tcp_copied = 0;
			xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
			rpc_wake_up(&xprt->pending);
		}
		spin_unlock_bh(&xprt->sock_lock);
		break;
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		break;
	default:
		if (xprt_test_and_clear_connected(xprt))
			rpc_wake_up_status(&xprt->pending, -ENOTCONN);
		break;
	}
out:
	read_unlock(&sk->sk_callback_lock);
}
/*
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing sock_sendmsg
 * with a bunch of small requests.
 */
static void
xprt_write_space(struct sock *sk)
{
	struct rpc_xprt	*xprt;
	struct socket	*sock;

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->sk_socket))
		goto out;
	if (xprt->shutdown)
		goto out;

	/* Wait until we have enough socket memory */
	if (xprt->stream) {
		/* from net/ipv4/tcp.c:tcp_write_space */
		if (tcp_wspace(sk) < tcp_min_write_space(sk))
			goto out;
	} else {
		/* from net/core/sock.c:sock_def_write_space */
		if (!sock_writeable(sk))
			goto out;
	}

	if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))
		goto out;

	spin_lock_bh(&xprt->sock_lock);
	if (xprt->snd_task && xprt->snd_task->tk_rpcwait == &xprt->pending)
		rpc_wake_up_task(xprt->snd_task);
	spin_unlock_bh(&xprt->sock_lock);
out:
	read_unlock(&sk->sk_callback_lock);
}
/*
 * RPC receive timeout handler.
 */
static void
xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->sock_lock);
	if (req->rq_received)
		goto out;

	xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT);
	__xprt_put_cong(xprt, req);

	dprintk("RPC: %4d xprt_timer (%s request)\n",
			task->tk_pid, req ? "pending" : "backlogged");

	task->tk_status = -ETIMEDOUT;
out:
	task->tk_timeout = 0;
	rpc_wake_up_task(task);
	spin_unlock(&xprt->sock_lock);
}
/*
 * Place the actual RPC call.
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
int
xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int err = 0;

	dprintk("RPC: %4d xprt_prepare_transmit\n", task->tk_pid);

	if (xprt->shutdown)
		return -EIO;

	spin_lock_bh(&xprt->sock_lock);
	if (req->rq_received && !req->rq_bytes_sent) {
		err = req->rq_received;
		goto out_unlock;
	}
	if (!__xprt_lock_write(xprt, task)) {
		err = -EAGAIN;
		goto out_unlock;
	}
	if (!xprt_connected(xprt)) {
		err = -ENOTCONN;
		goto out_unlock;
	}
out_unlock:
	spin_unlock_bh(&xprt->sock_lock);
	return err;
}
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status, retry = 0;

	dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	/* set up everything as needed. */
	/* Write the record marker */
	if (xprt->stream) {
		u32 *marker = req->rq_svec[0].iov_base;

		*marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker)));
	}

	if (!req->rq_received) {
		if (list_empty(&req->rq_list)) {
			spin_lock_bh(&xprt->sock_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->sock_lock);
		}
	} else if (!req->rq_bytes_sent)
		return;

	/* Continue transmitting the packet/record. We must be careful
	 * to cope with writespace callbacks arriving _after_ we have
	 * called xprt_sendmsg().
	 */
	while (1) {
		req->rq_xtime = jiffies;
		status = xprt_sendmsg(xprt, req);

		if (status < 0)
			break;

		if (xprt->stream) {
			req->rq_bytes_sent += status;

			/* If we've sent the entire packet, immediately
			 * reset the count of bytes sent. */
			if (req->rq_bytes_sent >= req->rq_slen) {
				req->rq_bytes_sent = 0;
				goto out_receive;
			}
		} else {
			if (status >= req->rq_slen)
				goto out_receive;
			status = -EAGAIN;
			break;
		}

		dprintk("RPC: %4d xmit incomplete (%d left of %d)\n",
				task->tk_pid, req->rq_slen - req->rq_bytes_sent,
				req->rq_slen);

		status = -EAGAIN;
		if (retry++ > 50)
			break;
	}

	/* Note: at this point, task->tk_sleeping has not yet been set,
	 * hence there is no danger of the waking up task being put on
	 * schedq, and being picked up by a parallel run of rpciod().
	 */
	task->tk_status = status;

	switch (status) {
	case -EAGAIN:
		if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
			/* Protect against races with xprt_write_space */
			spin_lock_bh(&xprt->sock_lock);
			/* Don't race with disconnect */
			if (!xprt_connected(xprt))
				task->tk_status = -ENOTCONN;
			else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) {
				task->tk_timeout = req->rq_timeout.to_current;
				rpc_sleep_on(&xprt->pending, task, NULL, NULL);
			}
			spin_unlock_bh(&xprt->sock_lock);
			return;
		}
		/* Keep holding the socket if it is blocked */
		rpc_delay(task, HZ>>4);
		return;
	case -ECONNREFUSED:
		task->tk_timeout = RPC_REESTABLISH_TIMEOUT;
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
	case -ENOTCONN:
		return;
	default:
		if (xprt->stream)
			xprt_disconnect(xprt);
	}
	xprt_release_write(xprt, task);
	return;
out_receive:
	dprintk("RPC: %4d xmit complete\n", task->tk_pid);
	/* Set the task's receive timeout value */
	spin_lock_bh(&xprt->sock_lock);
	if (!xprt->nocong) {
		int timer = task->tk_msg.rpc_proc->p_timer;
		task->tk_timeout = rpc_calc_rto(clnt->cl_rtt, timer);
		task->tk_timeout <<= rpc_ntimeo(clnt->cl_rtt, timer);
		task->tk_timeout <<= clnt->cl_timeout.to_retries
			- req->rq_timeout.to_retries;
		if (task->tk_timeout > req->rq_timeout.to_maxval)
			task->tk_timeout = req->rq_timeout.to_maxval;
	} else
		task->tk_timeout = req->rq_timeout.to_current;
	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else if (!req->rq_received)
		rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
	__xprt_release_write(xprt, task);
	spin_unlock_bh(&xprt->sock_lock);
}
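/*
 * A worked example of the receive-timeout computation above: if
 * rpc_calc_rto() estimates an RTO of 200ms for this procedure's timer
 * class, one previously recorded timeout (rpc_ntimeo() == 1) doubles
 * that to 400ms, and each major retry already consumed doubles it
 * again; the result is always clamped to the request's to_maxval.
 */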
/*
 * Reserve an RPC call slot.
 */
void
xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	task->tk_status = -EIO;
	if (!xprt->shutdown) {
		spin_lock(&xprt->xprt_lock);
		do_xprt_reserve(task);
		spin_unlock(&xprt->xprt_lock);
		if (task->tk_rqstp)
			del_timer_sync(&xprt->timer);
	}
}
static inline void
do_xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp)
		return;
	if (!list_empty(&xprt->free)) {
		struct rpc_rqst	*req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del_init(&req->rq_list);
		task->tk_rqstp = req;
		xprt_request_init(task, xprt);
		return;
	}
	dprintk("RPC: waiting for request slot\n");
	task->tk_status = -EAGAIN;
	task->tk_timeout = 0;
	rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
}
/*
 * Allocate a 'unique' XID
 */
static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	get_random_bytes(&xprt->xid, sizeof(xprt->xid));
}
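/*
 * Starting the XID sequence at a random value (rather than at zero)
 * makes it unlikely that a rebooted client reuses XIDs that are still
 * live in a server's duplicate request cache; subsequent XIDs are then
 * simply handed out sequentially.
 */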
/*
 * Initialize RPC request
 */
static void
xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	req->rq_timeout = xprt->timeout;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_xid     = xprt_alloc_xid(xprt);
	dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid,
			req, req->rq_xid);
}
/*
 * Release an RPC call slot
 */
void
xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_xprt;
	struct rpc_rqst	*req;

	if (!(req = task->tk_rqstp))
		return;
	spin_lock_bh(&xprt->sock_lock);
	__xprt_release_write(xprt, task);
	__xprt_put_cong(xprt, req);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && !xprt->shutdown)
		mod_timer(&xprt->timer, xprt->last_used + XPRT_IDLE_TIMEOUT);
	spin_unlock_bh(&xprt->sock_lock);
	task->tk_rqstp = NULL;
	memset(req, 0, sizeof(*req));	/* mark unused */

	dprintk("RPC: %4d release request %p\n", task->tk_pid, req);

	spin_lock(&xprt->xprt_lock);
	list_add(&req->rq_list, &xprt->free);
	xprt_clear_backlog(xprt);
	spin_unlock(&xprt->xprt_lock);
}
/*
 * Set default timeout parameters
 */
static void
xprt_default_timeout(struct rpc_timeout *to, int proto)
{
	if (proto == IPPROTO_UDP)
		xprt_set_timeout(to, 5, 5 * HZ);
	else
		xprt_set_timeout(to, 5, 60 * HZ);
}
/*
 * Set constant timeout
 */
void
xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
{
	to->to_initval   =
	to->to_current   =
	to->to_increment = incr;
	to->to_maxval    = incr * retr;
	to->to_retries   = retr;
	to->to_exponential = 0;
}
unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;
/*
 * Initialize an RPC client
 */
static struct rpc_xprt *
xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
{
	struct rpc_xprt	*xprt;
	unsigned int entries;
	size_t slot_table_size;
	struct rpc_rqst	*req;

	dprintk("RPC: setting up %s transport...\n",
			proto == IPPROTO_UDP? "UDP" : "TCP");

	entries = (proto == IPPROTO_TCP)?
		xprt_tcp_slot_table_entries : xprt_udp_slot_table_entries;

	if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL)
		return ERR_PTR(-ENOMEM);
	memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */
	xprt->max_reqs = entries;
	slot_table_size = entries * sizeof(xprt->slot[0]);
	xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
	if (xprt->slot == NULL) {
		kfree(xprt);
		return ERR_PTR(-ENOMEM);
	}
	memset(xprt->slot, 0, slot_table_size);

	xprt->addr = *ap;
	xprt->prot = proto;
	xprt->stream = (proto == IPPROTO_TCP)? 1 : 0;
	if (xprt->stream) {
		xprt->cwnd = RPC_MAXCWND(xprt);
		xprt->nocong = 1;
	} else
		xprt->cwnd = RPC_INITCWND;
	spin_lock_init(&xprt->sock_lock);
	spin_lock_init(&xprt->xprt_lock);
	init_waitqueue_head(&xprt->cong_wait);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
	INIT_WORK(&xprt->sock_connect, xprt_socket_connect, xprt);
	INIT_WORK(&xprt->task_cleanup, xprt_socket_autoclose, xprt);
	init_timer(&xprt->timer);
	xprt->timer.function = xprt_init_autodisconnect;
	xprt->timer.data = (unsigned long) xprt;
	xprt->last_used = jiffies;
	xprt->port = XPRT_MAX_RESVPORT;

	/* Set timeout parameters */
	if (to) {
		xprt->timeout = *to;
		xprt->timeout.to_current = to->to_initval;
	} else
		xprt_default_timeout(&xprt->timeout, xprt->prot);

	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_wait_queue(&xprt->resend, "xprt_resend");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	/* initialize free list */
	for (req = &xprt->slot[entries-1]; req >= &xprt->slot[0]; req--)
		list_add(&req->rq_list, &xprt->free);

	xprt_init_xid(xprt);

	/* Check whether we want to use a reserved port */
	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;

	dprintk("RPC: created transport %p with %u slots\n", xprt,
			xprt->max_reqs);

	return xprt;
}
/*
 * Bind to a reserved port
 */
static inline int xprt_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sockaddr_in myaddr = {
		.sin_family = AF_INET,
	};
	int		err, port;

	/* Were we already bound to a given port? Try to reuse it */
	port = xprt->port;
	do {
		myaddr.sin_port = htons(port);
		err = sock->ops->bind(sock, (struct sockaddr *) &myaddr,
						sizeof(myaddr));
		if (err == 0) {
			xprt->port = port;
			return 0;
		}
		if (--port == 0)
			port = XPRT_MAX_RESVPORT;
	} while (err == -EADDRINUSE && port != xprt->port);

	printk("RPC: Can't bind to reserved port (%d).\n", -err);
	return err;
}
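/*
 * Example of the search above: a new transport starts at port 800
 * (XPRT_MAX_RESVPORT). If 800 is busy, the loop tries 799, 798, ...
 * down to 1, then wraps back to 800; it gives up only after coming
 * full circle to its starting port, or on any error other than
 * -EADDRINUSE. The successful port is remembered in xprt->port so a
 * reconnect can try to rebind to the same port first.
 */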
static void
xprt_bind_socket(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sock	*sk = sock->sk;

	if (xprt->inet)
		return;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = xprt;
	xprt->old_data_ready = sk->sk_data_ready;
	xprt->old_state_change = sk->sk_state_change;
	xprt->old_write_space = sk->sk_write_space;
	if (xprt->prot == IPPROTO_UDP) {
		sk->sk_data_ready = udp_data_ready;
		sk->sk_no_check = UDP_CSUM_NORCV;
		xprt_set_connected(xprt);
	} else {
		struct tcp_opt *tp = tcp_sk(sk);
		tp->nonagle = 1;	/* disable Nagle's algorithm */
		sk->sk_data_ready = tcp_data_ready;
		sk->sk_state_change = tcp_state_change;
		xprt_clear_connected(xprt);
	}
	sk->sk_write_space = xprt_write_space;

	/* Reset to new socket */
	xprt->sock = sock;
	xprt->inet = sk;
	write_unlock_bh(&sk->sk_callback_lock);
}
/*
 * Set socket buffer length
 */
void
xprt_sock_setbufsize(struct rpc_xprt *xprt)
{
	struct sock *sk = xprt->inet;

	if (xprt->stream)
		return;
	if (xprt->rcvsize) {
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2;
	}
	if (xprt->sndsize) {
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
		sk->sk_write_space(sk);
	}
}
/*
 * Datastream sockets are created here, but xprt_connect will create
 * and connect stream sockets.
 */
static struct socket * xprt_create_socket(struct rpc_xprt *xprt, int proto, int resvport)
{
	struct socket	*sock;
	int		type, err;

	dprintk("RPC: xprt_create_socket(%s %d)\n",
			(proto == IPPROTO_UDP)? "udp" : "tcp", proto);

	type = (proto == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

	if ((err = sock_create_kern(PF_INET, type, proto, &sock)) < 0) {
		printk("RPC: can't create socket (%d).\n", -err);
		return NULL;
	}

	/* If the caller has the capability, bind to a reserved port */
	if (resvport && xprt_bindresvport(xprt, sock) < 0) {
		printk("RPC: can't bind to reserved port.\n");
		goto failed;
	}

	return sock;

failed:
	sock_release(sock);
	return NULL;
}
/*
 * Create an RPC client transport given the protocol and peer address.
 */
struct rpc_xprt *
xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to)
{
	struct rpc_xprt	*xprt;

	xprt = xprt_setup(proto, sap, to);
	if (IS_ERR(xprt))
		dprintk("RPC: xprt_create_proto failed\n");
	else
		dprintk("RPC: xprt_create_proto created xprt %p\n", xprt);
	return xprt;
}
/*
 * Prepare for transport shutdown.
 */
void
xprt_shutdown(struct rpc_xprt *xprt)
{
	xprt->shutdown = 1;
	rpc_wake_up(&xprt->sending);
	rpc_wake_up(&xprt->resend);
	rpc_wake_up(&xprt->pending);
	rpc_wake_up(&xprt->backlog);
	wake_up(&xprt->cong_wait);
	del_timer_sync(&xprt->timer);
}
/*
 * Clear the xprt backlog queue
 */
static int
xprt_clear_backlog(struct rpc_xprt *xprt) {
	rpc_wake_up_next(&xprt->backlog);
	wake_up(&xprt->cong_wait);
	return 1;
}
/*
 * Destroy an RPC transport, killing off all requests.
 */
int
xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: destroying transport %p\n", xprt);
	xprt_shutdown(xprt);
	xprt_disconnect(xprt);
	xprt_close(xprt);
	kfree(xprt->slot);
	kfree(xprt);

	return 0;
}