/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
/* SMP locking strategy:
 *
 *	svc_serv->sv_lock protects most of the fields of that service.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	SK_BUSY  can be set to 0 at any time.
 *		svc_sock_enqueue must be called afterwards
 *	SK_CONN, SK_DATA, can be set or cleared at any time.
 *		after a set, svc_sock_enqueue must be called.
 *		after a clear, the socket must be read/accepted
 *		 if this succeeds, it must be set again.
 *	SK_CLOSE can be set at any time. It is never cleared.
 */
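
/* As an illustrative sketch of the rules above (not code that is called
 * anywhere), a producer that notices new work on a socket does:
 *
 *	set_bit(SK_DATA, &svsk->sk_flags);
 *	svc_sock_enqueue(svsk);
 *
 * while a consumer clears the flag first, attempts the read, and sets
 * the flag again only if the read actually found data.
 */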
#define RPCDBG_FACILITY	RPCDBG_SVCSOCK


static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int *errp, int pmap_reg);
static void		svc_udp_data_ready(struct sock *, int);
static int		svc_udp_recvfrom(struct svc_rqst *);
static int		svc_udp_sendto(struct svc_rqst *);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

static inline void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(sk->sk_lock.owner != NULL);
	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk,
			"slock-AF_INET-NFSD", &svc_slock_key[0],
			"sk_lock-AF_INET-NFSD", &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk,
			"slock-AF_INET6-NFSD", &svc_slock_key[1],
			"sk_lock-AF_INET6-NFSD", &svc_key[1]);
		break;

	default:
		BUG();
	}
}
#else
static inline void svc_reclassify_socket(struct socket *sock)
{
}
#endif
/*
 * Queue up an idle server thread.  Must have serv->sv_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_serv_enqueue(struct svc_serv *serv, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &serv->sv_threads);
}
/*
 * Dequeue an nfsd thread.  Must have serv->sv_lock held.
 */
static inline void
svc_serv_dequeue(struct svc_serv *serv, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}
/*
 * Release an skbuff after use
 */
static void
svc_release_skb(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_skbuff;
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	if (skb) {
		rqstp->rq_skbuff = NULL;

		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
		skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
	}
	if (dr) {
		rqstp->rq_deferred = NULL;
		kfree(dr);
	}
}
/*
 * Any space to write?
 */
static inline unsigned long
svc_sock_wspace(struct svc_sock *svsk)
{
	int wspace;

	if (svsk->sk_sock->type == SOCK_STREAM)
		wspace = sk_stream_wspace(svsk->sk_sk);
	else
		wspace = sock_wspace(svsk->sk_sk);

	return wspace;
}
/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
	struct svc_serv	*serv = svsk->sk_server;
	struct svc_rqst	*rqstp;

	if (!(svsk->sk_flags &
	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
		return;
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		return;

	spin_lock_bh(&serv->sv_lock);

	if (!list_empty(&serv->sv_threads) &&
	    !list_empty(&serv->sv_sockets))
		printk(KERN_ERR
		       "svc_sock_enqueue: threads and sockets both waiting??\n");

	if (test_bit(SK_DEAD, &svsk->sk_flags)) {
		/* Don't enqueue dead sockets */
		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}

	if (test_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Don't enqueue socket while daemon is receiving */
		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}

	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	if (((svsk->sk_reserved + serv->sv_bufsz)*2
	     > svc_sock_wspace(svsk))
	    && !test_bit(SK_CLOSE, &svsk->sk_flags)
	    && !test_bit(SK_CONN, &svsk->sk_flags)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
			svsk->sk_sk, svsk->sk_reserved+serv->sv_bufsz,
			svc_sock_wspace(svsk));
		goto out_unlock;
	}
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);

	/* Mark socket as busy. It will remain in this state until the
	 * server has processed all pending data and put the socket back
	 * on the idle list.
	 */
	set_bit(SK_BUSY, &svsk->sk_flags);

	if (!list_empty(&serv->sv_threads)) {
		rqstp = list_entry(serv->sv_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: socket %p served by daemon %p\n",
			svsk->sk_sk, rqstp);
		svc_serv_dequeue(serv, rqstp);
		if (rqstp->rq_sock)
			printk(KERN_ERR
			       "svc_sock_enqueue: server %p, rq_sock=%p!\n",
			       rqstp, rqstp->rq_sock);
		rqstp->rq_sock = svsk;
		svsk->sk_inuse++;
		rqstp->rq_reserved = serv->sv_bufsz;
		svsk->sk_reserved += rqstp->rq_reserved;
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
		list_add_tail(&svsk->sk_ready, &serv->sv_sockets);
	}

out_unlock:
	spin_unlock_bh(&serv->sv_lock);
}
/*
 * Dequeue the first socket.  Must be called with the serv->sv_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_serv *serv)
{
	struct svc_sock	*svsk;

	if (list_empty(&serv->sv_sockets))
		return NULL;

	svsk = list_entry(serv->sv_sockets.next,
			  struct svc_sock, sk_ready);
	list_del_init(&svsk->sk_ready);

	dprintk("svc: socket %p dequeued, inuse=%d\n",
		svsk->sk_sk, svsk->sk_inuse);

	return svsk;
}
/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
}
/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_sock *svsk = rqstp->rq_sock;
		spin_lock_bh(&svsk->sk_server->sv_lock);
		svsk->sk_reserved -= (rqstp->rq_reserved - space);
		rqstp->rq_reserved = space;
		spin_unlock_bh(&svsk->sk_server->sv_lock);

		svc_sock_enqueue(svsk);
	}
}
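
/* For example (hypothetical caller): a service procedure that knows its
 * reply needs at most 512 bytes beyond what is already in head[0] can
 * give back the rest of its reservation early with
 *
 *	svc_reserve(rqstp, 512);
 *
 * so that svc_sock_enqueue() stops counting the unused space against
 * the socket's write space.
 */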
/*
 * Release a socket after use.
 */
static inline void
svc_sock_put(struct svc_sock *svsk)
{
	struct svc_serv *serv = svsk->sk_server;

	spin_lock_bh(&serv->sv_lock);
	if (!--(svsk->sk_inuse) && test_bit(SK_DEAD, &svsk->sk_flags)) {
		spin_unlock_bh(&serv->sv_lock);
		dprintk("svc: releasing dead socket\n");
		sock_release(svsk->sk_sock);
		kfree(svsk);
	} else
		spin_unlock_bh(&serv->sv_lock);
}
static void
svc_sock_release(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;

	svc_release_skb(rqstp);

	svc_free_allpages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_sock = NULL;

	svc_sock_put(svsk);
}
/*
 * External function to wake up a server waiting for data
 */
void
svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;

	spin_lock_bh(&serv->sv_lock);
	if (!list_empty(&serv->sv_threads)) {
		rqstp = list_entry(serv->sv_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: daemon %p woken up.\n", rqstp);
		/*
		svc_serv_dequeue(serv, rqstp);
		rqstp->rq_sock = NULL;
		 */
		wake_up(&rqstp->rq_wait);
	}
	spin_unlock_bh(&serv->sv_lock);
}
/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct socket	*sock = svsk->sk_sock;
	int		slen;
	char		buffer[CMSG_SPACE(sizeof(struct in_pktinfo))];
	struct cmsghdr *cmh = (struct cmsghdr *)buffer;
	struct in_pktinfo *pki = (struct in_pktinfo *)CMSG_DATA(cmh);
	int		len = 0;
	int		result;
	int		size;
	struct page	**ppage = xdr->pages;
	size_t		base = xdr->page_base;
	unsigned int	pglen = xdr->page_len;
	unsigned int	flags = MSG_MORE;

	slen = xdr->len;

	if (rqstp->rq_prot == IPPROTO_UDP) {
		/* set the source and destination */
		struct msghdr	msg;
		msg.msg_name    = &rqstp->rq_addr;
		msg.msg_namelen = sizeof(rqstp->rq_addr);
		msg.msg_iov     = NULL;
		msg.msg_iovlen  = 0;
		msg.msg_flags	= MSG_MORE;

		msg.msg_control = cmh;
		msg.msg_controllen = sizeof(buffer);
		cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		cmh->cmsg_level = SOL_IP;
		cmh->cmsg_type = IP_PKTINFO;
		pki->ipi_ifindex = 0;
		pki->ipi_spec_dst.s_addr = rqstp->rq_daddr;

		if (sock_sendmsg(sock, &msg, 0) < 0)
			goto out;
	}

	/* send head */
	if (slen == xdr->head[0].iov_len)
		flags = 0;
	len = sock->ops->sendpage(sock, rqstp->rq_respages[0], 0, xdr->head[0].iov_len, flags);
	if (len != xdr->head[0].iov_len)
		goto out;
	slen -= xdr->head[0].iov_len;
	if (slen == 0)
		goto out;

	/* send page data */
	size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
	while (pglen > 0) {
		if (slen == size)
			flags = 0;
		result = sock->ops->sendpage(sock, *ppage, base, size, flags);
		if (result > 0)
			len += result;
		if (result != size)
			goto out;
		slen -= size;
		pglen -= size;
		size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
		base = 0;
		ppage++;
	}

	/* send tail */
	if (xdr->tail[0].iov_len) {
		result = sock->ops->sendpage(sock, rqstp->rq_respages[rqstp->rq_restailpage],
					     ((unsigned long)xdr->tail[0].iov_base)& (PAGE_SIZE-1),
					     xdr->tail[0].iov_len, 0);

		if (result > 0)
			len += result;
	}
out:
	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %x)\n",
		rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len, xdr->len, len,
		rqstp->rq_addr.sin_addr.s_addr);

	return len;
}
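
/* For reference, the xdr_buf walked above has the usual sunrpc shape:
 *
 *	head[0]	- first kvec (RPC header and start of the body)
 *	pages[]	- whole pages of bulk payload, delimited by
 *		  page_base/page_len
 *	tail[0]	- trailing kvec (e.g. XDR padding)
 *
 * hence the order: send the head, then each page, then the tail, with
 * MSG_MORE set on everything but the final fragment.
 */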
/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
	mm_segment_t	oldfs;
	struct socket	*sock = svsk->sk_sock;
	int		avail, err;

	oldfs = get_fs(); set_fs(KERNEL_DS);
	err = sock->ops->ioctl(sock, TIOCINQ, (unsigned long) &avail);
	set_fs(oldfs);

	return (err >= 0)? avail : err;
}
/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
	struct msghdr	msg;
	struct socket	*sock;
	int		len, alen;

	rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
	sock = rqstp->rq_sock->sk_sock;

	msg.msg_name    = &rqstp->rq_addr;
	msg.msg_namelen = sizeof(rqstp->rq_addr);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;

	msg.msg_flags	= MSG_DONTWAIT;

	len = kernel_recvmsg(sock, &msg, iov, nr, buflen, MSG_DONTWAIT);

	/* sock_recvmsg doesn't fill in the name/namelen, so we must..
	 * possibly we should cache this in the svc_sock structure
	 * at accept time. FIXME
	 */
	alen = sizeof(rqstp->rq_addr);
	sock->ops->getname(sock, (struct sockaddr *)&rqstp->rq_addr, &alen, 1);

	dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
		rqstp->rq_sock, iov[0].iov_base, iov[0].iov_len, len);

	return len;
}
/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
	mm_segment_t	oldfs;
	oldfs = get_fs(); set_fs(KERNEL_DS);
	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
			(char*)&snd, sizeof(snd));
	sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
			(char*)&rcv, sizeof(rcv));
	set_fs(oldfs);
#else
	/* sock_setsockopt limits use to sysctl_?mem_max,
	 * which isn't acceptable.  Until that is made conditional
	 * on not having CAP_SYS_RESOURCE or similar, we go direct...
	 * DaveM said I could!
	 */
	lock_sock(sock->sk);
	sock->sk->sk_sndbuf = snd * 2;
	sock->sk->sk_rcvbuf = rcv * 2;
	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
	release_sock(sock->sk);
#endif
}
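
/* Note: the "* 2" above mirrors what sock_setsockopt() itself does with
 * SO_SNDBUF/SO_RCVBUF: the kernel doubles the requested size to leave
 * room for sk_buff bookkeeping overhead.
 */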
/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
			svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
			svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
		svc_sock_enqueue(svsk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
		dprintk("RPC svc_write_space: someone sleeping on %p\n",
			sk);
		wake_up_interruptible(sk->sk_sleep);
	}
}
/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	struct sk_buff	*skb;
	int		err, len;

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
		/* udp sockets need large rcvbuf as all pending
		 * requests are still in that buffer.  sndbuf must
		 * also be large enough that there is enough space
		 * for one reply per thread.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_bufsz,
				    (serv->sv_nrthreads+3) * serv->sv_bufsz);

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	clear_bit(SK_DATA, &svsk->sk_flags);
	while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
		if (err == -EAGAIN) {
			svc_sock_received(svsk);
			return err;
		}
		/* possibly an icmp error */
		dprintk("svc: recvfrom returned error %d\n", -err);
	}
	if (skb->tstamp.off_sec == 0) {
		struct timeval tv;

		tv.tv_sec = xtime.tv_sec;
		tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
		skb_set_timestamp(skb, &tv);
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
	set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

	/*
	 * Maybe more packets - kick another thread ASAP.
	 */
	svc_sock_received(svsk);

	len = skb->len - sizeof(struct udphdr);
	rqstp->rq_arg.len = len;

	rqstp->rq_prot = IPPROTO_UDP;

	/* Get sender address */
	rqstp->rq_addr.sin_family = AF_INET;
	rqstp->rq_addr.sin_port = skb->h.uh->source;
	rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr;
	rqstp->rq_daddr = skb->nh.iph->daddr;

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
			local_bh_enable();
			/* checksum error */
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		local_bh_enable();
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb)) {
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		rqstp->rq_skbuff = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_argused += (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
	}

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	return len;
}
static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
	int		error;

	error = svc_sendto(rqstp, &rqstp->rq_res);
	if (error == -ECONNREFUSED)
		/* ICMP error on earlier request. */
		error = svc_sendto(rqstp, &rqstp->rq_res);

	return error;
}
static void
svc_udp_init(struct svc_sock *svsk)
{
	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;
	svsk->sk_recvfrom = svc_udp_recvfrom;
	svsk->sk_sendto = svc_udp_sendto;

	/* initial setting: must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
			    3 * svsk->sk_server->sv_bufsz,
			    3 * svsk->sk_server->sv_bufsz);

	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
	set_bit(SK_CHNGBUF, &svsk->sk_flags);
}
/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (listen) state change %d\n",
		sk, sk->sk_state);

	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of the child sockets becomes ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(SK_CONN, &svsk->sk_flags);
			svc_sock_enqueue(svsk);
		} else
			printk("svc: socket %p: no user data\n", sk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
		sk, sk->sk_state, sk->sk_user_data);

	if (!svsk)
		printk("svc: socket %p: no user data\n", sk);
	else {
		set_bit(SK_CLOSE, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
static void
svc_tcp_data_ready(struct sock *sk, int count)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
		sk, sk->sk_user_data);
	if (svsk) {
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
/*
 * Accept a TCP connection
 */
static void
svc_tcp_accept(struct svc_sock *svsk)
{
	struct sockaddr_in sin;
	struct svc_serv	*serv = svsk->sk_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	const struct proto_ops *ops;
	struct svc_sock	*newsvsk;
	int		err, slen;

	dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
	if (!sock)
		return;

	err = sock_create_lite(PF_INET, SOCK_STREAM, IPPROTO_TCP, &newsock);
	if (err) {
		if (err == -ENOMEM)
			printk(KERN_WARNING "%s: no more sockets!\n",
			       serv->sv_name);
		return;
	}

	dprintk("svc: tcp_accept %p allocated\n", newsock);
	newsock->ops = ops = sock->ops;

	clear_bit(SK_CONN, &svsk->sk_flags);
	if ((err = ops->accept(sock, newsock, O_NONBLOCK)) < 0) {
		if (err != -EAGAIN && net_ratelimit())
			printk(KERN_WARNING "%s: accept failed (err %d)!\n",
			       serv->sv_name, -err);
		goto failed;		/* aborted connection or whatever */
	}
	set_bit(SK_CONN, &svsk->sk_flags);
	svc_sock_enqueue(svsk);

	slen = sizeof(sin);
	err = ops->getname(newsock, (struct sockaddr *) &sin, &slen, 1);
	if (err < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: peername failed (err %d)!\n",
			       serv->sv_name, -err);
		goto failed;		/* aborted connection or whatever */
	}

	/* Ideally, we would want to reject connections from unauthorized
	 * hosts here, but when we get encryption, the IP of the host won't
	 * tell us anything.  For now just warn about unpriv connections.
	 */
	if (ntohs(sin.sin_port) >= 1024) {
		dprintk(KERN_WARNING
			"%s: connect from unprivileged port: %u.%u.%u.%u:%d\n",
			serv->sv_name,
			NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
	}

	dprintk("%s: connect from %u.%u.%u.%u:%04x\n", serv->sv_name,
		NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0)))
		goto failed;

	/* make sure that we don't have too many active connections.
	 * If we have, something must be dropped.
	 *
	 * There's no point in trying to do random drop here for
	 * DoS prevention. The NFS client does one reconnect in 15
	 * seconds. An attacker can easily beat that.
	 *
	 * The only somewhat efficient mechanism would be to drop
	 * old connections from the same IP first. But right now
	 * we don't even record the client IP in svc_sock.
	 */
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
		struct svc_sock *svsk = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open TCP "
				       "sockets, consider increasing the "
				       "number of nfsd threads\n",
				       serv->sv_name);
				printk(KERN_NOTICE "%s: last TCP connect from "
				       "%u.%u.%u.%u:%d\n",
				       serv->sv_name,
				       NIPQUAD(sin.sin_addr.s_addr),
				       ntohs(sin.sin_port));
			}
			/*
			 * Always select the oldest socket. It's not fair,
			 * but so is life
			 */
			svsk = list_entry(serv->sv_tempsocks.prev,
					  struct svc_sock,
					  sk_list);
			set_bit(SK_CLOSE, &svsk->sk_flags);
			svsk->sk_inuse++;
		}
		spin_unlock_bh(&serv->sv_lock);

		if (svsk) {
			svc_sock_enqueue(svsk);
			svc_sock_put(svsk);
		}
	}

	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return;

failed:
	sock_release(newsock);
	return;
}
/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	int		len;
	struct kvec vec[RPCSVC_MAXPAGES];
	int pnum, vlen;

	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
		svsk, test_bit(SK_DATA, &svsk->sk_flags),
		test_bit(SK_CONN, &svsk->sk_flags),
		test_bit(SK_CLOSE, &svsk->sk_flags));

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	if (svsk->sk_sk->sk_state == TCP_LISTEN) {
		svc_tcp_accept(svsk);
		svc_sock_received(svsk);
		return 0;
	}

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
		/* sndbuf needs to have room for one request
		 * per thread, otherwise we can stall even when the
		 * network isn't a bottleneck.
		 * rcvbuf just needs to be able to hold a few requests.
		 * Normally they will be removed from the queue
		 * as soon as a complete request arrives.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_bufsz,
				    3 * serv->sv_bufsz);

	clear_bit(SK_DATA, &svsk->sk_flags);

	/* Receive data. If we haven't got the record length yet, get
	 * the next four bytes. Otherwise try to gobble up as much as
	 * possible up to the complete record length.
	 */
	if (svsk->sk_tcplen < 4) {
		unsigned long	want = 4 - svsk->sk_tcplen;
		struct kvec	iov;

		iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
		iov.iov_len  = want;
		if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
			goto error;
		svsk->sk_tcplen += len;

		if (len < want) {
			dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
				len, want);
			svc_sock_received(svsk);
			return -EAGAIN; /* record header not complete */
		}

		svsk->sk_reclen = ntohl(svsk->sk_reclen);
		if (!(svsk->sk_reclen & 0x80000000)) {
			/* FIXME: technically, a record can be fragmented,
			 *  and non-terminal fragments will not have the top
			 *  bit set in the fragment length header.
			 *  But apparently no known nfs clients send fragmented
			 *  records. */
			printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (non-terminal)\n",
			       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
		svsk->sk_reclen &= 0x7fffffff;
		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
		if (svsk->sk_reclen > serv->sv_bufsz) {
			printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n",
			       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
	}

	/* Check whether enough data is available */
	len = svc_recv_available(svsk);
	if (len < 0)
		goto error;

	if (len < svsk->sk_reclen) {
		dprintk("svc: incomplete TCP record (%d of %d)\n",
			len, svsk->sk_reclen);
		svc_sock_received(svsk);
		return -EAGAIN;	/* record not complete */
	}
	len = svsk->sk_reclen;
	set_bit(SK_DATA, &svsk->sk_flags);

	vec[0] = rqstp->rq_arg.head[0];
	vlen = PAGE_SIZE;
	pnum = 1;
	while (vlen < len) {
		vec[pnum].iov_base = page_address(rqstp->rq_argpages[rqstp->rq_argused++]);
		vec[pnum].iov_len = PAGE_SIZE;
		pnum++;
		vlen += PAGE_SIZE;
	}

	/* Now receive data */
	len = svc_recvfrom(rqstp, vec, pnum, len);
	if (len < 0)
		goto error;

	dprintk("svc: TCP complete record (%d bytes)\n", len);
	rqstp->rq_arg.len = len;
	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
	}

	rqstp->rq_skbuff = NULL;
	rqstp->rq_prot = IPPROTO_TCP;

	/* Reset TCP read info */
	svsk->sk_reclen = 0;
	svsk->sk_tcplen = 0;

	svc_sock_received(svsk);
	if (serv->sv_stats)
		serv->sv_stats->nettcpcnt++;

	return len;

err_delete:
	svc_delete_socket(svsk);
	return -EAGAIN;

error:
	if (len == -EAGAIN) {
		dprintk("RPC: TCP recvfrom got EAGAIN\n");
		svc_sock_received(svsk);
	} else {
		printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
		       svsk->sk_server->sv_name, -len);
		svc_delete_socket(svsk);
	}

	return len;
}
/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
	struct xdr_buf	*xbufp = &rqstp->rq_res;
	int sent;
	u32 reclen;

	/* Set up the first element of the reply kvec.
	 * Any other kvecs that may be in use have been taken
	 * care of by the server implementation itself.
	 */
	reclen = htonl(0x80000000|((xbufp->len ) - 4));
	memcpy(xbufp->head[0].iov_base, &reclen, 4);
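
	/* Worked example: a 104-byte reply (including these four marker
	 * bytes) yields reclen = htonl(0x80000000 | 100) = htonl(0x80000064);
	 * the top bit flags the final (here: only) fragment of the record,
	 * per RFC 1831 record marking.
	 */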

	if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
		return -ENOTCONN;

	sent = svc_sendto(rqstp, &rqstp->rq_res);
	if (sent != xbufp->len) {
		printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
		       rqstp->rq_sock->sk_server->sv_name,
		       (sent<0)?"got error":"sent only",
		       sent, xbufp->len);
		svc_delete_socket(rqstp->rq_sock);
		sent = -EAGAIN;
	}
	return sent;
}
static void
svc_tcp_init(struct svc_sock *svsk)
{
	struct sock	*sk = svsk->sk_sk;
	struct tcp_sock *tp = tcp_sk(sk);

	svsk->sk_recvfrom = svc_tcp_recvfrom;
	svsk->sk_sendto = svc_tcp_sendto;

	if (sk->sk_state == TCP_LISTEN) {
		dprintk("setting up TCP socket for listening\n");
		sk->sk_data_ready = svc_tcp_listen_data_ready;
		set_bit(SK_CONN, &svsk->sk_flags);
	} else {
		dprintk("setting up TCP socket for reading\n");
		sk->sk_state_change = svc_tcp_state_change;
		sk->sk_data_ready = svc_tcp_data_ready;
		sk->sk_write_space = svc_write_space;

		svsk->sk_reclen = 0;
		svsk->sk_tcplen = 0;

		tp->nonagle = 1;	/* disable Nagle's algorithm */

		/* initial setting: must have enough space to
		 * receive and respond to one request.
		 * svc_tcp_recvfrom will re-adjust if necessary
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    3 * svsk->sk_server->sv_bufsz,
				    3 * svsk->sk_server->sv_bufsz);

		set_bit(SK_CHNGBUF, &svsk->sk_flags);
		set_bit(SK_DATA, &svsk->sk_flags);
		if (sk->sk_state != TCP_ESTABLISHED)
			set_bit(SK_CLOSE, &svsk->sk_flags);
	}
}
void
svc_sock_update_bufs(struct svc_serv *serv)
{
	/*
	 * The number of server threads has changed. Update
	 * rcvbuf and sndbuf accordingly on all sockets
	 */
	struct list_head *le;

	spin_lock_bh(&serv->sv_lock);
	list_for_each(le, &serv->sv_permsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	list_for_each(le, &serv->sv_tempsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);
}
/*
 * Receive the next request on any socket.
 */
int
svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout)
{
	struct svc_sock		*svsk = NULL;
	int			len;
	int			pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_sock)
		printk(KERN_ERR
		       "svc_recv: service %p, socket not NULL!\n",
		       rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
		       "svc_recv: service %p, wait queue active!\n",
		       rqstp);

	/* Initialize the buffers */
	/* first reclaim pages that were moved to response list */
	svc_pushback_allpages(rqstp);

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = 2 + (serv->sv_bufsz + PAGE_SIZE - 1) / PAGE_SIZE;
	while (rqstp->rq_arghi < pages) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p) {
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			continue;
		}
		rqstp->rq_argpages[rqstp->rq_arghi++] = p;
	}

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_argpages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	rqstp->rq_argused = 1;
	arg->pages = rqstp->rq_argpages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled())
		return -EINTR;

	spin_lock_bh(&serv->sv_lock);
	if (!list_empty(&serv->sv_tempsocks)) {
		svsk = list_entry(serv->sv_tempsocks.next,
				  struct svc_sock, sk_list);
		/* apparently the "standard" is that clients close
		 * idle connections after 5 minutes, servers after
		 * 6 minutes
		 *   http://www.connectathon.org/talks96/nfstcp.pdf
		 */
		if (get_seconds() - svsk->sk_lastrecv < 6*60
		    || test_bit(SK_BUSY, &svsk->sk_flags))
			svsk = NULL;
	}
	if (svsk) {
		set_bit(SK_BUSY, &svsk->sk_flags);
		set_bit(SK_CLOSE, &svsk->sk_flags);
		rqstp->rq_sock = svsk;
		svsk->sk_inuse++;
	} else if ((svsk = svc_sock_dequeue(serv)) != NULL) {
		rqstp->rq_sock = svsk;
		svsk->sk_inuse++;
		rqstp->rq_reserved = serv->sv_bufsz;
		svsk->sk_reserved += rqstp->rq_reserved;
	} else {
		/* No data pending. Go to sleep */
		svc_serv_enqueue(serv, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&serv->sv_lock);

		schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&serv->sv_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		if (!(svsk = rqstp->rq_sock)) {
			svc_serv_dequeue(serv, rqstp);
			spin_unlock_bh(&serv->sv_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			return signalled()? -EINTR : -EAGAIN;
		}
	}
	spin_unlock_bh(&serv->sv_lock);

	dprintk("svc: server %p, socket %p, inuse=%d\n",
		rqstp, svsk, svsk->sk_inuse);
	len = svsk->sk_recvfrom(rqstp);
	dprintk("svc: got len=%d\n", len);

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_sock_release(rqstp);
		return -EAGAIN;
	}
	svsk->sk_lastrecv = get_seconds();
	if (test_bit(SK_TEMP, &svsk->sk_flags)) {
		/* push active sockets to end of list */
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&svsk->sk_list))
			list_move_tail(&svsk->sk_list, &serv->sv_tempsocks);
		spin_unlock_bh(&serv->sv_lock);
	}

	rqstp->rq_secure = ntohs(rqstp->rq_addr.sin_port) < 1024;
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}
/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
	svc_sock_release(rqstp);
}
/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk;
	int		len;
	struct xdr_buf	*xb;

	if ((svsk = rqstp->rq_sock) == NULL) {
		printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
		       __FILE__, __LINE__);
		return -EFAULT;
	}

	/* release the receive skb before sending the reply */
	svc_release_skb(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab svsk->sk_mutex to serialize outgoing data. */
	mutex_lock(&svsk->sk_mutex);
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		len = -ENOTCONN;
	else
		len = svsk->sk_sendto(rqstp);
	mutex_unlock(&svsk->sk_mutex);
	svc_sock_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}
/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *
svc_setup_socket(struct svc_serv *serv, struct socket *sock,
		 int *errp, int pmap_register)
{
	struct svc_sock	*svsk;
	struct sock	*inet;

	dprintk("svc: svc_setup_socket %p\n", sock);
	if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
		*errp = -ENOMEM;
		return NULL;
	}

	inet = sock->sk;

	/* Register socket with portmapper */
	if (*errp >= 0 && pmap_register)
		*errp = svc_register(serv, inet->sk_protocol,
				     ntohs(inet_sk(inet)->sport));

	if (*errp < 0) {
		kfree(svsk);
		return NULL;
	}

	set_bit(SK_BUSY, &svsk->sk_flags);
	inet->sk_user_data = svsk;
	svsk->sk_sock = sock;
	svsk->sk_sk = inet;
	svsk->sk_ostate = inet->sk_state_change;
	svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;
	svsk->sk_server = serv;
	svsk->sk_lastrecv = get_seconds();
	INIT_LIST_HEAD(&svsk->sk_deferred);
	INIT_LIST_HEAD(&svsk->sk_ready);
	mutex_init(&svsk->sk_mutex);

	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk);
	else
		svc_tcp_init(svsk);

	spin_lock_bh(&serv->sv_lock);
	if (!pmap_register) {
		set_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_tempsocks);
		serv->sv_tmpcnt++;
	} else {
		clear_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_permsocks);
	}
	spin_unlock_bh(&serv->sv_lock);

	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
		svsk, svsk->sk_sk);

	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
	return svsk;
}
/*
 * Create socket for RPC service.
 */
static int
svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
{
	struct svc_sock	*svsk;
	struct socket	*sock;
	int		error;
	int		type;

	dprintk("svc: svc_create_socket(%s, %d, %u.%u.%u.%u:%d)\n",
		serv->sv_program->pg_name, protocol,
		NIPQUAD(sin->sin_addr.s_addr),
		ntohs(sin->sin_port));

	if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
		printk(KERN_WARNING "svc: only UDP and TCP "
		       "sockets supported\n");
		return -EINVAL;
	}
	type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

	if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
		return error;

	svc_reclassify_socket(sock);

	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = 1; /* allow address reuse */
	error = sock->ops->bind(sock, (struct sockaddr *) sin,
				sizeof(*sin));
	if (error < 0)
		goto bummer;

	if (protocol == IPPROTO_TCP) {
		if ((error = sock->ops->listen(sock, 64)) < 0)
			goto bummer;
	}

	if ((svsk = svc_setup_socket(serv, sock, &error, 1)) != NULL)
		return 0;

bummer:
	dprintk("svc: svc_create_socket error = %d\n", -error);
	sock_release(sock);
	return error;
}
/*
 * Remove a dead socket
 */
void
svc_delete_socket(struct svc_sock *svsk)
{
	struct svc_serv	*serv;
	struct sock	*sk;

	dprintk("svc: svc_delete_socket(%p)\n", svsk);

	serv = svsk->sk_server;
	sk = svsk->sk_sk;

	sk->sk_state_change = svsk->sk_ostate;
	sk->sk_data_ready = svsk->sk_odata;
	sk->sk_write_space = svsk->sk_owspace;

	spin_lock_bh(&serv->sv_lock);

	list_del_init(&svsk->sk_list);
	list_del_init(&svsk->sk_ready);
	if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags))
		if (test_bit(SK_TEMP, &svsk->sk_flags))
			serv->sv_tmpcnt--;

	if (!svsk->sk_inuse) {
		spin_unlock_bh(&serv->sv_lock);
		sock_release(svsk->sk_sock);
		kfree(svsk);
	} else {
		spin_unlock_bh(&serv->sv_lock);
		dprintk(KERN_NOTICE "svc: server socket destroy delayed\n");
		/* svsk->sk_server = NULL; */
	}
}
/*
 * Make a socket for nfsd and lockd
 */
int
svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
{
	struct sockaddr_in sin;

	dprintk("svc: creating socket proto = %d\n", protocol);
	sin.sin_family      = AF_INET;
	sin.sin_addr.s_addr = INADDR_ANY;
	sin.sin_port        = htons(port);
	return svc_create_socket(serv, protocol, &sin);
}
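
/* Typical use (sketch; the real callers are nfsd and lockd, whose error
 * handling differs):
 *
 *	err = svc_makesock(serv, IPPROTO_UDP, port);
 *	if (err < 0)
 *		goto failed;
 *
 * This binds a wildcard INADDR_ANY socket and, via svc_create_socket(),
 * registers it with the portmapper (pmap_register = 1).
 */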
/*
 * Handle defer and revisit of requests
 */
static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
	struct svc_serv *serv = dreq->owner;
	struct svc_sock *svsk;

	if (too_many) {
		svc_sock_put(dr->svsk);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	svsk = dr->svsk;
	dr->svsk = NULL;
	spin_lock_bh(&serv->sv_lock);
	list_add(&dr->handle.recent, &svsk->sk_deferred);
	spin_unlock_bh(&serv->sv_lock);
	set_bit(SK_DEFERRED, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
	svc_sock_put(svsk);
}
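
/* Lifecycle of the deferral machinery, as a sketch:
 *
 *	svc_defer()            - copy the request, pin the socket
 *	                         (sk_inuse++), hand the copy to the cache
 *	svc_revisit()          - cache ready (or too_many): requeue the
 *	                         copy on svsk->sk_deferred, or free it
 *	svc_deferred_dequeue() - a later receive pass picks the copy up
 *	svc_deferred_recv()    - replays the saved arguments into rqstp
 */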
static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		/* FIXME maybe discard if size too large */
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		dr->addr = rqstp->rq_addr;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
	}
	spin_lock_bh(&rqstp->rq_server->sv_lock);
	rqstp->rq_sock->sk_inuse++;
	dr->svsk = rqstp->rq_sock;
	spin_unlock_bh(&rqstp->rq_server->sv_lock);

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}
/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	rqstp->rq_arg.head[0].iov_base = dr->args;
	rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
	rqstp->rq_arg.page_len = 0;
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	rqstp->rq_addr = dr->addr;
	rqstp->rq_daddr = dr->daddr;
	return dr->argslen<<2;
}
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
	struct svc_deferred_req *dr = NULL;
	struct svc_serv	*serv = svsk->sk_server;

	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
		return NULL;
	spin_lock_bh(&serv->sv_lock);
	clear_bit(SK_DEFERRED, &svsk->sk_flags);
	if (!list_empty(&svsk->sk_deferred)) {
		dr = list_entry(svsk->sk_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(SK_DEFERRED, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);
	return dr;
}