/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
/* SMP locking strategy:
 *
 *	svc_serv->sv_lock protects most of the state for that service.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	SK_BUSY  can be set to 0 at any time.
 *		svc_sock_enqueue must be called afterwards.
 *	SK_CONN, SK_DATA can be set or cleared at any time.
 *		After a set, svc_sock_enqueue must be called.
 *		After a clear, the socket must be read/accepted;
 *		if this succeeds, the flag must be set again.
 *	SK_CLOSE can be set at any time. It is never cleared.
 */

#define RPCDBG_FACILITY	RPCDBG_SVCSOCK
static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
                                         int *errp, int pmap_reg);
static void svc_udp_data_ready(struct sock *, int);
static int svc_udp_recvfrom(struct svc_rqst *);
static int svc_udp_sendto(struct svc_rqst *);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
/*
 * Queue up an idle server thread. Must have serv->sv_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_serv_enqueue(struct svc_serv *serv, struct svc_rqst *rqstp)
{
        list_add(&rqstp->rq_list, &serv->sv_threads);
}
/*
 * Dequeue an nfsd thread. Must have serv->sv_lock held.
 */
static inline void
svc_serv_dequeue(struct svc_serv *serv, struct svc_rqst *rqstp)
{
        list_del(&rqstp->rq_list);
}
/*
 * Release an skbuff after use
 */
static void
svc_release_skb(struct svc_rqst *rqstp)
{
        struct sk_buff *skb = rqstp->rq_skbuff;
        struct svc_deferred_req *dr = rqstp->rq_deferred;

        if (skb) {
                rqstp->rq_skbuff = NULL;

                dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
                skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
        }
        if (dr) {
                rqstp->rq_deferred = NULL;
                kfree(dr);
        }
}
/*
 * Any space to write?
 */
static inline unsigned long
svc_sock_wspace(struct svc_sock *svsk)
{
        int wspace;

        if (svsk->sk_sock->type == SOCK_STREAM)
                wspace = sk_stream_wspace(svsk->sk_sk);
        else
                wspace = sock_wspace(svsk->sk_sk);

        return wspace;
}
/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
        struct svc_serv *serv = svsk->sk_server;
        struct svc_rqst *rqstp;

        if (!(svsk->sk_flags &
              ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
                return;
        if (test_bit(SK_DEAD, &svsk->sk_flags))
                return;

        spin_lock_bh(&serv->sv_lock);

        if (!list_empty(&serv->sv_threads) &&
            !list_empty(&serv->sv_sockets))
                printk(KERN_ERR
                       "svc_sock_enqueue: threads and sockets both waiting??\n");

        if (test_bit(SK_DEAD, &svsk->sk_flags)) {
                /* Don't enqueue dead sockets */
                dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
                goto out_unlock;
        }

        if (test_bit(SK_BUSY, &svsk->sk_flags)) {
                /* Don't enqueue a socket while a daemon is receiving from it */
                dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
                goto out_unlock;
        }

        set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
        if (((svsk->sk_reserved + serv->sv_bufsz)*2
             > svc_sock_wspace(svsk))
            && !test_bit(SK_CLOSE, &svsk->sk_flags)
            && !test_bit(SK_CONN, &svsk->sk_flags)) {
                /* Don't enqueue while there is not enough space for a reply */
                dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
                        svsk->sk_sk, svsk->sk_reserved+serv->sv_bufsz,
                        svc_sock_wspace(svsk));
                goto out_unlock;
        }
        clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);

        /* Mark socket as busy. It will remain in this state until the
         * server has processed all pending data and put the socket back
         * on the idle list.
         */
        set_bit(SK_BUSY, &svsk->sk_flags);

        if (!list_empty(&serv->sv_threads)) {
                rqstp = list_entry(serv->sv_threads.next,
                                   struct svc_rqst,
                                   rq_list);
                dprintk("svc: socket %p served by daemon %p\n",
                        svsk->sk_sk, rqstp);
                svc_serv_dequeue(serv, rqstp);
                if (rqstp->rq_sock)
                        printk(KERN_ERR
                               "svc_sock_enqueue: server %p, rq_sock=%p!\n",
                               rqstp, rqstp->rq_sock);
                rqstp->rq_sock = svsk;
                svsk->sk_inuse++;
                rqstp->rq_reserved = serv->sv_bufsz;
                svsk->sk_reserved += rqstp->rq_reserved;
                wake_up(&rqstp->rq_wait);
        } else {
                dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
                list_add_tail(&svsk->sk_ready, &serv->sv_sockets);
        }

out_unlock:
        spin_unlock_bh(&serv->sv_lock);
}
/*
 * Dequeue the first socket. Must be called with the serv->sv_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_serv *serv)
{
        struct svc_sock *svsk;

        if (list_empty(&serv->sv_sockets))
                return NULL;

        svsk = list_entry(serv->sv_sockets.next,
                          struct svc_sock, sk_ready);
        list_del_init(&svsk->sk_ready);

        dprintk("svc: socket %p dequeued, inuse=%d\n",
                svsk->sk_sk, svsk->sk_inuse);

        return svsk;
}
/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
        clear_bit(SK_BUSY, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
}
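
/*
 * Illustrative sketch, not part of svcsock.c proper: the canonical
 * pattern the locking rules at the top of this file describe. A
 * notification path sets a flag and then calls svc_sock_enqueue(); a
 * reader clears SK_DATA before reading and sets it again only if the
 * read found something, as svc_udp_recvfrom() and svc_tcp_recvfrom()
 * do below.
 */
static inline void
svc_sock_mark_data(struct svc_sock *svsk)       /* hypothetical helper */
{
        set_bit(SK_DATA, &svsk->sk_flags);      /* after a set ...           */
        svc_sock_enqueue(svsk);                 /* ... enqueue must follow   */
}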
/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits. This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
        space += rqstp->rq_res.head[0].iov_len;

        if (space < rqstp->rq_reserved) {
                struct svc_sock *svsk = rqstp->rq_sock;
                spin_lock_bh(&svsk->sk_server->sv_lock);
                svsk->sk_reserved -= (rqstp->rq_reserved - space);
                rqstp->rq_reserved = space;
                spin_unlock_bh(&svsk->sk_server->sv_lock);

                svc_sock_enqueue(svsk);
        }
}
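
/*
 * Illustrative sketch, not part of svcsock.c proper: how a service
 * might call svc_reserve() once it knows its reply will be small.
 * The 512-byte bound is an invented example, not taken from any real
 * caller.
 */
static inline void
example_shrink_reservation(struct svc_rqst *rqstp)      /* hypothetical */
{
        /* the reply needs at most 512 more bytes beyond rq_res.head[0];
         * return the rest so writes on this socket aren't throttled */
        svc_reserve(rqstp, 512);
}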
/*
 * Release a socket after use.
 */
static inline void
svc_sock_put(struct svc_sock *svsk)
{
        struct svc_serv *serv = svsk->sk_server;

        spin_lock_bh(&serv->sv_lock);
        if (!--(svsk->sk_inuse) && test_bit(SK_DEAD, &svsk->sk_flags)) {
                spin_unlock_bh(&serv->sv_lock);
                dprintk("svc: releasing dead socket\n");
                sock_release(svsk->sk_sock);
                kfree(svsk);
        } else
                spin_unlock_bh(&serv->sv_lock);
}
static void
svc_sock_release(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk = rqstp->rq_sock;

        svc_release_skb(rqstp);

        svc_free_allpages(rqstp);
        rqstp->rq_res.page_len = 0;
        rqstp->rq_res.page_base = 0;

        /* Reset response buffer and release
         * the reservation.
         * But first, check that enough space was reserved
         * for the reply, otherwise we have a bug!
         */
        if ((rqstp->rq_res.len) > rqstp->rq_reserved)
                printk(KERN_ERR "RPC request reserved %d but used %d\n",
                       rqstp->rq_reserved,
                       rqstp->rq_res.len);

        rqstp->rq_res.head[0].iov_len = 0;
        svc_reserve(rqstp, 0);
        rqstp->rq_sock = NULL;

        svc_sock_put(svsk);
}
/*
 * External function to wake up a server waiting for data
 */
void
svc_wake_up(struct svc_serv *serv)
{
        struct svc_rqst *rqstp;

        spin_lock_bh(&serv->sv_lock);
        if (!list_empty(&serv->sv_threads)) {
                rqstp = list_entry(serv->sv_threads.next,
                                   struct svc_rqst,
                                   rq_list);
                dprintk("svc: daemon %p woken up.\n", rqstp);
                /*
                svc_serv_dequeue(serv, rqstp);
                rqstp->rq_sock = NULL;
                 */
                wake_up(&rqstp->rq_wait);
        }
        spin_unlock_bh(&serv->sv_lock);
}
/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
        struct svc_sock *svsk = rqstp->rq_sock;
        struct socket *sock = svsk->sk_sock;
        int slen;
        char buffer[CMSG_SPACE(sizeof(struct in_pktinfo))];
        struct cmsghdr *cmh = (struct cmsghdr *)buffer;
        struct in_pktinfo *pki = (struct in_pktinfo *)CMSG_DATA(cmh);
        int len = 0;
        int result;
        int size;
        struct page **ppage = xdr->pages;
        size_t base = xdr->page_base;
        unsigned int pglen = xdr->page_len;
        unsigned int flags = MSG_MORE;

        slen = xdr->len;

        if (rqstp->rq_prot == IPPROTO_UDP) {
                /* set the source and destination */
                struct msghdr msg;
                msg.msg_name    = &rqstp->rq_addr;
                msg.msg_namelen = sizeof(rqstp->rq_addr);
                msg.msg_iov     = NULL;
                msg.msg_iovlen  = 0;
                msg.msg_flags   = MSG_MORE;

                msg.msg_control = cmh;
                msg.msg_controllen = sizeof(buffer);
                cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
                cmh->cmsg_level = SOL_IP;
                cmh->cmsg_type = IP_PKTINFO;
                pki->ipi_ifindex = 0;
                pki->ipi_spec_dst.s_addr = rqstp->rq_daddr;

                if (sock_sendmsg(sock, &msg, 0) < 0)
                        goto out;
        }

        /* send head */
        if (slen == xdr->head[0].iov_len)
                flags = 0;
        len = sock->ops->sendpage(sock, rqstp->rq_respages[0], 0, xdr->head[0].iov_len, flags);
        if (len != xdr->head[0].iov_len)
                goto out;
        slen -= xdr->head[0].iov_len;
        if (slen == 0)
                goto out;

        /* send page data */
        size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
        while (pglen > 0) {
                if (slen == size)
                        flags = 0;
                result = sock->ops->sendpage(sock, *ppage, base, size, flags);
                if (result > 0)
                        len += result;
                if (result != size)
                        goto out;
                slen -= size;
                pglen -= size;
                size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
                base = 0;
                ppage++;
        }
        /* send tail */
        if (xdr->tail[0].iov_len) {
                result = sock->ops->sendpage(sock, rqstp->rq_respages[rqstp->rq_restailpage],
                                             ((unsigned long)xdr->tail[0].iov_base) & (PAGE_SIZE-1),
                                             xdr->tail[0].iov_len, 0);
                if (result > 0)
                        len += result;
        }
out:
        dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %x)\n",
                rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len, xdr->len, len,
                rqstp->rq_addr.sin_addr.s_addr);

        return len;
}
/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
        mm_segment_t oldfs;
        struct socket *sock = svsk->sk_sock;
        int avail, err;

        oldfs = get_fs(); set_fs(KERNEL_DS);
        err = sock->ops->ioctl(sock, TIOCINQ, (unsigned long) &avail);
        set_fs(oldfs);

        return (err >= 0) ? avail : err;
}
/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct iovec *iov, int nr, int buflen)
{
        mm_segment_t oldfs;
        struct msghdr msg;
        struct socket *sock;
        int len, alen;

        rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
        sock = rqstp->rq_sock->sk_sock;

        msg.msg_name    = &rqstp->rq_addr;
        msg.msg_namelen = sizeof(rqstp->rq_addr);
        msg.msg_iov     = iov;
        msg.msg_iovlen  = nr;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;

        msg.msg_flags   = MSG_DONTWAIT;

        oldfs = get_fs(); set_fs(KERNEL_DS);
        len = sock_recvmsg(sock, &msg, buflen, MSG_DONTWAIT);
        set_fs(oldfs);

        /* sock_recvmsg doesn't fill in the name/namelen, so we must..
         * possibly we should cache this in the svc_sock structure
         * at accept time. FIXME
         */
        alen = sizeof(rqstp->rq_addr);
        sock->ops->getname(sock, (struct sockaddr *)&rqstp->rq_addr, &alen, 1);

        dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
                rqstp->rq_sock, iov[0].iov_base, iov[0].iov_len, len);

        return len;
}
/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
        mm_segment_t oldfs;
        oldfs = get_fs(); set_fs(KERNEL_DS);
        sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
                        (char*)&snd, sizeof(snd));
        sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
                        (char*)&rcv, sizeof(rcv));
        set_fs(oldfs);
#else
        /* sock_setsockopt limits use to sysctl_?mem_max,
         * which isn't acceptable. Until that is made conditional
         * on not having CAP_SYS_RESOURCE or similar, we go direct...
         * DaveM said I could!
         */
        lock_sock(sock->sk);
        sock->sk->sk_sndbuf = snd * 2;
        sock->sk->sk_rcvbuf = rcv * 2;
        sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
        release_sock(sock->sk);
#endif
}
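
/*
 * Illustrative sketch, not part of svcsock.c proper: the sizing policy
 * used by the callers below. svc_udp_recvfrom() and svc_tcp_recvfrom()
 * request (sv_nrthreads+3) * sv_bufsz bytes, and the direct assignment
 * above doubles the requested value, just as sock_setsockopt() does for
 * SO_SNDBUF/SO_RCVBUF, to leave room for sk_buff bookkeeping overhead.
 */
static inline unsigned int
svc_bufsize_for(struct svc_serv *serv)  /* hypothetical helper */
{
        /* e.g. 8 threads with 4096-byte buffers: (8+3)*4096 = 45056 */
        return (serv->sv_nrthreads + 3) * serv->sv_bufsz;
}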
/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
        struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);

        if (svsk) {
                dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
                        svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
                set_bit(SK_DATA, &svsk->sk_flags);
                svc_sock_enqueue(svsk);
        }
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);
}
/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
        struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);

        if (svsk) {
                dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
                        svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
                svc_sock_enqueue(svsk);
        }

        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
                printk(KERN_WARNING "RPC svc_write_space: someone sleeping on %p\n",
                       svsk);
                wake_up_interruptible(sk->sk_sleep);
        }
}
/*
 * Receive a datagram from a UDP socket.
 */
extern int
csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb);
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk = rqstp->rq_sock;
        struct svc_serv *serv = svsk->sk_server;
        struct sk_buff *skb;
        int err, len;

        if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
                /* udp sockets need a large rcvbuf as all pending
                 * requests are still in that buffer. sndbuf must
                 * also be large enough that there is enough space
                 * for one reply per thread.
                 */
                svc_sock_setbufsize(svsk->sk_sock,
                                    (serv->sv_nrthreads+3) * serv->sv_bufsz,
                                    (serv->sv_nrthreads+3) * serv->sv_bufsz);

        if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
                svc_sock_received(svsk);
                return svc_deferred_recv(rqstp);
        }

        clear_bit(SK_DATA, &svsk->sk_flags);
        while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
                if (err == -EAGAIN) {
                        svc_sock_received(svsk);
                        return err;
                }
                /* possibly an icmp error */
                dprintk("svc: recvfrom returned error %d\n", -err);
        }
        if (skb->stamp.tv_sec == 0) {
                skb->stamp.tv_sec = xtime.tv_sec;
                skb->stamp.tv_usec = xtime.tv_nsec / 1000;
                /* Don't enable netstamp, sunrpc doesn't
                   need that much accuracy */
        }
        svsk->sk_sk->sk_stamp = skb->stamp;
        set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

        /*
         * Maybe more packets - kick another thread ASAP.
         */
        svc_sock_received(svsk);

        len = skb->len - sizeof(struct udphdr);
        rqstp->rq_arg.len = len;

        rqstp->rq_prot = IPPROTO_UDP;

        /* Get sender address */
        rqstp->rq_addr.sin_family = AF_INET;
        rqstp->rq_addr.sin_port = skb->h.uh->source;
        rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr;
        rqstp->rq_daddr = skb->nh.iph->daddr;

        if (skb_is_nonlinear(skb)) {
                /* we have to copy */
                if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
                        /* checksum error */
                        skb_free_datagram(svsk->sk_sk, skb);
                        return 0;
                }
                skb_free_datagram(svsk->sk_sk, skb);
        } else {
                /* we can use it in-place */
                rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
                rqstp->rq_arg.head[0].iov_len = len;
                if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
                        if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
                                skb_free_datagram(svsk->sk_sk, skb);
                                return 0;
                        }
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                }
                rqstp->rq_skbuff = skb;
        }

        rqstp->rq_arg.page_base = 0;
        if (len <= rqstp->rq_arg.head[0].iov_len) {
                rqstp->rq_arg.head[0].iov_len = len;
                rqstp->rq_arg.page_len = 0;
        } else {
                rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
                rqstp->rq_argused += (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
        }

        if (serv->sv_stats)
                serv->sv_stats->netudpcnt++;

        return len;
}
static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
        int error;

        error = svc_sendto(rqstp, &rqstp->rq_res);
        if (error == -ECONNREFUSED)
                /* ICMP error on earlier request. */
                error = svc_sendto(rqstp, &rqstp->rq_res);

        return error;
}
static void
svc_udp_init(struct svc_sock *svsk)
{
        svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
        svsk->sk_sk->sk_write_space = svc_write_space;
        svsk->sk_recvfrom = svc_udp_recvfrom;
        svsk->sk_sendto = svc_udp_sendto;

        /* the initial setting must leave enough space to
         * receive and respond to one request.
         * svc_udp_recvfrom will re-adjust if necessary
         */
        svc_sock_setbufsize(svsk->sk_sock,
                            3 * svsk->sk_server->sv_bufsz,
                            3 * svsk->sk_server->sv_bufsz);

        set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
        set_bit(SK_CHNGBUF, &svsk->sk_flags);
}
/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
        struct svc_sock *svsk;

        dprintk("svc: socket %p TCP (listen) state change %d\n",
                sk, sk->sk_state);

        if (sk->sk_state != TCP_LISTEN) {
                /*
                 * This callback may be called twice when a new connection
                 * is established as a child socket inherits everything
                 * from a parent LISTEN socket.
                 * 1) data_ready method of the parent socket will be called
                 *    when one of the child sockets becomes ESTABLISHED.
                 * 2) data_ready method of the child socket may be called
                 *    when it receives data before the socket is accepted.
                 * In case of 2, we should ignore it silently.
                 */
                goto out;
        }
        if (!(svsk = (struct svc_sock *) sk->sk_user_data)) {
                printk("svc: socket %p: no user data\n", sk);
                goto out;
        }
        set_bit(SK_CONN, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
 out:
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible_all(sk->sk_sleep);
}
/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
        struct svc_sock *svsk;

        dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
                sk, sk->sk_state, sk->sk_user_data);

        if (!(svsk = (struct svc_sock *) sk->sk_user_data)) {
                printk("svc: socket %p: no user data\n", sk);
                goto out;
        }
        set_bit(SK_CLOSE, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
 out:
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible_all(sk->sk_sleep);
}
static void
svc_tcp_data_ready(struct sock *sk, int count)
{
        struct svc_sock *svsk;

        dprintk("svc: socket %p TCP data ready (svsk %p)\n",
                sk, sk->sk_user_data);
        if (!(svsk = (struct svc_sock *)(sk->sk_user_data)))
                goto out;
        set_bit(SK_DATA, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
 out:
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);
}
/*
 * Accept a TCP connection
 */
static void
svc_tcp_accept(struct svc_sock *svsk)
{
        struct sockaddr_in sin;
        struct svc_serv *serv = svsk->sk_server;
        struct socket *sock = svsk->sk_sock;
        struct socket *newsock;
        struct proto_ops *ops;
        struct svc_sock *newsvsk;
        int err, slen;

        dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
        if (!sock)
                return;

        err = sock_create_lite(PF_INET, SOCK_STREAM, IPPROTO_TCP, &newsock);
        if (err) {
                if (err == -ENOMEM)
                        printk(KERN_WARNING "%s: no more sockets!\n",
                               serv->sv_name);
                return;
        }

        dprintk("svc: tcp_accept %p allocated\n", newsock);
        newsock->ops = ops = sock->ops;

        clear_bit(SK_CONN, &svsk->sk_flags);
        if ((err = ops->accept(sock, newsock, O_NONBLOCK)) < 0) {
                if (err != -EAGAIN && net_ratelimit())
                        printk(KERN_WARNING "%s: accept failed (err %d)!\n",
                               serv->sv_name, -err);
                goto failed;            /* aborted connection or whatever */
        }
        set_bit(SK_CONN, &svsk->sk_flags);
        svc_sock_enqueue(svsk);

        slen = sizeof(sin);
        err = ops->getname(newsock, (struct sockaddr *) &sin, &slen, 1);
        if (err < 0) {
                if (net_ratelimit())
                        printk(KERN_WARNING "%s: peername failed (err %d)!\n",
                               serv->sv_name, -err);
                goto failed;            /* aborted connection or whatever */
        }

        /* Ideally, we would want to reject connections from unauthorized
         * hosts here, but when we get encryption, the IP of the host won't
         * tell us anything. For now just warn about unprivileged connections.
         */
        if (ntohs(sin.sin_port) >= 1024) {
                dprintk(KERN_WARNING
                        "%s: connect from unprivileged port: %u.%u.%u.%u:%d\n",
                        serv->sv_name,
                        NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
        }

        dprintk("%s: connect from %u.%u.%u.%u:%04x\n", serv->sv_name,
                NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));

        /* make sure that a write doesn't block forever when
         * low on memory
         */
        newsock->sk->sk_sndtimeo = HZ*30;

        if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0)))
                goto failed;

        /* make sure that we don't have too many active connections.
         * If we have, something must be dropped.
         *
         * There's no point in trying to do random drop here for
         * DoS prevention. NFS clients do one reconnect every 15
         * seconds. An attacker can easily beat that.
         *
         * The only somewhat efficient mechanism would be to drop
         * old connections from the same IP first. But right now
         * we don't even record the client IP in svc_sock.
         */
        if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
                struct svc_sock *svsk = NULL;
                spin_lock_bh(&serv->sv_lock);
                if (!list_empty(&serv->sv_tempsocks)) {
                        if (net_ratelimit()) {
                                /* Try to help the admin */
                                printk(KERN_NOTICE "%s: too many open TCP "
                                       "sockets, consider increasing the "
                                       "number of nfsd threads\n",
                                       serv->sv_name);
                                printk(KERN_NOTICE "%s: last TCP connect from "
                                       "%u.%u.%u.%u:%d\n",
                                       serv->sv_name,
                                       NIPQUAD(sin.sin_addr.s_addr),
                                       ntohs(sin.sin_port));
                        }
                        /*
                         * Always select the oldest socket. It's not fair,
                         * but so is life
                         */
                        svsk = list_entry(serv->sv_tempsocks.prev,
                                          struct svc_sock,
                                          sk_list);
                        set_bit(SK_CLOSE, &svsk->sk_flags);
                        svsk->sk_inuse++;
                }
                spin_unlock_bh(&serv->sv_lock);

                if (svsk) {
                        svc_sock_enqueue(svsk);
                        svc_sock_put(svsk);
                }
        }

        if (serv->sv_stats)
                serv->sv_stats->nettcpconn++;

        return;

failed:
        sock_release(newsock);
        return;
}
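
/*
 * Illustrative numbers, not part of svcsock.c proper, for the limit
 * tested above: with 8 server threads the cap is (8+3)*20 = 220
 * temporary sockets, and every extra thread buys another 20, which is
 * why the printk advises increasing the number of nfsd threads.
 */
static inline int
svc_conn_limit(struct svc_serv *serv)   /* hypothetical helper */
{
        return (serv->sv_nrthreads + 3) * 20;
}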
/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk = rqstp->rq_sock;
        struct svc_serv *serv = svsk->sk_server;
        int len;
        struct iovec vec[RPCSVC_MAXPAGES];
        int pnum, vlen;

        dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
                svsk, test_bit(SK_DATA, &svsk->sk_flags),
                test_bit(SK_CONN, &svsk->sk_flags),
                test_bit(SK_CLOSE, &svsk->sk_flags));

        if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
                svc_sock_received(svsk);
                return svc_deferred_recv(rqstp);
        }

        if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
                svc_delete_socket(svsk);
                return 0;
        }

        if (test_bit(SK_CONN, &svsk->sk_flags)) {
                svc_tcp_accept(svsk);
                svc_sock_received(svsk);
                return 0;
        }

        if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
                /* sndbuf needs to have room for one request
                 * per thread, otherwise we can stall even when the
                 * network isn't a bottleneck.
                 * rcvbuf just needs to be able to hold a few requests.
                 * Normally they will be removed from the queue
                 * as soon as a complete request arrives.
                 */
                svc_sock_setbufsize(svsk->sk_sock,
                                    (serv->sv_nrthreads+3) * serv->sv_bufsz,
                                    3 * serv->sv_bufsz);

        clear_bit(SK_DATA, &svsk->sk_flags);

        /* Receive data. If we haven't got the record length yet, get
         * the next four bytes. Otherwise try to gobble up as much as
         * possible up to the complete record length.
         */
        if (svsk->sk_tcplen < 4) {
                unsigned long want = 4 - svsk->sk_tcplen;
                struct iovec iov;

                iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
                iov.iov_len = want;
                if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
                        goto error;
                svsk->sk_tcplen += len;

                if (len < want) {
                        dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
                                len, want);
                        svc_sock_received(svsk);
                        return -EAGAIN; /* record header not complete */
                }

                svsk->sk_reclen = ntohl(svsk->sk_reclen);
                if (!(svsk->sk_reclen & 0x80000000)) {
                        /* FIXME: technically, a record can be fragmented,
                         *  and non-terminal fragments will not have the top
                         *  bit set in the fragment length header.
                         *  But apparently no known nfs clients send fragmented
                         *  records. */
                        printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (non-terminal)\n",
                               (unsigned long) svsk->sk_reclen);
                        goto err_delete;
                }
                svsk->sk_reclen &= 0x7fffffff;
                dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
                if (svsk->sk_reclen > serv->sv_bufsz) {
                        printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n",
                               (unsigned long) svsk->sk_reclen);
                        goto err_delete;
                }
        }

        /* Check whether enough data is available */
        len = svc_recv_available(svsk);
        if (len < 0)
                goto error;

        if (len < svsk->sk_reclen) {
                dprintk("svc: incomplete TCP record (%d of %d)\n",
                        len, svsk->sk_reclen);
                svc_sock_received(svsk);
                return -EAGAIN; /* record not complete */
        }
        len = svsk->sk_reclen;
        set_bit(SK_DATA, &svsk->sk_flags);

        vec[0] = rqstp->rq_arg.head[0];
        vlen = PAGE_SIZE;
        pnum = 1;
        while (vlen < len) {
                vec[pnum].iov_base = page_address(rqstp->rq_argpages[rqstp->rq_argused++]);
                vec[pnum].iov_len = PAGE_SIZE;
                pnum++;
                vlen += PAGE_SIZE;
        }

        /* Now receive data */
        len = svc_recvfrom(rqstp, vec, pnum, len);
        if (len < 0)
                goto error;

        dprintk("svc: TCP complete record (%d bytes)\n", len);
        rqstp->rq_arg.len = len;
        rqstp->rq_arg.page_base = 0;
        if (len <= rqstp->rq_arg.head[0].iov_len) {
                rqstp->rq_arg.head[0].iov_len = len;
                rqstp->rq_arg.page_len = 0;
        } else {
                rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
        }

        rqstp->rq_skbuff = NULL;
        rqstp->rq_prot = IPPROTO_TCP;

        /* Reset TCP read info */
        svsk->sk_reclen = 0;
        svsk->sk_tcplen = 0;

        svc_sock_received(svsk);
        if (serv->sv_stats)
                serv->sv_stats->nettcpcnt++;

        return len;

 err_delete:
        svc_delete_socket(svsk);
        return -EAGAIN;

 error:
        if (len == -EAGAIN) {
                dprintk("RPC: TCP recvfrom got EAGAIN\n");
                svc_sock_received(svsk);
        } else {
                printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
                       svsk->sk_server->sv_name, -len);
                svc_sock_received(svsk);
        }

        return len;
}
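
/*
 * Illustrative sketch, not part of svcsock.c proper: the RPC
 * record-marking decode that svc_tcp_recvfrom() performs inline above.
 * On the wire (RFC 1831) the marker is a 4-byte big-endian word whose
 * top bit flags the last fragment and whose low 31 bits give the
 * fragment length; e.g. 0x80000064 means "last fragment, 100 bytes".
 */
static inline void
svc_tcp_decode_marker(u32 wire, int *last_frag, u32 *frag_len)  /* hypothetical */
{
        u32 marker = ntohl(wire);

        *last_frag = (marker & 0x80000000) != 0;
        *frag_len  = marker & 0x7fffffff;
}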
/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
        struct xdr_buf *xbufp = &rqstp->rq_res;
        int sent;
        u32 reclen;

        /* Set up the first element of the reply iovec.
         * Any other iovecs that may be in use have been taken
         * care of by the server implementation itself.
         */
        reclen = htonl(0x80000000|((xbufp->len ) - 4));
        memcpy(xbufp->head[0].iov_base, &reclen, 4);

        if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
                return -ENOTCONN;

        sent = svc_sendto(rqstp, &rqstp->rq_res);
        if (sent != xbufp->len) {
                printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
                       rqstp->rq_sock->sk_server->sv_name,
                       (sent<0)?"got error":"sent only",
                       sent, xbufp->len);
                svc_delete_socket(rqstp->rq_sock);
                sent = -EAGAIN;
        }
        return sent;
}
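
/*
 * Illustrative sketch, not part of svcsock.c proper: the encode side
 * of the record marker that svc_tcp_sendto() builds above. For a
 * 100-byte reply buffer (the 4 marker bytes included), the marker is
 * htonl(0x80000000 | 96): one last fragment carrying 96 data bytes.
 */
static inline u32
svc_tcp_encode_marker(u32 reply_len)    /* hypothetical helper */
{
        /* top bit: last fragment; low 31 bits: length excluding marker */
        return htonl(0x80000000 | (reply_len - 4));
}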
static void
svc_tcp_init(struct svc_sock *svsk)
{
        struct sock *sk = svsk->sk_sk;
        struct tcp_opt *tp = tcp_sk(sk);

        svsk->sk_recvfrom = svc_tcp_recvfrom;
        svsk->sk_sendto = svc_tcp_sendto;

        if (sk->sk_state == TCP_LISTEN) {
                dprintk("setting up TCP socket for listening\n");
                sk->sk_data_ready = svc_tcp_listen_data_ready;
                set_bit(SK_CONN, &svsk->sk_flags);
        } else {
                dprintk("setting up TCP socket for reading\n");
                sk->sk_state_change = svc_tcp_state_change;
                sk->sk_data_ready = svc_tcp_data_ready;
                sk->sk_write_space = svc_write_space;

                svsk->sk_reclen = 0;
                svsk->sk_tcplen = 0;

                tp->nonagle = 1;        /* disable Nagle's algorithm */

                /* the initial setting must leave enough space to
                 * receive and respond to one request.
                 * svc_tcp_recvfrom will re-adjust if necessary
                 */
                svc_sock_setbufsize(svsk->sk_sock,
                                    3 * svsk->sk_server->sv_bufsz,
                                    3 * svsk->sk_server->sv_bufsz);

                set_bit(SK_CHNGBUF, &svsk->sk_flags);
                set_bit(SK_DATA, &svsk->sk_flags);
                if (sk->sk_state != TCP_ESTABLISHED)
                        set_bit(SK_CLOSE, &svsk->sk_flags);
        }
}
void
svc_sock_update_bufs(struct svc_serv *serv)
{
        /*
         * The number of server threads has changed. Update
         * rcvbuf and sndbuf accordingly on all sockets
         */
        struct list_head *le;

        spin_lock_bh(&serv->sv_lock);
        list_for_each(le, &serv->sv_permsocks) {
                struct svc_sock *svsk =
                        list_entry(le, struct svc_sock, sk_list);
                set_bit(SK_CHNGBUF, &svsk->sk_flags);
        }
        list_for_each(le, &serv->sv_tempsocks) {
                struct svc_sock *svsk =
                        list_entry(le, struct svc_sock, sk_list);
                set_bit(SK_CHNGBUF, &svsk->sk_flags);
        }
        spin_unlock_bh(&serv->sv_lock);
}
/*
 * Receive the next request on any socket.
 */
int
svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout)
{
        struct svc_sock *svsk = NULL;
        int len;
        int pages;
        struct xdr_buf *arg;
        DECLARE_WAITQUEUE(wait, current);

        dprintk("svc: server %p waiting for data (to = %ld)\n",
                rqstp, timeout);

        if (rqstp->rq_sock)
                printk(KERN_ERR
                       "svc_recv: service %p, socket not NULL!\n",
                       rqstp);
        if (waitqueue_active(&rqstp->rq_wait))
                printk(KERN_ERR
                       "svc_recv: service %p, wait queue active!\n",
                       rqstp);

        /* Initialize the buffers */
        /* first reclaim pages that were moved to response list */
        svc_pushback_allpages(rqstp);

        /* now allocate needed pages. If we get a failure, sleep briefly */
        pages = 2 + (serv->sv_bufsz + PAGE_SIZE - 1) / PAGE_SIZE;
        while (rqstp->rq_arghi < pages) {
                struct page *p = alloc_page(GFP_KERNEL);
                if (!p) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(HZ/2);
                        continue;
                }
                rqstp->rq_argpages[rqstp->rq_arghi++] = p;
        }

        /* Make arg->head point to first page and arg->pages point to rest */
        arg = &rqstp->rq_arg;
        arg->head[0].iov_base = page_address(rqstp->rq_argpages[0]);
        arg->head[0].iov_len = PAGE_SIZE;
        rqstp->rq_argused = 1;
        arg->pages = rqstp->rq_argpages + 1;
        arg->page_base = 0;
        /* save at least one page for response */
        arg->page_len = (pages-2)*PAGE_SIZE;
        arg->len = (pages-1)*PAGE_SIZE;
        arg->tail[0].iov_len = 0;

        spin_lock_bh(&serv->sv_lock);
        if (!list_empty(&serv->sv_tempsocks)) {
                svsk = list_entry(serv->sv_tempsocks.next,
                                  struct svc_sock, sk_list);
                /* apparently the "standard" is that clients close
                 * idle connections after 5 minutes, servers after
                 * 6 minutes
                 *   http://www.connectathon.org/talks96/nfstcp.pdf
                 */
                if (get_seconds() - svsk->sk_lastrecv < 6*60
                    || test_bit(SK_BUSY, &svsk->sk_flags))
                        svsk = NULL;
        }
        if (svsk) {
                set_bit(SK_BUSY, &svsk->sk_flags);
                set_bit(SK_CLOSE, &svsk->sk_flags);
                rqstp->rq_sock = svsk;
                svsk->sk_inuse++;
        } else if ((svsk = svc_sock_dequeue(serv)) != NULL) {
                rqstp->rq_sock = svsk;
                svsk->sk_inuse++;
                rqstp->rq_reserved = serv->sv_bufsz;
                svsk->sk_reserved += rqstp->rq_reserved;
        } else {
                /* No data pending. Go to sleep */
                svc_serv_enqueue(serv, rqstp);

                /*
                 * We have to be able to interrupt this wait
                 * to bring down the daemons ...
                 */
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&rqstp->rq_wait, &wait);
                spin_unlock_bh(&serv->sv_lock);

                schedule_timeout(timeout);

                if (current->flags & PF_FREEZE)
                        refrigerator(PF_FREEZE);

                spin_lock_bh(&serv->sv_lock);
                remove_wait_queue(&rqstp->rq_wait, &wait);

                if (!(svsk = rqstp->rq_sock)) {
                        svc_serv_dequeue(serv, rqstp);
                        spin_unlock_bh(&serv->sv_lock);
                        dprintk("svc: server %p, no data yet\n", rqstp);
                        return signalled()? -EINTR : -EAGAIN;
                }
        }
        spin_unlock_bh(&serv->sv_lock);

        dprintk("svc: server %p, socket %p, inuse=%d\n",
                rqstp, svsk, svsk->sk_inuse);
        len = svsk->sk_recvfrom(rqstp);
        dprintk("svc: got len=%d\n", len);

        /* No data, incomplete (TCP) read, or accept() */
        if (len == 0 || len == -EAGAIN) {
                rqstp->rq_res.len = 0;
                svc_sock_release(rqstp);
                return -EAGAIN;
        }
        svsk->sk_lastrecv = get_seconds();
        if (test_bit(SK_TEMP, &svsk->sk_flags)) {
                /* push active sockets to end of list */
                spin_lock_bh(&serv->sv_lock);
                if (!list_empty(&svsk->sk_list))
                        list_move_tail(&svsk->sk_list, &serv->sv_tempsocks);
                spin_unlock_bh(&serv->sv_lock);
        }

        rqstp->rq_secure = ntohs(rqstp->rq_addr.sin_port) < 1024;
        rqstp->rq_chandle.defer = svc_defer;

        if (serv->sv_stats)
                serv->sv_stats->netcnt++;
        return len;
}
/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
        dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
        svc_sock_release(rqstp);
}
/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk;
        int len;
        struct xdr_buf *xb;

        if ((svsk = rqstp->rq_sock) == NULL) {
                printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
                       __FILE__, __LINE__);
                return -EFAULT;
        }

        /* release the receive skb before sending the reply */
        svc_release_skb(rqstp);

        /* calculate over-all length */
        xb = &rqstp->rq_res;
        xb->len = xb->head[0].iov_len +
                xb->page_len +
                xb->tail[0].iov_len;

        /* Grab svsk->sk_sem to serialize outgoing data. */
        down(&svsk->sk_sem);
        if (test_bit(SK_DEAD, &svsk->sk_flags))
                len = -ENOTCONN;
        else
                len = svsk->sk_sendto(rqstp);
        up(&svsk->sk_sem);
        svc_sock_release(rqstp);

        if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
                return 0;
        return len;
}
/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *
svc_setup_socket(struct svc_serv *serv, struct socket *sock,
                 int *errp, int pmap_register)
{
        struct svc_sock *svsk;
        struct sock *inet;

        dprintk("svc: svc_setup_socket %p\n", sock);
        if (!(svsk = kmalloc(sizeof(*svsk), GFP_KERNEL))) {
                *errp = -ENOMEM;
                return NULL;
        }
        memset(svsk, 0, sizeof(*svsk));

        inet = sock->sk;

        /* Register socket with portmapper */
        if (*errp >= 0 && pmap_register)
                *errp = svc_register(serv, inet->sk_protocol,
                                     ntohs(inet_sk(inet)->sport));

        if (*errp < 0) {
                kfree(svsk);
                return NULL;
        }

        set_bit(SK_BUSY, &svsk->sk_flags);
        inet->sk_user_data = svsk;
        svsk->sk_sock = sock;
        svsk->sk_sk = inet;
        svsk->sk_ostate = inet->sk_state_change;
        svsk->sk_odata = inet->sk_data_ready;
        svsk->sk_owspace = inet->sk_write_space;
        svsk->sk_server = serv;
        svsk->sk_lastrecv = get_seconds();
        INIT_LIST_HEAD(&svsk->sk_deferred);
        INIT_LIST_HEAD(&svsk->sk_ready);
        sema_init(&svsk->sk_sem, 1);

        /* Initialize the socket */
        if (sock->type == SOCK_DGRAM)
                svc_udp_init(svsk);
        else
                svc_tcp_init(svsk);

        spin_lock_bh(&serv->sv_lock);
        if (!pmap_register) {
                set_bit(SK_TEMP, &svsk->sk_flags);
                list_add(&svsk->sk_list, &serv->sv_tempsocks);
                serv->sv_tmpcnt++;
        } else {
                clear_bit(SK_TEMP, &svsk->sk_flags);
                list_add(&svsk->sk_list, &serv->sv_permsocks);
        }
        spin_unlock_bh(&serv->sv_lock);

        dprintk("svc: svc_setup_socket created %p (inet %p)\n",
                svsk, svsk->sk_sk);

        clear_bit(SK_BUSY, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
        return svsk;
}
/*
 * Create socket for RPC service.
 */
static int
svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
{
        struct svc_sock *svsk;
        struct socket *sock;
        int error;
        int type;

        dprintk("svc: svc_create_socket(%s, %d, %u.%u.%u.%u:%d)\n",
                serv->sv_program->pg_name, protocol,
                NIPQUAD(sin->sin_addr.s_addr),
                ntohs(sin->sin_port));

        if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
                printk(KERN_WARNING "svc: only UDP and TCP "
                       "sockets supported\n");
                return -EINVAL;
        }
        type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

        if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
                return error;

        if (type == SOCK_STREAM)
                sock->sk->sk_reuse = 1; /* allow address reuse */
        error = sock->ops->bind(sock, (struct sockaddr *) sin,
                                sizeof(*sin));
        if (error < 0)
                goto bummer;

        if (protocol == IPPROTO_TCP) {
                if ((error = sock->ops->listen(sock, 64)) < 0)
                        goto bummer;
        }

        if ((svsk = svc_setup_socket(serv, sock, &error, 1)) != NULL)
                return 0;

bummer:
        dprintk("svc: svc_create_socket error = %d\n", -error);
        sock_release(sock);
        return error;
}
/*
 * Remove a dead socket
 */
void
svc_delete_socket(struct svc_sock *svsk)
{
        struct svc_serv *serv;
        struct sock *sk;

        dprintk("svc: svc_delete_socket(%p)\n", svsk);

        serv = svsk->sk_server;
        sk = svsk->sk_sk;

        sk->sk_state_change = svsk->sk_ostate;
        sk->sk_data_ready = svsk->sk_odata;
        sk->sk_write_space = svsk->sk_owspace;

        spin_lock_bh(&serv->sv_lock);

        list_del_init(&svsk->sk_list);
        list_del_init(&svsk->sk_ready);
        if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags))
                if (test_bit(SK_TEMP, &svsk->sk_flags))
                        serv->sv_tmpcnt--;

        if (!svsk->sk_inuse) {
                spin_unlock_bh(&serv->sv_lock);
                sock_release(svsk->sk_sock);
                kfree(svsk);
        } else {
                spin_unlock_bh(&serv->sv_lock);
                dprintk(KERN_NOTICE "svc: server socket destroy delayed\n");
                /* svsk->sk_server = NULL; */
        }
}
/*
 * Make a socket for nfsd and lockd
 */
int
svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
{
        struct sockaddr_in sin;

        dprintk("svc: creating socket proto = %d\n", protocol);
        sin.sin_family      = AF_INET;
        sin.sin_addr.s_addr = INADDR_ANY;
        sin.sin_port        = htons(port);
        return svc_create_socket(serv, protocol, &sin);
}
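
/*
 * Illustrative usage, not part of svcsock.c proper: how a service such
 * as nfsd would create its listening sockets. Port 2049 is the
 * conventional NFS port, used here purely as an example.
 */
static inline int
example_make_service_sockets(struct svc_serv *serv)     /* hypothetical */
{
        int err;

        err = svc_makesock(serv, IPPROTO_UDP, 2049);
        if (err < 0)
                return err;
        return svc_makesock(serv, IPPROTO_TCP, 2049);
}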
/*
 * Handle defer and revisit of requests
 */
static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
        struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
        struct svc_serv *serv = dreq->owner;
        struct svc_sock *svsk;

        if (too_many) {
                svc_sock_put(dr->svsk);
                kfree(dr);
                return;
        }
        dprintk("revisit queued\n");
        svsk = dr->svsk;
        dr->svsk = NULL;
        spin_lock_bh(&serv->sv_lock);
        list_add(&dr->handle.recent, &svsk->sk_deferred);
        spin_unlock_bh(&serv->sv_lock);
        set_bit(SK_DEFERRED, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
        svc_sock_put(svsk);
}
static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
        struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
        int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
        struct svc_deferred_req *dr;

        if (rqstp->rq_arg.page_len)
                return NULL; /* if more than a page, give up FIXME */
        if (rqstp->rq_deferred) {
                dr = rqstp->rq_deferred;
                rqstp->rq_deferred = NULL;
        } else {
                int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
                /* FIXME maybe discard if size too large */
                dr = kmalloc(size, GFP_KERNEL);
                if (dr == NULL)
                        return NULL;

                dr->handle.owner = rqstp->rq_server;
                dr->prot = rqstp->rq_prot;
                dr->addr = rqstp->rq_addr;
                dr->argslen = rqstp->rq_arg.len >> 2;
                memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
        }
        spin_lock_bh(&rqstp->rq_server->sv_lock);
        rqstp->rq_sock->sk_inuse++;
        dr->svsk = rqstp->rq_sock;
        spin_unlock_bh(&rqstp->rq_server->sv_lock);

        dr->handle.revisit = svc_revisit;
        return &dr->handle;
}
/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
        struct svc_deferred_req *dr = rqstp->rq_deferred;

        rqstp->rq_arg.head[0].iov_base = dr->args;
        rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
        rqstp->rq_arg.page_len = 0;
        rqstp->rq_arg.len = dr->argslen<<2;
        rqstp->rq_prot = dr->prot;
        rqstp->rq_addr = dr->addr;
        return dr->argslen<<2;
}
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
        struct svc_deferred_req *dr = NULL;
        struct svc_serv *serv = svsk->sk_server;

        if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
                return NULL;
        spin_lock_bh(&serv->sv_lock);
        clear_bit(SK_DEFERRED, &svsk->sk_flags);
        if (!list_empty(&svsk->sk_deferred)) {
                dr = list_entry(svsk->sk_deferred.next,
                                struct svc_deferred_req,
                                handle.recent);
                list_del_init(&dr->handle.recent);
                set_bit(SK_DEFERRED, &svsk->sk_flags);
        }
        spin_unlock_bh(&serv->sv_lock);
        return dr;
}
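
/*
 * Illustrative sketch, not part of svcsock.c proper: the deferral round
 * trip. A cache lookup that must wait for an upcall defers the request
 * through the handle installed by svc_recv() (rq_chandle.defer ==
 * svc_defer); svc_revisit() later requeues it on the socket, and the
 * next svc_recv() on that socket replays it via svc_deferred_recv().
 */
static inline struct cache_deferred_req *
example_defer_request(struct svc_rqst *rqstp)   /* hypothetical */
{
        return rqstp->rq_chandle.defer(&rqstp->rq_chandle);
}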