/*
 * TUX - Integrated Application Protocols Layer and Object Cache
 *
 * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
 *
 * accept.c: accept new connections, allocate requests
 */

#include <net/tux.h>

/****************************************************************
 *      This program is free software; you can redistribute it and/or modify
 *      it under the terms of the GNU General Public License as published by
 *      the Free Software Foundation; either version 2, or (at your option)
 *      any later version.
 *
 *      This program is distributed in the hope that it will be useful,
 *      but WITHOUT ANY WARRANTY; without even the implied warranty of
 *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *      GNU General Public License for more details.
 *
 *      You should have received a copy of the GNU General Public License
 *      along with this program; if not, write to the Free Software
 *      Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 ****************************************************************/
unsigned int tux_ack_pingpong = 1;
unsigned int tux_push_all = 0;
unsigned int tux_zerocopy_parse = 1;

static int __idle_event (tux_req_t *req);
static int __output_space_event (tux_req_t *req);
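/*
 * start_listening() creates, binds and tunes a listening socket for
 * one configured TUX protocol, then puts it into the listen state.
 * It returns the new socket, or NULL on failure.
 */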
struct socket * start_listening (tux_socket_t *listen, int nr)
{
	struct sockaddr_in sin;
	struct socket *sock = NULL;
	struct sock *sk;
	struct tcp_sock *tp;
	struct inet_connection_sock *icsk;
	int err;
	u16 port = listen->port;
	u32 addr = listen->ip;
	tux_proto_t *proto = listen->proto;

	/* Create a listening socket: */

	err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err) {
		printk(KERN_ERR "TUX: error %d creating socket.\n", err);
		goto error;
	}

	/* Bind the socket: */

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(addr);
	sin.sin_port = htons(port);

	sk = sock->sk;
	sk->sk_reuse = 1;
	sock_set_flag(sk, SOCK_URGINLINE);

	err = sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin));
	if (err) {
		printk(KERN_ERR "TUX: error %d binding socket. This means that probably some other process is (or was a short time ago) using addr %s://%d.%d.%d.%d:%d.\n",
			err, proto->name, HIPQUAD(addr), port);
		goto error;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	Dprintk("listen sk accept_queue: %d.\n",
		!reqsk_queue_empty(&icsk->icsk_accept_queue));
	icsk->icsk_ack.pingpong = tux_ack_pingpong;

	sock_reset_flag(sk, SOCK_LINGER);
	sk->sk_lingertime = 0;
	tp->linger2 = tux_keepalive_timeout * HZ;

	if (proto->defer_accept && !tux_keepalive_timeout && tux_defer_accept)
		icsk->icsk_accept_queue.rskq_defer_accept = 1;

	/* Now, start listening on the socket */

	err = sock->ops->listen(sock, tux_max_backlog);
	if (err) {
		printk(KERN_ERR "TUX: error %d listening on socket.\n", err);
		goto error;
	}

	printk(KERN_NOTICE "TUX: thread %d listens on %s://%d.%d.%d.%d:%d.\n",
		nr, proto->name, HIPQUAD(addr), port);
	return sock;

error:
	if (sock)
		sock_release(sock);
	return NULL;
}
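/*
 * Request slots are cached on a per-thread LIFO free list, so the
 * common case avoids a kmalloc()/kfree() round trip per connection:
 * kmalloc_req() pops from the list, kfree_req() pushes back onto it,
 * and flush_freequeue() releases all cached entries.
 */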
static inline void __kfree_req (tux_req_t *req, threadinfo_t * ti)
{
	list_del(&req->all);
	DEBUG_DEL_LIST(&req->all);

	kfree(req);
}
int flush_freequeue (threadinfo_t * ti)
{
	struct list_head *tmp;
	unsigned long flags;
	tux_req_t *req;
	int count = 0;

	spin_lock_irqsave(&ti->free_requests_lock, flags);
	while (ti->nr_free_requests) {
		ti->nr_free_requests--;
		tmp = ti->free_requests.next;
		req = list_entry(tmp, tux_req_t, free);
		list_del(tmp);
		DEBUG_DEL_LIST(tmp);
		DEC_STAT(nr_free_pending);
		__kfree_req(req, ti);
		count++;
	}
	spin_unlock_irqrestore(&ti->free_requests_lock, flags);

	return count;
}
static tux_req_t * kmalloc_req (threadinfo_t * ti)
{
	struct list_head *tmp;
	unsigned long flags;
	tux_req_t *req;

	spin_lock_irqsave(&ti->free_requests_lock, flags);
	if (ti->nr_free_requests) {
		ti->nr_free_requests--;
		tmp = ti->free_requests.next;
		req = list_entry(tmp, tux_req_t, free);
		list_del(tmp);
		DEBUG_DEL_LIST(tmp);
		DEC_STAT(nr_free_pending);
		req->magic = TUX_MAGIC;
		spin_unlock_irqrestore(&ti->free_requests_lock, flags);
	} else {
		spin_unlock_irqrestore(&ti->free_requests_lock, flags);
		req = tux_kmalloc(sizeof(*req));
		if (!req)
			return NULL;
		memset (req, 0, sizeof(*req));
		list_add(&req->all, &ti->all_requests);
	}

	req->magic = TUX_MAGIC;
	INC_STAT(nr_allocated);
	init_waitqueue_entry(&req->sleep, current);
	init_waitqueue_entry(&req->ftp_sleep, current);
	INIT_LIST_HEAD(&req->work);
	INIT_LIST_HEAD(&req->free);
	INIT_LIST_HEAD(&req->lru);
	req->ti = ti;
	req->total_bytes = 0;
	SET_TIMESTAMP(req->accept_timestamp);
	req->first_timestamp = jiffies;

	init_timer(&req->keepalive_timer);
	init_timer(&req->output_timer);

	Dprintk("allocated NEW req %p.\n", req);
	return req;
}
void kfree_req (tux_req_t *req)
{
	threadinfo_t * ti = req->ti;
	unsigned long flags;

	Dprintk("freeing req %p.\n", req);

	if (req->magic != TUX_MAGIC)
		TUX_BUG();
	spin_lock_irqsave(&ti->free_requests_lock, flags);
	req->magic = 0;
	DEC_STAT(nr_allocated);
	if (req->sock || req->dentry || req->private)
		TUX_BUG();
	if (ti->nr_free_requests > tux_max_free_requests)
		__kfree_req(req, ti);
	else {
		ti->nr_free_requests++;

		// the free requests queue is LIFO
		list_add(&req->free, &ti->free_requests);
		INC_STAT(nr_free_pending);
	}
	spin_unlock_irqrestore(&ti->free_requests_lock, flags);
}
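/*
 * A request becomes runnable by being put on the per-thread
 * work_pending list, after which the worker thread is woken up.
 * Connections that are sending faster than their bandwidth limit
 * are queued at the tail, so they cannot starve other connections.
 */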
static void __add_req_to_workqueue (tux_req_t *req)
{
	threadinfo_t *ti = req->ti;

	if (!list_empty(&req->work))
		TUX_BUG();
	Dprintk("work-queueing request %p at %p/%p.\n", req, __builtin_return_address(0), __builtin_return_address(1));
	if (connection_too_fast(req))
		list_add_tail(&req->work, &ti->work_pending);
	else
		list_add(&req->work, &ti->work_pending);
	INC_STAT(nr_work_pending);
	wake_up_process(ti->thread);
}
void add_req_to_workqueue (tux_req_t *req)
{
	unsigned long flags;
	threadinfo_t *ti = req->ti;

	spin_lock_irqsave(&ti->work_lock, flags);
	__add_req_to_workqueue(req);
	spin_unlock_irqrestore(&ti->work_lock, flags);
}
void del_output_timer (tux_req_t *req)
{
#ifdef CONFIG_SMP
	if (!spin_is_locked(&req->ti->work_lock))
		TUX_BUG();
#endif
	if (!list_empty(&req->lru)) {
		list_del(&req->lru);
		DEBUG_DEL_LIST(&req->lru);
		req->ti->nr_lru--;
	}
	Dprintk("del output timeout for req %p.\n", req);
	del_timer(&req->output_timer);
}
static void output_timeout_fn (unsigned long data);

#define OUTPUT_TIMEOUT HZ

static void add_output_timer (tux_req_t *req)
{
	struct timer_list *timer = &req->output_timer;

	timer->data = (unsigned long) req;
	timer->function = &output_timeout_fn;
	mod_timer(timer, jiffies + OUTPUT_TIMEOUT);
}
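/*
 * Output throttling: if a connection is still above its bandwidth
 * limit when the timer fires, the timer is simply re-armed;
 * otherwise the request is woken up via output_space_event().
 */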
static void output_timeout_fn (unsigned long data)
{
	tux_req_t *req = (tux_req_t *)data;

	if (connection_too_fast(req)) {
		add_output_timer(req);
//		mod_timer(&req->output_timer, jiffies + OUTPUT_TIMEOUT);
		return;
	}
	output_space_event(req);
}
void output_timeout (tux_req_t *req)
{
	Dprintk("output timeout for req %p.\n", req);
	if (test_and_set_bit(0, &req->wait_output_space))
		TUX_BUG();
	INC_STAT(nr_output_space_pending);
	add_output_timer(req);
}
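/*
 * Keepalive handling: idle connections sit on a per-thread LRU list
 * with a timeout armed. If more than tux_max_keepalives connections
 * are idle, the oldest one is aged out before a new one is added.
 */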
void __del_keepalive_timer (tux_req_t *req)
{
#ifdef CONFIG_SMP
	if (!spin_is_locked(&req->ti->work_lock))
		TUX_BUG();
#endif
	if (!list_empty(&req->lru)) {
		list_del(&req->lru);
		DEBUG_DEL_LIST(&req->lru);
		req->ti->nr_lru--;
	}
	Dprintk("del keepalive timeout for req %p.\n", req);
	del_timer(&req->keepalive_timer);
}
static void keepalive_timeout_fn (unsigned long data)
{
	tux_req_t *req = (tux_req_t *)data;

#ifdef CONFIG_TUX_DEBUG
	Dprintk("req %p timed out after %d sec!\n", req, tux_keepalive_timeout);
#endif
	Dprintk("req->error = TUX_ERROR_CONN_TIMEOUT!\n");
	req->error = TUX_ERROR_CONN_TIMEOUT;
	if (!idle_event(req))
		output_space_event(req);
}
void __add_keepalive_timer (tux_req_t *req)
{
	struct timer_list *timer = &req->keepalive_timer;

	if (!tux_keepalive_timeout)
		return;
#ifdef CONFIG_SMP
	if (!spin_is_locked(&req->ti->work_lock))
		TUX_BUG();
#endif

	if (!list_empty(&req->lru))
		TUX_BUG();
	if (req->ti->nr_lru > tux_max_keepalives) {
		struct list_head *head, *last;
		tux_req_t *last_req;

		head = &req->ti->lru;
		last = head->prev;
		last_req = list_entry(last, tux_req_t, lru);
		list_del(last);
		DEBUG_DEL_LIST(last);
		req->ti->nr_lru--;

		Dprintk("LRU-aging req %p!\n", last_req);
		last_req->error = TUX_ERROR_CONN_TIMEOUT;
		if (!__idle_event(last_req))
			__output_space_event(last_req);
	}
	list_add(&req->lru, &req->ti->lru);
	req->ti->nr_lru++;

	timer->expires = jiffies + tux_keepalive_timeout * HZ;
	timer->data = (unsigned long) req;
	timer->function = &keepalive_timeout_fn;
	add_timer(timer);
}
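/*
 * Event handlers: wait_output_space and idle_input act as handshake
 * bits. The blocking side sets the bit; the event handler requeues
 * the request only if it can atomically clear it, so each event is
 * delivered at most once.
 */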
static int __output_space_event (tux_req_t *req)
{
	if (!req || (req->magic != TUX_MAGIC))
		TUX_BUG();

	if (!test_and_clear_bit(0, &req->wait_output_space)) {
		Dprintk("output space ready event at <%p>, on non-idle %p.\n", __builtin_return_address(0), req);
		return 0;
	}

	Dprintk("output space ready event at <%p>, %p was waiting!\n", __builtin_return_address(0), req);
	DEC_STAT(nr_output_space_pending);

	del_keepalive_timer(req);
	del_output_timer(req);

	__add_req_to_workqueue(req);
	return 1;
}
int output_space_event (tux_req_t *req)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&req->ti->work_lock, flags);
	ret = __output_space_event(req);
	spin_unlock_irqrestore(&req->ti->work_lock, flags);

	return ret;
}
static int __idle_event (tux_req_t *req)
{
	struct inet_connection_sock *icsk;

	if (!req || (req->magic != TUX_MAGIC))
		TUX_BUG();

	if (!test_and_clear_bit(0, &req->idle_input)) {
		Dprintk("data ready event at <%p>, on non-idle %p.\n", __builtin_return_address(0), req);
		return 0;
	}

	Dprintk("data ready event at <%p>, %p was idle!\n", __builtin_return_address(0), req);
	del_keepalive_timer(req);
	del_output_timer(req);
	DEC_STAT(nr_idle_input_pending);

	icsk = inet_csk(req->sock->sk);

	icsk->icsk_ack.pingpong = tux_ack_pingpong;
	SET_TIMESTAMP(req->accept_timestamp);

	__add_req_to_workqueue(req);

	return 1;
}
int idle_event (tux_req_t *req)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&req->ti->work_lock, flags);
	ret = __idle_event(req);
	spin_unlock_irqrestore(&req->ti->work_lock, flags);

	return ret;
}
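/*
 * Socket callback interception: TUX saves a socket's original
 * sk_data_ready/sk_write_space/etc. callbacks in the request and
 * redirects them to its own handlers. HANDLE_CALLBACK_1 looks up the
 * request via sk_user_data and falls back to the original callback
 * if the socket is not (or no longer) a TUX socket;
 * HANDLE_CALLBACK_2 additionally invokes the saved callback.
 */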
#define HANDLE_CALLBACK_1(callback, tux_name, real_name, param...)	\
	tux_req_t *req;							\
									\
	read_lock(&sk->sk_callback_lock);				\
	req = sk->sk_user_data;						\
									\
	Dprintk("callback "#callback"(%p) req %p.\n",			\
		sk->sk_##callback, req);				\
									\
	if (!req) {							\
		if (sk->sk_##callback == tux_name) {			\
			printk("BUG: "#callback" "#tux_name" "#real_name" no req!"); \
			TUX_BUG();					\
		}							\
		read_unlock(&sk->sk_callback_lock);			\
		if (sk->sk_##callback)					\
			sk->sk_##callback(param);			\
		return;							\
	}

#define HANDLE_CALLBACK_2(callback, tux_name, real_name, param...)	\
	Dprintk(#tux_name"() on %p.\n", req);				\
	if (req->magic != TUX_MAGIC)					\
		TUX_BUG();						\
	if (req->real_name)						\
		req->real_name(param);

#define HANDLE_CALLBACK(callback, tux_name, real_name, param...)	\
	HANDLE_CALLBACK_1(callback,tux_name,real_name,param)		\
	HANDLE_CALLBACK_2(callback,tux_name,real_name,param)
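/*
 * Note that HANDLE_CALLBACK_1 leaves sk->sk_callback_lock read-held
 * when a TUX request is attached, so every wrapper below must end
 * with read_unlock(&sk->sk_callback_lock).
 */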
static void tux_data_ready (struct sock *sk, int len)
{
	HANDLE_CALLBACK_1(data_ready, tux_data_ready, real_data_ready, sk, len);

	if (!idle_event(req))
		output_space_event(req);
	read_unlock(&sk->sk_callback_lock);
}
static void tux_write_space (struct sock *sk)
{
	HANDLE_CALLBACK(write_space, tux_write_space, real_write_space, sk);

	Dprintk("sk->sk_wmem_queued: %d, sk->sk_sndbuf: %d.\n",
		sk->sk_wmem_queued, sk->sk_sndbuf);

	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		if (!idle_event(req))
			output_space_event(req);
	}
	read_unlock(&sk->sk_callback_lock);
}
static void tux_error_report (struct sock *sk)
{
	HANDLE_CALLBACK(error_report, tux_error_report, real_error_report, sk);

	req->error = TUX_ERROR_CONN_CLOSE;
	if (!idle_event(req))
		output_space_event(req);
	read_unlock(&sk->sk_callback_lock);
}
static void tux_state_change (struct sock *sk)
{
	HANDLE_CALLBACK(state_change, tux_state_change, real_state_change, sk);

	if (req->sock && req->sock->sk &&
			(req->sock->sk->sk_state > TCP_ESTABLISHED)) {
		Dprintk("req %p changed to TCP non-established!\n", req);
		Dprintk("req->sock: %p\n", req->sock);
		if (req->sock)
			Dprintk("req->sock->sk: %p\n", req->sock->sk);
		if (req->sock && req->sock->sk)
			Dprintk("TCP state: %d\n", req->sock->sk->sk_state);
		Dprintk("req->error = TUX_ERROR_CONN_CLOSE!\n");
		req->error = TUX_ERROR_CONN_CLOSE;
	}
	if (!idle_event(req))
		output_space_event(req);
	read_unlock(&sk->sk_callback_lock);
}
static void tux_destruct (struct sock *sk)
{
	BUG();
}
static void tux_ftp_data_ready (struct sock *sk, int len)
{
	HANDLE_CALLBACK_1(data_ready, tux_ftp_data_ready,
		ftp_real_data_ready, sk, len);

	if (!idle_event(req))
		output_space_event(req);
	read_unlock(&sk->sk_callback_lock);
}
static void tux_ftp_write_space (struct sock *sk)
{
	HANDLE_CALLBACK_1(write_space, tux_ftp_write_space,
		ftp_real_write_space, sk);

	Dprintk("sk->sk_wmem_queued: %d, sk->sk_sndbuf: %d.\n",
		sk->sk_wmem_queued, sk->sk_sndbuf);

	/* wake the sender once 80% of the send buffer is writable again: */
	if (sk_stream_wspace(sk) >= sk->sk_sndbuf/10*8) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		if (!idle_event(req))
			output_space_event(req);
	}
	read_unlock(&sk->sk_callback_lock);
}
static void tux_ftp_error_report (struct sock *sk)
{
	HANDLE_CALLBACK(error_report, tux_ftp_error_report,
		ftp_real_error_report, sk);

	TDprintk("req %p sock %p got TCP errors on FTP data connection!\n", req, sk);
	TDprintk("req->error = TUX_ERROR_CONN_CLOSE!\n");
	req->error = TUX_ERROR_CONN_CLOSE;
	if (!idle_event(req))
		output_space_event(req);
	read_unlock(&sk->sk_callback_lock);
}
static void tux_ftp_state_change (struct sock *sk)
{
	HANDLE_CALLBACK(state_change, tux_ftp_state_change,
		ftp_real_state_change, sk);

	if (req->sock && req->sock->sk &&
			(req->sock->sk->sk_state > TCP_ESTABLISHED)) {
		Dprintk("req %p FTP control sock changed to TCP non-established!\n", req);
		Dprintk("req->sock: %p\n", req->sock);
		TDprintk("req->error = TUX_ERROR_CONN_CLOSE!\n");

		req->error = TUX_ERROR_CONN_CLOSE;
	}
	if (!idle_event(req))
		output_space_event(req);
	read_unlock(&sk->sk_callback_lock);
}
static void tux_ftp_create_child (struct sock *sk, struct sock *newsk)
{
	HANDLE_CALLBACK(create_child, tux_ftp_create_child,
		ftp_real_create_child, sk, newsk);

	/* child sockets inherit the original, non-TUX callbacks: */
	newsk->sk_user_data = NULL;
	newsk->sk_data_ready = req->ftp_real_data_ready;
	newsk->sk_state_change = req->ftp_real_state_change;
	newsk->sk_write_space = req->ftp_real_write_space;
	newsk->sk_error_report = req->ftp_real_error_report;
	newsk->sk_create_child = req->ftp_real_create_child;
	newsk->sk_destruct = req->ftp_real_destruct;

	if (!idle_event(req))
		output_space_event(req);
	read_unlock(&sk->sk_callback_lock);
}
static void tux_ftp_destruct (struct sock *sk)
{
	BUG();
}
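/*
 * link_tux_socket() attaches a freshly accepted connection to a
 * request: it saves the socket's original callbacks, redirects them
 * to the TUX handlers above, records the peer's address and port,
 * and registers the request on the socket's wait queue.
 */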
static void link_tux_socket (tux_req_t *req, struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (req->sock)
		TUX_BUG();
	if (sk->sk_destruct == tux_destruct)
		TUX_BUG();
	/*
	 * (No need to lock the socket, we just want to
	 * make sure that events from now on go through
	 * tux_data_ready().)
	 */
	write_lock_irq(&sk->sk_callback_lock);

	req->sock = sock;
	sk->sk_user_data = req;

	req->real_data_ready = sk->sk_data_ready;
	req->real_state_change = sk->sk_state_change;
	req->real_write_space = sk->sk_write_space;
	req->real_error_report = sk->sk_error_report;
	req->real_destruct = sk->sk_destruct;

	sk->sk_data_ready = tux_data_ready;
	sk->sk_state_change = tux_state_change;
	sk->sk_write_space = tux_write_space;
	sk->sk_error_report = tux_error_report;
	sk->sk_destruct = tux_destruct;

	write_unlock_irq(&sk->sk_callback_lock);

	if (req->real_destruct == tux_destruct)
		TUX_BUG();
	req->client_addr = inet_sk(sk)->daddr;
	req->client_port = inet_sk(sk)->dport;

	add_wait_queue(sk->sk_sleep, &req->sleep);
}
void __link_data_socket (tux_req_t *req, struct socket *sock,
			 struct sock *sk)
{
	/*
	 * (No need to lock the socket, we just want to
	 * make sure that events from now on go through
	 * tux_ftp_data_ready().)
	 */
	write_lock_irq(&sk->sk_callback_lock);

	req->data_sock = sock;
	sk->sk_user_data = req;

	req->ftp_real_data_ready = sk->sk_data_ready;
	req->ftp_real_state_change = sk->sk_state_change;
	req->ftp_real_write_space = sk->sk_write_space;
	req->ftp_real_error_report = sk->sk_error_report;
	req->ftp_real_create_child = sk->sk_create_child;
	req->ftp_real_destruct = sk->sk_destruct;

	sk->sk_data_ready = tux_ftp_data_ready;
	sk->sk_state_change = tux_ftp_state_change;
	sk->sk_write_space = tux_ftp_write_space;
	sk->sk_error_report = tux_ftp_error_report;
	sk->sk_create_child = tux_ftp_create_child;
	sk->sk_destruct = tux_ftp_destruct;

	if (req->ftp_real_destruct == tux_ftp_destruct)
		TUX_BUG();

	write_unlock_irq(&sk->sk_callback_lock);

	add_wait_queue(sk->sk_sleep, &req->ftp_sleep);
}
void link_tux_data_socket (tux_req_t *req, struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (req->data_sock)
		TUX_BUG();
	if (sk->sk_destruct == tux_ftp_destruct)
		TUX_BUG();
	__link_data_socket(req, sock, sk);
}
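/*
 * The unlink functions undo the linking above: they restore the
 * socket's original callbacks and take the request off the socket's
 * wait queue.
 */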
void unlink_tux_socket (tux_req_t *req)
{
	struct sock *sk;

	if (!req->sock || !req->sock->sk)
		return;
	sk = req->sock->sk;

	write_lock_irq(&sk->sk_callback_lock);
	if (!sk->sk_user_data)
		TUX_BUG();
	if (req->real_destruct == tux_destruct)
		TUX_BUG();

	sk->sk_user_data = NULL;

	sk->sk_data_ready = req->real_data_ready;
	sk->sk_state_change = req->real_state_change;
	sk->sk_write_space = req->real_write_space;
	sk->sk_error_report = req->real_error_report;
	sk->sk_destruct = req->real_destruct;

	if (sk->sk_destruct == tux_destruct)
		TUX_BUG();

	req->real_data_ready = NULL;
	req->real_state_change = NULL;
	req->real_write_space = NULL;
	req->real_error_report = NULL;
	req->real_destruct = NULL;

	write_unlock_irq(&sk->sk_callback_lock);

	remove_wait_queue(sk->sk_sleep, &req->sleep);
}
void unlink_tux_data_socket (tux_req_t *req)
{
	struct sock *sk;

	if (!req->data_sock || !req->data_sock->sk)
		return;
	sk = req->data_sock->sk;

	write_lock_irq(&sk->sk_callback_lock);

	if (req->real_destruct == tux_ftp_destruct)
		TUX_BUG();

	sk->sk_user_data = NULL;
	sk->sk_data_ready = req->ftp_real_data_ready;
	sk->sk_state_change = req->ftp_real_state_change;
	sk->sk_write_space = req->ftp_real_write_space;
	sk->sk_error_report = req->ftp_real_error_report;
	sk->sk_create_child = req->ftp_real_create_child;
	sk->sk_destruct = req->ftp_real_destruct;

	req->ftp_real_data_ready = NULL;
	req->ftp_real_state_change = NULL;
	req->ftp_real_write_space = NULL;
	req->ftp_real_error_report = NULL;
	req->ftp_real_create_child = NULL;
	req->ftp_real_destruct = NULL;

	write_unlock_irq(&sk->sk_callback_lock);

	if (sk->sk_destruct == tux_ftp_destruct)
		TUX_BUG();

	remove_wait_queue(sk->sk_sleep, &req->ftp_sleep);
}
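/*
 * TUX atoms: every request carries a small stack of continuation
 * functions (req->atoms). add_tux_atom() pushes the next processing
 * step, del_tux_atom() discards the topmost one, and
 * tux_schedule_atom() pops and runs it.
 */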
void add_tux_atom (tux_req_t *req, atom_func_t *atom)
{
	Dprintk("adding TUX atom %p to req %p, atom_idx: %d, at %p/%p.\n",
		atom, req, req->atom_idx, __builtin_return_address(0), __builtin_return_address(1));
	if (req->atom_idx == MAX_TUX_ATOMS)
		TUX_BUG();
	req->atoms[req->atom_idx] = atom;
	req->atom_idx++;
}
void del_tux_atom (tux_req_t *req)
{
	if (!req->atom_idx)
		TUX_BUG();
	req->atom_idx--;
	Dprintk("removing TUX atom %p from req %p, atom_idx: %d, at %p.\n",
		req->atoms[req->atom_idx], req, req->atom_idx, __builtin_return_address(0));
}
void tux_schedule_atom (tux_req_t *req, int cachemiss)
{
	if (!list_empty(&req->work))
		TUX_BUG();
	if (!req->atom_idx)
		TUX_BUG();
	req->atom_idx--;
	Dprintk("DOING TUX atom %p, req %p, atom_idx: %d, at %p.\n",
		req->atoms[req->atom_idx], req, req->atom_idx, __builtin_return_address(0));
	req->atoms[req->atom_idx](req, cachemiss);
	Dprintk("DONE TUX atom %p, req %p, atom_idx: %d, at %p.\n",
		req->atoms[req->atom_idx], req, req->atom_idx, __builtin_return_address(0));
}
/*
 * Puts newly accepted connections into the inputqueue. This is the
 * first step in the life of a TUX request.
 */
int accept_requests (threadinfo_t *ti)
{
	int count = 0, last_count = 0, error, socknr = 0;
	struct socket *sock, *new_sock;
	struct tcp_sock *tp2;
	struct inet_connection_sock *icsk1, *icsk2;
	tux_req_t *req;

	if (ti->nr_requests > tux_max_connect)
		goto out;

repeat:
	for (socknr = 0; socknr < CONFIG_TUX_NUMSOCKETS; socknr++) {
		tux_listen_t *tux_listen;

		tux_listen = ti->listen + socknr;
		sock = tux_listen->sock;
		if (!sock)
			break;
		if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
			break;

		icsk1 = inet_csk(sock->sk);
		/*
		 * Quick test to see if there are connections on the queue.
		 * This is cheaper than accept() itself because this saves us
		 * the allocation of a new socket. (Which doesn't seem to be
		 * used anyway.)
		 */
		if (!reqsk_queue_empty(&icsk1->icsk_accept_queue)) {
			tux_proto_t *proto;

			__set_task_state(current, TASK_RUNNING);

			new_sock = sock_alloc();
			if (!new_sock)
				goto out;

			new_sock->type = sock->type;
			new_sock->ops = sock->ops;

			error = sock->ops->accept(sock, new_sock, O_NONBLOCK);
			if (error < 0)
				goto err;
			if (new_sock->sk->sk_state != TCP_ESTABLISHED)
				goto err;

			tp2 = tcp_sk(new_sock->sk);
			icsk2 = inet_csk(new_sock->sk);
			tp2->nonagle = 2;	/* cork output until a full packet is ready */
			icsk2->icsk_ack.pingpong = tux_ack_pingpong;
			new_sock->sk->sk_reuse = 1;
			sock_set_flag(new_sock->sk, SOCK_URGINLINE);

			/* Allocate a request-entry for the connection */
			req = kmalloc_req(ti);
			if (!req)
				goto err;
			link_tux_socket(req, new_sock);

			proto = req->proto = tux_listen->proto;

			proto->got_request(req);

			count++;
		}
	}
	if (count != last_count) {
		last_count = count;
		goto repeat;
	}
out:
	return count;

err:
	sock_release(new_sock);
	goto out;
}