/*
 * TUX - Integrated Application Protocols Layer and Object Cache
 *
 * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
 *
 * accept.c: accept new connections, allocate requests
 */

#include <net/tux.h>

/****************************************************************
 *      This program is free software; you can redistribute it and/or modify
 *      it under the terms of the GNU General Public License as published by
 *      the Free Software Foundation; either version 2, or (at your option)
 *      any later version.
 *
 *      This program is distributed in the hope that it will be useful,
 *      but WITHOUT ANY WARRANTY; without even the implied warranty of
 *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *      GNU General Public License for more details.
 *
 *      You should have received a copy of the GNU General Public License
 *      along with this program; if not, write to the Free Software
 *      Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 ****************************************************************/

unsigned int tux_ack_pingpong = 1;
unsigned int tux_push_all = 0;
unsigned int tux_zerocopy_parse = 1;

static int __idle_event (tux_req_t *req);
static int __output_space_event (tux_req_t *req);

struct socket * start_listening(tux_socket_t *listen, int nr)
{
	struct sockaddr_in sin;
	struct socket *sock = NULL;
	struct sock *sk;
	struct tcp_opt *tp;
	int err;
	u16 port = listen->port;
	u32 addr = listen->ip;
	tux_proto_t *proto = listen->proto;

	/* Create a listening socket: */

	err = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err) {
		printk(KERN_ERR "TUX: error %d creating socket.\n", err);
		goto error;
	}

	/* Bind the socket: */

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(addr);
	sin.sin_port = htons(port);

	sk = sock->sk;
	sk->sk_reuse = 1;
	sock_set_flag(sk, SOCK_URGINLINE);

	err = sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin));
	if (err) {
		printk(KERN_ERR "TUX: error %d binding socket. Some other process is probably (or recently was) using %s://%d.%d.%d.%d:%d.\n",
			err, proto->name, HIPQUAD(addr), port);
		goto error;
	}

	tp = tcp_sk(sk);
	Dprintk("listen sk accept_queue: %p/%p.\n",
		tp->accept_queue, tp->accept_queue_tail);
	tp->ack.pingpong = tux_ack_pingpong;

	sock_reset_flag(sk, SOCK_LINGER);
	sk->sk_lingertime = 0;
	tp->linger2 = tux_keepalive_timeout * HZ;

	if (proto->defer_accept && !tux_keepalive_timeout && tux_defer_accept)
		tp->defer_accept = 1;

	/* Now, start listening on the socket: */

	err = sock->ops->listen(sock, tux_max_backlog);
	if (err) {
		printk(KERN_ERR "TUX: error %d listening on socket.\n", err);
		goto error;
	}

	printk(KERN_NOTICE "TUX: thread %d listens on %s://%d.%d.%d.%d:%d.\n",
		nr, proto->name, HIPQUAD(addr), port);
	return sock;

error:
	if (sock)
		sock_release(sock);
	return NULL;
}
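
/*
 * Illustrative call (a sketch, not code from this file): listener
 * setup elsewhere in TUX fills in a tux_socket_t and stores the
 * resulting socket for accept_requests() below. The listen-slot
 * usage shown here is an assumption about the callers:
 *
 *	tux_socket_t cfg = { .proto = proto, .ip = 0, .port = 80 };
 *
 *	ti->listen[0].sock = start_listening(&cfg, nr);
 *	if (!ti->listen[0].sock)
 *		return -EINVAL;
 */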

static inline void __kfree_req (tux_req_t *req, threadinfo_t * ti)
{
	list_del(&req->all);
	DEBUG_DEL_LIST(&req->all);
	ti->nr_requests--;
	kfree(req);
}

int flush_freequeue (threadinfo_t * ti)
{
	struct list_head *tmp;
	unsigned long flags;
	tux_req_t *req;
	int count = 0;

	spin_lock_irqsave(&ti->free_requests_lock, flags);
	while (ti->nr_free_requests) {
		ti->nr_free_requests--;
		tmp = ti->free_requests.next;
		req = list_entry(tmp, tux_req_t, free);
		list_del(tmp);
		DEBUG_DEL_LIST(tmp);
		DEC_STAT(nr_free_pending);
		__kfree_req(req, ti);
		count++;
	}
	spin_unlock_irqrestore(&ti->free_requests_lock, flags);

	return count;
}

static tux_req_t * kmalloc_req (threadinfo_t * ti)
{
	struct list_head *tmp;
	unsigned long flags;
	tux_req_t *req;

	spin_lock_irqsave(&ti->free_requests_lock, flags);
	if (ti->nr_free_requests) {
		ti->nr_free_requests--;
		tmp = ti->free_requests.next;
		req = list_entry(tmp, tux_req_t, free);
		list_del(tmp);
		DEBUG_DEL_LIST(tmp);
		DEC_STAT(nr_free_pending);
		spin_unlock_irqrestore(&ti->free_requests_lock, flags);
	} else {
		spin_unlock_irqrestore(&ti->free_requests_lock, flags);
		req = tux_kmalloc(sizeof(*req));
		ti->nr_requests++;
		memset(req, 0, sizeof(*req));
		list_add(&req->all, &ti->all_requests);
	}
	req->magic = TUX_MAGIC;
	INC_STAT(nr_allocated);
	init_waitqueue_entry(&req->sleep, current);
	init_waitqueue_entry(&req->ftp_sleep, current);
	INIT_LIST_HEAD(&req->work);
	INIT_LIST_HEAD(&req->free);
	INIT_LIST_HEAD(&req->lru);
	req->ti = ti;
	req->total_bytes = 0;
	SET_TIMESTAMP(req->accept_timestamp);
	req->first_timestamp = jiffies;
	req->fd = -1;
	init_timer(&req->keepalive_timer);
	init_timer(&req->output_timer);

	Dprintk("allocated NEW req %p.\n", req);
	return req;
}
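
/*
 * Design note: requests are recycled through the per-thread
 * ti->free_requests list rather than being kfree()d immediately;
 * kmalloc_req() only falls back to tux_kmalloc() when that list is
 * empty. The list is kept LIFO (see kfree_req() below), presumably
 * so that the most recently used, cache-warm request structure is
 * reused first.
 */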

void kfree_req (tux_req_t *req)
{
	threadinfo_t * ti = req->ti;
	unsigned long flags;

	Dprintk("freeing req %p.\n", req);

	if (req->magic != TUX_MAGIC)
		TUX_BUG();
	spin_lock_irqsave(&ti->free_requests_lock, flags);
	req->magic = 0;
	DEC_STAT(nr_allocated);
	if (req->sock || req->dentry || req->private)
		TUX_BUG();
	if (ti->nr_free_requests > tux_max_free_requests)
		__kfree_req(req, ti);
	else {
		req->error = 0;
		ti->nr_free_requests++;

		/* the free requests queue is LIFO */
		list_add(&req->free, &ti->free_requests);
		INC_STAT(nr_free_pending);
	}
	spin_unlock_irqrestore(&ti->free_requests_lock, flags);
}

static void __add_req_to_workqueue (tux_req_t *req)
{
	threadinfo_t *ti = req->ti;

	if (!list_empty(&req->work))
		TUX_BUG();
	Dprintk("work-queueing request %p at %p/%p.\n", req,
		__builtin_return_address(0), __builtin_return_address(1));
	if (connection_too_fast(req))
		list_add_tail(&req->work, &ti->work_pending);
	else
		list_add(&req->work, &ti->work_pending);
	INC_STAT(nr_work_pending);
	wake_up_process(ti->thread);
}

void add_req_to_workqueue (tux_req_t *req)
{
	unsigned long flags;
	threadinfo_t *ti = req->ti;

	spin_lock_irqsave(&ti->work_lock, flags);
	__add_req_to_workqueue(req);
	spin_unlock_irqrestore(&ti->work_lock, flags);
}

void del_output_timer (tux_req_t *req)
{
#ifdef CONFIG_SMP
	if (!spin_is_locked(&req->ti->work_lock))
		TUX_BUG();
#endif
	if (!list_empty(&req->lru)) {
		list_del(&req->lru);
		DEBUG_DEL_LIST(&req->lru);
		req->ti->nr_lru--;
	}
	Dprintk("del output timeout for req %p.\n", req);
	del_timer(&req->output_timer);
}

static void output_timeout_fn (unsigned long data);

#define OUTPUT_TIMEOUT HZ

static void add_output_timer (tux_req_t *req)
{
	struct timer_list *timer = &req->output_timer;

	timer->data = (unsigned long) req;
	timer->function = &output_timeout_fn;
	mod_timer(timer, jiffies + OUTPUT_TIMEOUT);
}

static void output_timeout_fn (unsigned long data)
{
	tux_req_t *req = (tux_req_t *)data;

	if (connection_too_fast(req)) {
		add_output_timer(req);
		return;
	}
	output_space_event(req);
}
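
/*
 * Note: output_timeout_fn() in effect polls the bandwidth limiter:
 * as long as connection_too_fast() still holds, it re-arms itself
 * for another OUTPUT_TIMEOUT tick; once the connection has slowed
 * down, the pending output-space event is finally delivered.
 */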

void output_timeout (tux_req_t *req)
{
	Dprintk("output timeout for req %p.\n", req);
	if (test_and_set_bit(0, &req->wait_output_space))
		TUX_BUG();
	INC_STAT(nr_output_space_pending);
	add_output_timer(req);
}

void __del_keepalive_timer (tux_req_t *req)
{
#ifdef CONFIG_SMP
	if (!spin_is_locked(&req->ti->work_lock))
		TUX_BUG();
#endif
	if (!list_empty(&req->lru)) {
		list_del(&req->lru);
		DEBUG_DEL_LIST(&req->lru);
		req->ti->nr_lru--;
	}
	Dprintk("del keepalive timeout for req %p.\n", req);
	del_timer(&req->keepalive_timer);
}

static void keepalive_timeout_fn (unsigned long data)
{
	tux_req_t *req = (tux_req_t *)data;

#ifdef CONFIG_TUX_DEBUG
	Dprintk("req %p timed out after %d sec!\n", req, tux_keepalive_timeout);
	if (tux_Dprintk)
		print_req(req);
#endif
	Dprintk("req->error = TUX_ERROR_CONN_TIMEOUT!\n");
	req->error = TUX_ERROR_CONN_TIMEOUT;
	if (!idle_event(req))
		output_space_event(req);
}

void __add_keepalive_timer (tux_req_t *req)
{
	struct timer_list *timer = &req->keepalive_timer;

	if (!tux_keepalive_timeout)
		TUX_BUG();
#ifdef CONFIG_SMP
	if (!spin_is_locked(&req->ti->work_lock))
		TUX_BUG();
#endif

	if (!list_empty(&req->lru))
		TUX_BUG();
	if (req->ti->nr_lru > tux_max_keepalives) {
		struct list_head *head, *last;
		tux_req_t *last_req;

		head = &req->ti->lru;
		last = head->prev;
		if (last == head)
			TUX_BUG();
		last_req = list_entry(last, tux_req_t, lru);
		list_del(last);
		DEBUG_DEL_LIST(last);
		req->ti->nr_lru--;

		Dprintk("LRU-aging req %p!\n", last_req);
		last_req->error = TUX_ERROR_CONN_TIMEOUT;
		if (!__idle_event(last_req))
			__output_space_event(last_req);
	}
	list_add(&req->lru, &req->ti->lru);
	req->ti->nr_lru++;

	timer->expires = jiffies + tux_keepalive_timeout * HZ;
	timer->data = (unsigned long) req;
	timer->function = &keepalive_timeout_fn;
	add_timer(timer);
}
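
/*
 * Note: __add_keepalive_timer() doubles as the keepalive admission
 * policy: when the per-thread LRU list already holds more than
 * tux_max_keepalives entries, the oldest keepalive connection is
 * forcibly timed out (TUX_ERROR_CONN_TIMEOUT) before the new request
 * is added to the head of the list.
 */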

static int __output_space_event (tux_req_t *req)
{
	if (!req || (req->magic != TUX_MAGIC))
		TUX_BUG();

	if (!test_and_clear_bit(0, &req->wait_output_space)) {
		Dprintk("output space ready event at <%p>, on non-idle %p.\n",
			__builtin_return_address(0), req);
		return 0;
	}

	Dprintk("output space ready event at <%p>, %p was waiting!\n",
		__builtin_return_address(0), req);
	DEC_STAT(nr_output_space_pending);

	del_keepalive_timer(req);
	del_output_timer(req);

	__add_req_to_workqueue(req);
	return 1;
}

int output_space_event (tux_req_t *req)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&req->ti->work_lock, flags);
	ret = __output_space_event(req);
	spin_unlock_irqrestore(&req->ti->work_lock, flags);

	return ret;
}

static int __idle_event (tux_req_t *req)
{
	struct tcp_opt *tp;
	threadinfo_t *ti;

	if (!req || (req->magic != TUX_MAGIC))
		TUX_BUG();
	ti = req->ti;

	if (!test_and_clear_bit(0, &req->idle_input)) {
		Dprintk("data ready event at <%p>, on non-idle %p.\n",
			__builtin_return_address(0), req);
		return 0;
	}

	Dprintk("data ready event at <%p>, %p was idle!\n",
		__builtin_return_address(0), req);
	del_keepalive_timer(req);
	del_output_timer(req);
	DEC_STAT(nr_idle_input_pending);

	tp = tcp_sk(req->sock->sk);

	tp->ack.pingpong = tux_ack_pingpong;
	SET_TIMESTAMP(req->accept_timestamp);

	__add_req_to_workqueue(req);

	return 1;
}

int idle_event (tux_req_t *req)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&req->ti->work_lock, flags);
	ret = __idle_event(req);
	spin_unlock_irqrestore(&req->ti->work_lock, flags);

	return ret;
}

#define HANDLE_CALLBACK_1(callback, tux_name, real_name, param...)	\
	tux_req_t *req;							\
									\
	read_lock(&sk->sk_callback_lock);				\
	req = sk->sk_user_data;						\
									\
	Dprintk("callback "#callback"(%p) req %p.\n",			\
		sk->sk_##callback, req);				\
									\
	if (!req) {							\
		if (sk->sk_##callback == tux_name) {			\
			printk(KERN_ERR "BUG: "#callback" "#tux_name	\
				" "#real_name" no req!\n");		\
			TUX_BUG();					\
		}							\
		read_unlock(&sk->sk_callback_lock);			\
		if (sk->sk_##callback)					\
			sk->sk_##callback(param);			\
		return;							\
	}

#define HANDLE_CALLBACK_2(callback, tux_name, real_name, param...)	\
	Dprintk(#tux_name"() on %p.\n", req);				\
	if (req->magic != TUX_MAGIC)					\
		TUX_BUG();						\
	if (req->real_name)						\
		req->real_name(param);

#define HANDLE_CALLBACK(callback, tux_name, real_name, param...)	\
	HANDLE_CALLBACK_1(callback, tux_name, real_name, param)	\
	HANDLE_CALLBACK_2(callback, tux_name, real_name, param)
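
/*
 * For illustration, HANDLE_CALLBACK_1(data_ready, tux_data_ready,
 * real_data_ready, sk, len) expands (roughly) to:
 *
 *	tux_req_t *req;
 *
 *	read_lock(&sk->sk_callback_lock);
 *	req = sk->sk_user_data;
 *	if (!req) {
 *		if (sk->sk_data_ready == tux_data_ready)
 *			TUX_BUG();
 *		read_unlock(&sk->sk_callback_lock);
 *		if (sk->sk_data_ready)
 *			sk->sk_data_ready(sk, len);
 *		return;
 *	}
 *
 * i.e. when the socket is no longer attached to a TUX request, the
 * saved non-TUX callback (if any) is invoked and the handler returns
 * early, before the req-based code below runs.
 */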

static void tux_data_ready (struct sock *sk, int len)
{
	HANDLE_CALLBACK_1(data_ready, tux_data_ready, real_data_ready, sk, len);

	if (!idle_event(req))
		output_space_event(req);
	read_unlock(&sk->sk_callback_lock);
}

static void tux_write_space (struct sock *sk)
{
	HANDLE_CALLBACK(write_space, tux_write_space, real_write_space, sk);

	Dprintk("sk->sk_wmem_queued: %d, sk->sk_sndbuf: %d.\n",
		sk->sk_wmem_queued, sk->sk_sndbuf);

	if (tcp_wspace(sk) >= tcp_min_write_space(sk)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		if (!idle_event(req))
			output_space_event(req);
	}
	read_unlock(&sk->sk_callback_lock);
}

static void tux_error_report (struct sock *sk)
{
	HANDLE_CALLBACK(error_report, tux_error_report, real_error_report, sk);

	req->error = TUX_ERROR_CONN_CLOSE;
	if (!idle_event(req))
		output_space_event(req);
	read_unlock(&sk->sk_callback_lock);
}

static void tux_state_change (struct sock *sk)
{
	HANDLE_CALLBACK(state_change, tux_state_change, real_state_change, sk);

	if (req->sock && req->sock->sk &&
			(req->sock->sk->sk_state > TCP_ESTABLISHED)) {
		Dprintk("req %p changed to TCP non-established!\n", req);
		Dprintk("req->sock: %p\n", req->sock);
		if (req->sock)
			Dprintk("req->sock->sk: %p\n", req->sock->sk);
		if (req->sock && req->sock->sk)
			Dprintk("TCP state: %d\n", req->sock->sk->sk_state);
		Dprintk("req->error = TUX_ERROR_CONN_CLOSE!\n");
		req->error = TUX_ERROR_CONN_CLOSE;
	}
	if (!idle_event(req))
		output_space_event(req);
	read_unlock(&sk->sk_callback_lock);
}

static void tux_destruct (struct sock *sk)
{
	BUG();
}

static void tux_ftp_data_ready (struct sock *sk, int len)
{
	HANDLE_CALLBACK_1(data_ready, tux_ftp_data_ready,
				ftp_real_data_ready, sk, len);
	if (!idle_event(req))
		output_space_event(req);
	read_unlock(&sk->sk_callback_lock);
}

static void tux_ftp_write_space (struct sock *sk)
{
	HANDLE_CALLBACK_1(write_space, tux_ftp_write_space,
				ftp_real_write_space, sk);

	Dprintk("sk->sk_wmem_queued: %d, sk->sk_sndbuf: %d.\n",
		sk->sk_wmem_queued, sk->sk_sndbuf);

	if (tcp_wspace(sk) >= sk->sk_sndbuf/10*8) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		if (!idle_event(req))
			output_space_event(req);
	}
	read_unlock(&sk->sk_callback_lock);
}

static void tux_ftp_error_report (struct sock *sk)
{
	HANDLE_CALLBACK(error_report, tux_ftp_error_report,
		ftp_real_error_report, sk);

	TDprintk("req %p sock %p got TCP errors on FTP data connection!\n",
		req, sk);
	TDprintk("req->error = TUX_ERROR_CONN_CLOSE!\n");
	req->error = TUX_ERROR_CONN_CLOSE;
	if (!idle_event(req))
		output_space_event(req);
	read_unlock(&sk->sk_callback_lock);
}

static void tux_ftp_state_change (struct sock *sk)
{
	HANDLE_CALLBACK(state_change, tux_ftp_state_change,
			ftp_real_state_change, sk);

	if (req->sock && req->sock->sk &&
			(req->sock->sk->sk_state > TCP_ESTABLISHED)) {
		Dprintk("req %p FTP control sock changed to TCP non-established!\n", req);
		Dprintk("req->sock: %p\n", req->sock);
		TDprintk("req->error = TUX_ERROR_CONN_CLOSE!\n");

		req->error = TUX_ERROR_CONN_CLOSE;
	}
	if (!idle_event(req))
		output_space_event(req);
	read_unlock(&sk->sk_callback_lock);
}

static void tux_ftp_create_child (struct sock *sk, struct sock *newsk)
{
	HANDLE_CALLBACK(create_child, tux_ftp_create_child,
			ftp_real_create_child, sk, newsk);

	newsk->sk_user_data = NULL;
	newsk->sk_data_ready = req->ftp_real_data_ready;
	newsk->sk_state_change = req->ftp_real_state_change;
	newsk->sk_write_space = req->ftp_real_write_space;
	newsk->sk_error_report = req->ftp_real_error_report;
	newsk->sk_create_child = req->ftp_real_create_child;
	newsk->sk_destruct = req->ftp_real_destruct;

	if (!idle_event(req))
		output_space_event(req);
	read_unlock(&sk->sk_callback_lock);
}

static void tux_ftp_destruct (struct sock *sk)
{
	BUG();
}

static void link_tux_socket (tux_req_t *req, struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (req->sock)
		TUX_BUG();
	if (sk->sk_destruct == tux_destruct)
		TUX_BUG();
	/*
	 * (No need to lock the socket, we just want to
	 * make sure that events from now on go through
	 * tux_data_ready())
	 */
	write_lock_irq(&sk->sk_callback_lock);

	req->sock = sock;
	sk->sk_user_data = req;

	req->real_data_ready = sk->sk_data_ready;
	req->real_state_change = sk->sk_state_change;
	req->real_write_space = sk->sk_write_space;
	req->real_error_report = sk->sk_error_report;
	req->real_destruct = sk->sk_destruct;

	sk->sk_data_ready = tux_data_ready;
	sk->sk_state_change = tux_state_change;
	sk->sk_write_space = tux_write_space;
	sk->sk_error_report = tux_error_report;
	sk->sk_destruct = tux_destruct;

	write_unlock_irq(&sk->sk_callback_lock);

	if (req->real_destruct == tux_destruct)
		TUX_BUG();
	req->client_addr = inet_sk(sk)->daddr;
	req->client_port = inet_sk(sk)->dport;

	add_wait_queue(sk->sk_sleep, &req->sleep);
}
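
/*
 * The callback swap above is undone field-for-field by
 * unlink_tux_socket() below, which restores the saved real_*
 * pointers and clears sk->sk_user_data; the two functions must be
 * kept in sync.
 */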

void __link_data_socket (tux_req_t *req, struct socket *sock,
						struct sock *sk)
{
	/*
	 * (No need to lock the socket, we just want to
	 * make sure that events from now on go through
	 * tux_ftp_data_ready())
	 */
	write_lock_irq(&sk->sk_callback_lock);

	req->data_sock = sock;
	sk->sk_user_data = req;

	req->ftp_real_data_ready = sk->sk_data_ready;
	req->ftp_real_state_change = sk->sk_state_change;
	req->ftp_real_write_space = sk->sk_write_space;
	req->ftp_real_error_report = sk->sk_error_report;
	req->ftp_real_create_child = sk->sk_create_child;
	req->ftp_real_destruct = sk->sk_destruct;

	sk->sk_data_ready = tux_ftp_data_ready;
	sk->sk_state_change = tux_ftp_state_change;
	sk->sk_write_space = tux_ftp_write_space;
	sk->sk_error_report = tux_ftp_error_report;
	sk->sk_create_child = tux_ftp_create_child;
	sk->sk_destruct = tux_ftp_destruct;

	if (req->ftp_real_destruct == tux_ftp_destruct)
		TUX_BUG();

	write_unlock_irq(&sk->sk_callback_lock);

	add_wait_queue(sk->sk_sleep, &req->ftp_sleep);
}

void link_tux_data_socket (tux_req_t *req, struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (req->data_sock)
		TUX_BUG();
	if (sk->sk_destruct == tux_ftp_destruct)
		TUX_BUG();
	__link_data_socket(req, sock, sk);
}

void unlink_tux_socket (tux_req_t *req)
{
	struct sock *sk;

	if (!req->sock || !req->sock->sk)
		return;
	sk = req->sock->sk;

	write_lock_irq(&sk->sk_callback_lock);
	if (!sk->sk_user_data)
		TUX_BUG();
	if (req->real_destruct == tux_destruct)
		TUX_BUG();

	sk->sk_user_data = NULL;

	sk->sk_data_ready = req->real_data_ready;
	sk->sk_state_change = req->real_state_change;
	sk->sk_write_space = req->real_write_space;
	sk->sk_error_report = req->real_error_report;
	sk->sk_destruct = req->real_destruct;

	if (sk->sk_destruct == tux_destruct)
		TUX_BUG();

	req->real_data_ready = NULL;
	req->real_state_change = NULL;
	req->real_write_space = NULL;
	req->real_error_report = NULL;
	req->real_destruct = NULL;

	write_unlock_irq(&sk->sk_callback_lock);

	remove_wait_queue(sk->sk_sleep, &req->sleep);
}

void unlink_tux_data_socket (tux_req_t *req)
{
	struct sock *sk;

	if (!req->data_sock || !req->data_sock->sk)
		return;
	sk = req->data_sock->sk;

	write_lock_irq(&sk->sk_callback_lock);

	if (req->ftp_real_destruct == tux_ftp_destruct)
		TUX_BUG();

	sk->sk_user_data = NULL;
	sk->sk_data_ready = req->ftp_real_data_ready;
	sk->sk_state_change = req->ftp_real_state_change;
	sk->sk_write_space = req->ftp_real_write_space;
	sk->sk_error_report = req->ftp_real_error_report;
	sk->sk_create_child = req->ftp_real_create_child;
	sk->sk_destruct = req->ftp_real_destruct;

	req->ftp_real_data_ready = NULL;
	req->ftp_real_state_change = NULL;
	req->ftp_real_write_space = NULL;
	req->ftp_real_error_report = NULL;
	req->ftp_real_create_child = NULL;
	req->ftp_real_destruct = NULL;

	write_unlock_irq(&sk->sk_callback_lock);

	if (sk->sk_destruct == tux_ftp_destruct)
		TUX_BUG();

	remove_wait_queue(sk->sk_sleep, &req->ftp_sleep);
}

void add_tux_atom (tux_req_t *req, atom_func_t *atom)
{
	Dprintk("adding TUX atom %p to req %p, atom_idx: %d, at %p/%p.\n",
		atom, req, req->atom_idx,
		__builtin_return_address(0), __builtin_return_address(1));
	if (req->atom_idx == MAX_TUX_ATOMS)
		TUX_BUG();
	req->atoms[req->atom_idx] = atom;
	req->atom_idx++;
}

void del_tux_atom (tux_req_t *req)
{
	if (!req->atom_idx)
		TUX_BUG();
	req->atom_idx--;
	Dprintk("removing TUX atom %p from req %p, atom_idx: %d, at %p.\n",
		req->atoms[req->atom_idx], req, req->atom_idx,
		__builtin_return_address(0));
}

void tux_schedule_atom (tux_req_t *req, int cachemiss)
{
	if (!list_empty(&req->work))
		TUX_BUG();
	if (!req->atom_idx)
		TUX_BUG();
	req->atom_idx--;
	Dprintk("DOING TUX atom %p, req %p, atom_idx: %d, at %p.\n",
		req->atoms[req->atom_idx], req, req->atom_idx,
		__builtin_return_address(0));
	might_sleep();
	req->atoms[req->atom_idx](req, cachemiss);
	might_sleep();
	Dprintk("DONE TUX atom %p, req %p, atom_idx: %d, at %p.\n",
		req->atoms[req->atom_idx], req, req->atom_idx,
		__builtin_return_address(0));
}
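
/*
 * Sketch of the atom convention (illustrative only; the handler name
 * below is hypothetical): atoms form a per-request LIFO stack of
 * continuations. A protocol step pushes what should run next, then
 * the event loop later pops and invokes it:
 *
 *	add_tux_atom(req, send_reply_atom);	// push continuation
 *	add_req_to_workqueue(req);		// make the request runnable
 *	...
 *	tux_schedule_atom(req, cachemiss);	// pops send_reply_atom
 *						// and calls it
 */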

/*
 * Puts newly accepted connections onto the input queue. This is the
 * first step in the life of a TUX request.
 */
int accept_requests (threadinfo_t *ti)
{
	int count = 0, last_count = 0, error, socknr = 0;
	struct socket *sock, *new_sock;
	struct tcp_opt *tp1, *tp2;
	tux_req_t *req;

	if (ti->nr_requests > tux_max_connect)
		goto out;

repeat:
	for (socknr = 0; socknr < CONFIG_TUX_NUMSOCKETS; socknr++) {
		tux_listen_t *tux_listen;

		tux_listen = ti->listen + socknr;
		sock = tux_listen->sock;
		if (!sock)
			break;
		if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
			break;

		tp1 = tcp_sk(sock->sk);
		/*
		 * Quick test to see if there are connections on the queue.
		 * This is cheaper than accept() itself, because it spares
		 * us the allocation of a new socket when the queue is
		 * empty.
		 */
		if (tp1->accept_queue) {
			tux_proto_t *proto;

			if (!count++)
				__set_task_state(current, TASK_RUNNING);

			new_sock = sock_alloc();
			if (!new_sock)
				goto out;

			new_sock->type = sock->type;
			new_sock->ops = sock->ops;

			error = sock->ops->accept(sock, new_sock, O_NONBLOCK);
			if (error < 0)
				goto err;
			if (new_sock->sk->sk_state != TCP_ESTABLISHED)
				goto err;

			tp2 = tcp_sk(new_sock->sk);
			tp2->nonagle = 2;
			tp2->ack.pingpong = tux_ack_pingpong;
			new_sock->sk->sk_reuse = 1;
			sock_set_flag(new_sock->sk, SOCK_URGINLINE);

			/* Allocate a request-entry for the connection: */
			req = kmalloc_req(ti);
			if (!req)
				BUG();
			link_tux_socket(req, new_sock);

			proto = req->proto = tux_listen->proto;

			proto->got_request(req);
		}
	}
	if (count != last_count) {
		last_count = count;
		goto repeat;
	}
out:
	return count;
err:
	sock_release(new_sock);
	goto out;
}