/*
 * TUX - Integrated Application Protocols Layer and Object Cache
 *
 * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
 *
 * input.c: handle requests arriving on accepted connections
 */

#include <net/tux.h>
#include <linux/kmod.h>

/****************************************************************
 *
 *	This program is free software; you can redistribute it and/or modify
 *	it under the terms of the GNU General Public License as published by
 *	the Free Software Foundation; either version 2, or (at your option)
 *	any later version.
 *
 *	This program is distributed in the hope that it will be useful,
 *	but WITHOUT ANY WARRANTY; without even the implied warranty of
 *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *	GNU General Public License for more details.
 *
 *	You should have received a copy of the GNU General Public License
 *	along with this program; if not, write to the Free Software
 *	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 ****************************************************************/

void zap_request (tux_req_t *req, int cachemiss)
{
	if (req->error == TUX_ERROR_CONN_TIMEOUT) {
		if (req->proto->request_timeout)
			req->proto->request_timeout(req, cachemiss);
		else if (!cachemiss)
			flush_request(req, 0);
		else {
			add_tux_atom(req, flush_request);
			add_req_to_workqueue(req);
		}
		return;
	}

	if (!cachemiss && (req->error == TUX_ERROR_CONN_CLOSE)) {
		/*
		 * Zap the connection as fast as possible, there is
		 * no valid client connection anymore:
		 */
		flush_request(req, 0);
	} else {
		if (req->error == TUX_ERROR_CONN_CLOSE)
			add_tux_atom(req, flush_request);
		else
			/*
			 * Potentially redirect to the secondary server:
			 */
			add_tux_atom(req, redirect_request);
		add_req_to_workqueue(req);
	}
}
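
/*
 * A note on the atom machinery used above (a sketch, inferred from the
 * atoms[]/atom_idx usage later in this file): add_tux_atom() pushes a
 * continuation function onto the request's atom stack,
 * add_req_to_workqueue() hands the request to a worker thread, and
 * tux_schedule_atom() pops and runs the topmost atom. So the timeout
 * path above reads as:
 *
 *	add_tux_atom(req, flush_request);	// continuation to run next
 *	add_req_to_workqueue(req);		// hand off to a worker thread
 */
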
void __switch_docroot(tux_req_t *req)
{
	if (!req->docroot_dentry || !req->docroot_mnt)
		TUX_BUG();
	set_fs_root(current->fs, req->docroot_mnt, req->docroot_dentry);
}

struct dentry * __tux_lookup (tux_req_t *req, const char *filename,
			struct nameidata *base, struct vfsmount **mnt)
{
	int err;

	err = path_walk(filename, base);
	if (err) {
		Dprintk("path_walk() returned with %d!\n", err);
		return ERR_PTR(err);
	}

	*mnt = base->mnt;
	return base->dentry;
}

int tux_permission (struct inode *inode)
{
	umode_t mode;
	int err;

	mode = inode->i_mode;
	Dprintk("URL inode mode: %08x.\n", mode);

	if (mode & tux_mode_forbidden)
		return -2;
	/*
	 * At least one bit in the 'allowed' set has to
	 * be present to allow access:
	 */
	if (!(mode & tux_mode_allowed))
		return -3;
	err = permission(inode, MAY_READ, NULL);
	return err;
}
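
/*
 * Illustration (not part of the original logic): tux_mode_forbidden
 * acts as a blacklist and tux_mode_allowed as a whitelist over
 * inode->i_mode. Both are tunables; the mask values below are only
 * hypothetical examples of how an administrator might set them:
 */
static inline int example_mode_check (umode_t mode)
{
	const umode_t forbidden = S_IXUGO;	/* e.g. refuse executables */
	const umode_t allowed = S_IROTH;	/* e.g. require world-readable */

	if (mode & forbidden)
		return -2;		/* a forbidden bit is set */
	if (!(mode & allowed))
		return -3;		/* no allowed bit is set */
	return 0;
}
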
struct dentry * tux_lookup (tux_req_t *req, const char *filename,
			const unsigned int flag, struct vfsmount **mnt)
{
	struct dentry *dentry;
	struct nameidata base = { };

	Dprintk("tux_lookup(%p, %s, %d, virtual: %d, host: %s (%d).)\n",
		req, filename, flag, req->virtual, req->host, req->host_len);

	base.flags = LOOKUP_FOLLOW|flag;
	base.last_type = LAST_ROOT;

	if (req->objectname[0] == '/') {
		base.dentry = dget(req->docroot_dentry);
		base.mnt = mntget(req->docroot_mnt);
	} else {
		if (!req->cwd_dentry) {
			req->cwd_dentry = dget(req->docroot_dentry);
			req->cwd_mnt = mntget(req->docroot_mnt);
		}
		base.dentry = dget(req->cwd_dentry);
		base.mnt = mntget(req->cwd_mnt);
	}

	dentry = __tux_lookup(req, filename, &base, mnt);

	Dprintk("looked up {%s} == dentry %p.\n", filename, dentry);

	if (dentry && !IS_ERR(dentry) && !dentry->d_inode) {
		/* negative dentry - the object does not exist */
		dput(dentry);
		dentry = NULL;
	}
	return dentry;
}
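
/*
 * Example of the base selection above: for an objectname such as
 * "/images/a.png" the path walk starts at the per-request docroot,
 * while a relative name such as "a.png" starts at req->cwd_dentry,
 * which is lazily initialized to the docroot on first use.
 */
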
int lookup_object (tux_req_t *req, const unsigned int flag)
{
	struct vfsmount *mnt = NULL;
	struct dentry *dentry = NULL;
	int perm;

	dentry = tux_lookup(req, req->objectname, flag, &mnt);
	if (!dentry || IS_ERR(dentry)) {
		if (PTR_ERR(dentry) == -EWOULDBLOCKIO)
			return 1;	/* repeat from a cachemiss thread */
		goto abort;
	}
	perm = tux_permission(dentry->d_inode);
	/*
	 * Only regular files are allowed:
	 */
	if ((perm < 0) || !S_ISREG(dentry->d_inode->i_mode))
		goto abort;
	req->total_file_len = dentry->d_inode->i_size;

	install_req_dentry(req, dentry, mnt);
	return 0;

abort:
	if (dentry && !IS_ERR(dentry))
		dput(dentry);
	if (mnt)
		mntput(mnt);
	req->error = TUX_ERROR_CONN_CLOSE;
	return -1;
}
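
/*
 * Note on -EWOULDBLOCKIO above: it is TUX-specific and signals that the
 * lookup would have to block on disk I/O while running in the fast
 * (non-cachemiss) path; the request is then repeated from a cachemiss
 * thread, where blocking is allowed.
 */
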
void install_req_dentry (tux_req_t *req, struct dentry *dentry, struct vfsmount *mnt)
{
	req->dentry = dentry;
	req->mnt = mnt;
	if (req->in_file && req->in_file->f_dentry)
		TUX_BUG();
	if (dentry)
		req->in_file = dentry_open(dget(dentry), NULL, O_RDONLY);
}

void release_req_dentry (tux_req_t *req)
{
	if (req->in_file && req->in_file->f_dentry) {
		fput(req->in_file);
		req->in_file = NULL;
	}
	if (req->dentry) {
		dput(req->dentry);
		req->dentry = NULL;
	}
	if (req->mnt) {
		mntput(req->mnt);
		req->mnt = NULL;
	}
}

int __connection_too_fast (tux_req_t *req)
{
	unsigned long curr_bw, delta, bytes;

	bytes = req->total_bytes + req->bytes_sent;
	if (!bytes)
		return 0;

	/*
	 * delta can be zero right after connection setup; round it
	 * up to one jiffy to avoid a division by zero:
	 */
	delta = jiffies - req->first_timestamp;
	if (!delta)
		delta = 1;
	curr_bw = bytes * HZ / delta;

	if (curr_bw > tux_max_output_bandwidth)
		return 1;
	return 0;
}
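
/*
 * Worked example of the bandwidth estimate above (values illustrative):
 * with HZ == 1000, a connection that has moved 1048576 bytes in
 * delta == 2000 jiffies (2 seconds) gets
 *
 *	curr_bw = 1048576 * 1000 / 2000 == 524288 bytes/sec,
 *
 * so it is throttled once tux_max_output_bandwidth is set below that.
 * Note that this is a whole-lifetime average, not a sliding window.
 */
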
void unidle_req (tux_req_t *req)
{
	threadinfo_t *ti = req->ti;

	Dprintk("UNIDLE req %p <%p> (sock %p, sk %p) (keepalive: %d, status: %d)\n",
		req, __builtin_return_address(0), req->sock,
		req->sock->sk, req->keep_alive, req->status);
	spin_lock_irq(&ti->work_lock);
	if (req->magic != TUX_MAGIC)
		TUX_BUG();
	if (!test_and_clear_bit(0, &req->idle_input)) {
		Dprintk("unidling %p, wasn't idle!\n", req);
		if (list_empty(&req->work))
			TUX_BUG();
		list_del(&req->work);
		DEBUG_DEL_LIST(&req->work);
		DEC_STAT(nr_work_pending);
	} else {
		del_keepalive_timer(req);
		DEC_STAT(nr_idle_input_pending);
		Dprintk("unidled %p.\n", req);
	}
	spin_unlock_irq(&ti->work_lock);
}

#define GOTO_INCOMPLETE do { Dprintk("incomplete at %s:%d.\n", __FILE__, __LINE__); goto incomplete; } while (0)
#define GOTO_REDIRECT do { TDprintk("redirect at %s:%d.\n", __FILE__, __LINE__); goto redirect; } while (0)
#define GOTO_REDIRECT_NONIDLE do { TDprintk("redirect at %s:%d.\n", __FILE__, __LINE__); goto redirect_nonidle; } while (0)

static int read_request (struct socket *sock, char *buf, int max_size)
{
	mm_segment_t oldmm;
	struct kiocb iocb;
	struct msghdr msg;
	struct iovec iov;
	int len;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_iov->iov_base = buf;
	msg.msg_iov->iov_len = max_size;

	oldmm = get_fs(); set_fs(KERNEL_DS);

	/*
	 * Nondestructive read: MSG_PEEK leaves the data queued in the
	 * socket, MSG_DONTWAIT keeps worker threads from blocking here.
	 */
	init_sync_kiocb(&iocb, NULL);
	len = sock->sk->sk_prot->recvmsg(&iocb, sock->sk, &msg, max_size,
					MSG_DONTWAIT, MSG_PEEK, NULL);
	if (-EIOCBQUEUED == len)
		len = wait_on_sync_kiocb(&iocb);

	/*
	 * We must not get a signal in between; treat an interrupted
	 * read without a pending signal as "no data":
	 */
	if ((len == -EAGAIN) || (len == -ERESTARTSYS)) {
		if (!signal_pending(current))
			len = 0;
	}

	set_fs(oldmm);
	return len;
}
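
/*
 * The userspace analogue of the peek done in read_request(), for
 * illustration (the kernel path above calls the protocol's recvmsg
 * method directly, but the flag semantics are the same):
 *
 *	n = recv(fd, buf, size, MSG_PEEK | MSG_DONTWAIT);
 *
 * The peeked bytes stay queued in the socket; nothing is consumed yet.
 */
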
/*
 * We inline URG data so it's at the head of the normal receive queue;
 * read and discard it from there:
 */
static int zap_urg_data (struct socket *sock)
{
	mm_segment_t oldmm;
	struct kiocb iocb;
	struct msghdr msg;
	struct iovec iov;
	char buf[2];
	int len;

	oldmm = get_fs(); set_fs(KERNEL_DS);

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_iov->iov_base = buf;
	msg.msg_iov->iov_len = 2;

	init_sync_kiocb(&iocb, NULL);
	len = sock->sk->sk_prot->recvmsg(&iocb, sock->sk, &msg, 2,
					MSG_DONTWAIT, 0, NULL);
	if (-EIOCBQUEUED == len)
		len = wait_on_sync_kiocb(&iocb);
	Dprintk("recvmsg(MSG_OOB) returned %d.\n", len);

	/*
	 * We must not get a signal in between; treat an interrupted
	 * read without a pending signal as "nothing zapped":
	 */
	if ((len == -EAGAIN) || (len == -ERESTARTSYS)) {
		if (!signal_pending(current))
			len = 0;
	}

	set_fs(oldmm);

	Dprintk("zap_urg_data: returning %d.\n", len);
	return len;
}

void trunc_headers (tux_req_t *req)
{
	struct sock *sk = req->sock->sk;
	struct kiocb iocb;
	int len, addr_len = 0;

	if (!req->parsed_len)
		TUX_BUG();

	/*
	 * Consume (discard) exactly the header bytes that the protocol's
	 * parse_message() accepted; MSG_TRUNC avoids copying them out:
	 */
	init_sync_kiocb(&iocb, NULL);
	len = sk->sk_prot->recvmsg(&iocb, sk, NULL, req->parsed_len,
					1, MSG_TRUNC, &addr_len);
	if (-EIOCBQUEUED == len)
		len = wait_on_sync_kiocb(&iocb);
	if ((len == -ERESTARTSYS) || (len == -EAGAIN)) {
		/* interrupted - leave parsed_len so a later flush retries */
		return;
	}
	Dprintk("truncated (TRUNC) %d bytes at %p. (wanted: %d.)\n",
		len, __builtin_return_address(0), req->parsed_len);
	req->parsed_len = 0;
}
void print_req (tux_req_t *req)
{
	struct sock *sk = req->sock->sk;

	printk("PRINT req %p <%p>, sock %p\n",
		req, __builtin_return_address(0), req->sock);
	printk("... idx: %d\n", req->atom_idx);
	printk("... sock %p, sk %p, sk->state: %d, sk->err: %d\n",
		req->sock, sk, sk->sk_state, sk->sk_err);
	printk("... write_queue: %d, receive_queue: %d, error_queue: %d, keepalive: %d, status: %d\n",
		!skb_queue_empty(&sk->sk_write_queue),
		!skb_queue_empty(&sk->sk_receive_queue),
		!skb_queue_empty(&sk->sk_error_queue),
		req->keep_alive, req->status);
	printk("... tp->send_head: %p\n", sk->sk_send_head);
	printk("... tp->snd_una: %08x\n", tcp_sk(sk)->snd_una);
	printk("... tp->snd_nxt: %08x\n", tcp_sk(sk)->snd_nxt);
	printk("... tp->packets_out: %08x\n", tcp_sk(sk)->packets_out);
	printk("... meth:{%s}, uri:{%s}, query:{%s}, ver:{%s}\n",
		req->method_str ? req->method_str : "<null>",
		req->uri_str ? req->uri_str : "<null>",
		req->query_str ? req->query_str : "<null>",
		req->version_str ? req->version_str : "<null>");
	printk("... post_data:{%s}(%d).\n", req->post_data_str, req->post_data_len);
	printk("... headers: {%s}\n", req->headers);
}

/*
 * parse_request() reads all available TCP/IP data and prepares
 * the request if the TUX request is complete. (We can get TUX
 * requests in several packets.) Invalid requests are redirected
 * to the secondary server.
 */
void parse_request (tux_req_t *req, int cachemiss)
{
	int len, parsed_len;
	struct sock *sk = req->sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int was_keepalive = req->keep_alive;

	if (req->magic != TUX_MAGIC)
		TUX_BUG();

	SET_TIMESTAMP(req->parse_timestamp);

	spin_lock_irq(&req->ti->work_lock);
	add_keepalive_timer(req);
	if (test_and_set_bit(0, &req->idle_input))
		TUX_BUG();
	INC_STAT(nr_idle_input_pending);
	spin_unlock_irq(&req->ti->work_lock);

	Dprintk("idled request %p.\n", req);

restart:
	if (tp->urg_data && !(tp->urg_data & TCP_URG_READ)) {
		len = zap_urg_data(req->sock);
		if (tp->urg_data && !(tp->urg_data & TCP_URG_READ)) {
			req->error = TUX_ERROR_CONN_CLOSE;
			goto redirect_error;
		}
	}

	INC_STAT(input_slowpath);

	if (!req->headers)
		req->headers = tux_kmalloc(tux_max_header_len);

	/* First, read the data: */
	len = read_request(req->sock, (char *)req->headers, tux_max_header_len-1);
	if (len < 0) {
		req->error = TUX_ERROR_CONN_CLOSE;
		goto redirect_error;
	}
	if (!len)
		GOTO_INCOMPLETE;

	/*
	 * Make it a NUL-terminated string to automatically get
	 * protection against various buffer overflow situations,
	 * then pass it to the TUX application protocol stack:
	 */
	((char *)req->headers)[len] = 0;
	req->headers_len = len;

	parsed_len = req->proto->parse_message(req, len);

	/*
	 * Is the request fully read? (Or was there an error?)
	 */
	if (parsed_len < 0)
		GOTO_REDIRECT;
	if (!parsed_len) {
		/*
		 * Push the pending ACK which was delayed due to the
		 * pingpong optimization:
		 */
		if (was_keepalive) {
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
			tcp_cleanup_rbuf(sk, 1);
		}
		if (len >= tux_max_header_len-1)
			GOTO_REDIRECT;
		GOTO_INCOMPLETE;
	}

	unidle_req(req);
	add_req_to_workqueue(req);
	return;

redirect:
	TDprintk("req %p will be redirected!\n", req);
	req->error = TUX_ERROR_REDIRECT;

redirect_error:
	unidle_req(req);
	if (len < 0)
		len = 0;
	req->parsed_len = len;

	INC_STAT(parse_static_redirect);
	zap_request(req, cachemiss);
	return;

incomplete:
	/*
	 * Leave the request idled; the data-ready callback re-queues it
	 * once more input arrives. New urgent data must be zapped first,
	 * though:
	 */
	if (tp->urg_data && !(tp->urg_data & TCP_URG_READ))
		goto restart;

	add_tux_atom(req, parse_request);
	INC_STAT(parse_static_incomplete);
}
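
/*
 * The parse_message() return convention implied above, sketched as a
 * hypothetical minimal parser (example_parse_message is not part of
 * TUX; real parsers live in the protocol modules): negative means
 * malformed (redirect), 0 means more data is needed (stay idled), and
 * a positive value is the number of header bytes that trunc_headers()
 * should consume.
 */
static inline int example_parse_message (tux_req_t *req, int len)
{
	/* wait for the end-of-headers marker; req->headers is NUL-terminated */
	char *end = strstr((char *)req->headers, "\r\n\r\n");

	if (!end)
		return 0;				/* incomplete */
	if (memcmp((char *)req->headers, "GET ", 4) != 0)
		return -1;				/* unsupported: redirect */
	return end - (char *)req->headers + 4;		/* header bytes consumed */
}
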
int process_requests (threadinfo_t *ti, tux_req_t **user_req)
{
	struct list_head *head, *curr;
	tux_req_t *req;
	int count = 0;
	int i;

	*user_req = NULL;

restart_loop:
	spin_lock_irq(&ti->work_lock);
	head = &ti->work_pending;
	curr = head->next;

	if (curr != head) {
		req = list_entry(curr, tux_req_t, work);
		Dprintk("PROCESS req %p <%p>.\n",
			req, __builtin_return_address(0));
		for (i = 0; i < req->atom_idx; i++)
			Dprintk("... atom %d: %p\n", i, req->atoms[i]);

		if (req->magic != TUX_MAGIC)
			TUX_BUG();
		if (list_empty(&req->work))
			TUX_BUG();
		list_del(&req->work);
		DEBUG_DEL_LIST(&req->work);
		DEC_STAT(nr_work_pending);
		spin_unlock_irq(&ti->work_lock);

		count++;
		if (!req->atom_idx) {
			/*
			 * idx == 0 requests are flushed automatically:
			 */
			flush_request(req, 0);
		} else
			tux_schedule_atom(req, 0);
		goto restart_loop;
	}
	spin_unlock_irq(&ti->work_lock);

	return count;
}
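
/*
 * Usage sketch (hypothetical caller; the real event loop lives
 * elsewhere in TUX): a worker thread repeatedly drains its pending
 * work queue and may receive a request destined for userspace:
 *
 *	tux_req_t *ureq;
 *
 *	while (!kthread_should_stop()) {
 *		process_requests(ti, &ureq);
 *		...
 *	}
 */
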
int tux_flush_workqueue (threadinfo_t *ti)
{
	struct list_head *head, *curr, *next;
	tux_req_t *req;
	int count = 0;

restart:
	spin_lock_irq(&ti->work_lock);
	head = &ti->work_pending;
	curr = head->next;

	if (curr != head) {
		next = curr->next;
		req = list_entry(curr, tux_req_t, work);

		clear_bit(0, &req->idle_input);
		clear_bit(0, &req->wait_output_space);
		if (list_empty(&req->work))
			TUX_BUG();
		list_del(&req->work);
		DEBUG_DEL_LIST(curr);
		DEC_STAT(nr_input_pending);
		spin_unlock_irq(&ti->work_lock);
#ifdef CONFIG_TUX_DEBUG
		req->bytes_expected = 0;
#endif
		req->in_file->f_pos = 0;
		clear_keepalive(req);

		flush_request(req, 0);
		count++;
		goto restart;
	}
	spin_unlock_irq(&ti->work_lock);

	return count;
}

int print_all_requests (threadinfo_t *ti)
{
	struct list_head *head, *curr;
	tux_req_t *req;
	int count = 0;

	spin_lock_irq(&ti->work_lock);
	head = &ti->all_requests;
	curr = head->next;

	while (curr != head) {
		req = list_entry(curr, tux_req_t, all);
		print_req(req);
		count++;
		curr = curr->next;
	}
	spin_unlock_irq(&ti->work_lock);

	return count;
}