/*
 * TUX - Integrated Application Protocols Layer and Object Cache
 *
 * Copyright (C) 2000, 2001, Ingo Molnar <mingo@redhat.com>
 *
 * main.c: main management and initialization routines
 */
/* Allow this file to invoke syscalls (sys_chroot) directly from kernel code. */
#define __KERNEL_SYSCALLS__
#define __KERNEL_SYSCALLS_NO_ERRNO__
/****************************************************************
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 ****************************************************************/
32 * Threads information.
34 unsigned int nr_tux_threads;
35 static atomic_t nr_tux_threads_running = ATOMIC_INIT(0);
36 static int stop_threads = 0;
38 threadinfo_t threadinfo[CONFIG_TUX_NUMTHREADS];
40 static void flush_all_requests (threadinfo_t *ti);
42 void flush_all_signals (void)
44 flush_signals(current);
45 spin_lock_irq(¤t->sighand->siglock);
47 spin_unlock_irq(¤t->sighand->siglock);
50 int nr_requests_used (void)
52 unsigned int i, nr = 0;
54 for (i = 0; i < nr_tux_threads; i++) {
55 threadinfo_t *ti = threadinfo + i;
56 nr += ti->nr_requests - ti->nr_free_requests;
62 static inline int accept_pending (threadinfo_t *ti)
66 for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++) {
67 if (!ti->listen[j].proto)
69 if (!ti->listen[j].sock)
71 if (!reqsk_queue_empty(&inet_csk(ti->listen[j].sock->sk)->icsk_accept_queue))
77 static inline int requests_pending (threadinfo_t *ti)
79 if (!list_empty(&ti->work_pending))
/*
 * Per-worker main loop: accept pending connections, process queued work,
 * and return to the caller when a request needs userspace handling
 * (TUX_RETURN_USERSPACE_REQUEST), a signal arrives (TUX_RETURN_SIGNAL),
 * or shutdown is requested (TUX_RETURN_EXIT).
 *
 * NOTE(review): this extraction is incomplete -- the embedded original
 * line numbers jump (84 -> 90, 99 -> 102, ...), so braces, labels and
 * several statements are missing.  Code bytes kept as-is; only comments
 * were added.  Do not edit without the complete original source.
 */
84 static int event_loop (threadinfo_t *ti)
/* sanity check: this threadinfo must belong to the calling thread */
90 if (ti->thread != current)
94 * Any (relevant) event on the socket will change this
95 * thread to TASK_RUNNING because we add it to both
96 * the main listening and the connection request socket
97 * waitqueues. Thus we can do 'lazy checking' of work
98 * to be done and schedule away only if the thread is
99 * still TASK_INTERRUPTIBLE. This makes TUX fully
/* mark interruptible BEFORE checking for work, to avoid lost wakeups */
102 set_task_state(current, TASK_INTERRUPTIBLE);
103 current->flags |= PF_MEMALLOC;
105 if (accept_pending(ti))
106 work_done = accept_requests(ti);
108 if (requests_pending(ti)) {
109 work_done = process_requests(ti, &req);
/* process_requests() handed back a request needing userspace help */
111 goto handle_userspace_req;
115 * Be nice to other processes:
/* yield if the scheduler wants us off the CPU */
117 if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) {
118 __set_task_state(current, TASK_RUNNING);
123 if (ti->userspace_req)
125 if (unlikely(stop_threads))
129 if (unlikely(signal_pending(current)))
135 * Any socket event either on the listen socket
136 * or on the request sockets will wake us up:
/* sleep only if no wakeup raced us and there is truly no work */
138 if ((current->state != TASK_RUNNING) &&
139 !requests_pending(ti) && !accept_pending(ti)) {
140 Dprintk("fast thread: no work to be done, sleeping.\n");
142 Dprintk("fast thread: back from sleep!\n");
/* exit paths: each restores TASK_RUNNING before returning to the syscall */
147 handle_userspace_req:
151 ti->userspace_req = req;
152 __set_task_state(current, TASK_RUNNING);
153 return TUX_RETURN_USERSPACE_REQUEST;
156 __set_task_state(current, TASK_RUNNING);
157 return TUX_RETURN_SIGNAL;
160 __set_task_state(current, TASK_RUNNING);
161 return TUX_RETURN_EXIT;
164 static int init_queues (int nr_tux_threads)
168 for (i = 0; i < nr_tux_threads; i++) {
169 threadinfo_t *ti = threadinfo + i;
171 INIT_LIST_HEAD(&ti->all_requests);
173 spin_lock_init(&ti->free_requests_lock);
174 INIT_LIST_HEAD(&ti->free_requests);
176 spin_lock_init(&ti->work_lock);
177 INIT_LIST_HEAD(&ti->work_pending);
178 INIT_LIST_HEAD(&ti->lru);
184 int tux_chroot (char *dir)
186 kernel_cap_t saved_cap = current->cap_effective;
190 /* Allow chroot dir to be in kernel space. */
191 oldmm = get_fs(); set_fs(KERNEL_DS);
193 cap_raise (current->cap_effective, CAP_SYS_CHROOT);
195 err = sys_chroot(dir);
199 current->cap_effective = saved_cap;
/*
 * Right now this is not fully SMP-safe against multiple TUX
 * managers. It's just a rudimentary protection against typical
 * mistakes.
 */
static int initialized = 0;

/* Maximum length of an assembled docroot path (common + subdocroot). */
#define MAX_DOCROOTLEN 500
214 static int lookup_docroot(struct nameidata *docroot, const char *name)
218 docroot->mnt = mntget(current->fs->rootmnt);
219 docroot->dentry = dget(current->fs->root);
220 docroot->last.len = 0;
221 docroot->flags = LOOKUP_FOLLOW;
223 err = path_walk(name, docroot);
225 mntput(docroot->mnt);
/*
 * Bring TUX up: resolve the HTTP and FTP document roots, clamp and
 * publish the worker-thread count, zero and initialize per-thread
 * state, and allocate each thread's zlib deflate workspace.
 *
 * NOTE(review): incomplete extraction -- embedded original line numbers
 * jump (235 -> 244, 257 -> 261, ...); error-handling branches, braces
 * and the logger startup are missing.  Comments only added.
 */
232 static int user_req_startup (void)
234 char name[MAX_DOCROOTLEN];
235 struct nameidata *docroot;
244 * Look up the HTTP and FTP document root.
245 * (typically they are shared, but can be
246 * different directories.)
248 docroot = &tux_proto_http.main_docroot;
/* NOTE(review): strcpy/strcat into a fixed 500-byte buffer -- the
 * docroot strings are sysctl-controlled; confirm their length is
 * bounded below MAX_DOCROOTLEN elsewhere. */
251 strcpy(name, tux_common_docroot);
252 strcat(name, tux_http_subdocroot);
254 err = lookup_docroot(docroot, name);
257 printk(KERN_ERR "TUX: could not look up HTTP documentroot: \"%s\"\n", name);
261 docroot = &tux_proto_ftp.main_docroot;
264 strcpy(name, tux_common_docroot);
265 strcat(name, tux_ftp_subdocroot);
267 err = lookup_docroot(docroot, name);
/* FTP docroot lookup failed: also drop the HTTP docroot taken above */
270 docroot = &tux_proto_http.main_docroot;
271 path_release(docroot);
272 memset(docroot, 0, sizeof(*docroot));
274 printk(KERN_ERR "TUX: could not look up FTP documentroot: \"%s\"\n", name);
279 * Start up the logger thread. (which opens the logfile)
/* clamp the requested thread count to [1, CONFIG_TUX_NUMTHREADS] */
283 nr_tux_threads = tux_threads;
284 if (nr_tux_threads < 1)
286 if (nr_tux_threads > CONFIG_TUX_NUMTHREADS)
287 nr_tux_threads = CONFIG_TUX_NUMTHREADS;
288 tux_threads = nr_tux_threads;
291 * Set up per-thread work-queues:
293 memset(threadinfo, 0, CONFIG_TUX_NUMTHREADS*sizeof(threadinfo_t));
294 init_queues(nr_tux_threads);
297 * Prepare the worker thread structures.
299 for (i = 0; i < nr_tux_threads; i++) {
300 threadinfo_t *ti = threadinfo + i;
/* per-thread zlib workspace for compressed responses */
302 ti->gzip_state.workspace =
303 vmalloc(zlib_deflate_workspacesize());
304 if (!ti->gzip_state.workspace ||
305 (zlib_deflateInit(&ti->gzip_state, 6) != Z_OK)) {
309 init_MUTEX(&ti->gzip_sem);
/* pin the module while TUX is up; released in user_req_shutdown() */
312 __module_get(tux_module);
/* wait_stop: worker threads park their ->stop entries here; waking it
 * tells them to shut down.  thread_stopped: workers signal here as they
 * exit so the shutdown path can wait for the count to reach zero. */
static DECLARE_WAIT_QUEUE_HEAD(wait_stop);
static DECLARE_WAIT_QUEUE_HEAD(thread_stopped);
/*
 * Tear TUX down: wait (uninterruptibly, in HZ-sized steps) for all
 * worker threads to exit, release both docroots, flush cached dentry
 * attributes, unregister modules, free per-thread zlib workspaces and
 * drop the module reference taken at startup.
 *
 * NOTE(review): incomplete extraction -- the "TUX is not up" guard,
 * the wakeup of wait_stop, several braces and returns are missing.
 * Comments only added.
 */
320 static int user_req_shutdown (void)
322 DECLARE_WAITQUEUE(wait, current);
323 struct nameidata *docroot;
324 int i, err = -EINVAL;
/* guard: refuse shutdown when not initialized */
328 Dprintk("TUX is not up - cannot shut down.\n");
/* register on thread_stopped BEFORE waking workers, to avoid a lost wakeup */
333 add_wait_queue(&thread_stopped, &wait);
337 * Wake up all the worker threads so they notice
338 * that we are being stopped.
340 set_task_state(current, TASK_UNINTERRUPTIBLE);
341 if (atomic_read(&nr_tux_threads_running)) {
342 Dprintk("TUX: shutdown, %d threads still running.\n",
343 atomic_read(&nr_tux_threads_running));
348 set_task_state(current, TASK_RUNNING);
350 remove_wait_queue(&thread_stopped, &wait);
/* cannot finish shutdown while async cachemiss I/O is outstanding */
352 if (nr_async_io_pending())
/* drop both docroot references taken in user_req_startup() */
357 docroot = &tux_proto_http.main_docroot;
358 path_release(docroot);
359 memset(docroot, 0, sizeof(*docroot));
360 docroot = &tux_proto_ftp.main_docroot;
361 path_release(docroot);
362 memset(docroot, 0, sizeof(*docroot));
365 flush_dentry_attributes();
367 unregister_all_tuxmodules();
369 for (i = 0; i < nr_tux_threads; i++) {
370 threadinfo_t *ti = threadinfo + i;
371 vfree(ti->gzip_state.workspace);
/* balances the __module_get() in user_req_startup() */
374 module_put(tux_module);
381 void drop_permissions (void)
384 * Userspace drops privileges already, and group
385 * membership is important to keep.
387 /* Give the new process no privileges.. */
388 current->uid = current->euid =
389 current->suid = current->fsuid = tux_cgi_uid;
390 current->gid = current->egid =
391 current->sgid = current->fsgid = tux_cgi_gid;
392 cap_clear(current->cap_permitted);
393 cap_clear(current->cap_inheritable);
394 cap_clear(current->cap_effective);
/*
 * Poll (in HZ/10 steps) until every worker thread has finished its
 * startup, bailing out on a pending signal or if any thread reports a
 * listen error.
 *
 * NOTE(review): incomplete extraction -- the retry label, loop body
 * braces and return statements are missing.  Comments only added.
 */
397 static int wait_for_others (void)
/* abort the wait if the manager process was signalled */
403 if (signal_pending(current))
405 set_current_state(TASK_INTERRUPTIBLE);
406 schedule_timeout(HZ/10);
408 for (cpu = 0; cpu < nr_tux_threads; cpu++) {
409 ti = threadinfo + cpu;
/* a sibling thread failed to set up its listen sockets */
410 if (ti->listen_error)
415 /* ok, all threads have started up. */
/*
 * Release this thread's listen sockets.  Only sockets it owns
 * (->cloned == 0) are torn down after their waitqueues drain; cloned
 * entries just have their slot pointers cleared, since the owning
 * thread releases the underlying socket.
 *
 * NOTE(review): incomplete extraction -- the actual socket release
 * call and the waitqueue-drain body are missing.  Comments only added.
 */
419 static void zap_listen_sockets (threadinfo_t *ti)
424 for (i = 0; i < CONFIG_TUX_NUMSOCKETS; i++) {
425 if (!ti->listen[i].proto)
427 sock = ti->listen[i].sock;
/* only the owning (non-cloned) slot tears the socket down */
428 if (!ti->listen[i].cloned && sock) {
/* wait for all sleepers on the socket to go away first */
429 while (waitqueue_active(sock->sk->sk_sleep))
433 ti->listen[i].sock = NULL;
434 ti->listen[i].proto = NULL;
435 ti->listen[i].cloned = 0;
/* Serializes worker-thread startup so listen-socket cloning between
 * threads (see user_req_start_thread()) is race-free. */
static DECLARE_MUTEX(serialize_startup);
/*
 * Initialize the calling task as TUX worker thread 'ti': pin it to a
 * CPU, start its cachemiss threads, set up signal handling, create or
 * clone its listen sockets (serialized via serialize_startup), and
 * register on the stop/event waitqueues.
 *
 * NOTE(review): incomplete extraction -- embedded original line numbers
 * jump throughout; error labels, braces and several statements are
 * missing.  Also note '¤t' on original lines 475/476/479 is a
 * mojibake of '&current' (HTML-entity mangling).  Comments only added.
 */
441 static int user_req_start_thread (threadinfo_t *ti)
443 unsigned int err, cpu, i, j, k;
444 struct k_sigaction *ka;
449 unsigned int home_cpu;
/* bind this worker to its home CPU (honoring tux_cpu_offset) */
452 home_cpu = (cpu + tux_cpu_offset) % num_online_cpus();
453 map = cpumask_of_cpu(home_cpu);
455 cpus_and(map, map, cpu_online_map);
456 if (!(cpus_empty(map)))
457 set_cpus_allowed(current, map);
460 ti->thread = current;
461 atomic_inc(&nr_tux_threads_running);
463 err = start_cachemiss_threads(ti);
467 init_waitqueue_entry(&ti->stop, current);
468 for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++)
469 init_waitqueue_entry(ti->wait_event + j, current);
/* reap CGI children automatically */
471 ka = current->sighand->action + SIGCHLD-1;
472 ka->sa.sa_handler = SIG_IGN;
474 /* Block all signals except SIGKILL, SIGSTOP, SIGHUP and SIGCHLD */
475 spin_lock_irq(¤t->sighand->siglock);
476 siginitsetinv(¤t->blocked, sigmask(SIGKILL) |
477 sigmask(SIGSTOP)| sigmask(SIGHUP) | sigmask(SIGCHLD));
479 spin_unlock_irq(¤t->sighand->siglock);
481 if (!tux_listen[cpu][0].proto) {
482 printk(KERN_ERR "no listen socket specified for TUX thread %d, in /proc/net/tux/%d/listen/, aborting.\n", cpu, cpu);
487 * Serialize startup so that listen sockets can be
490 down(&serialize_startup);
492 Dprintk("thread %d initializing sockets.\n", cpu);
494 for (k = 0; k < CONFIG_TUX_NUMSOCKETS; k++) {
495 tux_socket_t *e1, *e2;
497 e1 = tux_listen[cpu] + k;
/* if another thread already listens on the same ip:port:proto, share it */
500 for (i = 0; i < CONFIG_TUX_NUMTHREADS; i++) {
503 for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++) {
504 e2 = tux_listen[i] + j;
507 if ((e1->ip == e2->ip) && (e1->port == e2->port) && (e1->proto == e2->proto) && threadinfo[i].listen[j].proto) {
508 ti->listen[k] = threadinfo[i].listen[j];
509 ti->listen[k].cloned = 1;
510 Dprintk("cloned socket %d from thread %d's socket %d.\n", k, i, j);
/* no share found: open our own listen socket */
516 ti->listen[k].sock = start_listening(tux_listen[cpu] + k, cpu);
517 if (!ti->listen[k].sock)
519 ti->listen[k].cloned = 0;
520 ti->listen[k].proto = tux_listen[cpu][k].proto;
521 Dprintk("thread %d got sock %p (%d), proto %s.\n", cpu, ti->listen[k].sock, k, ti->listen[k].proto->name);
525 Dprintk("thread %d done initializing sockets.\n", cpu);
526 up(&serialize_startup);
/* rendezvous with sibling threads before going live */
528 if (wait_for_others())
531 if (!ti->listen[0].proto) {
532 printk("hm, socket 0 has no protocol.\n");
536 add_wait_queue(&wait_stop, &ti->stop);
537 for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++)
538 if (ti->listen[j].proto)
539 add_wait_queue_exclusive(ti->listen[j].sock->sk->sk_sleep,
543 __module_get(tux_module);
/* --- error path --- */
547 up(&serialize_startup);
549 printk(KERN_NOTICE "TUX: could not start worker thread %d.\n", ti->cpu);
551 ti->listen_error = 1;
555 zap_listen_sockets(ti);
556 flush_all_requests(ti);
557 stop_cachemiss_threads(ti);
563 * Last thread close the door:
565 if (atomic_dec_and_test(&nr_tux_threads_running))
/*
 * Walk this thread's all_requests list and kick every request parked
 * waiting for idle input; returns how many were flushed (return/count
 * lines missing from this extraction).
 *
 * NOTE(review): incomplete extraction -- loop advance, count and
 * return are missing.  Comments only added.
 */
571 static int flush_idleinput (threadinfo_t * ti)
573 struct list_head *head, *tmp;
577 head = &ti->all_requests;
580 while (tmp != head) {
581 req = list_entry(tmp, tux_req_t, all);
/* bit 0 of ->idle_input marks a request parked on idle input */
583 if (test_bit(0, &req->idle_input)) {
/*
 * Walk this thread's all_requests list and fire the output-space event
 * for every request blocked waiting for socket output space; returns a
 * count (return/count lines missing from this extraction).
 *
 * NOTE(review): incomplete extraction -- loop advance, count and
 * return are missing.  Comments only added.
 */
591 static int flush_waitoutput (threadinfo_t * ti)
593 struct list_head *head, *tmp;
597 head = &ti->all_requests;
600 while (tmp != head) {
601 req = list_entry(tmp, tux_req_t, all);
/* bit 0 of ->wait_output_space marks a request blocked on output space */
603 if (test_bit(0, &req->wait_output_space)) {
604 output_space_event(req);
/*
 * Drain every request owned by this thread: flush idle-input and
 * output-space waiters, the work queue and the free queue, looping
 * (with HZ/10 sleeps) until ->nr_requests drops to zero.
 *
 * NOTE(review): incomplete extraction -- the outer retry loop, braces
 * and the #endif for the debug block are missing.  Comments only added.
 */
611 static void flush_all_requests (threadinfo_t *ti)
616 count = flush_idleinput(ti);
617 count += flush_waitoutput(ti);
618 count += tux_flush_workqueue(ti);
619 count += flush_freequeue(ti);
/* done once no request structures remain allocated to this thread */
620 if (!ti->nr_requests)
623 * Go through again if we advanced:
627 Dprintk("flush_all_requests: %d requests still waiting.\n", ti->nr_requests);
628 #ifdef CONFIG_TUX_DEBUG
629 count = print_all_requests(ti);
630 Dprintk("flush_all_requests: printed %d requests.\n", count);
/* no progress: sleep briefly before retrying */
632 current->state = TASK_UNINTERRUPTIBLE;
633 schedule_timeout(HZ/10);
637 int nr_async_io_pending (void)
639 unsigned int i, sum = 0;
641 for (i = 0; i < nr_tux_threads; i++) {
642 threadinfo_t *ti = threadinfo + i;
644 sum += ti->iot->nr_async_pending;
/*
 * Shut down the calling TUX worker thread: unhook it from the listen
 * sockets' and the stop waitqueues, release its listen sockets, drain
 * all of its requests, stop its cachemiss threads, detach it from the
 * task, decrement the running counter and notify the shutdown waiter.
 *
 * NOTE(review): incomplete extraction -- braces, the second waitqueue
 * argument and the return statement are missing.  Comments only added.
 */
649 static int user_req_stop_thread (threadinfo_t *ti)
653 printk(KERN_NOTICE "TUX: thread %d stopping ...\n",
654 (int)(ti-threadinfo));
/* undo the add_wait_queue_exclusive() done at thread startup */
658 for (j = 0; j < CONFIG_TUX_NUMSOCKETS; j++)
659 if (ti->listen[j].proto)
660 remove_wait_queue(ti->listen[j].sock->sk->sk_sleep,
662 remove_wait_queue(&wait_stop, &ti->stop);
664 Dprintk(KERN_NOTICE "TUX: thread %d waiting for sockets to go inactive ...\n", (int)(ti-threadinfo));
665 zap_listen_sockets(ti);
667 Dprintk(KERN_NOTICE "TUX: thread %d has all sockets inactive.\n", (int)(ti-threadinfo));
669 flush_all_requests(ti);
670 stop_cachemiss_threads(ti);
676 printk(KERN_INFO "TUX: thread %d stopped.\n", ti->cpu);
/* detach the task from TUX state */
679 current->tux_info = NULL;
680 current->tux_exit = NULL;
681 atomic_dec(&nr_tux_threads_running);
/* let user_req_shutdown() re-check the running-thread count */
682 wake_up(&thread_stopped);
/* balances the __module_get() in user_req_start_thread() */
684 module_put(tux_module);
/*
 * Helper macros for prepare_userspace_req(): copy integer fields and
 * (length-bounded, NUL-terminated) string fields from the kernel
 * request into the userspace user_req_t, bailing out via return_EFAULT
 * on any copy failure.
 *
 * NOTE(review): incomplete extraction -- the embedded original line
 * numbers jump inside each macro (do/while wrappers and the
 * return_EFAULT lines are missing), so the continuations below do not
 * form valid macros as-is.  No comments inserted between backslash
 * continuation lines; code bytes kept identical.
 */
689 #define COPY_INT(u_field, k_field) \
691 if (__copy_to_user(&u_info->u_field, &req->k_field, \
692 sizeof(req->k_field))) \
696 #define GETLEN(k_field, maxlen) \
697 ((req->k_field##_len < maxlen) ? \
698 req->k_field##_len : maxlen-1)
700 #define COPY_STR(u_field, k_field, maxlen) \
702 if (__copy_to_user(u_info->u_field, req->k_field##_str, \
703 GETLEN(k_field, maxlen))) \
707 #define COPY_COND_STR(u_field,k_field,maxlen) \
709 if (req->k_field##_len) \
710 COPY_STR(u_field, k_field, maxlen); \
711 if (__put_user((char)0, u_info->u_field + \
712 GETLEN(k_field, maxlen))) \
/*
 * Complete a request that was being handled by userspace: clear the
 * thread's userspace_req slot, drop the pending-userspace statistic
 * and release the request.
 *
 * NOTE(review): incomplete extraction -- braces and some bookkeeping
 * lines are missing.  Comments only added.
 */
716 static void finish_userspace_req (tux_req_t *req)
718 threadinfo_t *ti = req->ti;
720 ti->userspace_req = NULL;
724 DEC_STAT(nr_userspace_pending);
725 flush_request(req, 0);
728 static void zap_userspace_req (tux_req_t *req)
730 clear_keepalive(req);
731 finish_userspace_req(req);
/*
 * Fill in the userspace user_req_t from the thread's current
 * userspace_req: map the connection socket to a file descriptor and
 * copy out all request metadata (URI, headers, lengths, client
 * address, ...).  Returns TUX_RETURN_USERSPACE_REQUEST on success,
 * -EFAULT (via return_EFAULT / the COPY_* macros) on any copy failure.
 *
 * NOTE(review): incomplete extraction -- embedded original line
 * numbers jump (740 -> 745, 757 -> 763, ...); declarations, error
 * branches and braces are missing.  Comments only added.
 */
735 * Fills in the user-space request structure:
737 static int prepare_userspace_req (threadinfo_t *ti, user_req_t *u_info)
740 tux_req_t *req = ti->userspace_req;
745 Dprintk("prepare_userspace_req(%p).\n", req);
749 TDprintk("userspace request has error %d.\n", req->error);
/* expose the connection to userspace as a file descriptor */
754 fd = sock_map_fd(req->sock);
755 Dprintk("sock_map_fd(%p) :%d.\n", req, fd);
757 Dprintk("sock_map_fd() returned %d.\n", fd);
763 #define return_EFAULT do { Dprintk("-EFAULT at %d:%s.\n", __LINE__, __FILE__); return -EFAULT; } while (0)
/* one access_ok() up front lets the __copy_to_user/__put_user calls
 * below skip their own range checks */
765 if (!access_ok(VERIFY_WRITE, u_info, sizeof(*u_info)))
767 if (__copy_to_user(&u_info->sock, &fd, sizeof(fd)))
772 COPY_INT(module_index, usermodule_idx);
774 COPY_COND_STR(query, query, MAX_URI_LEN);
776 COPY_INT(event, event);
777 Dprintk("prepare userspace, user error: %d, event %d.\n", req->user_error, req->event);
778 COPY_INT(error, user_error);
781 filelen = req->total_file_len;
784 if (__copy_to_user(&u_info->objectlen, &filelen, sizeof(filelen)))
/* for POSTs with no file length, report the content length instead */
786 if ((req->method == METHOD_POST) && !filelen)
787 if (__copy_to_user(&u_info->objectlen,
788 &req->content_len, sizeof(filelen)))
790 if (req->objectname_len) {
/* sanity: objectname must be NUL-terminated at objectname_len */
791 if (req->objectname[req->objectname_len])
793 if (__copy_to_user(u_info->objectname, req->objectname,
794 req->objectname_len + 1))
797 if (__put_user((char)0, u_info->objectname))
800 COPY_INT(http_version, version);
801 COPY_INT(http_method, method);
802 COPY_INT(keep_alive, keep_alive);
804 COPY_INT(cookies_len, cookies_len);
805 if (req->cookies_len)
806 COPY_STR(cookies, cookies, MAX_COOKIE_LEN);
807 if (__put_user((char)0, u_info->cookies + req->cookies_len))
/* hand the kernel request pointer to userspace as an opaque 64-bit id */
810 u_req = (u64)(unsigned long)req;
811 if (__copy_to_user(&u_info->id, &u_req, sizeof(u_req)))
813 COPY_INT(priv, private);
814 COPY_INT(bytes_sent, bytes_sent);
/* client IPv4 address, network byte order */
816 tmp = inet_sk(req->sock->sk)->daddr;
817 if (__copy_to_user(&u_info->client_host, &tmp, sizeof(tmp)))
820 COPY_COND_STR(content_type, content_type, MAX_FIELD_LEN);
821 COPY_COND_STR(user_agent, user_agent, MAX_FIELD_LEN);
822 COPY_COND_STR(accept, accept, MAX_FIELD_LEN);
823 COPY_COND_STR(accept_charset, accept_charset, MAX_FIELD_LEN);
824 COPY_COND_STR(accept_encoding, accept_encoding, MAX_FIELD_LEN);
825 COPY_COND_STR(accept_language, accept_language, MAX_FIELD_LEN);
826 COPY_COND_STR(cache_control, cache_control, MAX_FIELD_LEN);
827 COPY_COND_STR(if_modified_since, if_modified_since, MAX_FIELD_LEN);
828 COPY_COND_STR(negotiate, negotiate, MAX_FIELD_LEN);
829 COPY_COND_STR(pragma, pragma, MAX_FIELD_LEN);
830 COPY_COND_STR(referer, referer, MAX_FIELD_LEN);
832 return TUX_RETURN_USERSPACE_REQUEST;
/* Debug-logging jump helpers for __sys_tux()'s two error exits
 * (with and without the lock held). */
#define GOTO_ERR_no_unlock do { Dprintk("sys_tux() ERR at %s:%d.\n", __FILE__, __LINE__); goto err_no_unlock; } while (0)
#define GOTO_ERR_unlock do { Dprintk("sys_tux() ERR at %s:%d.\n", __FILE__, __LINE__); goto err_unlock; } while (0)
/*
 * Register a new MIME type mapping from userspace: the extension comes
 * from u_info->objectname, the MIME type string from the userspace
 * address in u_info->object_addr, and the expires header from
 * u_info->cache_control.
 *
 * NOTE(review): incomplete extraction -- declarations, the
 * error-checking branches after each copy and the return are missing.
 * Comments only added.
 */
838 static int register_mimetype(user_req_t *u_info)
840 char extension[MAX_URI_LEN], mimetype[MAX_URI_LEN], expires[MAX_URI_LEN];
845 ret = strncpy_from_user(extension, u_info->objectname, MAX_URI_LEN);
849 Dprintk("got MIME extension: %s.\n", extension);
/* object_addr carries a userspace pointer to the MIME type string */
850 ret = copy_from_user(&u_addr, &u_info->object_addr, sizeof(u_addr));
853 addr = (char *)(unsigned long)u_addr;
854 ret = strncpy_from_user(mimetype, addr, MAX_URI_LEN);
858 Dprintk("got MIME type: %s.\n", mimetype);
859 ret = strncpy_from_user(expires, u_info->cache_control, MAX_URI_LEN);
864 Dprintk("got expires header: %s.\n", expires);
866 add_mimetype(extension, mimetype, expires);
/*
 * Atom: send the userspace-supplied buffer (req->userbuf/userlen) on
 * the request's socket.  On a would-block it re-queues itself as an
 * atom and arms an output-space event; otherwise the request goes back
 * on the work queue.
 *
 * NOTE(review): incomplete extraction -- braces and the handling of
 * ret between the visible lines are missing.  Comments only added.
 */
872 void user_send_buffer (tux_req_t *req, int cachemiss)
877 SET_TIMESTAMP(req->output_timestamp);
880 ret = send_sync_buf(req, req->sock, req->userbuf, req->userlen, MSG_DONTWAIT | MSG_MORE);
/* would block: park this atom until the socket has output space */
883 add_tux_atom(req, user_send_buffer);
884 if (add_output_space_event(req, req->sock)) {
888 INC_STAT(user_sendbuf_write_misses);
895 add_req_to_workqueue(req);
/* negative ->userlen flags a send error on the request */
900 if ((int)req->userlen < 0)
904 add_req_to_workqueue(req);
/*
 * Atom: send the request's looked-up file object via
 * generic_send_file().  Re-queues itself on would-block (with an
 * output-space event) or on a cachemiss (via queue_cachemiss);
 * on completion it rewinds the file position and returns the request
 * to the work queue.
 *
 * NOTE(review): incomplete extraction -- braces and the ret dispatch
 * between branches are missing.  Comments only added.
 */
909 void user_send_object (tux_req_t *req, int cachemiss)
914 SET_TIMESTAMP(req->output_timestamp);
917 ret = generic_send_file(req, req->sock, cachemiss);
920 add_tux_atom(req, user_send_object);
/* socket buffer full: wait for output space, then retry this atom */
924 add_tux_atom(req, user_send_object);
925 if (add_output_space_event(req, req->sock)) {
929 INC_STAT(user_sendobject_write_misses);
/* page-cache miss: hand off to a cachemiss thread and retry */
932 INC_STAT(user_sendobject_cachemisses);
933 add_tux_atom(req, user_send_object);
934 queue_cachemiss(req);
/* done: rewind for any later reader and resume normal processing */
939 req->in_file->f_pos = 0;
940 add_req_to_workqueue(req);
/*
 * Atom: look up the object named by req->objectname in the dentry
 * cache.  A miss queues the request to a cachemiss thread (retrying
 * this atom); ENOENT sets req->user_error; on success the file length
 * fields are filled in and the file data fetch is started.
 *
 * NOTE(review): incomplete extraction -- braces and several statements
 * between the visible lines are missing.  Comments only added.
 */
945 void user_get_object (tux_req_t *req, int cachemiss)
/* first pass is atomic (no blocking lookup); retry allows blocking */
951 missed = lookup_object(req, cachemiss ? 0 : LOOKUP_ATOMIC);
955 if (!missed && !req->dentry) {
/* definite lookup failure: report ENOENT to userspace */
957 req->user_error = -ENOENT;
958 add_req_to_workqueue(req);
/* atomic lookup missed: retry this atom from a cachemiss thread */
964 INC_STAT(user_lookup_cachemisses);
966 req->ti->userspace_req = NULL;
967 DEC_STAT(nr_userspace_pending);
968 add_tux_atom(req, user_get_object);
969 queue_cachemiss(req);
973 req->total_file_len = req->dentry->d_inode->i_size;
974 if (!req->output_len)
975 req->output_len = req->total_file_len;
976 if (tux_fetch_file(req, !cachemiss)) {
977 INC_STAT(user_fetch_cachemisses);
980 req->in_file->f_pos = 0;
981 add_req_to_workqueue(req);
/*
 * The TUX system call: dispatches on 'action' between management
 * operations (startup/shutdown, thread start/stop, module and MIME
 * registration -- CAP_SYS_ADMIN required) and per-request userspace
 * actions (event loop, finish/redirect/postpone, object get/read/send,
 * buffer send, header/POST-data reads, proxy-socket watch/wait).
 *
 * NOTE(review): incomplete extraction -- embedded original line
 * numbers jump heavily (the switch statements, braces, 'break's, error
 * labels and many guard branches are missing).  Code bytes kept
 * identical; only comments were added.
 */
984 asmlinkage long __sys_tux (unsigned int action, user_req_t *u_info)
/* CURRENT_DATE fires every second; keep it out of the debug log */
990 if (action != TUX_ACTION_CURRENT_DATE)
991 Dprintk("got sys_tux(%d, %p).\n", action, u_info);
993 if (action >= MAX_TUX_ACTION)
996 ti = (threadinfo_t *) current->tux_info;
998 if (ti->thread != current)
/* unprivileged callers may only continue requests or stop their thread */
1001 if (!capable(CAP_SYS_ADMIN)
1002 && (action != TUX_ACTION_CONTINUE_REQ) &&
1003 (action != TUX_ACTION_STOPTHREAD))
1004 goto userspace_actions;
/* --- privileged management actions --- */
1007 case TUX_ACTION_CONTINUE_REQ:
1008 ret = continue_request((int)(long)u_info);
1011 case TUX_ACTION_STARTUP:
1013 ret = user_req_startup();
1017 case TUX_ACTION_SHUTDOWN:
1019 ret = user_req_shutdown();
1023 case TUX_ACTION_REGISTER_MODULE:
1024 ret = user_register_module(u_info);
1027 case TUX_ACTION_UNREGISTER_MODULE:
1028 ret = user_unregister_module(u_info);
1031 case TUX_ACTION_STARTTHREAD:
1035 ret = copy_from_user(&nr, &u_info->thread_nr,
1039 if (nr >= nr_tux_threads)
1041 ti = threadinfo + nr;
/* bind this task to the chosen threadinfo slot before starting it */
1045 current->tux_info = ti;
1046 current->tux_exit = tux_exit;
1049 Dprintk("TUX: current open files limit for TUX%d: %ld.\n", nr, current->signal->rlim[RLIMIT_NOFILE].rlim_cur);
1051 ret = user_req_start_thread(ti);
/* start failed: detach the task again */
1054 current->tux_info = NULL;
1055 current->tux_exit = NULL;
1057 if (ti->thread != current)
1063 case TUX_ACTION_STOPTHREAD:
/* abandon any request still parked for userspace before stopping */
1068 req = ti->userspace_req;
1070 zap_userspace_req(req);
1073 ret = user_req_stop_thread(ti);
1077 case TUX_ACTION_CURRENT_DATE:
1078 ret = strncpy_from_user(tux_date, u_info->new_date,
1084 case TUX_ACTION_REGISTER_MIMETYPE:
1085 ret = register_mimetype(u_info);
1090 case TUX_ACTION_QUERY_VERSION:
1091 ret = (TUX_MAJOR_VERSION << 24) | (TUX_MINOR_VERSION << 16) | TUX_PATCHLEVEL_VERSION;
/* --- userspace_actions: per-request operations --- */
1105 req = ti->userspace_req;
1107 if (action == TUX_ACTION_EVENTLOOP)
/* refresh request fields userspace may have modified */
1114 ret = copy_from_user(&req->event, &u_info->event, sizeof(int));
1117 ret = copy_from_user(&req->status, &u_info->http_status, sizeof(int));
1120 ret = copy_from_user(&req->bytes_sent, &u_info->bytes_sent, sizeof(int));
1123 ret = copy_from_user(&req->private, &u_info->priv, sizeof(req->private));
1129 case TUX_ACTION_EVENTLOOP:
/* a leftover userspace request at loop entry is stale: zap it */
1131 req = ti->userspace_req;
1133 zap_userspace_req(req);
1134 ret = event_loop(ti);
1138 * Module forces keepalive off, server will close
1141 case TUX_ACTION_FINISH_CLOSE_REQ:
1142 clear_keepalive(req);
/* fallthrough into the plain finish */
1144 case TUX_ACTION_FINISH_REQ:
1145 finish_userspace_req(req);
1148 case TUX_ACTION_REDIRECT_REQ:
1150 ti->userspace_req = NULL;
1153 req->error = TUX_ERROR_REDIRECT;
1154 DEC_STAT(nr_userspace_pending);
1155 add_tux_atom(req, redirect_request);
1156 add_req_to_workqueue(req);
1160 case TUX_ACTION_POSTPONE_REQ:
1162 postpone_request(req);
1163 ti->userspace_req = NULL;
1164 ret = TUX_RETURN_USERSPACE_REQUEST;
1167 case TUX_ACTION_GET_OBJECT:
1168 release_req_dentry(req);
1169 ret = strncpy_from_user(req->objectname,
1170 u_info->objectname, MAX_URI_LEN-1);
/* copy failed: leave an empty objectname */
1172 req->objectname[0] = 0;
1173 req->objectname_len = 0;
1176 req->objectname[ret] = 0; // string delimit
1177 req->objectname_len = ret;
1179 Dprintk("got objectname {%s} (%d) from user-space req %p (req: %p).\n", req->objectname, req->objectname_len, u_info, req);
1180 req->ti->userspace_req = NULL;
1181 DEC_STAT(nr_userspace_pending);
1182 user_get_object(req, 0);
1185 case TUX_ACTION_READ_OBJECT:
1195 ret = copy_from_user(&u_addr, &u_info->object_addr,
1199 addr = (char *)(unsigned long)u_addr;
1200 filp = dentry_open(req->dentry, NULL, O_RDONLY);
/* read the whole object straight into the userspace buffer */
1202 generic_file_read(filp, addr, req->total_file_len, &ppos);
1204 ret = TUX_RETURN_USERSPACE_REQUEST;
1208 case TUX_ACTION_SEND_OBJECT:
1211 req->ti->userspace_req = NULL;
1212 DEC_STAT(nr_userspace_pending);
1213 user_send_object(req, 0);
1216 case TUX_ACTION_SEND_BUFFER:
1222 ret = copy_from_user(&u_addr,
1223 &u_info->object_addr, sizeof(u_addr));
1226 addr = (char *)(unsigned long)u_addr;
/* NOTE(review): sizeof(addr) here looks like it should be
 * sizeof(len) -- the sizes may only coincide by accident; verify
 * against the original source. */
1227 ret = copy_from_user(&len,
1228 &u_info->objectlen, sizeof(addr));
/* validate the userspace buffer before the send atom reads it */
1235 if (!access_ok(VERIFY_READ, addr, len))
1237 req->userbuf = addr;
1240 req->ti->userspace_req = NULL;
1241 DEC_STAT(nr_userspace_pending);
1242 user_send_buffer(req, 0);
1247 case TUX_ACTION_READ_HEADERS:
1252 ret = copy_from_user(&u_addr, &u_info->object_addr,
1256 addr = (char *)(unsigned long)u_addr;
1257 ret = copy_to_user(&u_info->objectlen,
1258 &req->headers_len, sizeof(req->headers_len));
1261 ret = copy_to_user(addr,req->headers, req->headers_len);
1267 case TUX_ACTION_READ_POST_DATA:
1273 ret = copy_from_user(&u_addr, &u_info->object_addr,
1277 addr = (char *)(unsigned long)u_addr;
1279 ret = copy_from_user(&size, &u_info->objectlen,
1283 Dprintk("READ_POST_DATA: got %p(%d).\n", addr, size);
/* clamp to the amount of POST data actually buffered */
1284 if (req->post_data_len < size)
1285 size = req->post_data_len;
1286 Dprintk("READ_POST_DATA: writing %d.\n", size);
1287 ret = copy_to_user(&u_info->objectlen,
1288 &size, sizeof(size));
1291 ret = copy_to_user(addr, req->post_data_str, size);
1297 case TUX_ACTION_WATCH_PROXY_SOCKET:
1299 struct socket *sock;
1304 ret = copy_from_user(&u_addr, &u_info->object_addr,
/* object_addr carries a file descriptor for this action */
1308 fd = (int)(unsigned long)u_addr;
1310 sock = sockfd_lookup(fd, &err);
1314 link_tux_data_socket(req, sock);
1320 case TUX_ACTION_WAIT_PROXY_SOCKET:
1322 if (!req->data_sock)
/* fast path: data already available, no need to sleep */
1324 if (socket_input(req->data_sock)) {
1325 ret = TUX_RETURN_USERSPACE_REQUEST;
1328 spin_lock_irq(&req->ti->work_lock);
1329 add_keepalive_timer(req);
1330 if (test_and_set_bit(0, &req->idle_input))
1332 spin_unlock_irq(&req->ti->work_lock);
/* re-check after arming idle_input, to close the race with new input */
1333 if (socket_input(req->data_sock)) {
1335 ret = TUX_RETURN_USERSPACE_REQUEST;
1338 req->ti->userspace_req = NULL;
/* --- common exit: publish the next userspace request, if any --- */
1347 req = ti->userspace_req;
1349 ret = prepare_userspace_req(ti, u_info);
1351 TDprintk("hm, user req %p returned %d, zapping.\n",
1353 zap_userspace_req(req);
1358 if (action != TUX_ACTION_CURRENT_DATE)
1359 Dprintk("sys_tux(%d, %p) returning %d.\n", action, u_info, ret);
/* be nice: yield before returning to userspace if resched is pending */
1360 while (unlikely(test_thread_flag(TIF_NEED_RESCHED))) {
1361 __set_task_state(current, TASK_RUNNING);
/* --- error exit path --- */
1367 Dprintk("sys_tux(%d, %p) returning -EINVAL (ret:%d)!\n", action, u_info, ret);
1368 while (unlikely(test_thread_flag(TIF_NEED_RESCHED))) {
1369 __set_task_state(current, TASK_RUNNING);
1376 * This gets called if a TUX thread does an exit().
1378 void tux_exit (void)
1380 __sys_tux(TUX_ACTION_STOPTHREAD, NULL);
/*
 * Module-init fragment (the enclosing function header is missing from
 * this extraction): allocate the request slab caches and, when built
 * as a module, publish the syscall entry point and module pointer
 * under tux_module_lock.
 *
 * NOTE(review): incomplete -- error handling after the slab init and
 * the function's open/close braces are absent.  Comments only added.
 */
1385 if (init_tux_request_slabs())
1390 #ifdef CONFIG_TUX_MODULE
1391 spin_lock(&tux_module_lock);
/* hand the syscall stub in the core kernel our real implementation */
1392 sys_tux_ptr = __sys_tux;
1393 tux_module = THIS_MODULE;
1394 spin_unlock(&tux_module_lock);
/*
 * Module-exit: when built as a module, unpublish the syscall pointer
 * under tux_module_lock, then free the request slab caches.
 *
 * NOTE(review): incomplete extraction -- the lines that clear
 * sys_tux_ptr/tux_module inside the lock, the #endif and braces are
 * missing.  Comments only added.
 */
1400 void tux_cleanup (void)
1402 #ifdef CONFIG_TUX_MODULE
1403 spin_lock(&tux_module_lock);
1406 spin_unlock(&tux_module_lock);
1410 free_tux_request_slabs();
1413 module_init(tux_init)
1414 module_exit(tux_cleanup)
1416 MODULE_LICENSE("GPL");