/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/vs_limit.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

void fastcall set_close_on_exec(unsigned int fd, int flag)
{
        struct files_struct *files = current->files;
        spin_lock(&files->file_lock);
        if (flag)
                FD_SET(fd, files->close_on_exec);
        else
                FD_CLR(fd, files->close_on_exec);
        spin_unlock(&files->file_lock);
}

static inline int get_close_on_exec(unsigned int fd)
{
        struct files_struct *files = current->files;
        int res;
        spin_lock(&files->file_lock);
        res = FD_ISSET(fd, files->close_on_exec);
        spin_unlock(&files->file_lock);
        return res;
}

/*
 * locate_fd finds a free file descriptor in the open_fds fdset,
 * expanding the fd arrays if necessary.  Must be called with the
 * file_lock held for write.
 */

static int locate_fd(struct files_struct *files,
                     struct file *file, unsigned int orig_start)
{
        unsigned int newfd;
        unsigned int start;
        int error;

        error = -EINVAL;
        if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
                goto out;

repeat:
        /*
         * Someone might have closed fd's in the range
         * orig_start..files->next_fd
         */
        start = orig_start;
        if (start < files->next_fd)
                start = files->next_fd;

        newfd = start;
        if (start < files->max_fdset) {
                newfd = find_next_zero_bit(files->open_fds->fds_bits,
                                           files->max_fdset, start);
        }

        error = -EMFILE;
        if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
                goto out;
        if (!vx_files_avail(1))
                goto out;

        error = expand_files(files, newfd);
        if (error < 0)
                goto out;

        /*
         * If we needed to expand the fd array we
         * might have blocked - try again.
         */
        if (error)
                goto repeat;

        if (start <= files->next_fd)
                files->next_fd = newfd + 1;

        error = newfd;
out:
        return error;
}

int dupfd(struct file *file, unsigned int start)
{
        struct files_struct * files = current->files;
        int fd;

        spin_lock(&files->file_lock);
        fd = locate_fd(files, file, start);
        if (fd >= 0) {
                FD_SET(fd, files->open_fds);
                FD_CLR(fd, files->close_on_exec);
                spin_unlock(&files->file_lock);
                vx_openfd_inc(fd);
                fd_install(fd, file);
        } else {
                spin_unlock(&files->file_lock);
                fput(file);
        }

        return fd;
}

EXPORT_SYMBOL_GPL(dupfd);

asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
        int err = -EBADF;
        struct file * file, *tofree;
        struct files_struct * files = current->files;

        spin_lock(&files->file_lock);
        if (!(file = fcheck(oldfd)))
                goto out_unlock;
        err = newfd;
        if (newfd == oldfd)
                goto out_unlock;
        err = -EBADF;
        if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
                goto out_unlock;
        get_file(file);                 /* We are now finished with oldfd */

        err = expand_files(files, newfd);
        if (err < 0)
                goto out_fput;

        /* To avoid races with open() and dup(), we will mark the fd as
         * in-use in the open-file bitmap throughout the entire dup2()
         * process.  This is quite safe: do_close() uses the fd array
         * entry, not the bitmap, to decide what work needs to be
         * done.  --sct */
        /* Doesn't work. open() might be there first. --AV */

        /* Yes. It's a race. In user space. Nothing sane to do */
        err = -EBUSY;
        tofree = files->fd[newfd];
        if (!tofree && FD_ISSET(newfd, files->open_fds))
                goto out_fput;

        files->fd[newfd] = file;
        FD_SET(newfd, files->open_fds);
        FD_CLR(newfd, files->close_on_exec);
        spin_unlock(&files->file_lock);
        // vx_openfd_inc(newfd);

        if (tofree)
                filp_close(tofree, files);
        else
                vx_openfd_inc(newfd);   /* fd was unused */

        err = newfd;
out:
        return err;
out_unlock:
        spin_unlock(&files->file_lock);
        goto out;

out_fput:
        spin_unlock(&files->file_lock);
        fput(file);
        goto out;
}

asmlinkage long sys_dup(unsigned int fildes)
{
        int ret = -EBADF;
        struct file * file = fget(fildes);

        if (file)
                ret = dupfd(file, 0);
        return ret;
}

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)

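/*
 * Illustration only (not from the original source): SETFL_MASK limits which
 * flags F_SETFL may change via setfl() below.  A typical userspace caller
 * looks roughly like this:
 *
 *      int flags = fcntl(fd, F_GETFL, 0);
 *      if (flags != -1)
 *              fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 *
 * Bits outside SETFL_MASK (access mode, O_CREAT, etc.) are taken from the
 * file's existing f_flags, not from the caller's argument.
 */
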
static int setfl(int fd, struct file * filp, unsigned long arg)
{
        struct inode * inode = filp->f_dentry->d_inode;
        int error = 0;

        /* O_APPEND cannot be cleared if the file is marked as append-only */
        if (!(arg & O_APPEND) && IS_APPEND(inode))
                return -EPERM;

        /* O_NOATIME can only be set by the owner or superuser */
        if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
                if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
                        return -EPERM;

        /* required for strict SunOS emulation */
        if (O_NONBLOCK != O_NDELAY)
                if (arg & O_NDELAY)
                        arg |= O_NONBLOCK;

        if (arg & O_DIRECT) {
                if (!filp->f_mapping || !filp->f_mapping->a_ops ||
                    !filp->f_mapping->a_ops->direct_IO)
                        return -EINVAL;
        }

        if (filp->f_op && filp->f_op->check_flags)
                error = filp->f_op->check_flags(arg);
        if (error)
                return error;

        lock_kernel();
        if ((arg ^ filp->f_flags) & FASYNC) {
                if (filp->f_op && filp->f_op->fasync) {
                        error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
                        if (error < 0)
                                goto out;
                }
        }

        filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
out:
        unlock_kernel();
        return error;
}

static void f_modown(struct file *filp, unsigned long pid,
                     uid_t uid, uid_t euid, int force)
{
        write_lock_irq(&filp->f_owner.lock);
        if (force || !filp->f_owner.pid) {
                filp->f_owner.pid = pid;
                filp->f_owner.uid = uid;
                filp->f_owner.euid = euid;
        }
        write_unlock_irq(&filp->f_owner.lock);
}

int f_setown(struct file *filp, unsigned long arg, int force)
{
        int err;

        err = security_file_set_fowner(filp);
        if (err)
                return err;

        f_modown(filp, arg, current->uid, current->euid, force);
        return 0;
}

EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
        f_modown(filp, 0, 0, 0, 1);
}

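/*
 * Illustration only (not from the original source): the owner recorded by
 * f_setown() is the recipient of SIGIO/SIGURG in send_sigio()/send_sigurg()
 * below.  A userspace consumer typically arms async notification like this
 * (sketch, error handling omitted):
 *
 *      fcntl(fd, F_SETOWN, getpid());                   // ends up in f_setown()
 *      fcntl(fd, F_SETSIG, SIGRTMIN + 1);               // optional queued signal
 *      fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_ASYNC);
 */
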
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
                     struct file *filp)
{
        long err = -EINVAL;

        switch (cmd) {
        case F_DUPFD:
                get_file(filp);
                err = dupfd(filp, arg);
                break;
        case F_GETFD:
                err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
                break;
        case F_SETFD:
                err = 0;
                set_close_on_exec(fd, arg & FD_CLOEXEC);
                break;
        case F_GETFL:
                err = filp->f_flags;
                break;
        case F_SETFL:
                err = setfl(fd, filp, arg);
                break;
        case F_GETLK:
                err = fcntl_getlk(filp, (struct flock __user *) arg);
                break;
        case F_SETLK:
        case F_SETLKW:
                err = fcntl_setlk(filp, cmd, (struct flock __user *) arg);
                break;
        case F_GETOWN:
                /*
                 * XXX If f_owner is a process group, the
                 * negative return value will get converted
                 * into an error.  Oops.  If we keep the
                 * current syscall conventions, the only way
                 * to fix this will be in libc.
                 */
                err = filp->f_owner.pid;
                force_successful_syscall_return();
                break;
        case F_SETOWN:
                err = f_setown(filp, arg, 1);
                break;
        case F_GETSIG:
                err = filp->f_owner.signum;
                break;
        case F_SETSIG:
                /* arg == 0 restores default behaviour. */
                if (!valid_signal(arg)) {
                        break;
                }
                err = 0;
                filp->f_owner.signum = arg;
                break;
        case F_GETLEASE:
                err = fcntl_getlease(filp);
                break;
        case F_SETLEASE:
                err = fcntl_setlease(fd, filp, arg);
                break;
        case F_NOTIFY:
                err = fcntl_dirnotify(fd, filp, arg);
                break;
        default:
                break;
        }
        return err;
}

asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
        struct file *filp;
        long err = -EBADF;

        filp = fget(fd);
        if (!filp)
                goto out;

        err = security_file_fcntl(filp, cmd, arg);
        if (err) {
                fput(filp);
                return err;
        }

        err = do_fcntl(fd, cmd, arg, filp);

        fput(filp);
out:
        return err;
}

#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
        struct file * filp;
        long err;

        err = -EBADF;
        filp = fget(fd);
        if (!filp)
                goto out;

        err = security_file_fcntl(filp, cmd, arg);
        if (err) {
                fput(filp);
                return err;
        }
        err = -EBADF;

        switch (cmd) {
        case F_GETLK64:
                err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
                break;
        case F_SETLK64:
        case F_SETLKW64:
                err = fcntl_setlk64(filp, cmd, (struct flock64 __user *) arg);
                break;
        default:
                err = do_fcntl(fd, cmd, arg, filp);
                break;
        }
        fput(filp);
out:
        return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static long band_table[NSIGPOLL] = {
        POLLIN | POLLRDNORM,                    /* POLL_IN */
        POLLOUT | POLLWRNORM | POLLWRBAND,      /* POLL_OUT */
        POLLIN | POLLRDNORM | POLLMSG,          /* POLL_MSG */
        POLLERR,                                /* POLL_ERR */
        POLLPRI | POLLRDBAND,                   /* POLL_PRI */
        POLLHUP | POLLERR                       /* POLL_HUP */
};

static inline int sigio_perm(struct task_struct *p,
                             struct fown_struct *fown, int sig)
{
        return (((fown->euid == 0) ||
                 (fown->euid == p->suid) || (fown->euid == p->uid) ||
                 (fown->uid == p->suid) || (fown->uid == p->uid)) &&
                !security_file_send_sigiotask(p, fown, sig));
}

static void send_sigio_to_task(struct task_struct *p,
                               struct fown_struct *fown,
                               int fd,
                               int reason)
{
        if (!sigio_perm(p, fown, fown->signum))
                return;

        switch (fown->signum) {
                siginfo_t si;
        default:
                /* Queue a rt signal with the appropriate fd as its
                   value.  We use SI_SIGIO as the source, not
                   SI_KERNEL, since kernel signals always get
                   delivered even if we can't queue.  Failure to
                   queue in this case _should_ be reported; we fall
                   back to SIGIO in that case. --sct */
                si.si_signo = fown->signum;
                si.si_errno = 0;
                si.si_code  = reason;
                /* Make sure we are called with one of the POLL_*
                   reasons, otherwise we could leak kernel stack into
                   userspace.  */
                if ((reason & __SI_MASK) != __SI_POLL)
                        BUG();
                if (reason - POLL_IN >= NSIGPOLL)
                        si.si_band = ~0L;
                else
                        si.si_band = band_table[reason - POLL_IN];
                si.si_fd = fd;
                if (!send_group_sig_info(fown->signum, &si, p))
                        break;
        /* fall-through: fall back on the old plain SIGIO signal */
        case 0:
                send_group_sig_info(SIGIO, SEND_SIG_PRIV, p);
        }
}

void send_sigio(struct fown_struct *fown, int fd, int band)
{
        struct task_struct *p;
        int pid;

        read_lock(&fown->lock);
        pid = fown->pid;
        if (!pid)
                goto out_unlock_fown;

        read_lock(&tasklist_lock);
        if (pid > 0) {
                p = find_task_by_real_pid(pid);
                if (p) {
                        send_sigio_to_task(p, fown, fd, band);
                }
        } else {
                do_each_task_pid(-pid, PIDTYPE_PGID, p) {
                        send_sigio_to_task(p, fown, fd, band);
                } while_each_task_pid(-pid, PIDTYPE_PGID, p);
        }
        read_unlock(&tasklist_lock);
out_unlock_fown:
        read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
                                struct fown_struct *fown)
{
        if (sigio_perm(p, fown, SIGURG))
                send_group_sig_info(SIGURG, SEND_SIG_PRIV, p);
}

int send_sigurg(struct fown_struct *fown)
{
        struct task_struct *p;
        int pid, ret = 0;

        read_lock(&fown->lock);
        pid = fown->pid;
        if (!pid)
                goto out_unlock_fown;

        ret = 1;

        read_lock(&tasklist_lock);
        if (pid > 0) {
                p = find_task_by_real_pid(pid);
                if (p) {
                        send_sigurg_to_task(p, fown);
                }
        } else {
                do_each_task_pid(-pid, PIDTYPE_PGID, p) {
                        send_sigurg_to_task(p, fown);
                } while_each_task_pid(-pid, PIDTYPE_PGID, p);
        }
        read_unlock(&tasklist_lock);
out_unlock_fown:
        read_unlock(&fown->lock);
        return ret;
}

static DEFINE_RWLOCK(fasync_lock);
static kmem_cache_t *fasync_cache;

/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue. It returns negative on error, 0 if it did
 * no changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
        struct fasync_struct *fa, **fp;
        struct fasync_struct *new = NULL;
        int result = 0;

        if (on) {
                new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
                if (!new)
                        return -ENOMEM;
        }
        write_lock_irq(&fasync_lock);
        for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
                if (fa->fa_file == filp) {
                        if (on) {
                                fa->fa_fd = fd;
                                kmem_cache_free(fasync_cache, new);
                        } else {
                                *fp = fa->fa_next;
                                kmem_cache_free(fasync_cache, fa);
                                result = 1;
                        }
                        goto out;
                }
        }

        if (on) {
                new->magic = FASYNC_MAGIC;
                new->fa_file = filp;
                new->fa_fd = fd;
                new->fa_next = *fapp;
                *fapp = new;
                result = 1;
        }
out:
        write_unlock_irq(&fasync_lock);
        return result;
}

EXPORT_SYMBOL(fasync_helper);

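/*
 * Illustration only (hypothetical driver code, not from the original source):
 * a character device driver normally wires fasync_helper() into its
 * file_operations ->fasync method, keeping the list head in per-device data:
 *
 *      static struct fasync_struct *mydev_async_queue;
 *
 *      static int mydev_fasync(int fd, struct file *filp, int on)
 *      {
 *              return fasync_helper(fd, filp, on, &mydev_async_queue);
 *      }
 *
 * The notification side of this pattern is shown after kill_fasync() below.
 */
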
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
        while (fa) {
                struct fown_struct * fown;
                if (fa->magic != FASYNC_MAGIC) {
                        printk(KERN_ERR "kill_fasync: bad magic number in "
                               "fasync_struct!\n");
                        return;
                }
                fown = &fa->fa_file->f_owner;
                /* Don't send SIGURG to processes which have not set a
                   queued signum: SIGURG has its own default signalling
                   mechanism. */
                if (!(sig == SIGURG && fown->signum == 0))
                        send_sigio(fown, fa->fa_fd, band);
                fa = fa->fa_next;
        }
}

EXPORT_SYMBOL(__kill_fasync);

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
        /* First a quick test without locking: usually
           the list is empty.
         */
        if (*fp) {
                read_lock(&fasync_lock);
                /* reread *fp after obtaining the lock */
                __kill_fasync(*fp, sig, band);
                read_unlock(&fasync_lock);
        }
}
EXPORT_SYMBOL(kill_fasync);

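/*
 * Illustration only (continuing the hypothetical driver sketched above
 * fasync_helper()): when new data arrives, the driver notifies all registered
 * owners, typically from its interrupt handler or receive path:
 *
 *      kill_fasync(&mydev_async_queue, SIGIO, POLL_IN);
 *
 * The band argument (POLL_IN, POLL_OUT, ...) is mapped to si_band through
 * band_table[] in send_sigio_to_task().
 */
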
static int __init fasync_init(void)
{
        fasync_cache = kmem_cache_create("fasync_cache",
                sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
        return 0;
}

module_init(fasync_init)