/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/vs_limit.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

void fastcall set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);
}

static int get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = FD_ISSET(fd, fdt->close_on_exec);
	rcu_read_unlock();
	return res;
}

/*
 * locate_fd finds a free file descriptor in the open_fds fdset,
 * expanding the fd arrays if necessary.  Must be called with the
 * file_lock held for write.
 */

static int locate_fd(struct files_struct *files,
			    struct file *file, unsigned int orig_start)
{
	unsigned int newfd;
	unsigned int start;
	int error;
	struct fdtable *fdt;

	error = -EINVAL;
	if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

repeat:
	fdt = files_fdtable(files);
	/*
	 * Someone might have closed fd's in the range
	 * orig_start..files->next_fd
	 */
	start = orig_start;
	if (start < files->next_fd)
		start = files->next_fd;

	newfd = start;
	if (start < fdt->max_fds)
		newfd = find_next_zero_bit(fdt->open_fds->fds_bits,
					   fdt->max_fds, start);

	error = -EMFILE;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;
	if (!vx_files_avail(1))
		goto out;

	error = expand_files(files, newfd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fs array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	/*
	 * We reacquired files_lock, so we are safe as long as
	 * we reacquire the fdtable pointer and use it while holding
	 * the lock, no one can free it during that time.
	 */
	if (start <= files->next_fd)
		files->next_fd = newfd + 1;

	error = newfd;

out:
	return error;
}

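/*
 * Install 'file' in the first free descriptor at or above 'start'.
 * The caller's reference to 'file' is consumed: it is either published
 * in the fd table or dropped with fput() on failure.
 */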
static int dupfd(struct file *file, unsigned int start)
{
	struct files_struct * files = current->files;
	struct fdtable *fdt;
	int fd;

	spin_lock(&files->file_lock);
	fd = locate_fd(files, file, start);
	if (fd >= 0) {
		/* locate_fd() may have expanded fdtable, load the ptr */
		fdt = files_fdtable(files);
		FD_SET(fd, fdt->open_fds);
		FD_CLR(fd, fdt->close_on_exec);
		spin_unlock(&files->file_lock);
		vx_openfd_inc(fd);
		fd_install(fd, file);
	} else {
		spin_unlock(&files->file_lock);
		fput(file);
	}

	return fd;
}

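/*
 * dup2() semantics: make 'newfd' refer to the same open file as 'oldfd',
 * silently closing whatever was installed at 'newfd' before. The new
 * descriptor always has close-on-exec cleared.
 */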
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
	int err = -EBADF;
	struct file * file, *tofree;
	struct files_struct * files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	if (!(file = fcheck(oldfd)))
		goto out_unlock;
	err = newfd;
	if (newfd == oldfd)
		goto out_unlock;
	err = -EBADF;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out_unlock;
	get_file(file);			/* We are now finished with oldfd */

	err = expand_files(files, newfd);
	if (err < 0)
		goto out_fput;

	/* To avoid races with open() and dup(), we will mark the fd as
	 * in-use in the open-file bitmap throughout the entire dup2()
	 * process.  This is quite safe: do_close() uses the fd array
	 * entry, not the bitmap, to decide what work needs to be
	 * done.  --sct */
	/* Doesn't work. open() might be there first. --AV */

	/* Yes. It's a race. In user space. Nothing sane to do */
	err = -EBUSY;
	fdt = files_fdtable(files);
	tofree = fdt->fd[newfd];
	if (!tofree && FD_ISSET(newfd, fdt->open_fds))
		goto out_fput;

	rcu_assign_pointer(fdt->fd[newfd], file);
	FD_SET(newfd, fdt->open_fds);
	FD_CLR(newfd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);
	else
		vx_openfd_inc(newfd);	/* fd was unused */

	err = newfd;
out:
	return err;

out_unlock:
	spin_unlock(&files->file_lock);
	goto out;

out_fput:
	spin_unlock(&files->file_lock);
	fput(file);
	goto out;
}

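/*
 * dup() copies 'fildes' to the lowest-numbered free descriptor.
 */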
asmlinkage long sys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file * file = fget(fildes);

	if (file)
		ret = dupfd(file, 0);
	return ret;
}

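/*
 * The status flags that fcntl(F_SETFL) is allowed to toggle; all other
 * bits of f_flags are preserved by setfl() below.
 */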
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file * filp, unsigned long arg)
{
	struct inode * inode = filp->f_path.dentry->d_inode;
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	if (arg & O_DIRECT) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
			!filp->f_mapping->a_ops->direct_IO)
				return -EINVAL;
	}

	if (filp->f_op && filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	lock_kernel();
	if ((arg ^ filp->f_flags) & FASYNC) {
		if (filp->f_op && filp->f_op->fasync) {
			error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
			if (error < 0)
				goto out;
		}
	}

	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
 out:
	unlock_kernel();
	return error;
}

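/*
 * Update the owner (pid and credentials) recorded in filp->f_owner.
 * Unless 'force' is set, an existing owner is left in place.
 */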
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
			uid_t uid, uid_t euid, int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;
		filp->f_owner.uid = uid;
		filp->f_owner.euid = euid;
	}
	write_unlock_irq(&filp->f_owner.lock);
}

int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	int err;

	err = security_file_set_fowner(filp);
	if (err)
		return err;

	f_modown(filp, pid, type, current->uid, current->euid, force);
	return 0;
}
EXPORT_SYMBOL(__f_setown);

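/*
 * F_SETOWN semantics: a positive argument selects a single process, a
 * negative one selects the process group |arg| (stored as PIDTYPE_PGID).
 */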
int f_setown(struct file *filp, unsigned long arg, int force)
{
	enum pid_type type;
	struct pid *pid;
	int who = arg;
	int result;
	type = PIDTYPE_PID;
	if (who < 0) {
		type = PIDTYPE_PGID;
		who = -who;
	}
	rcu_read_lock();
	pid = find_pid(who);
	result = __f_setown(filp, pid, type, force);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_PID, 0, 0, 1);
}

pid_t f_getown(struct file *filp)
{
	pid_t pid;
	read_lock(&filp->f_owner.lock);
	pid = pid_nr(filp->f_owner.pid);
	if (filp->f_owner.pid_type == PIDTYPE_PGID)
		pid = -pid;
	read_unlock(&filp->f_owner.lock);
	return pid;
}

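/*
 * Shared implementation of the fcntl() commands; called from sys_fcntl()
 * and, for everything except the 64-bit locking commands, from
 * sys_fcntl64(). Unknown commands return -EINVAL.
 */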
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		get_file(filp);
		err = dupfd(filp, arg);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
	case F_GETLK:
		err = fcntl_getlk(filp, (struct flock __user *) arg);
		break;
	case F_SETLK:
	case F_SETLKW:
		err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	default:
		break;
	}
	return err;
}

asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file *filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	err = do_fcntl(fd, cmd, arg, filp);

	fput(filp);
out:
	return err;
}

#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file * filp;
	long err;

	err = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}
	err = -EBADF;

	switch (cmd) {
	case F_GETLK64:
		err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
		break;
	case F_SETLK64:
	case F_SETLKW64:
		err = fcntl_setlk64(fd, filp, cmd,
				(struct flock64 __user *) arg);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, filp);
		break;
	}
	fput(filp);
out:
	return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};

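/*
 * May the owner identified by 'fown' (set at F_SETOWN time) signal task
 * 'p'? Requires matching uids/euids (or a root owner) plus approval from
 * the security module.
 */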
static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	return (((fown->euid == 0) ||
		 (fown->euid == p->suid) || (fown->euid == p->uid) ||
		 (fown->uid == p->suid) || (fown->uid == p->uid)) &&
		!security_file_send_sigiotask(p, fown, sig));
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason)
{
	if (!sigio_perm(p, fown, fown->signum))
		return;

	switch (fown->signum) {
		siginfo_t si;
		default:
			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not
			   SI_KERNEL, since kernel signals always get
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			si.si_signo = fown->signum;
			si.si_errno = 0;
			si.si_code  = reason;
			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			BUG_ON((reason & __SI_MASK) != __SI_POLL);
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band  = ~0L;
			else
				si.si_band = band_table[reason - POLL_IN];
			si.si_fd    = fd;
			if (!group_send_sig_info(fown->signum, &si, p))
				break;
		/* fall-through: fall back on the old plain SIGIO signal */
		case 0:
			group_send_sig_info(SIGIO, SEND_SIG_PRIV, p);
	}
}

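/*
 * Deliver SIGIO (or the signal chosen with F_SETSIG) to the owner
 * recorded in 'fown' - either a single task or every member of a
 * process group, depending on how F_SETOWN was used.
 */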
void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;

	read_lock(&fown->lock);
	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigio_to_task(p, fown, fd, band);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown)
{
	if (sigio_perm(p, fown, SIGURG))
		group_send_sig_info(SIGURG, SEND_SIG_PRIV, p);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int ret = 0;

	read_lock(&fown->lock);
	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigurg_to_task(p, fown);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}
EXPORT_SYMBOL(send_sigurg);

static DEFINE_RWLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue. It returns negative on error, 0 if it did
 * no changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	struct fasync_struct *new = NULL;
	int result = 0;

	if (on) {
		new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
		if (!new)
			return -ENOMEM;
	}
	write_lock_irq(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file == filp) {
			if (on) {
				fa->fa_fd = fd;
				kmem_cache_free(fasync_cache, new);
			} else {
				*fp = fa->fa_next;
				kmem_cache_free(fasync_cache, fa);
				result = 1;
			}
			goto out;
		}
	}

	if (on) {
		new->magic = FASYNC_MAGIC;
		new->fa_file = filp;
		new->fa_fd = fd;
		new->fa_next = *fapp;
		*fapp = new;
		result = 1;
	}
out:
	write_unlock_irq(&fasync_lock);
	return result;
}

EXPORT_SYMBOL(fasync_helper);

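/*
 * Walk one fasync list and raise the owner's signal for every entry.
 * Normally reached via kill_fasync() below, which takes fasync_lock
 * around the walk.
 */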
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct * fown;
		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		fown = &fa->fa_file->f_owner;
		/* Don't send SIGURG to processes which have not set a
		   queued signum: SIGURG has its own default signalling
		   mechanism. */
		if (!(sig == SIGURG && fown->signum == 0))
			send_sigio(fown, fa->fa_fd, band);
		fa = fa->fa_next;
	}
}

EXPORT_SYMBOL(__kill_fasync);

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		read_lock(&fasync_lock);
		/* reread *fp after obtaining the lock */
		__kill_fasync(*fp, sig, band);
		read_unlock(&fasync_lock);
	}
}
EXPORT_SYMBOL(kill_fasync);

static int __init fasync_init(void)
{
	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
	return 0;
}

module_init(fasync_init)