/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/vs_limit.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

void fastcall set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);
}

static int get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int res;

	rcu_read_lock();
	fdt = files_fdtable(files);
	res = FD_ISSET(fd, fdt->close_on_exec);
	rcu_read_unlock();
	return res;
}

/*
 * locate_fd finds a free file descriptor in the open_fds fdset,
 * expanding the fd arrays if necessary.  Must be called with the
 * file_lock held for write.
 */
static int locate_fd(struct files_struct *files,
			    struct file *file, unsigned int orig_start)
{
	unsigned int newfd;
	unsigned int start;
	int error;
	struct fdtable *fdt;

	error = -EINVAL;
	if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

repeat:
	fdt = files_fdtable(files);
	/*
	 * Someone might have closed fd's in the range
	 * orig_start..files->next_fd
	 */
	start = orig_start;
	if (start < files->next_fd)
		start = files->next_fd;

	newfd = start;
	if (start < fdt->max_fdset) {
		newfd = find_next_zero_bit(fdt->open_fds->fds_bits,
					   fdt->max_fdset, start);
	}

	error = -EMFILE;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;
	if (!vx_files_avail(1))
		goto out;

	error = expand_files(files, newfd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	/*
	 * We reacquired files_lock, so we are safe as long as
	 * we reacquire the fdtable pointer and use it while holding
	 * the lock, no one can free it during that time.
	 */
	if (start <= files->next_fd)
		files->next_fd = newfd + 1;

	error = newfd;

out:
	return error;
}

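/*
 * Reference counting in dupfd(): the caller's reference to the file is
 * consumed.  On success it is transferred to the new descriptor by
 * fd_install(); on failure it is dropped with fput().  Callers such as
 * sys_dup() and do_fcntl() therefore take their own reference with
 * fget()/get_file() first.
 */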
int dupfd(struct file *file, unsigned int start)
{
	struct files_struct * files = current->files;
	struct fdtable *fdt;
	int fd;

	spin_lock(&files->file_lock);
	fd = locate_fd(files, file, start);
	if (fd >= 0) {
		/* locate_fd() may have expanded fdtable, load the ptr */
		fdt = files_fdtable(files);
		FD_SET(fd, fdt->open_fds);
		FD_CLR(fd, fdt->close_on_exec);
		spin_unlock(&files->file_lock);
		vx_openfd_inc(fd);
		fd_install(fd, file);
	} else {
		spin_unlock(&files->file_lock);
		fput(file);
	}

	return fd;
}

EXPORT_SYMBOL_GPL(dupfd);

asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
	int err = -EBADF;
	struct file * file, *tofree;
	struct files_struct * files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	if (!(file = fcheck(oldfd)))
		goto out_unlock;
	err = newfd;
	if (newfd == oldfd)
		goto out_unlock;
	err = -EBADF;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out_unlock;
	get_file(file);			/* We are now finished with oldfd */

	err = expand_files(files, newfd);
	if (err < 0)
		goto out_fput;

	/* To avoid races with open() and dup(), we will mark the fd as
	 * in-use in the open-file bitmap throughout the entire dup2()
	 * process.  This is quite safe: do_close() uses the fd array
	 * entry, not the bitmap, to decide what work needs to be
	 * done. --sct */
	/* Doesn't work. open() might be there first. --AV */

	/* Yes. It's a race. In user space. Nothing sane to do */
	err = -EBUSY;
	fdt = files_fdtable(files);
	tofree = fdt->fd[newfd];
	if (!tofree && FD_ISSET(newfd, fdt->open_fds))
		goto out_fput;

	rcu_assign_pointer(fdt->fd[newfd], file);
	FD_SET(newfd, fdt->open_fds);
	FD_CLR(newfd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);
	else
		vx_openfd_inc(newfd);	/* fd was unused */

	err = newfd;
out:
	return err;

out_unlock:
	spin_unlock(&files->file_lock);
	goto out;

out_fput:
	spin_unlock(&files->file_lock);
	fput(file);
	goto out;
}

asmlinkage long sys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file * file = fget(fildes);

	if (file)
		ret = dupfd(file, 0);
	return ret;
}

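/*
 * The file-status flags that setfl() below allows fcntl(F_SETFL) to
 * change; all other bits of f_flags are preserved across the call.
 */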
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file * filp, unsigned long arg)
{
	struct inode * inode = filp->f_dentry->d_inode;
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	if (arg & O_DIRECT) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
			!filp->f_mapping->a_ops->direct_IO)
				return -EINVAL;
	}

	if (filp->f_op && filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	lock_kernel();
	if ((arg ^ filp->f_flags) & FASYNC) {
		if (filp->f_op && filp->f_op->fasync) {
			error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
			if (error < 0)
				goto out;
		}
	}

	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
 out:
	unlock_kernel();
	return error;
}

static void f_modown(struct file *filp, unsigned long pid,
			uid_t uid, uid_t euid, int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		filp->f_owner.pid = pid;
		filp->f_owner.uid = uid;
		filp->f_owner.euid = euid;
	}
	write_unlock_irq(&filp->f_owner.lock);
}

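/*
 * f_setown() records the owner to which SIGIO/SIGURG will be sent.  A
 * positive argument names a single process; a negative argument names
 * the process group -arg (send_sigio() below treats pid < 0 as a pgrp).
 */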
int f_setown(struct file *filp, unsigned long arg, int force)
{
	int err;

	err = security_file_set_fowner(filp);
	if (err)
		return err;

	f_modown(filp, arg, current->uid, current->euid, force);
	return 0;
}

EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
	f_modown(filp, 0, 0, 0, 1);
}

static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		get_file(filp);
		err = dupfd(filp, arg);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
	case F_GETLK:
		err = fcntl_getlk(filp, (struct flock __user *) arg);
		break;
	case F_SETLK:
	case F_SETLKW:
		err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = filp->f_owner.pid;
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	default:
		break;
	}
	return err;
}

asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file *filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	err = do_fcntl(fd, cmd, arg, filp);

	fput(filp);
out:
	return err;
}

#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file * filp;
	long err;

	err = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	switch (cmd) {
	case F_GETLK64:
		err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
		break;
	case F_SETLK64:
	case F_SETLKW64:
		err = fcntl_setlk64(fd, filp, cmd,
				(struct flock64 __user *) arg);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, filp);
		break;
	}
	fput(filp);
out:
	return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};

static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	return (((fown->euid == 0) ||
		 (fown->euid == p->suid) || (fown->euid == p->uid) ||
		 (fown->uid == p->suid) || (fown->uid == p->uid)) &&
		!security_file_send_sigiotask(p, fown, sig));
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd,
			       int reason)
{
	if (!sigio_perm(p, fown, fown->signum))
		return;

	switch (fown->signum) {
		siginfo_t si;
		default:
			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not
			   SI_KERNEL, since kernel signals always get
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			si.si_signo = fown->signum;
			si.si_errno = 0;
			si.si_code  = reason;
			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			BUG_ON((reason & __SI_MASK) != __SI_POLL);
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band  = ~0L;
			else
				si.si_band = band_table[reason - POLL_IN];
			si.si_fd    = fd;
			if (!group_send_sig_info(fown->signum, &si, p))
				break;
		/* fall-through: fall back on the old plain SIGIO signal */
		case 0:
			group_send_sig_info(SIGIO, SEND_SIG_PRIV, p);
	}
}

void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	int pid;

	read_lock(&fown->lock);
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	if (pid > 0) {
		p = find_task_by_real_pid(pid);
		if (p)
			send_sigio_to_task(p, fown, fd, band);
	} else {
		do_each_task_pid(-pid, PIDTYPE_PGID, p) {
			send_sigio_to_task(p, fown, fd, band);
		} while_each_task_pid(-pid, PIDTYPE_PGID, p);
	}
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown)
{
	if (sigio_perm(p, fown, SIGURG))
		group_send_sig_info(SIGURG, SEND_SIG_PRIV, p);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	int pid, ret = 0;

	read_lock(&fown->lock);
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	if (pid > 0) {
		p = find_task_by_real_pid(pid);
		if (p)
			send_sigurg_to_task(p, fown);
	} else {
		do_each_task_pid(-pid, PIDTYPE_PGID, p) {
			send_sigurg_to_task(p, fown);
		} while_each_task_pid(-pid, PIDTYPE_PGID, p);
	}
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}

static DEFINE_RWLOCK(fasync_lock);
static kmem_cache_t *fasync_cache __read_mostly;

/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue. It returns negative on error, 0 if it did
 * no changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	struct fasync_struct *new = NULL;
	int result = 0;

	if (on) {
		new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
		if (!new)
			return -ENOMEM;
	}
	write_lock_irq(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file == filp) {
			if (on) {
				/* entry already present: just update the fd */
				fa->fa_fd = fd;
				kmem_cache_free(fasync_cache, new);
			} else {
				/* unlink and free the existing entry */
				*fp = fa->fa_next;
				kmem_cache_free(fasync_cache, fa);
				result = 1;
			}
			goto out;
		}
	}

	if (on) {
		new->magic = FASYNC_MAGIC;
		new->fa_file = filp;
		new->fa_fd = fd;
		new->fa_next = *fapp;
		*fapp = new;
		result = 1;
	}
out:
	write_unlock_irq(&fasync_lock);
	return result;
}

EXPORT_SYMBOL(fasync_helper);

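/*
 * Typical use in a character driver's file_operations (an illustrative
 * sketch -- the names my_fasync/my_async_queue are not from this file):
 *
 *	static struct fasync_struct *my_async_queue;
 *
 *	static int my_fasync(int fd, struct file *filp, int on)
 *	{
 *		return fasync_helper(fd, filp, on, &my_async_queue);
 *	}
 *
 * When new data arrives, the driver then calls
 * kill_fasync(&my_async_queue, SIGIO, POLL_IN).
 */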
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct * fown;
		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		fown = &fa->fa_file->f_owner;
		/* Don't send SIGURG to processes which have not set a
		   queued signum: SIGURG has its own default signalling
		   mechanism. */
		if (!(sig == SIGURG && fown->signum == 0))
			send_sigio(fown, fa->fa_fd, band);
		fa = fa->fa_next;
	}
}

EXPORT_SYMBOL(__kill_fasync);

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		read_lock(&fasync_lock);
		/* reread *fp after obtaining the lock */
		__kill_fasync(*fp, sig, band);
		read_unlock(&fasync_lock);
	}
}
EXPORT_SYMBOL(kill_fasync);

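/*
 * Locking note: senders take fasync_lock for reading, so concurrent
 * kill_fasync() calls can proceed in parallel, while fasync_helper()
 * takes it as an irq-disabling writer while editing the list.
 */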
static int __init fasync_init(void)
{
	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
	return 0;
}

module_init(fasync_init)