/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/vs_base.h>
#include <linux/vs_limit.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

void fastcall set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);
}

static int get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int res;

	rcu_read_lock();
	fdt = files_fdtable(files);
	res = FD_ISSET(fd, fdt->close_on_exec);
	rcu_read_unlock();
	return res;
}
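
/*
 * Illustration (not part of the original file): these two helpers back the
 * F_GETFD/F_SETFD commands dispatched from do_fcntl() below.  From user
 * space the close-on-exec flag is typically toggled like this (error
 * handling elided):
 *
 *	int flags = fcntl(fd, F_GETFD);
 *	fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
 */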

/*
 * locate_fd finds a free file descriptor in the open_fds fdset,
 * expanding the fd arrays if necessary.  Must be called with the
 * file_lock held for write.
 */

static int locate_fd(struct files_struct *files,
			    struct file *file, unsigned int orig_start)
{
	unsigned int newfd;
	unsigned int start;
	int error;
	struct fdtable *fdt;

	error = -EINVAL;
	if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

repeat:
	fdt = files_fdtable(files);
	/*
	 * Someone might have closed fd's in the range
	 * orig_start..fdt->next_fd
	 */
	start = orig_start;
	if (start < files->next_fd)
		start = files->next_fd;

	newfd = start;
	if (start < fdt->max_fds)
		newfd = find_next_zero_bit(fdt->open_fds->fds_bits,
					   fdt->max_fds, start);

	error = -EMFILE;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;
	if (!vx_files_avail(1))
		goto out;

	error = expand_files(files, newfd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	/*
	 * We reacquired files_lock, so we are safe as long as
	 * we reacquire the fdtable pointer and use it while holding
	 * the lock, no one can free it during that time.
	 */
	if (start <= files->next_fd)
		files->next_fd = newfd + 1;

	error = newfd;

out:
	return error;
}

static int dupfd(struct file *file, unsigned int start)
{
	struct files_struct * files = current->files;
	struct fdtable *fdt;
	int fd;

	spin_lock(&files->file_lock);
	fd = locate_fd(files, file, start);
	if (fd >= 0) {
		/* locate_fd() may have expanded fdtable, load the ptr */
		fdt = files_fdtable(files);
		FD_SET(fd, fdt->open_fds);
		FD_CLR(fd, fdt->close_on_exec);
		spin_unlock(&files->file_lock);
		vx_openfd_inc(fd);
		fd_install(fd, file);
	} else {
		spin_unlock(&files->file_lock);
		fput(file);
	}

	return fd;
}

asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
	int err = -EBADF;
	struct file * file, *tofree;
	struct files_struct * files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	if (!(file = fcheck(oldfd)))
		goto out_unlock;
	err = newfd;
	if (newfd == oldfd)
		goto out_unlock;
	err = -EBADF;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out_unlock;
	get_file(file);			/* We are now finished with oldfd */

	err = expand_files(files, newfd);
	if (err < 0)
		goto out_fput;

	/* To avoid races with open() and dup(), we will mark the fd as
	 * in-use in the open-file bitmap throughout the entire dup2()
	 * process.  This is quite safe: do_close() uses the fd array
	 * entry, not the bitmap, to decide what work needs to be
	 * done.  --sct */
	/* Doesn't work. open() might be there first. --AV */

	/* Yes. It's a race. In user space. Nothing sane to do */
	err = -EBUSY;
	fdt = files_fdtable(files);
	tofree = fdt->fd[newfd];
	if (!tofree && FD_ISSET(newfd, fdt->open_fds))
		goto out_fput;

	rcu_assign_pointer(fdt->fd[newfd], file);
	FD_SET(newfd, fdt->open_fds);
	FD_CLR(newfd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);
	else
		vx_openfd_inc(newfd);	/* fd was unused */

	err = newfd;
out:
	return err;
out_unlock:
	spin_unlock(&files->file_lock);
	goto out;

out_fput:
	spin_unlock(&files->file_lock);
	fput(file);
	goto out;
}
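
/*
 * Illustration (not part of the original file): the classic use of dup2()
 * is redirecting a standard stream before exec, e.g. from user space:
 *
 *	int fd = open("log.txt", O_WRONLY | O_CREAT, 0644);
 *	dup2(fd, STDOUT_FILENO);	// stdout now writes to log.txt
 */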

asmlinkage long sys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file * file = fget(fildes);

	if (file)
		ret = dupfd(file, 0);
	return ret;
}
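
/*
 * Flags that setfl()/F_SETFL are allowed to change; all other bits in
 * f_flags keep the value they were given at open time.
 */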
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file * filp, unsigned long arg)
{
	struct inode * inode = filp->f_path.dentry->d_inode;
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	if (arg & O_DIRECT) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
			!filp->f_mapping->a_ops->direct_IO)
				return -EINVAL;
	}

	if (filp->f_op && filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	lock_kernel();
	if ((arg ^ filp->f_flags) & FASYNC) {
		if (filp->f_op && filp->f_op->fasync) {
			error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
			if (error < 0)
				goto out;
		}
	}

	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
 out:
	unlock_kernel();
	return error;
}
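
/*
 * Illustration (not part of the original file): user space drives setfl()
 * through fcntl(F_SETFL); a typical non-blocking toggle looks like:
 *
 *	int flags = fcntl(fd, F_GETFL);
 *	fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 *
 * Bits outside SETFL_MASK are silently preserved from the old f_flags.
 */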

static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
			uid_t uid, uid_t euid, int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;
		filp->f_owner.uid = uid;
		filp->f_owner.euid = euid;
	}
	write_unlock_irq(&filp->f_owner.lock);
}

int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	int err;

	err = security_file_set_fowner(filp);
	if (err)
		return err;

	f_modown(filp, pid, type, current->uid, current->euid, force);
	return 0;
}
EXPORT_SYMBOL(__f_setown);

int f_setown(struct file *filp, unsigned long arg, int force)
{
	enum pid_type type;
	struct pid *pid;
	int who = arg;
	int result;

	type = PIDTYPE_PID;
	if (who < 0) {
		type = PIDTYPE_PGID;
		who = -who;
	}
	rcu_read_lock();
	pid = find_pid(who);
	result = __f_setown(filp, pid, type, force);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL(f_setown);
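
/*
 * Illustration (not part of the original file): F_SETOWN encodes its
 * target in the sign of the argument - positive names a process,
 * negative a process group ("pgrp" below is a hypothetical variable):
 *
 *	fcntl(fd, F_SETOWN, getpid());	// deliver SIGIO to this process
 *	fcntl(fd, F_SETOWN, -pgrp);	// deliver to the whole group
 */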

void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_PID, 0, 0, 1);
}

pid_t f_getown(struct file *filp)
{
	pid_t pid;

	read_lock(&filp->f_owner.lock);
	pid = pid_nr(filp->f_owner.pid);
	if (filp->f_owner.pid_type == PIDTYPE_PGID)
		pid = -pid;
	read_unlock(&filp->f_owner.lock);
	return pid;
}

static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		get_file(filp);
		err = dupfd(filp, arg);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
	case F_GETLK:
		err = fcntl_getlk(filp, (struct flock __user *) arg);
		break;
	case F_SETLK:
	case F_SETLKW:
		err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg))
			break;
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	default:
		break;
	}
	return err;
}
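
/*
 * Illustration (not part of the original file): combining the commands
 * above, a process can request realtime SIGIO-style delivery with the
 * triggering fd reported in siginfo; SIGRTMIN is just an example signal:
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETSIG, SIGRTMIN);
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *
 * send_sigio_to_task() below then queues the chosen signal with si_fd set.
 */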

asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file *filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	err = do_fcntl(fd, cmd, arg, filp);

	fput(filp);
out:
	return err;
}

#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file * filp;
	long err;

	err = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}
	err = -EBADF;

	switch (cmd) {
	case F_GETLK64:
		err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
		break;
	case F_SETLK64:
	case F_SETLKW64:
		err = fcntl_setlk64(fd, filp, cmd,
				(struct flock64 __user *) arg);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, filp);
		break;
	}
	fput(filp);
out:
	return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};

static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	return (((fown->euid == 0) ||
		 (fown->euid == p->suid) || (fown->euid == p->uid) ||
		 (fown->uid == p->suid) || (fown->uid == p->uid)) &&
		!security_file_send_sigiotask(p, fown, sig));
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd,
			       int reason)
{
	if (!sigio_perm(p, fown, fown->signum))
		return;

	switch (fown->signum) {
		siginfo_t si;
		default:
			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not
			   SI_KERNEL, since kernel signals always get
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			si.si_signo = fown->signum;
			si.si_errno = 0;
			si.si_code  = reason;
			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			BUG_ON((reason & __SI_MASK) != __SI_POLL);
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band  = ~0L;
			else
				si.si_band = band_table[reason - POLL_IN];
			si.si_fd    = fd;
			if (!group_send_sig_info(fown->signum, &si, p))
				break;
		/* fall-through: fall back on the old plain SIGIO signal */
		case 0:
			group_send_sig_info(SIGIO, SEND_SIG_PRIV, p);
	}
}

void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;

	read_lock(&fown->lock);
	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigio_to_task(p, fown, fd, band);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown)
{
	if (sigio_perm(p, fown, SIGURG))
		group_send_sig_info(SIGURG, SEND_SIG_PRIV, p);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int ret = 0;

	read_lock(&fown->lock);
	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigurg_to_task(p, fown);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}
EXPORT_SYMBOL(send_sigurg);
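
/*
 * Note (illustrative, based on kernels of this era): send_sigurg() is
 * exported for the networking core, which calls it when TCP urgent data
 * arrives (via sk_send_sigurg()), so the fd owner receives SIGURG.
 */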

static DEFINE_RWLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue. It returns negative on error, 0 if it did
 * no changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	struct fasync_struct *new = NULL;
	int result = 0;

	if (on) {
		new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
		if (!new)
			return -ENOMEM;
	}
	write_lock_irq(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file == filp) {
			if (on) {
				fa->fa_fd = fd;
				kmem_cache_free(fasync_cache, new);
			} else {
				*fp = fa->fa_next;
				kmem_cache_free(fasync_cache, fa);
				result = 1;
			}
			goto out;
		}
	}

	if (on) {
		new->magic = FASYNC_MAGIC;
		new->fa_file = filp;
		new->fa_fd = fd;
		new->fa_next = *fapp;
		*fapp = new;
		result = 1;
	}
out:
	write_unlock_irq(&fasync_lock);
	return result;
}

EXPORT_SYMBOL(fasync_helper);
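
/*
 * Illustration (not part of the original file): a character device driver
 * typically wires this up from its fasync file operation; the names
 * "mydev_queue" and "mydev_fasync" are hypothetical:
 *
 *	static struct fasync_struct *mydev_queue;
 *
 *	static int mydev_fasync(int fd, struct file *filp, int on)
 *	{
 *		return fasync_helper(fd, filp, on, &mydev_queue);
 *	}
 */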

void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct * fown;
		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		fown = &fa->fa_file->f_owner;
		/* Don't send SIGURG to processes which have not set a
		   queued signum: SIGURG has its own default signalling
		   mechanism. */
		if (!(sig == SIGURG && fown->signum == 0))
			send_sigio(fown, fa->fa_fd, band);
		fa = fa->fa_next;
	}
}

EXPORT_SYMBOL(__kill_fasync);

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		read_lock(&fasync_lock);
		/* reread *fp after obtaining the lock */
		__kill_fasync(*fp, sig, band);
		read_unlock(&fasync_lock);
	}
}
EXPORT_SYMBOL(kill_fasync);
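
/*
 * Illustration (not part of the original file): when new data arrives, a
 * driver notifies its async readers - continuing the hypothetical
 * "mydev_queue" example above - with:
 *
 *	kill_fasync(&mydev_queue, SIGIO, POLL_IN);
 */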

static int __init fasync_init(void)
{
	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
	return 0;
}

module_init(fasync_init)