/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/vs_limit.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

void fastcall set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	if (flag)
		FD_SET(fd, files->close_on_exec);
	else
		FD_CLR(fd, files->close_on_exec);
	spin_unlock(&files->file_lock);
}

static inline int get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	int res;
	spin_lock(&files->file_lock);
	res = FD_ISSET(fd, files->close_on_exec);
	spin_unlock(&files->file_lock);
	return res;
}

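/*
 * Illustrative userspace counterpart (a hedged sketch, not part of this
 * file): the F_GETFD/F_SETFD commands land in the two helpers above.
 * set_cloexec() is a hypothetical name.
 */
#if 0
#include <fcntl.h>

static int set_cloexec(int fd)
{
	int flags = fcntl(fd, F_GETFD);	/* reads the close_on_exec bit */

	if (flags < 0)
		return -1;
	return fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
}
#endif
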
/* Expand files.  Return <0 on error; 0 nothing done; 1 files expanded,
 * we may have blocked.
 *
 * Should be called with the files->file_lock spinlock held for write.
 */
static int expand_files(struct files_struct *files, int nr)
{
	int err, expand = 0;
#ifdef FDSET_DEBUG
	printk(KERN_ERR "%s %d: nr = %d\n", __FUNCTION__, current->pid, nr);
#endif

	if (nr >= files->max_fdset) {
		expand = 1;
		if ((err = expand_fdset(files, nr)))
			goto out;
	}
	if (nr >= files->max_fds) {
		expand = 1;
		if ((err = expand_fd_array(files, nr)))
			goto out;
	}
	err = expand;
out:
#ifdef FDSET_DEBUG
	if (err)
		printk(KERN_ERR "%s %d: return %d\n", __FUNCTION__, current->pid, err);
#endif
	return err;
}

/*
 * locate_fd finds a free file descriptor in the open_fds fdset,
 * expanding the fd arrays if necessary.  Must be called with the
 * file_lock held for write.
 */
static int locate_fd(struct files_struct *files,
			    struct file *file, unsigned int orig_start)
{
	unsigned int newfd;
	unsigned int start;
	int error;

	error = -EINVAL;
	if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

repeat:
	/*
	 * Someone might have closed fd's in the range
	 * orig_start..files->next_fd
	 */
	start = orig_start;
	if (start < files->next_fd)
		start = files->next_fd;

	newfd = start;
	if (start < files->max_fdset) {
		newfd = find_next_zero_bit(files->open_fds->fds_bits,
			files->max_fdset, start);
	}

	error = -EMFILE;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;
	if (!vx_files_avail(1))
		goto out;

	error = expand_files(files, newfd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fs array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = newfd + 1;

	error = newfd;

out:
	return error;
}

int dupfd(struct file *file, unsigned int start)
{
	struct files_struct * files = current->files;
	int fd;

	spin_lock(&files->file_lock);
	fd = locate_fd(files, file, start);
	if (fd >= 0) {
		FD_SET(fd, files->open_fds);
		FD_CLR(fd, files->close_on_exec);
		spin_unlock(&files->file_lock);
		// vx_openfd_inc(fd);
		fd_install(fd, file);
	} else {
		spin_unlock(&files->file_lock);
		fput(file);
	}

	return fd;
}

EXPORT_SYMBOL_GPL(dupfd);

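/*
 * Illustrative userspace view (a hedged sketch, not part of this file):
 * fcntl(F_DUPFD) reaches dupfd() with a caller-chosen lower bound, so the
 * result is the lowest free descriptor at or above it.  dup_high() is a
 * hypothetical name.
 */
#if 0
#include <fcntl.h>

static int dup_high(int fd)
{
	return fcntl(fd, F_DUPFD, 10);	/* lowest free descriptor >= 10 */
}
#endif
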
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
	int err = -EBADF;
	struct file * file, *tofree;
	struct files_struct * files = current->files;

	spin_lock(&files->file_lock);
	if (!(file = fcheck(oldfd)))
		goto out_unlock;
	err = newfd;
	if (newfd == oldfd)
		goto out_unlock;
	err = -EBADF;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out_unlock;
	get_file(file);			/* We are now finished with oldfd */

	err = expand_files(files, newfd);
	if (err < 0)
		goto out_fput;

	/* To avoid races with open() and dup(), we will mark the fd as
	 * in-use in the open-file bitmap throughout the entire dup2()
	 * process.  This is quite safe: do_close() uses the fd array
	 * entry, not the bitmap, to decide what work needs to be
	 * done.  --sct */
	/* Doesn't work. open() might be there first. --AV */

	/* Yes. It's a race. In user space. Nothing sane to do */
	err = -EBUSY;
	tofree = files->fd[newfd];
	if (!tofree && FD_ISSET(newfd, files->open_fds))
		goto out_fput;

	files->fd[newfd] = file;
	FD_SET(newfd, files->open_fds);
	FD_CLR(newfd, files->close_on_exec);
	spin_unlock(&files->file_lock);
	// vx_openfd_inc(newfd);

	if (tofree)
		filp_close(tofree, files);
	err = newfd;
out:
	return err;
out_unlock:
	spin_unlock(&files->file_lock);
	goto out;

out_fput:
	spin_unlock(&files->file_lock);
	fput(file);
	goto out;
}

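/*
 * Illustrative userspace use (a hedged sketch, not part of this file):
 * dup2() atomically replaces whatever sits at the target descriptor, as
 * the tofree handling above shows.  redirect_stdout() is a hypothetical
 * name.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int redirect_stdout(const char *path)
{
	int fd = open(path, O_WRONLY | O_CREAT | O_APPEND, 0644);

	if (fd < 0)
		return -1;
	if (dup2(fd, STDOUT_FILENO) < 0) {	/* old fd 1 is closed for us */
		close(fd);
		return -1;
	}
	close(fd);				/* keep only the stdout copy */
	return 0;
}
#endif
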
asmlinkage long sys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file * file = fget(fildes);

	if (file)
		ret = dupfd(file, 0);
	return ret;
}

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file * filp, unsigned long arg)
{
	struct inode * inode = filp->f_dentry->d_inode;
	int error = 0;

	/* O_APPEND cannot be cleared if the file is marked as append-only */
	if (!(arg & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	if (arg & O_DIRECT) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
			!filp->f_mapping->a_ops->direct_IO)
				return -EINVAL;
	}

	if (filp->f_op && filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	lock_kernel();
	if ((arg ^ filp->f_flags) & FASYNC) {
		if (filp->f_op && filp->f_op->fasync) {
			error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
			if (error < 0)
				goto out;
		}
	}

	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
out:
	unlock_kernel();
	return error;
}

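/*
 * Illustrative userspace use (a hedged sketch, not part of this file):
 * F_SETFL funnels into setfl(), which only lets the SETFL_MASK bits
 * change.  set_nonblock() is a hypothetical name.
 */
#if 0
#include <fcntl.h>

static int set_nonblock(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags < 0)
		return -1;
	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}
#endif
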
static void f_modown(struct file *filp, unsigned long pid,
			uid_t uid, uid_t euid, int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		filp->f_owner.pid = pid;
		filp->f_owner.uid = uid;
		filp->f_owner.euid = euid;
	}
	write_unlock_irq(&filp->f_owner.lock);
}

int f_setown(struct file *filp, unsigned long arg, int force)
{
	int err;

	err = security_file_set_fowner(filp);
	if (err)
		return err;

	f_modown(filp, arg, current->uid, current->euid, force);
	return 0;
}

EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
	f_modown(filp, 0, 0, 0, 1);
}

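/*
 * Illustrative userspace use (a hedged sketch, not part of this file):
 * F_SETOWN records the signal target via f_setown() above; O_ASYNC (the
 * userspace name for FASYNC) then enables SIGIO delivery.  arm_sigio()
 * is a hypothetical name.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int arm_sigio(int fd)
{
	int flags;

	if (fcntl(fd, F_SETOWN, getpid()) < 0)	/* who receives SIGIO */
		return -1;
	flags = fcntl(fd, F_GETFL);
	if (flags < 0)
		return -1;
	return fcntl(fd, F_SETFL, flags | O_ASYNC);
}
#endif
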
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		get_file(filp);
		err = dupfd(filp, arg);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
	case F_GETLK:
		err = fcntl_getlk(filp, (struct flock __user *) arg);
		break;
	case F_SETLK:
	case F_SETLKW:
		err = fcntl_setlk(filp, cmd, (struct flock __user *) arg);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = filp->f_owner.pid;
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (arg < 0 || arg > _NSIG) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	default:
		break;
	}
	return err;
}

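/*
 * Illustrative userspace use (a hedged sketch, not part of this file):
 * F_SETSIG with a realtime signal makes the F_SETSIG case above queue a
 * siginfo whose si_fd names the ready descriptor (see
 * send_sigio_to_task() below).  Needs _GNU_SOURCE; use_rt_sigio() is a
 * hypothetical name.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>

static int use_rt_sigio(int fd)
{
	return fcntl(fd, F_SETSIG, SIGRTMIN);	/* queued, carries si_fd */
}
#endif
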
asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file *filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	err = do_fcntl(fd, cmd, arg, filp);

	fput(filp);
out:
	return err;
}

#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file * filp;
	long err;

	err = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	switch (cmd) {
	case F_GETLK64:
		err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
		break;
	case F_SETLK64:
	case F_SETLKW64:
		err = fcntl_setlk64(filp, cmd, (struct flock64 __user *) arg);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, filp);
		break;
	}
	fput(filp);
out:
	return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};

static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	return (((fown->euid == 0) ||
		 (fown->euid == p->suid) || (fown->euid == p->uid) ||
		 (fown->uid == p->suid) || (fown->uid == p->uid)) &&
		!security_file_send_sigiotask(p, fown, sig));
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd,
			       int reason)
{
	if (!sigio_perm(p, fown, fown->signum))
		return;

	switch (fown->signum) {
		siginfo_t si;
		default:
			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not
			   SI_KERNEL, since kernel signals always get
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			si.si_signo = fown->signum;
			si.si_errno = 0;
			si.si_code  = reason;
			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			if ((reason & __SI_MASK) != __SI_POLL)
				BUG();
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band = ~0L;
			else
				si.si_band = band_table[reason - POLL_IN];
			si.si_fd = fd;
			if (!send_sig_info(fown->signum, &si, p))
				break;
		/* fall-through: fall back on the old plain SIGIO signal */
		case 0:
			send_group_sig_info(SIGIO, SEND_SIG_PRIV, p);
	}
}

void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	int pid;

	read_lock(&fown->lock);
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	if (pid > 0) {
		p = find_task_by_real_pid(pid);
		if (p) {
			send_sigio_to_task(p, fown, fd, band);
		}
	} else {
		do_each_task_pid(-pid, PIDTYPE_PGID, p) {
			send_sigio_to_task(p, fown, fd, band);
		} while_each_task_pid(-pid, PIDTYPE_PGID, p);
	}
	read_unlock(&tasklist_lock);
out_unlock_fown:
	read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown)
{
	if (sigio_perm(p, fown, SIGURG))
		send_group_sig_info(SIGURG, SEND_SIG_PRIV, p);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	int pid, ret = 0;

	read_lock(&fown->lock);
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	if (pid > 0) {
		p = find_task_by_real_pid(pid);
		if (p) {
			send_sigurg_to_task(p, fown);
		}
	} else {
		do_each_task_pid(-pid, PIDTYPE_PGID, p) {
			send_sigurg_to_task(p, fown);
		} while_each_task_pid(-pid, PIDTYPE_PGID, p);
	}
	read_unlock(&tasklist_lock);
out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}

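/*
 * Illustrative userspace use (a hedged sketch, not part of this file):
 * SIGURG is only delivered for TCP out-of-band data once an owner has
 * been registered, matching the pid check in send_sigurg() above.
 * arm_sigurg() is a hypothetical name.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int arm_sigurg(int sock)
{
	return fcntl(sock, F_SETOWN, getpid());
}
#endif
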
static rwlock_t fasync_lock = RW_LOCK_UNLOCKED;
static kmem_cache_t *fasync_cache;

/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue. It returns negative on error, 0 if it did
 * no changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	struct fasync_struct *new = NULL;
	int result = 0;

	if (on) {
		new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
		if (!new)
			return -ENOMEM;
	}
	write_lock_irq(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file == filp) {
			if (on) {
				fa->fa_fd = fd;
				kmem_cache_free(fasync_cache, new);
				goto out;
			} else {
				*fp = fa->fa_next;
				kmem_cache_free(fasync_cache, fa);
				result = 1;
				goto out;
			}
		}
	}

	if (on) {
		new->magic = FASYNC_MAGIC;
		new->fa_file = filp;
		new->fa_fd = fd;
		new->fa_next = *fapp;
		*fapp = new;
		result = 1;
	}
out:
	write_unlock_irq(&fasync_lock);
	return result;
}

EXPORT_SYMBOL(fasync_helper);

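/*
 * Illustrative in-kernel use (a hedged sketch, not part of this file): a
 * character driver typically wires its ->fasync file operation straight
 * to fasync_helper().  The my_* names are hypothetical.
 */
#if 0
static struct fasync_struct *my_async_queue;

static int my_fasync(int fd, struct file *filp, int on)
{
	/* add to or remove from the driver's notification list */
	return fasync_helper(fd, filp, on, &my_async_queue);
}
#endif
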
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct * fown;
		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		fown = &fa->fa_file->f_owner;
		/* Don't send SIGURG to processes which have not set a
		   queued signum: SIGURG has its own default signalling
		   mechanism. */
		if (!(sig == SIGURG && fown->signum == 0))
			send_sigio(fown, fa->fa_fd, band);
		fa = fa->fa_next;
	}
}

EXPORT_SYMBOL(__kill_fasync);

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		read_lock(&fasync_lock);
		/* reread *fp after obtaining the lock */
		__kill_fasync(*fp, sig, band);
		read_unlock(&fasync_lock);
	}
}
EXPORT_SYMBOL(kill_fasync);

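/*
 * Illustrative in-kernel use (a hedged sketch, not part of this file):
 * when new data arrives, the same hypothetical driver pokes every
 * registered listener from its receive path.
 */
#if 0
static void my_notify_readers(void)
{
	/* SIGIO (or the fd's F_SETSIG signal) with a "readable" band */
	kill_fasync(&my_async_queue, SIGIO, POLL_IN);
}
#endif
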
static int __init fasync_init(void)
{
	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
	return 0;
}

module_init(fasync_init)