/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>
void fastcall set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;

	spin_lock(&files->file_lock);
	if (flag)
		FD_SET(fd, files->close_on_exec);
	else
		FD_CLR(fd, files->close_on_exec);
	spin_unlock(&files->file_lock);
}
static inline int get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	int res;

	spin_lock(&files->file_lock);
	res = FD_ISSET(fd, files->close_on_exec);
	spin_unlock(&files->file_lock);
	return res;
}
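
/*
 * Usage sketch (userspace, not part of this file): the two helpers above
 * back the F_GETFD and F_SETFD fcntl() commands.  A caller toggles the
 * close-on-exec flag with the usual read-modify-write pattern:
 *
 *	int flags = fcntl(fd, F_GETFD);
 *	if (flags >= 0)
 *		fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
 */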
/* Expand files.  Return <0 on error; 0 nothing done; 1 files expanded,
 * we may have blocked.
 *
 * Should be called with the files->file_lock spinlock held for write.
 */
static int expand_files(struct files_struct *files, int nr)
{
	int err, expand = 0;

#ifdef FDSET_DEBUG
	printk(KERN_ERR "%s %d: nr = %d\n", __FUNCTION__, current->pid, nr);
#endif
	if (nr >= files->max_fdset) {
		expand = 1;
		if ((err = expand_fdset(files, nr)))
			goto out;
	}
	if (nr >= files->max_fds) {
		expand = 1;
		if ((err = expand_fd_array(files, nr)))
			goto out;
	}
	err = expand;
out:
#ifdef FDSET_DEBUG
	if (err)
		printk(KERN_ERR "%s %d: return %d\n", __FUNCTION__, current->pid, err);
#endif
	return err;
}
/*
 * locate_fd finds a free file descriptor in the open_fds fdset,
 * expanding the fd arrays if necessary.  Must be called with the
 * file_lock held for write.
 */
static int locate_fd(struct files_struct *files,
		     struct file *file, unsigned int orig_start)
{
	unsigned int newfd;
	unsigned int start;
	int error;

	error = -EINVAL;
	if (orig_start >= current->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

repeat:
	/*
	 * Someone might have closed fd's in the range
	 * orig_start..files->next_fd
	 */
	start = orig_start;
	if (start < files->next_fd)
		start = files->next_fd;

	newfd = start;
	if (start < files->max_fdset) {
		newfd = find_next_zero_bit(files->open_fds->fds_bits,
					   files->max_fdset, start);
	}

	error = -EMFILE;
	if (newfd >= current->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

	error = expand_files(files, newfd);
	if (error < 0)
		goto out;
	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = newfd + 1;

	error = newfd;
out:
	return error;
}
static int dupfd(struct file *file, unsigned int start)
{
	struct files_struct * files = current->files;
	int fd;

	spin_lock(&files->file_lock);
	fd = locate_fd(files, file, start);
	if (fd >= 0) {
		FD_SET(fd, files->open_fds);
		FD_CLR(fd, files->close_on_exec);
		spin_unlock(&files->file_lock);
		fd_install(fd, file);
	} else {
		spin_unlock(&files->file_lock);
		fput(file);
	}
	return fd;
}
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
	int err = -EBADF;
	struct file * file, *tofree;
	struct files_struct * files = current->files;

	spin_lock(&files->file_lock);
	if (!(file = fcheck(oldfd)))
		goto out_unlock;
	err = newfd;
	if (newfd == oldfd)
		goto out_unlock;
	err = -EBADF;
	if (newfd >= current->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out_unlock;
	get_file(file);			/* We are now finished with oldfd */

	err = expand_files(files, newfd);
	if (err < 0)
		goto out_fput;

	/* To avoid races with open() and dup(), we will mark the fd as
	 * in-use in the open-file bitmap throughout the entire dup2()
	 * process.  This is quite safe: do_close() uses the fd array
	 * entry, not the bitmap, to decide what work needs to be
	 * done.  --sct */
	/* Doesn't work. open() might be there first. --AV */

	/* Yes. It's a race. In user space. Nothing sane to do */
	err = -EBUSY;
	tofree = files->fd[newfd];
	if (!tofree && FD_ISSET(newfd, files->open_fds))
		goto out_fput;

	files->fd[newfd] = file;
	FD_SET(newfd, files->open_fds);
	FD_CLR(newfd, files->close_on_exec);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);
	err = newfd;
out:
	return err;

out_unlock:
	spin_unlock(&files->file_lock);
	goto out;

out_fput:
	spin_unlock(&files->file_lock);
	fput(file);
	goto out;
}
asmlinkage long sys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file * file = fget(fildes);

	if (file)
		ret = dupfd(file, 0);
	return ret;
}
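
/*
 * Usage sketch (userspace, not part of this file): dup() hands back the
 * lowest free descriptor, while dup2() lets the caller pick the target.
 * The classic use is redirecting a standard descriptor, e.g. stdout:
 *
 *	int logfd = open("log.txt", O_WRONLY | O_CREAT | O_APPEND, 0644);
 *	if (logfd >= 0) {
 *		dup2(logfd, STDOUT_FILENO);
 *		close(logfd);
 *	}
 */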
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT)

static int setfl(int fd, struct file * filp, unsigned long arg)
{
	struct inode * inode = filp->f_dentry->d_inode;
	int error = 0;

	/* O_APPEND cannot be cleared if the file is marked as append-only */
	if (!(arg & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NDELAY must imply O_NONBLOCK; required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	/* O_DIRECT can only be set if the mapping supports direct I/O */
	if (arg & O_DIRECT) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
		    !filp->f_mapping->a_ops->direct_IO)
			return -EINVAL;
	}

	lock_kernel();
	if ((arg ^ filp->f_flags) & FASYNC) {
		if (filp->f_op && filp->f_op->fasync) {
			error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
			if (error < 0)
				goto out;
		}
	}

	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
out:
	unlock_kernel();
	return error;
}
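
/*
 * Usage sketch (userspace, not part of this file): F_SETFL only honours
 * the bits in SETFL_MASK, so callers flip individual flags with a
 * read-modify-write sequence, e.g. to make a descriptor non-blocking:
 *
 *	int flags = fcntl(fd, F_GETFL);
 *	if (flags >= 0)
 *		fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 */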
static void f_modown(struct file *filp, unsigned long pid,
		     uid_t uid, uid_t euid, int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		filp->f_owner.pid = pid;
		filp->f_owner.uid = uid;
		filp->f_owner.euid = euid;
	}
	write_unlock_irq(&filp->f_owner.lock);
}
int f_setown(struct file *filp, unsigned long arg, int force)
{
	int err;

	err = security_file_set_fowner(filp);
	if (err)
		return err;

	f_modown(filp, arg, current->uid, current->euid, force);
	return 0;
}

EXPORT_SYMBOL(f_setown);
void f_delown(struct file *filp)
{
	f_modown(filp, 0, 0, 0, 1);
}

EXPORT_SYMBOL(f_delown);
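
/*
 * Usage sketch (userspace, not part of this file): f_setown() is the
 * backend for F_SETOWN.  A process that wants SIGIO-driven I/O claims
 * ownership of the descriptor and then enables async notification
 * (O_ASYNC is the userspace name for FASYNC):
 *
 *	signal(SIGIO, sigio_handler);
 *	fcntl(fd, F_SETOWN, getpid());
 *	int flags = fcntl(fd, F_GETFL);
 *	fcntl(fd, F_SETFL, flags | O_ASYNC);
 */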
long generic_file_fcntl(int fd, unsigned int cmd,
			unsigned long arg, struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		get_file(filp);
		err = dupfd(filp, arg);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
	case F_GETLK:
		err = fcntl_getlk(filp, (struct flock __user *) arg);
		break;
	case F_SETLK:
	case F_SETLKW:
		err = fcntl_setlk(filp, cmd, (struct flock __user *) arg);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = filp->f_owner.pid;
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (arg > _NSIG)
			break;
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	default:
		break;
	}
	return err;
}

EXPORT_SYMBOL(generic_file_fcntl);
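
/*
 * Usage sketch (userspace, not part of this file): the F_GETLK/F_SETLK/
 * F_SETLKW cases above implement POSIX advisory record locking.  Taking
 * a blocking write lock on a whole file (l_len == 0 means "to EOF"):
 *
 *	struct flock fl;
 *	fl.l_type = F_WRLCK;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 0;
 *	if (fcntl(fd, F_SETLKW, &fl) == -1)
 *		perror("fcntl(F_SETLKW)");
 */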
static long do_fcntl(int fd, unsigned int cmd,
		     unsigned long arg, struct file *filp)
{
	if (filp->f_op && filp->f_op->fcntl)
		return filp->f_op->fcntl(fd, cmd, arg, filp);
	return generic_file_fcntl(fd, cmd, arg, filp);
}
asmlinkage long sys_fcntl(int fd, unsigned int cmd, unsigned long arg)
{
	struct file *filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	err = do_fcntl(fd, cmd, arg, filp);
	fput(filp);
out:
	return err;
}
#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file *filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	switch (cmd) {
	case F_GETLK64:
		err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
		break;
	case F_SETLK64:
	case F_SETLKW64:
		err = fcntl_setlk64(filp, cmd, (struct flock64 __user *) arg);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, filp);
		break;
	}
	fput(filp);
out:
	return err;
}
#endif
/* Table to convert sigio signal codes into poll band bitmaps */

static long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};
static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown)
{
	return ((fown->euid == 0) ||
		(fown->euid == p->suid) || (fown->euid == p->uid) ||
		(fown->uid == p->suid) || (fown->uid == p->uid));
}
static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason)
{
	siginfo_t si;

	if (!sigio_perm(p, fown))
		return;

	if (security_file_send_sigiotask(p, fown, fd, reason))
		return;

	switch (fown->signum) {
	default:
		/* Queue a rt signal with the appropriate fd as its
		   value.  We use SI_SIGIO as the source, not
		   SI_KERNEL, since kernel signals always get
		   delivered even if we can't queue.  Failure to
		   queue in this case _should_ be reported; we fall
		   back to SIGIO in that case. --sct */
		si.si_signo = fown->signum;
		si.si_errno = 0;
		si.si_code  = reason;
		/* Make sure we are called with one of the POLL_*
		   reasons, otherwise we could leak kernel stack into
		   userspace.  */
		if ((reason & __SI_MASK) != __SI_POLL)
			BUG();
		if (reason - POLL_IN >= NSIGPOLL)
			si.si_band = ~0L;
		else
			si.si_band = band_table[reason - POLL_IN];
		si.si_fd = fd;
		if (!send_sig_info(fown->signum, &si, p))
			break;
		/* fall-through: fall back on the old plain SIGIO signal */
	case 0:
		send_group_sig_info(SIGIO, SEND_SIG_PRIV, p);
	}
}
void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	int pid;

	read_lock(&fown->lock);
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	if (pid > 0) {
		/* a positive pid names a single task */
		p = find_task_by_pid(pid);
		if (p)
			send_sigio_to_task(p, fown, fd, band);
	} else {
		/* a negative pid names a process group */
		struct list_head *l;
		struct pid *pidptr;

		for_each_task_pid(-pid, PIDTYPE_PGID, p, l, pidptr)
			send_sigio_to_task(p, fown, fd, band);
	}
	read_unlock(&tasklist_lock);
out_unlock_fown:
	read_unlock(&fown->lock);
}
static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown)
{
	if (sigio_perm(p, fown))
		send_group_sig_info(SIGURG, SEND_SIG_PRIV, p);
}
int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	int pid, ret = 0;

	read_lock(&fown->lock);
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	if (pid > 0) {
		p = find_task_by_pid(pid);
		if (p)
			send_sigurg_to_task(p, fown);
	} else {
		struct list_head *l;
		struct pid *pidptr;

		for_each_task_pid(-pid, PIDTYPE_PGID, p, l, pidptr)
			send_sigurg_to_task(p, fown);
	}
	read_unlock(&tasklist_lock);
out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}
static rwlock_t fasync_lock = RW_LOCK_UNLOCKED;
static kmem_cache_t *fasync_cache;
/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue.  It returns negative on error, 0 if it made
 * no change, and positive if it added or deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	struct fasync_struct *new = NULL;
	int result = 0;

	if (on) {
		new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
		if (!new)
			return -ENOMEM;
	}
	write_lock_irq(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file == filp) {
			if (on) {
				/* already on the queue: just update the fd */
				fa->fa_fd = fd;
				kmem_cache_free(fasync_cache, new);
			} else {
				/* unlink and free the existing entry */
				*fp = fa->fa_next;
				kmem_cache_free(fasync_cache, fa);
				result = 1;
			}
			goto out;
		}
	}

	if (on) {
		new->magic = FASYNC_MAGIC;
		new->fa_file = filp;
		new->fa_fd = fd;
		new->fa_next = *fapp;
		*fapp = new;
		result = 1;
	}
out:
	write_unlock_irq(&fasync_lock);
	return result;
}
EXPORT_SYMBOL(fasync_helper);
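
/*
 * Driver-side sketch (hypothetical, not part of this file): a character
 * driver typically wires its fasync file operation straight through to
 * fasync_helper(), keeping a private queue head.  The mydev_* names are
 * illustrative only:
 *
 *	static struct fasync_struct *mydev_async_queue;
 *
 *	static int mydev_fasync(int fd, struct file *filp, int on)
 *	{
 *		return fasync_helper(fd, filp, on, &mydev_async_queue);
 *	}
 */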
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct * fown;

		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		fown = &fa->fa_file->f_owner;
		/* Don't send SIGURG to processes which have not set a
		   queued signum: SIGURG has its own default signalling
		   mechanism. */
		if (!(sig == SIGURG && fown->signum == 0))
			send_sigio(fown, fa->fa_fd, band);
		fa = fa->fa_next;
	}
}

EXPORT_SYMBOL(__kill_fasync);
void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		read_lock(&fasync_lock);
		/* reread *fp after obtaining the lock */
		__kill_fasync(*fp, sig, band);
		read_unlock(&fasync_lock);
	}
}

EXPORT_SYMBOL(kill_fasync);
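
/*
 * Driver-side sketch (hypothetical, continuing the mydev_* example near
 * fasync_helper() above): when new data becomes readable, the driver
 * notifies every registered listener; its release method should also
 * call the fasync method with on == 0 to drop the file from the queue:
 *
 *	kill_fasync(&mydev_async_queue, SIGIO, POLL_IN);
 */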
static int __init fasync_init(void)
{
	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, 0, NULL, NULL);
	if (!fasync_cache)
		panic("cannot create fasync slab cache");
	return 0;
}

module_init(fasync_init)