2 * POSIX message queues filesystem for Linux.
4 * Copyright (C) 2003,2004 Krzysztof Benedyczak (golbi@mat.uni.torun.pl)
5 * Michal Wronski (Michal.Wronski@motorola.com)
7 * Spinlocks: Mohamed Abbas (abbas.mohamed@intel.com)
8 * Lockless receive & send, fd based notify:
9 * Manfred Spraul (manfred@colorfullife.com)
11 * Audit: George Wilson (ltcgcw@us.ibm.com)
13 * This file is released under the GPL.
16 #include <linux/capability.h>
17 #include <linux/init.h>
18 #include <linux/pagemap.h>
19 #include <linux/file.h>
20 #include <linux/mount.h>
21 #include <linux/namei.h>
22 #include <linux/sysctl.h>
23 #include <linux/poll.h>
24 #include <linux/mqueue.h>
25 #include <linux/msg.h>
26 #include <linux/skbuff.h>
27 #include <linux/netlink.h>
28 #include <linux/syscalls.h>
29 #include <linux/audit.h>
30 #include <linux/signal.h>
31 #include <linux/mutex.h>
32 #include <linux/vs_base.h>
33 #include <linux/vs_context.h>
34 #include <linux/vs_limit.h>
39 #define MQUEUE_MAGIC 0x19800202
40 #define DIRENT_SIZE 20
41 #define FILENT_SIZE 80
47 #define STATE_PENDING 1
52 #define CTL_QUEUESMAX 2
54 #define CTL_MSGSIZEMAX 4
57 #define DFLT_QUEUESMAX 256 /* max number of message queues */
58 #define DFLT_MSGMAX 10 /* max number of messages in each queue */
59 #define HARD_MSGMAX (131072/sizeof(void*))
60 #define DFLT_MSGSIZEMAX 8192 /* max message size */
63 struct ext_wait_queue { /* queue of sleeping tasks */
64 struct task_struct *task;
65 struct list_head list;
66 struct msg_msg *msg; /* ptr of loaded message */
67 int state; /* one of STATE_* values */
70 struct mqueue_inode_info {
72 struct inode vfs_inode;
73 wait_queue_head_t wait_q;
75 struct msg_msg **messages;
78 struct sigevent notify;
80 struct user_struct *user; /* user who created, for accounting */
81 struct sock *notify_sock;
82 struct sk_buff *notify_cookie;
84 /* for tasks waiting for free space and messages, respectively */
85 struct ext_wait_queue e_wait_q[2];
87 unsigned long qsize; /* size of queue in memory (sum of all msgs) */
90 static struct inode_operations mqueue_dir_inode_operations;
91 static struct file_operations mqueue_file_operations;
92 static struct super_operations mqueue_super_ops;
93 static void remove_notification(struct mqueue_inode_info *info);
95 static spinlock_t mq_lock;
96 static kmem_cache_t *mqueue_inode_cachep;
97 static struct vfsmount *mqueue_mnt;
99 static unsigned int queues_count;
100 static unsigned int queues_max = DFLT_QUEUESMAX;
101 static unsigned int msg_max = DFLT_MSGMAX;
102 static unsigned int msgsize_max = DFLT_MSGSIZEMAX;
104 static struct ctl_table_header * mq_sysctl_table;
106 static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
108 return container_of(inode, struct mqueue_inode_info, vfs_inode);
111 static struct inode *mqueue_get_inode(struct super_block *sb, int mode,
112 struct mq_attr *attr)
116 inode = new_inode(sb);
118 inode->i_mode = mode;
119 inode->i_uid = current->fsuid;
120 inode->i_gid = current->fsgid;
122 inode->i_mtime = inode->i_ctime = inode->i_atime =
126 struct mqueue_inode_info *info;
127 struct task_struct *p = current;
128 struct user_struct *u = p->user;
129 unsigned long mq_bytes, mq_msg_tblsz;
131 inode->i_fop = &mqueue_file_operations;
132 inode->i_size = FILENT_SIZE;
133 /* mqueue specific info */
134 info = MQUEUE_I(inode);
135 spin_lock_init(&info->lock);
136 init_waitqueue_head(&info->wait_q);
137 INIT_LIST_HEAD(&info->e_wait_q[0].list);
138 INIT_LIST_HEAD(&info->e_wait_q[1].list);
139 info->messages = NULL;
140 info->notify_owner = 0;
142 info->user = NULL; /* set when all is ok */
143 memset(&info->attr, 0, sizeof(info->attr));
144 info->attr.mq_maxmsg = DFLT_MSGMAX;
145 info->attr.mq_msgsize = DFLT_MSGSIZEMAX;
147 info->attr.mq_maxmsg = attr->mq_maxmsg;
148 info->attr.mq_msgsize = attr->mq_msgsize;
150 mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
151 mq_bytes = (mq_msg_tblsz +
152 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
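/*
 * Worked example of the charge computed above, assuming the defaults
 * (mq_maxmsg = 10, mq_msgsize = 8192) on a 64-bit build where a message
 * pointer is 8 bytes:
 *
 *	mq_msg_tblsz = 10 * 8          =    80 bytes
 *	mq_bytes     = 80 + 10 * 8192  = 82000 bytes
 *
 * which is what gets checked against the caller's RLIMIT_MSGQUEUE below.
 */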
155 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
156 u->mq_bytes + mq_bytes >
157 p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur ||
158 !vx_ipcmsg_avail(p->vx_info, mq_bytes)) {
159 spin_unlock(&mq_lock);
162 u->mq_bytes += mq_bytes;
163 vx_ipcmsg_add(p->vx_info, u, mq_bytes);
164 spin_unlock(&mq_lock);
166 info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
167 if (!info->messages) {
169 u->mq_bytes -= mq_bytes;
170 vx_ipcmsg_sub(p->vx_info, u, mq_bytes);
171 spin_unlock(&mq_lock);
175 info->user = get_uid(u);
176 } else if (S_ISDIR(mode)) {
178 /* Some things misbehave if size == 0 on a directory */
179 inode->i_size = 2 * DIRENT_SIZE;
180 inode->i_op = &mqueue_dir_inode_operations;
181 inode->i_fop = &simple_dir_operations;
186 make_bad_inode(inode);
191 static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
195 sb->s_blocksize = PAGE_CACHE_SIZE;
196 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
197 sb->s_magic = MQUEUE_MAGIC;
198 sb->s_op = &mqueue_super_ops;
200 inode = mqueue_get_inode(sb, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
204 sb->s_root = d_alloc_root(inode);
213 static int mqueue_get_sb(struct file_system_type *fs_type,
214 int flags, const char *dev_name,
215 void *data, struct vfsmount *mnt)
217 return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt);
220 static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
222 struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
224 if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
225 SLAB_CTOR_CONSTRUCTOR)
226 inode_init_once(&p->vfs_inode);
229 static struct inode *mqueue_alloc_inode(struct super_block *sb)
231 struct mqueue_inode_info *ei;
233 ei = kmem_cache_alloc(mqueue_inode_cachep, SLAB_KERNEL);
236 return &ei->vfs_inode;
239 static void mqueue_destroy_inode(struct inode *inode)
241 kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
244 static void mqueue_delete_inode(struct inode *inode)
246 struct mqueue_inode_info *info;
247 struct user_struct *user;
248 unsigned long mq_bytes;
251 if (S_ISDIR(inode->i_mode)) {
255 info = MQUEUE_I(inode);
256 spin_lock(&info->lock);
257 for (i = 0; i < info->attr.mq_curmsgs; i++)
258 free_msg(info->messages[i]);
259 kfree(info->messages);
260 spin_unlock(&info->lock);
264 mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) +
265 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
268 struct vx_info *vxi = lookup_vx_info(user->xid);
271 user->mq_bytes -= mq_bytes;
272 vx_ipcmsg_sub(vxi, user, mq_bytes);
274 spin_unlock(&mq_lock);
280 static int mqueue_create(struct inode *dir, struct dentry *dentry,
281 int mode, struct nameidata *nd)
284 struct mq_attr *attr = dentry->d_fsdata;
288 if (queues_count >= queues_max && !capable(CAP_SYS_RESOURCE)) {
293 spin_unlock(&mq_lock);
295 inode = mqueue_get_inode(dir->i_sb, mode, attr);
303 dir->i_size += DIRENT_SIZE;
304 dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
306 d_instantiate(dentry, inode);
310 spin_unlock(&mq_lock);
314 static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
316 struct inode *inode = dentry->d_inode;
318 dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
319 dir->i_size -= DIRENT_SIZE;
326 * This routine handles reads from a queue file.
327 * To avoid reimplementing some form of mq_receive here, reading
328 * returns only the queue size & notification info (the only values
329 * that are interesting from the user's point of view and aren't
330 * accessible through the standard routines).
332 static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
333 size_t count, loff_t * off)
335 struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode);
336 char buffer[FILENT_SIZE];
343 spin_lock(&info->lock);
344 snprintf(buffer, sizeof(buffer),
345 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
347 info->notify_owner ? info->notify.sigev_notify : 0,
348 (info->notify_owner &&
349 info->notify.sigev_notify == SIGEV_SIGNAL) ?
350 info->notify.sigev_signo : 0,
352 spin_unlock(&info->lock);
353 buffer[sizeof(buffer)-1] = '\0';
354 slen = strlen(buffer)+1;
360 if (o + count > slen)
363 if (copy_to_user(u_data, buffer + o, count))
367 filp->f_dentry->d_inode->i_atime = filp->f_dentry->d_inode->i_ctime = CURRENT_TIME;
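/*
 * A minimal user-space sketch of reading the status line built above;
 * the read returns the one-line QSIZE/NOTIFY string. It assumes the
 * mqueue filesystem is mounted at /dev/mqueue and that a queue named
 * "/foo" already exists (both are illustrative assumptions):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[80];
 *		ssize_t n;
 *		int fd = open("/dev/mqueue/foo", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		n = read(fd, buf, sizeof(buf) - 1);
 *		if (n > 0) {
 *			buf[n] = '\0';
 *			fputs(buf, stdout);
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */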
371 static int mqueue_flush_file(struct file *filp, fl_owner_t id)
373 struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode);
375 spin_lock(&info->lock);
376 if (current->tgid == info->notify_owner)
377 remove_notification(info);
379 spin_unlock(&info->lock);
383 static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
385 struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode);
388 poll_wait(filp, &info->wait_q, poll_tab);
390 spin_lock(&info->lock);
391 if (info->attr.mq_curmsgs)
392 retval = POLLIN | POLLRDNORM;
394 if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
395 retval |= POLLOUT | POLLWRNORM;
396 spin_unlock(&info->lock);
401 /* Adds the current task to info->e_wait_q[sr] before the first element with a smaller priority */
402 static void wq_add(struct mqueue_inode_info *info, int sr,
403 struct ext_wait_queue *ewp)
405 struct ext_wait_queue *walk;
409 list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
410 if (walk->task->static_prio <= current->static_prio) {
411 list_add_tail(&ewp->list, &walk->list);
415 list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
419 * Puts current task to sleep. Caller must hold queue lock. After return
423 static int wq_sleep(struct mqueue_inode_info *info, int sr,
424 long timeout, struct ext_wait_queue *ewp)
429 wq_add(info, sr, ewp);
432 set_current_state(TASK_INTERRUPTIBLE);
434 spin_unlock(&info->lock);
435 time = schedule_timeout(timeout);
437 while (ewp->state == STATE_PENDING)
440 if (ewp->state == STATE_READY) {
444 spin_lock(&info->lock);
445 if (ewp->state == STATE_READY) {
449 if (signal_pending(current)) {
450 retval = -ERESTARTSYS;
458 list_del(&ewp->list);
460 spin_unlock(&info->lock);
466 * Returns waiting task that should be serviced first or NULL if none exists
468 static struct ext_wait_queue *wq_get_first_waiter(
469 struct mqueue_inode_info *info, int sr)
471 struct list_head *ptr;
473 ptr = info->e_wait_q[sr].list.prev;
474 if (ptr == &info->e_wait_q[sr].list)
476 return list_entry(ptr, struct ext_wait_queue, list);
479 /* Auxiliary functions to manipulate messages' list */
480 static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
484 k = info->attr.mq_curmsgs - 1;
485 while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
486 info->messages[k + 1] = info->messages[k];
489 info->attr.mq_curmsgs++;
490 info->qsize += ptr->m_ts;
491 info->messages[k + 1] = ptr;
494 static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
496 info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
497 return info->messages[info->attr.mq_curmsgs];
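/*
 * msg_insert()/msg_get() keep info->messages[] sorted by priority in
 * non-decreasing order, with equal-priority messages placed so that the
 * oldest one sits at the higher index. For example, sending priorities
 * 5, 1, 9, 5 yields the array {1, 5(new), 5(old), 9}, and msg_get()
 * then returns 9, 5(old), 5(new), 1 - highest priority first, FIFO
 * within a priority, as POSIX requires.
 */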
500 static inline void set_cookie(struct sk_buff *skb, char code)
502 ((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
506 * The next function exists only to split up the overly long sys_mq_timedsend
508 static void __do_notify(struct mqueue_inode_info *info)
511 * invoked when a process is registered for notification, no process is
512 * waiting synchronously for a message, AND the queue state changed from
513 * empty to not empty. Here we are sure that no one is waiting
515 if (info->notify_owner &&
516 info->attr.mq_curmsgs == 1) {
517 struct siginfo sig_i;
518 switch (info->notify.sigev_notify) {
524 sig_i.si_signo = info->notify.sigev_signo;
526 sig_i.si_code = SI_MESGQ;
527 sig_i.si_value = info->notify.sigev_value;
528 sig_i.si_pid = current->tgid;
529 sig_i.si_uid = current->uid;
531 kill_proc_info(info->notify.sigev_signo,
532 &sig_i, info->notify_owner);
535 set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
536 netlink_sendskb(info->notify_sock,
537 info->notify_cookie, 0);
540 /* after delivering the notification the process is unregistered */
541 info->notify_owner = 0;
543 wake_up(&info->wait_q);
546 static long prepare_timeout(const struct timespec __user *u_arg)
548 struct timespec ts, nowts;
552 if (unlikely(copy_from_user(&ts, u_arg,
553 sizeof(struct timespec))))
556 if (unlikely(ts.tv_nsec < 0 || ts.tv_sec < 0
557 || ts.tv_nsec >= NSEC_PER_SEC))
559 nowts = CURRENT_TIME;
560 /* first subtract as jiffies can't be too big */
561 ts.tv_sec -= nowts.tv_sec;
562 if (ts.tv_nsec < nowts.tv_nsec) {
563 ts.tv_nsec += NSEC_PER_SEC;
566 ts.tv_nsec -= nowts.tv_nsec;
570 timeout = timespec_to_jiffies(&ts) + 1;
572 return MAX_SCHEDULE_TIMEOUT;
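/*
 * Note that the timespec handled above is an absolute wall-clock time,
 * not a relative interval. A minimal user-space sketch of constructing
 * one for a roughly 5 second wait (the variable name is illustrative;
 * older glibc needs -lrt for clock_gettime):
 *
 *	#include <time.h>
 *
 *	struct timespec abs_timeout;
 *	clock_gettime(CLOCK_REALTIME, &abs_timeout);
 *	abs_timeout.tv_sec += 5;
 */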
577 static void remove_notification(struct mqueue_inode_info *info)
579 if (info->notify_owner != 0 &&
580 info->notify.sigev_notify == SIGEV_THREAD) {
581 set_cookie(info->notify_cookie, NOTIFY_REMOVED);
582 netlink_sendskb(info->notify_sock, info->notify_cookie, 0);
584 info->notify_owner = 0;
587 static int mq_attr_ok(struct mq_attr *attr)
589 if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
591 if (capable(CAP_SYS_RESOURCE)) {
592 if (attr->mq_maxmsg > HARD_MSGMAX)
595 if (attr->mq_maxmsg > msg_max ||
596 attr->mq_msgsize > msgsize_max)
599 /* check for overflow */
600 if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
602 if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) +
603 (attr->mq_maxmsg * sizeof (struct msg_msg *)) <
604 (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
610 * Invoked when creating a new queue via sys_mq_open
612 static struct file *do_create(struct dentry *dir, struct dentry *dentry,
613 int oflag, mode_t mode, struct mq_attr __user *u_attr)
620 if (copy_from_user(&attr, u_attr, sizeof(attr)))
623 if (!mq_attr_ok(&attr))
625 /* store for use during create */
626 dentry->d_fsdata = &attr;
629 mode &= ~current->fs->umask;
630 ret = vfs_create(dir->d_inode, dentry, mode, NULL);
631 dentry->d_fsdata = NULL;
635 return dentry_open(dentry, mqueue_mnt, oflag);
643 /* Opens existing queue */
644 static struct file *do_open(struct dentry *dentry, int oflag)
646 static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
647 MAY_READ | MAY_WRITE };
649 if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
652 return ERR_PTR(-EINVAL);
655 if (permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE], NULL)) {
658 return ERR_PTR(-EACCES);
661 return dentry_open(dentry, mqueue_mnt, oflag);
664 asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
665 struct mq_attr __user *u_attr)
667 struct dentry *dentry;
672 error = audit_mq_open(oflag, mode, u_attr);
676 if (IS_ERR(name = getname(u_name)))
677 return PTR_ERR(name);
679 fd = get_unused_fd();
683 mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
684 dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
685 if (IS_ERR(dentry)) {
686 error = PTR_ERR(dentry);
691 if (oflag & O_CREAT) {
692 if (dentry->d_inode) { /* entry already exists */
696 filp = do_open(dentry, oflag);
698 filp = do_create(mqueue_mnt->mnt_root, dentry,
699 oflag, mode, u_attr);
703 if (!dentry->d_inode)
705 filp = do_open(dentry, oflag);
709 error = PTR_ERR(filp);
713 set_close_on_exec(fd, 1);
714 fd_install(fd, filp);
725 mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
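/*
 * A minimal user-space sketch of the open path above (queue name,
 * permissions and attribute values are illustrative; link with -lrt).
 * For an unprivileged caller the attributes must stay within msg_max
 * and msgsize_max, otherwise mq_open() fails with EINVAL:
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *	#include <stdio.h>
 *
 *	struct mq_attr attr = {
 *		.mq_maxmsg  = 10,
 *		.mq_msgsize = 128,
 *	};
 *	mqd_t mq = mq_open("/foo", O_CREAT | O_RDWR, 0600, &attr);
 *	if (mq == (mqd_t)-1)
 *		perror("mq_open");
 */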
731 asmlinkage long sys_mq_unlink(const char __user *u_name)
735 struct dentry *dentry;
736 struct inode *inode = NULL;
738 name = getname(u_name);
740 return PTR_ERR(name);
742 mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
743 dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
744 if (IS_ERR(dentry)) {
745 err = PTR_ERR(dentry);
749 if (!dentry->d_inode) {
754 inode = dentry->d_inode;
756 atomic_inc(&inode->i_count);
758 err = vfs_unlink(dentry->d_parent->d_inode, dentry, NULL);
763 mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
771 /* Pipelined send and receive functions.
773 * If a receiver finds no waiting message, then it registers itself in the
774 * list of waiting receivers. A sender checks that list before adding the new
775 * message into the message array. If there is a waiting receiver, then it
776 * bypasses the message array and directly hands the message over to the
778 * The receiver accepts the message and returns without grabbing the queue
779 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
780 * are necessary. The same algorithm is used for sysv semaphores, see
781 * ipc/sem.c for more details.
783 * The same algorithm is used for senders.
786 /* pipelined_send() - send a message directly to the task waiting in
787 * sys_mq_timedreceive() (without inserting message into a queue).
789 static inline void pipelined_send(struct mqueue_inode_info *info,
790 struct msg_msg *message,
791 struct ext_wait_queue *receiver)
793 receiver->msg = message;
794 list_del(&receiver->list);
795 receiver->state = STATE_PENDING;
796 wake_up_process(receiver->task);
798 receiver->state = STATE_READY;
801 /* pipelined_receive() - if a task is waiting in sys_mq_timedsend(),
802 * take its message and insert it into the queue (there is certainly one free slot). */
803 static inline void pipelined_receive(struct mqueue_inode_info *info)
805 struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);
809 wake_up_interruptible(&info->wait_q);
812 msg_insert(sender->msg, info);
813 list_del(&sender->list);
814 sender->state = STATE_PENDING;
815 wake_up_process(sender->task);
817 sender->state = STATE_READY;
820 asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
821 size_t msg_len, unsigned int msg_prio,
822 const struct timespec __user *u_abs_timeout)
826 struct ext_wait_queue wait;
827 struct ext_wait_queue *receiver;
828 struct msg_msg *msg_ptr;
829 struct mqueue_inode_info *info;
833 ret = audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout);
837 if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
840 timeout = prepare_timeout(u_abs_timeout);
847 inode = filp->f_dentry->d_inode;
848 if (unlikely(filp->f_op != &mqueue_file_operations))
850 info = MQUEUE_I(inode);
852 if (unlikely(!(filp->f_mode & FMODE_WRITE)))
855 if (unlikely(msg_len > info->attr.mq_msgsize)) {
860 /* First try to allocate memory, before doing anything with
861 * existing queues. */
862 msg_ptr = load_msg(u_msg_ptr, msg_len);
863 if (IS_ERR(msg_ptr)) {
864 ret = PTR_ERR(msg_ptr);
867 msg_ptr->m_ts = msg_len;
868 msg_ptr->m_type = msg_prio;
870 spin_lock(&info->lock);
872 if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
873 if (filp->f_flags & O_NONBLOCK) {
874 spin_unlock(&info->lock);
876 } else if (unlikely(timeout < 0)) {
877 spin_unlock(&info->lock);
881 wait.msg = (void *) msg_ptr;
882 wait.state = STATE_NONE;
883 ret = wq_sleep(info, SEND, timeout, &wait);
888 receiver = wq_get_first_waiter(info, RECV);
890 pipelined_send(info, msg_ptr, receiver);
892 /* adds message to the queue */
893 msg_insert(msg_ptr, info);
896 inode->i_atime = inode->i_mtime = inode->i_ctime =
898 spin_unlock(&info->lock);
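/*
 * A minimal user-space sketch of the send side implemented above,
 * together with the matching receive. The descriptor mq and the
 * abs_timeout come from the earlier sketches, the priority value is
 * illustrative, and the receive buffer must be at least the queue's
 * mq_msgsize (8192 by default), as checked below:
 *
 *	#include <mqueue.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	char msg[] = "hello";
 *	char buf[8192];
 *	unsigned int prio;
 *
 *	if (mq_timedsend(mq, msg, sizeof(msg), 3, &abs_timeout) == -1)
 *		perror("mq_timedsend");
 *	if (mq_timedreceive(mq, buf, sizeof(buf), &prio, &abs_timeout) == -1)
 *		perror("mq_timedreceive");
 */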
907 asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
908 size_t msg_len, unsigned int __user *u_msg_prio,
909 const struct timespec __user *u_abs_timeout)
913 struct msg_msg *msg_ptr;
916 struct mqueue_inode_info *info;
917 struct ext_wait_queue wait;
919 ret = audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout);
923 timeout = prepare_timeout(u_abs_timeout);
930 inode = filp->f_dentry->d_inode;
931 if (unlikely(filp->f_op != &mqueue_file_operations))
933 info = MQUEUE_I(inode);
935 if (unlikely(!(filp->f_mode & FMODE_READ)))
938 /* checks if buffer is big enough */
939 if (unlikely(msg_len < info->attr.mq_msgsize)) {
944 spin_lock(&info->lock);
945 if (info->attr.mq_curmsgs == 0) {
946 if (filp->f_flags & O_NONBLOCK) {
947 spin_unlock(&info->lock);
950 } else if (unlikely(timeout < 0)) {
951 spin_unlock(&info->lock);
956 wait.state = STATE_NONE;
957 ret = wq_sleep(info, RECV, timeout, &wait);
961 msg_ptr = msg_get(info);
963 inode->i_atime = inode->i_mtime = inode->i_ctime =
966 /* There is now free space in queue. */
967 pipelined_receive(info);
968 spin_unlock(&info->lock);
974 if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
975 store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
987 * Notes: if the user asks us to deregister (by passing a NULL pointer)
988 * but isn't currently the owner of the notification, the request is
989 * silently ignored. POSIX does not explicitly define this case.
991 asmlinkage long sys_mq_notify(mqd_t mqdes,
992 const struct sigevent __user *u_notification)
998 struct sigevent notification;
999 struct mqueue_inode_info *info;
1002 ret = audit_mq_notify(mqdes, u_notification);
1008 if (u_notification != NULL) {
1009 if (copy_from_user(&notification, u_notification,
1010 sizeof(struct sigevent)))
1013 if (unlikely(notification.sigev_notify != SIGEV_NONE &&
1014 notification.sigev_notify != SIGEV_SIGNAL &&
1015 notification.sigev_notify != SIGEV_THREAD))
1017 if (notification.sigev_notify == SIGEV_SIGNAL &&
1018 !valid_signal(notification.sigev_signo)) {
1021 if (notification.sigev_notify == SIGEV_THREAD) {
1022 /* create the notify skb */
1023 nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
1028 if (copy_from_user(nc->data,
1029 notification.sigev_value.sival_ptr,
1030 NOTIFY_COOKIE_LEN)) {
1034 /* TODO: add a header? */
1035 skb_put(nc, NOTIFY_COOKIE_LEN);
1036 /* and attach it to the socket */
1038 filp = fget(notification.sigev_signo);
1042 sock = netlink_getsockbyfilp(filp);
1045 ret = PTR_ERR(sock);
1050 ret = netlink_attachskb(sock, nc, 0,
1051 MAX_SCHEDULE_TIMEOUT, NULL);
1067 inode = filp->f_dentry->d_inode;
1068 if (unlikely(filp->f_op != &mqueue_file_operations))
1070 info = MQUEUE_I(inode);
1073 spin_lock(&info->lock);
1074 if (u_notification == NULL) {
1075 if (info->notify_owner == current->tgid) {
1076 remove_notification(info);
1077 inode->i_atime = inode->i_ctime = CURRENT_TIME;
1079 } else if (info->notify_owner != 0) {
1082 switch (notification.sigev_notify) {
1084 info->notify.sigev_notify = SIGEV_NONE;
1087 info->notify_sock = sock;
1088 info->notify_cookie = nc;
1091 info->notify.sigev_notify = SIGEV_THREAD;
1094 info->notify.sigev_signo = notification.sigev_signo;
1095 info->notify.sigev_value = notification.sigev_value;
1096 info->notify.sigev_notify = SIGEV_SIGNAL;
1099 info->notify_owner = current->tgid;
1100 inode->i_atime = inode->i_ctime = CURRENT_TIME;
1102 spin_unlock(&info->lock);
1107 netlink_detachskb(sock, nc);
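/*
 * A minimal user-space sketch of registering for notification through
 * the syscall above (the SIGEV_SIGNAL case; the signal choice and the
 * descriptor mq are illustrative). Passing NULL instead of a sigevent
 * removes the caller's existing registration:
 *
 *	#include <mqueue.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	if (mq_notify(mq, &sev) == -1)
 *		perror("mq_notify");
 */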
1114 asmlinkage long sys_mq_getsetattr(mqd_t mqdes,
1115 const struct mq_attr __user *u_mqstat,
1116 struct mq_attr __user *u_omqstat)
1119 struct mq_attr mqstat, omqstat;
1121 struct inode *inode;
1122 struct mqueue_inode_info *info;
1124 if (u_mqstat != NULL) {
1125 if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
1127 if (mqstat.mq_flags & (~O_NONBLOCK))
1136 inode = filp->f_dentry->d_inode;
1137 if (unlikely(filp->f_op != &mqueue_file_operations))
1139 info = MQUEUE_I(inode);
1141 spin_lock(&info->lock);
1143 omqstat = info->attr;
1144 omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
1146 ret = audit_mq_getsetattr(mqdes, &mqstat);
1149 if (mqstat.mq_flags & O_NONBLOCK)
1150 filp->f_flags |= O_NONBLOCK;
1152 filp->f_flags &= ~O_NONBLOCK;
1154 inode->i_atime = inode->i_ctime = CURRENT_TIME;
1157 spin_unlock(&info->lock);
1160 if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
1161 sizeof(struct mq_attr)))
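/*
 * A minimal user-space sketch of the get/set path above: the only flag
 * a caller may change this way is O_NONBLOCK. The descriptor mq is
 * illustrative; the third mq_setattr() argument, if non-NULL, receives
 * the previous attributes:
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *
 *	struct mq_attr cur, old;
 *
 *	mq_getattr(mq, &cur);
 *	cur.mq_flags |= O_NONBLOCK;
 *	mq_setattr(mq, &cur, &old);
 */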
1170 static struct inode_operations mqueue_dir_inode_operations = {
1171 .lookup = simple_lookup,
1172 .create = mqueue_create,
1173 .unlink = mqueue_unlink,
1176 static struct file_operations mqueue_file_operations = {
1177 .flush = mqueue_flush_file,
1178 .poll = mqueue_poll_file,
1179 .read = mqueue_read_file,
1182 static struct super_operations mqueue_super_ops = {
1183 .alloc_inode = mqueue_alloc_inode,
1184 .destroy_inode = mqueue_destroy_inode,
1185 .statfs = simple_statfs,
1186 .delete_inode = mqueue_delete_inode,
1187 .drop_inode = generic_delete_inode,
1190 static struct file_system_type mqueue_fs_type = {
1192 .get_sb = mqueue_get_sb,
1193 .kill_sb = kill_litter_super,
1196 static int msg_max_limit_min = DFLT_MSGMAX;
1197 static int msg_max_limit_max = HARD_MSGMAX;
1199 static int msg_maxsize_limit_min = DFLT_MSGSIZEMAX;
1200 static int msg_maxsize_limit_max = INT_MAX;
1202 static ctl_table mq_sysctls[] = {
1204 .ctl_name = CTL_QUEUESMAX,
1205 .procname = "queues_max",
1206 .data = &queues_max,
1207 .maxlen = sizeof(int),
1209 .proc_handler = &proc_dointvec,
1212 .ctl_name = CTL_MSGMAX,
1213 .procname = "msg_max",
1215 .maxlen = sizeof(int),
1217 .proc_handler = &proc_dointvec_minmax,
1218 .extra1 = &msg_max_limit_min,
1219 .extra2 = &msg_max_limit_max,
1222 .ctl_name = CTL_MSGSIZEMAX,
1223 .procname = "msgsize_max",
1224 .data = &msgsize_max,
1225 .maxlen = sizeof(int),
1227 .proc_handler = &proc_dointvec_minmax,
1228 .extra1 = &msg_maxsize_limit_min,
1229 .extra2 = &msg_maxsize_limit_max,
1234 static ctl_table mq_sysctl_dir[] = {
1236 .ctl_name = FS_MQUEUE,
1237 .procname = "mqueue",
1239 .child = mq_sysctls,
1244 static ctl_table mq_sysctl_root[] = {
1249 .child = mq_sysctl_dir,
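/*
 * The tables above expose the limits used throughout this file as
 * /proc/sys/fs/mqueue/queues_max, /proc/sys/fs/mqueue/msg_max and
 * /proc/sys/fs/mqueue/msgsize_max (defaults 256, 10 and 8192
 * respectively); msg_max and msgsize_max are clamped to the min/max
 * bounds defined above.
 */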
1254 static int __init init_mqueue_fs(void)
1258 mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
1259 sizeof(struct mqueue_inode_info), 0,
1260 SLAB_HWCACHE_ALIGN, init_once, NULL);
1261 if (mqueue_inode_cachep == NULL)
1264 /* ignore failures - they are not fatal */
1265 mq_sysctl_table = register_sysctl_table(mq_sysctl_root, 0);
1267 error = register_filesystem(&mqueue_fs_type);
1271 if (IS_ERR(mqueue_mnt = kern_mount(&mqueue_fs_type))) {
1272 error = PTR_ERR(mqueue_mnt);
1273 goto out_filesystem;
1276 /* internal initialization - not common for vfs */
1278 spin_lock_init(&mq_lock);
1283 unregister_filesystem(&mqueue_fs_type);
1285 if (mq_sysctl_table)
1286 unregister_sysctl_table(mq_sysctl_table);
1287 if (kmem_cache_destroy(mqueue_inode_cachep)) {
1289 "mqueue_inode_cache: not all structures were freed\n");
1294 __initcall(init_mqueue_fs);