/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004 Krzysztof Benedyczak (golbi@mat.uni.torun.pl)
 *                         Michal Wronski (wrona@mat.uni.torun.pl)
 *
 * Spinlocks:               Mohamed Abbas (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul (manfred@colorfullife.com)
 *
 * This file is released under the GPL.
 */
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0	/* waiter states for ext_wait_queue */
#define STATE_PENDING	1
#define STATE_READY	2

#define CTL_QUEUESMAX	2	/* sysctl ids */
#define CTL_MSGMAX	3
#define CTL_MSGSIZEMAX	4

#define DFLT_QUEUESMAX	64	/* max number of message queues */
#define DFLT_MSGMAX	40	/* max number of messages in each queue */
#define HARD_MSGMAX	(131072/sizeof(void *))
#define DFLT_MSGSIZEMAX	16384	/* max message size */

#define NOTIFY_COOKIE_LEN	32
struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};
struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct msg_msg **messages;
	struct mq_attr attr;

	struct sigevent notify;
	pid_t notify_owner;
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};
static struct inode_operations mqueue_dir_inode_operations;
static struct file_operations mqueue_file_operations;
static struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static spinlock_t mq_lock;
static kmem_cache_t *mqueue_inode_cachep;
static struct vfsmount *mqueue_mnt;

static unsigned int queues_count;
static unsigned int queues_max = DFLT_QUEUESMAX;
static unsigned int msg_max = DFLT_MSGMAX;
static unsigned int msgsize_max = DFLT_MSGSIZEMAX;

static struct ctl_table_header *mq_sysctl_table;
static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}
static struct inode *mqueue_get_inode(struct super_block *sb, int mode)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blksize = PAGE_CACHE_SIZE;
		inode->i_blocks = 0;
		inode->i_mtime = inode->i_ctime = inode->i_atime =
				CURRENT_TIME;

		if (S_ISREG(mode)) {
			struct mqueue_inode_info *info;

			inode->i_fop = &mqueue_file_operations;
			inode->i_size = FILENT_SIZE;
			/* mqueue specific info */
			info = MQUEUE_I(inode);
			spin_lock_init(&info->lock);
			init_waitqueue_head(&info->wait_q);
			INIT_LIST_HEAD(&info->e_wait_q[0].list);
			INIT_LIST_HEAD(&info->e_wait_q[1].list);
			info->notify_owner = 0;
			info->qsize = 0;
			memset(&info->attr, 0, sizeof(info->attr));
			info->attr.mq_maxmsg = DFLT_MSGMAX;
			info->attr.mq_msgsize = DFLT_MSGSIZEMAX;
			info->messages = kmalloc(DFLT_MSGMAX * sizeof(struct msg_msg *),
						 GFP_KERNEL);
			if (!info->messages) {
				make_bad_inode(inode);
				iput(inode);
				inode = NULL;
			}
		} else if (S_ISDIR(mode)) {
			inode->i_nlink++;
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * DIRENT_SIZE;
			inode->i_op = &mqueue_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
		}
	}
	return inode;
}
static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, S_IFDIR | S_ISVTX | S_IRWXUGO);
	if (!inode)
		return -ENOMEM;

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		iput(inode);
		return -ENOMEM;
	}
	return 0;
}
static struct super_block *mqueue_get_sb(struct file_system_type *fs_type,
					 int flags, const char *dev_name,
					 void *data)
{
	return get_sb_single(fs_type, flags, data, mqueue_fill_super);
}
static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *)foo;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
			SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(&p->vfs_inode);
}
static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, SLAB_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_destroy_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}
static void mqueue_delete_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	int i;

	if (S_ISDIR(inode->i_mode)) {
		clear_inode(inode);
		return;
	}
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	for (i = 0; i < info->attr.mq_curmsgs; i++)
		free_msg(info->messages[i]);
	kfree(info->messages);
	spin_unlock(&info->lock);

	clear_inode(inode);

	if (info->messages) {
		spin_lock(&mq_lock);
		queues_count--;
		spin_unlock(&mq_lock);
	}
}
static int mqueue_create(struct inode *dir, struct dentry *dentry,
			 int mode, struct nameidata *nd)

	if (queues_count >= queues_max && !capable(CAP_SYS_RESOURCE)) {

	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, mode);

	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);

	spin_unlock(&mq_lock);
static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	inode->i_nlink--;
	dput(dentry);
	return 0;
}
/*
 * This routine handles a read(2) on a queue file.
 * To avoid reimplementing mq_receive here, only the queue size and the
 * notification info can be read (the only values that are interesting
 * from the user's point of view and are not accessible through the
 * standard routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode);
	char buffer[FILENT_SIZE];
	size_t slen;
	loff_t o;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
		"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
		info->qsize,
		info->notify_owner ? info->notify.sigev_notify : 0,
		(info->notify_owner &&
		 info->notify.sigev_notify == SIGEV_SIGNAL) ?
			info->notify.sigev_signo : 0,
		info->notify_owner);
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';
	slen = strlen(buffer)+1;

	o = *off;
	if (o > slen)
		return 0;
	if (o + count > slen)
		count = slen - o;

	if (copy_to_user(u_data, buffer + o, count))
		return -EFAULT;

	*off = o + count;
	filp->f_dentry->d_inode->i_atime = filp->f_dentry->d_inode->i_ctime = CURRENT_TIME;
	return count;
}
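/*
 * User-space usage sketch (not part of this file): on Linux an mqd_t is a
 * file descriptor referring to a file on this filesystem, so the
 * QSIZE/NOTIFY line produced above can be fetched with a plain read(2).
 * The queue name "/example" is made up for illustration; assumes the
 * glibc/librt mq_open() wrapper (link with -lrt).
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[80];
 *		mqd_t q = mq_open("/example", O_RDWR | O_CREAT, 0600, NULL);
 *		ssize_t n = read((int)q, buf, sizeof(buf) - 1);
 *		if (n > 0) {
 *			buf[n] = '\0';
 *			printf("%s", buf);	// e.g. "QSIZE:0 NOTIFY:0 ..."
 *		}
 *		mq_close(q);
 *		return 0;
 *	}
 */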
static int mqueue_flush_file(struct file *filp)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode);

	spin_lock(&info->lock);
	if (current->tgid == info->notify_owner)
		remove_notification(info);
	spin_unlock(&info->lock);

	return 0;
}
static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}
/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}
/*
 * Puts the current task to sleep. Caller must hold the queue lock; after
 * return the lock is no longer held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		long timeout, struct ext_wait_queue *ewp)
	wq_add(info, sr, ewp);

		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_timeout(timeout);

		while (ewp->state == STATE_PENDING)

		if (ewp->state == STATE_READY) {

		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;

	list_del(&ewp->list);

	spin_unlock(&info->lock);
/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}
/* Auxiliary functions to manipulate messages' list */
static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
{
	int k;

	k = info->attr.mq_curmsgs - 1;
	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
		info->messages[k + 1] = info->messages[k];
		k--;
	}
	info->attr.mq_curmsgs++;
	info->qsize += ptr->m_ts;
	info->messages[k + 1] = ptr;
}
static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
	return info->messages[info->attr.mq_curmsgs];
}
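/*
 * Example of the invariant kept by msg_insert()/msg_get(): messages[] stays
 * sorted by ascending priority (m_type), so after inserting priorities 5, 1
 * and 3 the array holds {1, 3, 5} and msg_get() pops 5 first, i.e. the
 * highest-priority message.  Because msg_insert() shifts entries with an
 * equal m_type as well, equal-priority messages come back oldest first.
 */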
static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN - 1] = code;
}
/*
 * The only purpose of this helper is to keep sys_mq_timedsend() from
 * growing too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * Invoked when a process has registered for notification, no task
	 * is waiting synchronously for a message, AND the queue just went
	 * from empty to non-empty. Here we are sure that nobody is waiting
	 * synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			sig_i.si_pid = current->tgid;
			sig_i.si_uid = current->uid;

			kill_proc_info(info->notify.sigev_signo,
				       &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock,
					info->notify_cookie, 0);
			break;
		}
		/* after notification unregisters process */
		info->notify_owner = 0;
	}
	wake_up(&info->wait_q);
}
static long prepare_timeout(const struct timespec __user *u_arg)
{
	struct timespec ts, nowts;
	long timeout;

	if (u_arg) {
		if (unlikely(copy_from_user(&ts, u_arg,
					sizeof(struct timespec))))
			return -EFAULT;
		if (unlikely(ts.tv_nsec < 0 || ts.tv_sec < 0
			|| ts.tv_nsec >= NSEC_PER_SEC))
			return -EINVAL;
		nowts = CURRENT_TIME;
		/* first subtract as jiffies can't be too big */
		ts.tv_sec -= nowts.tv_sec;
		if (ts.tv_nsec < nowts.tv_nsec) {
			ts.tv_nsec += NSEC_PER_SEC;
			ts.tv_sec--;
		}
		ts.tv_nsec -= nowts.tv_nsec;
		if (ts.tv_sec < 0)
			return 0;
		timeout = timespec_to_jiffies(&ts) + 1;
	} else
		return MAX_SCHEDULE_TIMEOUT;

	return timeout;
}
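/*
 * For reference (user-space sketch, not part of this file): the timeout the
 * syscalls receive is an *absolute* CLOCK_REALTIME time, which
 * prepare_timeout() above converts into a relative jiffies count.  A caller
 * typically builds it like this ("q" is an open mqd_t as in the other
 * sketches; assumes the librt wrappers):
 *
 *	struct timespec abs;
 *	clock_gettime(CLOCK_REALTIME, &abs);
 *	abs.tv_sec += 5;			// give up after roughly 5s
 *	if (mq_timedsend(q, "ping", 4, 0, &abs) == -1 && errno == ETIMEDOUT)
 *		perror("queue stayed full");
 */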
static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != 0 &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie, 0);
	}
	info->notify_owner = 0;
}
/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct dentry *dir, struct dentry *dentry,
			int oflag, mode_t mode, struct mq_attr __user *u_attr)
	struct mqueue_inode_info *info;
	struct msg_msg **msgs = NULL;

	if (u_attr != NULL) {
		if (copy_from_user(&attr, u_attr, sizeof(attr)))
			return ERR_PTR(-EFAULT);

		if (attr.mq_maxmsg <= 0 || attr.mq_msgsize <= 0)
			return ERR_PTR(-EINVAL);
		if (capable(CAP_SYS_RESOURCE)) {
			if (attr.mq_maxmsg > HARD_MSGMAX)
				return ERR_PTR(-EINVAL);
		} else {
			if (attr.mq_maxmsg > msg_max ||
					attr.mq_msgsize > msgsize_max)
				return ERR_PTR(-EINVAL);
		}
		msgs = kmalloc(attr.mq_maxmsg * sizeof(*msgs), GFP_KERNEL);
		if (!msgs)
			return ERR_PTR(-ENOMEM);

	ret = vfs_create(dir->d_inode, dentry, mode, NULL);

	inode = dentry->d_inode;
	info = MQUEUE_I(inode);

	info->attr.mq_maxmsg = attr.mq_maxmsg;
	info->attr.mq_msgsize = attr.mq_msgsize;
	kfree(info->messages);
	info->messages = msgs;

	filp = dentry_open(dentry, mqueue_mnt, oflag);
/* Opens existing queue */
static struct file *do_open(struct dentry *dentry, int oflag)
{
	static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
					    MAY_READ | MAY_WRITE };
	struct file *filp;

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return ERR_PTR(-EINVAL);

	if (permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE], NULL))
		return ERR_PTR(-EACCES);

	filp = dentry_open(dentry, mqueue_mnt, oflag);
	return filp;
}
asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
				struct mq_attr __user *u_attr)
	struct dentry *dentry;

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd();

	down(&mqueue_mnt->mnt_root->d_inode->i_sem);
	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);

	if (oflag & O_CREAT) {
		if (dentry->d_inode) {	/* entry already exists */
			filp = (oflag & O_EXCL) ? ERR_PTR(-EEXIST) :
					do_open(dentry, oflag);
		} else
			filp = do_create(mqueue_mnt->mnt_root, dentry,
						oflag, mode, u_attr);
	} else
		filp = (dentry->d_inode) ? do_open(dentry, oflag) :
					ERR_PTR(-ENOENT);

	error = PTR_ERR(filp);

	set_close_on_exec(fd, 1);
	fd_install(fd, filp);

	up(&mqueue_mnt->mnt_root->d_inode->i_sem);
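/*
 * User-space usage sketch (not part of this file, assumes glibc/librt):
 * creating a queue with explicit attributes, which ends up in do_create()
 * above; passing NULL attributes instead falls back to the DFLT_* values.
 * The name "/example" and the sizes are illustrative only.
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *
 *	struct mq_attr attr = {
 *		.mq_maxmsg  = 10,	// capped by msg_max unless privileged
 *		.mq_msgsize = 512,	// capped by msgsize_max unless privileged
 *	};
 *	mqd_t q = mq_open("/example", O_CREAT | O_EXCL | O_RDWR, 0600, &attr);
 *	if (q == (mqd_t)-1)
 *		perror("mq_open");
 */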
asmlinkage long sys_mq_unlink(const char __user *u_name)
	struct dentry *dentry;
	struct inode *inode = NULL;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	down(&mqueue_mnt->mnt_root->d_inode->i_sem);
	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);

	if (!dentry->d_inode) {

	inode = dentry->d_inode;
	atomic_inc(&inode->i_count);

	err = vfs_unlink(dentry->d_parent->d_inode, dentry);

	up(&mqueue_mnt->mnt_root->d_inode->i_sem);
/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */
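/*
 * Rough timeline of the receive-side handoff described above (the send side
 * is symmetric):
 *
 *	receiver                          sender (holds info->lock)
 *	--------                          -------------------------
 *	finds the queue empty, queues
 *	itself on e_wait_q[RECV], sleeps
 *	                                  finds a waiting receiver,
 *	                                  receiver->msg = message,
 *	                                  state = STATE_PENDING, wakes it
 *	wakes, spins while state is
 *	still STATE_PENDING
 *	                                  barrier, state = STATE_READY
 *	returns the message without
 *	touching info->lock again
 */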
/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	wmb();
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
 * its message and put it into the queue (there is guaranteed to be a free
 * slot, since a message was just removed). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	msg_insert(sender->msg, info);
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	wmb();
	sender->state = STATE_READY;
}
asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
	size_t msg_len, unsigned int msg_prio,
	const struct timespec __user *u_abs_timeout)
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))

	timeout = prepare_timeout(u_abs_timeout);

	inode = filp->f_dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
	info = MQUEUE_I(inode);

	if (unlikely(!(filp->f_mode & FMODE_WRITE)))

	if (unlikely(msg_len > info->attr.mq_msgsize)) {

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg((void *)u_msg_ptr, msg_len);
	if (unlikely(IS_ERR(msg_ptr))) {
		ret = PTR_ERR(msg_ptr);

	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	spin_lock(&info->lock);

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
		} else {
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			msg_insert(msg_ptr, info);
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
		spin_unlock(&info->lock);
	}
asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
	size_t msg_len, unsigned int __user *u_msg_prio,
	const struct timespec __user *u_abs_timeout)
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;

	timeout = prepare_timeout(u_abs_timeout);

	inode = filp->f_dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
	info = MQUEUE_I(inode);

	if (unlikely(!(filp->f_mode & FMODE_READ)))

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
		} else {
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);

		spin_unlock(&info->lock);
	}

	if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
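/*
 * User-space usage sketch (not part of this file, assumes glibc/librt):
 * a blocking send paired with a priority-aware receive, exercising
 * sys_mq_timedsend()/sys_mq_timedreceive() above.  "q" is an open mqd_t as
 * in the earlier sketches; glibc forwards mq_send()/mq_receive() to the
 * timed syscalls with a NULL absolute timeout.
 *
 *	#include <mqueue.h>
 *	#include <stdio.h>
 *
 *	char buf[512];		// must be at least mq_msgsize, or EMSGSIZE
 *	unsigned int prio;
 *
 *	if (mq_send(q, "hello", 5, 3) == -1)
 *		perror("mq_send");
 *	ssize_t n = mq_receive(q, buf, sizeof(buf), &prio);
 *	if (n >= 0)
 *		printf("got %zd bytes at priority %u\n", n, prio);
 */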
/*
 * Notes: the case where a user asks us to deregister (with a NULL pointer)
 * while not being the current owner of the notification is silently
 * ignored; POSIX does not explicitly define this behaviour.
 */
asmlinkage long sys_mq_notify(mqd_t mqdes,
		const struct sigevent __user *u_notification)
	struct sigevent notification;
	struct mqueue_inode_info *info;

	if (u_notification != NULL) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;

		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;

		if (notification.sigev_notify == SIGEV_SIGNAL &&
			(notification.sigev_signo < 0 ||
			 notification.sigev_signo > _NSIG)) {
			return -EINVAL;
		}

		if (notification.sigev_notify == SIGEV_THREAD) {
			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);

			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
			filp = fget(notification.sigev_signo);

			sock = netlink_getsockbyfilp(filp);

			ret = netlink_attachskb(sock, nc, 0, MAX_SCHEDULE_TIMEOUT);

	inode = filp->f_dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == current->tgid) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;

	} else if (info->notify_owner != 0) {

		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		default:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}
		info->notify_owner = current->tgid;
		inode->i_atime = inode->i_ctime = CURRENT_TIME;

	spin_unlock(&info->lock);

	netlink_detachskb(sock, nc);
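/*
 * User-space usage sketch (not part of this file, assumes glibc/librt):
 * asking for SIGUSR1 the next time the queue goes from empty to non-empty,
 * which reaches the SIGEV_SIGNAL branch of __do_notify() above.
 *
 *	#include <mqueue.h>
 *	#include <signal.h>
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	if (mq_notify(q, &sev) == -1)
 *		perror("mq_notify");	// typically EBUSY if already owned
 *	// mq_notify(q, NULL) removes our own registration again
 *
 * SIGEV_THREAD notifications are implemented by the C library on top of the
 * netlink-cookie mechanism seen in the kernel code above.
 */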
asmlinkage long sys_mq_getsetattr(mqd_t mqdes,
			const struct mq_attr __user *u_mqstat,
			struct mq_attr __user *u_omqstat)
	struct mq_attr mqstat, omqstat;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
		if (mqstat.mq_flags & (~O_NONBLOCK))

	inode = filp->f_dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;

	if (mqstat.mq_flags & O_NONBLOCK)
		filp->f_flags |= O_NONBLOCK;
	else
		filp->f_flags &= ~O_NONBLOCK;

	inode->i_atime = inode->i_ctime = CURRENT_TIME;

	spin_unlock(&info->lock);

	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
						sizeof(struct mq_attr)))
static struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
};

static struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.statfs = simple_statfs,
	.delete_inode = mqueue_delete_inode,
	.drop_inode = generic_delete_inode,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.get_sb = mqueue_get_sb,
	.kill_sb = kill_litter_super,
};
static int msg_max_limit_min = DFLT_MSGMAX;
static int msg_max_limit_max = HARD_MSGMAX;

static int msg_maxsize_limit_min = DFLT_MSGSIZEMAX;
static int msg_maxsize_limit_max = INT_MAX;

static ctl_table mq_sysctls[] = {
	{
		.ctl_name	= CTL_QUEUESMAX,
		.procname	= "queues_max",
		.data		= &queues_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_MSGMAX,
		.procname	= "msg_max",
		.data		= &msg_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &msg_max_limit_min,
		.extra2		= &msg_max_limit_max,
	},
	{
		.ctl_name	= CTL_MSGSIZEMAX,
		.procname	= "msgsize_max",
		.data		= &msgsize_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &msg_maxsize_limit_min,
		.extra2		= &msg_maxsize_limit_max,
	},
	{ .ctl_name = 0 }
};

static ctl_table mq_sysctl_dir[] = {
	{
		.ctl_name	= FS_MQUEUE,
		.procname	= "mqueue",
		.mode		= 0555,
		.child		= mq_sysctls,
	},
	{ .ctl_name = 0 }
};

static ctl_table mq_sysctl_root[] = {
	{
		.ctl_name	= CTL_FS,
		.procname	= "fs",
		.mode		= 0555,
		.child		= mq_sysctl_dir,
	},
	{ .ctl_name = 0 }
};
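/*
 * The tables above surface the limits as /proc/sys/fs/mqueue/queues_max,
 * /proc/sys/fs/mqueue/msg_max and /proc/sys/fs/mqueue/msgsize_max.  Writing
 * a larger value to msg_max or msgsize_max raises the mq_maxmsg/mq_msgsize
 * an unprivileged caller may request for newly created queues, within the
 * compiled-in min/max bounds declared above.
 */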
static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once, NULL);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	mq_sysctl_table = register_sysctl_table(mq_sysctl_root, 0);
	if (!mq_sysctl_table) {
		error = -ENOMEM;
		goto out_cache;
	}

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	if (IS_ERR(mqueue_mnt = kern_mount(&mqueue_fs_type))) {
		error = PTR_ERR(mqueue_mnt);
		goto out_filesystem;
	}

	/* internal initialization - not common for vfs */
	queues_count = 0;
	spin_lock_init(&mq_lock);

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	unregister_sysctl_table(mq_sysctl_table);
out_cache:
	if (kmem_cache_destroy(mqueue_inode_cachep)) {
		printk(KERN_INFO
			"mqueue_inode_cache: not all structures were freed\n");
	}
	return error;
}

__initcall(init_mqueue_fs);