/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 */

#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"
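
/*
 * Default limits; on most configurations these are runtime-tunable
 * through the kernel.msgmax, kernel.msgmnb and kernel.msgmni sysctls
 * (the sysctl registration lives elsewhere, in kernel/sysctl.c).
 */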
int msg_ctlmax = MSGMAX;
int msg_ctlmnb = MSGMNB;
int msg_ctlmni = MSGMNI;

/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	volatile struct msg_msg	*r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
};

#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4
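
/*
 * Running totals across all queues: msg_bytes counts queued payload
 * bytes, msg_hdrs counts queued messages; msgctl(MSG_INFO) reports
 * both (see sys_msgctl() below).
 */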
static atomic_t msg_bytes = ATOMIC_INIT(0);
static atomic_t msg_hdrs = ATOMIC_INIT(0);

static struct ipc_ids msg_ids;

#define msg_lock(id)		((struct msg_queue *)ipc_lock(&msg_ids, id))
#define msg_unlock(msq)		ipc_unlock(&(msq)->q_perm)
#define msg_rmid(id)		((struct msg_queue *)ipc_rmid(&msg_ids, id))
#define msg_checkid(msq, msgid)	ipc_checkid(&msg_ids, &msq->q_perm, msgid)
#define msg_buildid(id, seq)	ipc_buildid(&msg_ids, id, seq)

static void freeque(struct msg_queue *msq, int id);
static int newque(key_t key, int msgflg);

#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif

void __init msg_init(void)
{
	ipc_init_ids(&msg_ids, msg_ctlmni);
	ipc_init_proc_interface("sysvipc/msg",
				"       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
				&msg_ids,
				sysvipc_msg_proc_show);
}

static int newque(key_t key, int msgflg)
{
	struct msg_queue *msq;
	int id, retval;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;
	msq->q_perm.xid = vx_current_xid();

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq);
		return retval;
	}

	id = ipc_addid(&msg_ids, &msq->q_perm, msg_ctlmni);
	if (id == -1) {
		security_msg_queue_free(msq);
		ipc_rcu_putref(msq);
		return -ENOSPC;
	}

	msq->q_id = msg_buildid(id, msq->q_perm.seq);
	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);
	msg_unlock(msq);

	return msq->q_id;
}

static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);
}

static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}

static void ss_wakeup(struct list_head *h, int kill)
{
	struct list_head *tmp;

	tmp = h->next;
	while (tmp != h) {
		struct msg_sender *mss;

		mss = list_entry(tmp, struct msg_sender, list);
		tmp = tmp->next;
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}

static void expunge_all(struct msg_queue *msq, int res)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		msr->r_msg = NULL;
		wake_up_process(msr->r_tsk);
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}

/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from the message queue ID array, and cleans
 * up all the messages associated with this queue.
 *
 * msg_ids.mutex and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.mutex remains locked on exit.
 */
static void freeque(struct msg_queue *msq, int id)
{
	struct list_head *tmp;

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msq = msg_rmid(id);
	msg_unlock(msq);

	tmp = msq->q_messages.next;
	while (tmp != &msq->q_messages) {
		struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);

		tmp = tmp->next;
		atomic_dec(&msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &msg_bytes);
	security_msg_queue_free(msq);
	ipc_rcu_putref(msq);
}

asmlinkage long sys_msgget(key_t key, int msgflg)
{
	struct msg_queue *msq;
	int id, ret = -EPERM;

	mutex_lock(&msg_ids.mutex);
	if (key == IPC_PRIVATE)
		ret = newque(key, msgflg);
	else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */
		if (!(msgflg & IPC_CREAT))
			ret = -ENOENT;
		else
			ret = newque(key, msgflg);
	} else if (msgflg & IPC_CREAT && msgflg & IPC_EXCL) {
		ret = -EEXIST;
	} else {
		msq = msg_lock(id);
		BUG_ON(msq == NULL);
		if (ipcperms(&msq->q_perm, msgflg))
			ret = -EACCES;
		else {
			int qid = msg_buildid(id, msq->q_perm.seq);

			ret = security_msg_queue_associate(msq, msgflg);
			if (!ret)
				ret = qid;
		}
		msg_unlock(msq);
	}
	mutex_unlock(&msg_ids.mutex);

	return ret;
}
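
/*
 * A minimal user-space sketch reaching sys_msgget(); illustrative
 * only, with a hypothetical key file path and no error handling:
 *
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *
 *	key_t key = ftok("/tmp/example", 'm');    (hypothetical path)
 *	int id = msgget(key, IPC_CREAT | 0600);
 */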

static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
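
/*
 * Note on the IPC_OLD path above: the legacy msqid_ds count fields are
 * only 16 bits wide, so values above USHRT_MAX are clamped, while the
 * msg_lcbytes/msg_lqbytes fields carry the untruncated values.
 */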

struct msq_setbuf {
	unsigned long	qbytes;
	uid_t		uid;
	gid_t		gid;
	mode_t		mode;
};

static inline unsigned long
copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
	{
		struct msqid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->qbytes = tbuf.msg_qbytes;
		out->uid = tbuf.msg_perm.uid;
		out->gid = tbuf.msg_perm.gid;
		out->mode = tbuf.msg_perm.mode;

		return 0;
	}
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid = tbuf_old.msg_perm.uid;
		out->gid = tbuf_old.msg_perm.gid;
		out->mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->qbytes = tbuf_old.msg_lqbytes;
		else
			out->qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
{
	struct kern_ipc_perm *ipcp;
	struct msq_setbuf setbuf;
	struct msg_queue *msq;
	int err, version;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;
		/*
		 * We must not return kernel stack data:
		 * due to padding, it is not enough
		 * to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = msg_ctlmni;
		msginfo.msgmax = msg_ctlmax;
		msginfo.msgmnb = msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		mutex_lock(&msg_ids.mutex);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids.in_use;
			msginfo.msgmap = atomic_read(&msg_hdrs);
			msginfo.msgtql = atomic_read(&msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = msg_ids.max_id;
		mutex_unlock(&msg_ids.mutex);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case MSG_STAT:
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;
		if (cmd == MSG_STAT && msqid >= msg_ids.entries->size)
			return -EINVAL;

		memset(&tbuf, 0, sizeof(tbuf));

		msq = msg_lock(msqid);
		if (msq == NULL)
			return -EINVAL;

		if (cmd == MSG_STAT) {
			success_return = msg_buildid(msqid, msq->q_perm.seq);
		} else {
			err = -EIDRM;
			if (msg_checkid(msq, msqid))
				goto out_unlock;
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		msg_unlock(msq);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
		if (!buf)
			return -EFAULT;
		if (copy_msqid_from_user(&setbuf, buf, version))
			return -EFAULT;
		break;
	case IPC_RMID:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&msg_ids.mutex);
	msq = msg_lock(msqid);
	err = -EINVAL;
	if (msq == NULL)
		goto out_up;

	err = -EIDRM;
	if (msg_checkid(msq, msqid))
		goto out_unlock_up;
	ipcp = &msq->q_perm;

	err = audit_ipc_obj(ipcp);
	if (err)
		goto out_unlock_up;
	if (cmd == IPC_SET) {
		err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid,
					 setbuf.mode);
		if (err)
			goto out_unlock_up;
	}

	err = -EPERM;
	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
		/* We _could_ check for CAP_CHOWN above, but we don't */
		goto out_unlock_up;

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock_up;

	switch (cmd) {
	case IPC_SET:
	{
		err = -EPERM;
		if (setbuf.qbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
			goto out_unlock_up;

		msq->q_qbytes = setbuf.qbytes;

		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
			     (S_IRWXUGO & setbuf.mode);
		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		msg_unlock(msq);
		break;
	}
	case IPC_RMID:
		freeque(msq, msqid);
		break;
	}
	err = 0;
out_up:
	mutex_unlock(&msg_ids.mutex);
	return err;
out_unlock_up:
	msg_unlock(msq);
	goto out_up;
out_unlock:
	msg_unlock(msq);
	return err;
}
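
/*
 * A minimal user-space sketch of the matching calls; illustrative only,
 * "id" is a queue id from msgget() and error handling is omitted:
 *
 *	struct msqid_ds ds;
 *	msgctl(id, IPC_STAT, &ds);	(read current settings)
 *	ds.msg_qbytes = 8192;
 *	msgctl(id, IPC_SET, &ds);	(raise or lower the byte limit)
 *	msgctl(id, IPC_RMID, NULL);	(destroy the queue)
 */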

static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch (mode)
	{
		case SEARCH_ANY:
			return 1;
		case SEARCH_LESSEQUAL:
			if (msg->m_type <= type)
				return 1;
			break;
		case SEARCH_EQUAL:
			if (msg->m_type == type)
				return 1;
			break;
		case SEARCH_NOTEQUAL:
			if (msg->m_type != type)
				return 1;
			break;
	}
	return 0;
}

static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = msr->r_tsk->pid;
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = msg;

				return 1;
			}
		}
	}
	return 0;
}
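
/*
 * Design note on the wakeups above (and in expunge_all()): r_msg is
 * set to NULL before wake_up_process(), so a receiver returning from
 * schedule() spins in the "lockless receive" loop of sys_msgrcv();
 * only after the smp_mb() is the final value (the message, or an
 * ERR_PTR) published for it to consume.
 */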

asmlinkage long
sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	long mtype;
	int err;

	if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(msgp->mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock(msqid);
	err = -EINVAL;
	if (msq == NULL)
		goto out_free;

	err = -EIDRM;
	if (msg_checkid(msq, msqid))
		goto out_unlock_free;

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes)
			break;

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = current->tgid;
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &msg_bytes);
		atomic_inc(&msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}
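
/*
 * A minimal user-space sketch of the matching call; illustrative only,
 * "id" is a queue id from msgget() and error handling is omitted:
 *
 *	struct { long mtype; char mtext[64]; } mb = { 1, "hello" };
 *	msgsnd(id, &mb, sizeof(mb.mtext), IPC_NOWAIT);
 */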

static inline int convert_mode(long *msgtyp, int msgflg)
{
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get the message with the lowest type
	 *               that is <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}
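
/*
 * For example (illustrative): msgtyp == -5 becomes msgtyp = 5 with
 * SEARCH_LESSEQUAL, so the receiver takes the queued message with the
 * lowest type in the range 1..5; msgtyp == 5 with MSG_EXCEPT matches
 * the first message whose type is not 5.
 */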

asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
			   long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp, msgflg);

	msq = msg_lock(msqid);
	if (msq == NULL)
		return -EINVAL;

	msg = ERR_PTR(-EIDRM);
	if (msg_checkid(msq, msqid))
		goto out_unlock;

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head *tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;

			walk_msg = list_entry(tmp, struct msg_msg, m_list);
			if (testmsg(walk_msg, msgtyp, mode) &&
			    !security_msg_queue_msgrcv(msq, walk_msg, current,
						       msgtyp, mode)) {

				msg = walk_msg;
				if (mode == SEARCH_LESSEQUAL &&
						walk_msg->m_type != 1)
					msgtyp = walk_msg->m_type - 1;
				else
					break;
			}
			tmp = tmp->next;
		}
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = current->tgid;
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &msg_bytes);
			atomic_dec(&msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message. */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet
		 * destroyed. rcu_read_lock() prevents preemption between
		 * reading r_msg and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/* Lockless receive, part 4:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/* Lockless receive, part 5:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	if (put_user(msg->m_type, &msgp->mtype) ||
	    store_msg(msgp->mtext, msg, msgsz)) {
		msgsz = -EFAULT;
	}
	free_msg(msg);

	return msgsz;
}
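
/*
 * A minimal user-space sketch of the matching call; illustrative only,
 * "id" is a queue id from msgget() and error handling is omitted:
 *
 *	struct { long mtype; char mtext[64]; } mb;
 *	ssize_t n = msgrcv(id, &mb, sizeof(mb.mtext), -5, IPC_NOWAIT);
 *	(receives the lowest-typed message with mtype <= 5, or fails
 *	 with ENOMSG if none is queued)
 */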

#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct msg_queue *msq = it;

	if (!vx_check(msq->q_perm.xid, VX_IDENT))
		return 0;

	return seq_printf(s,
			"%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			msq->q_perm.uid,
			msq->q_perm.gid,
			msq->q_perm.cuid,
			msq->q_perm.cgid,
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif