 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctls added
 * (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/vs_base.h>
#include <linux/syscalls.h>

#include <asm/current.h>
#include <asm/uaccess.h>
int msg_ctlmax = MSGMAX;
int msg_ctlmnb = MSGMNB;
int msg_ctlmni = MSGMNI;

/* one msg_receiver structure for each sleeping receiver */
	struct list_head r_list;
	struct task_struct *r_tsk;

	struct msg_msg *volatile r_msg;

/* one msg_sender for each sleeping sender */
	struct list_head list;
	struct task_struct *tsk;

#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4

static atomic_t msg_bytes = ATOMIC_INIT(0);
static atomic_t msg_hdrs = ATOMIC_INIT(0);

static struct ipc_ids msg_ids;

#define msg_lock(id)	((struct msg_queue *)ipc_lock(&msg_ids, id))
#define msg_unlock(msq)	ipc_unlock(&(msq)->q_perm)
#define msg_rmid(id)	((struct msg_queue *)ipc_rmid(&msg_ids, id))
#define msg_checkid(msq, msgid) \
	ipc_checkid(&msg_ids, &msq->q_perm, msgid)
#define msg_buildid(id, seq) \
	ipc_buildid(&msg_ids, id, seq)

static void freeque(struct msg_queue *msq, int id);
static int newque(key_t key, int msgflg);
static int sysvipc_msg_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
void __init msg_init(void)
	ipc_init_ids(&msg_ids, msg_ctlmni);
	create_proc_read_entry("sysvipc/msg", 0, NULL, sysvipc_msg_read_proc, NULL);
static int newque(key_t key, int msgflg)
	struct msg_queue *msq;

	msq = ipc_rcu_alloc(sizeof(*msq));

	msq->q_perm.mode = (msgflg & S_IRWXUGO);
	msq->q_perm.key = key;
	msq->q_perm.xid = current->xid;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);

	id = ipc_addid(&msg_ids, &msq->q_perm, msg_ctlmni);
		security_msg_queue_free(msq);

	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);

	return msg_buildid(id, msq->q_perm.seq);
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);

static inline void ss_del(struct msg_sender *mss)
	if (mss->list.next != NULL)
		list_del(&mss->list);

static void ss_wakeup(struct list_head *h, int kill)
	struct list_head *tmp;

		struct msg_sender *mss;

		mss = list_entry(tmp, struct msg_sender, list);
		wake_up_process(mss->tsk);
static void expunge_all(struct msg_queue *msq, int res)
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		wake_up_process(msr->r_tsk);
		msr->r_msg = ERR_PTR(res);
 * freeque() wakes up waiters on the sender and receiver waiting queues,
 * removes the message queue from the message queue ID array, and cleans
 * up all the messages associated with this queue.
 *
 * msg_ids.sem and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.sem remains locked on exit.
static void freeque(struct msg_queue *msq, int id)
	struct list_head *tmp;

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);

	tmp = msq->q_messages.next;
	while (tmp != &msq->q_messages) {
		struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);

		atomic_dec(&msg_hdrs);
	atomic_sub(msq->q_cbytes, &msg_bytes);
	security_msg_queue_free(msq);
asmlinkage long sys_msgget(key_t key, int msgflg)
	int id, ret = -EPERM;
	struct msg_queue *msq;

	if (key == IPC_PRIVATE)
		ret = newque(key, msgflg);
	else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */
		if (!(msgflg & IPC_CREAT))
			ret = -ENOENT;
		else
			ret = newque(key, msgflg);
	} else if (msgflg & IPC_CREAT && msgflg & IPC_EXCL) {
		if (ipcperms(&msq->q_perm, msgflg))
			int qid = msg_buildid(id, msq->q_perm.seq);
			ret = security_msg_queue_associate(msq, msgflg);
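/*
 * Illustrative only, not part of the original file: a minimal user-space
 * sketch of how sys_msgget() above is reached through the libc wrapper.
 * The key and mode bits are arbitrary example values.
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *
 *	int qid = msgget((key_t)0x1234, IPC_CREAT | IPC_EXCL | 0600);
 *	if (qid < 0)
 *		perror("msgget");  (EEXIST here means the key exists and IPC_EXCL was set)
 */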
static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
		return copy_to_user(buf, in, sizeof(*in));

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	unsigned long qbytes;

static inline unsigned long copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
		struct msqid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))

		out->qbytes = tbuf.msg_qbytes;
		out->uid = tbuf.msg_perm.uid;
		out->gid = tbuf.msg_perm.gid;
		out->mode = tbuf.msg_perm.mode;

		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))

		out->uid = tbuf_old.msg_perm.uid;
		out->gid = tbuf_old.msg_perm.gid;
		out->mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->qbytes = tbuf_old.msg_lqbytes;
		else
			out->qbytes = tbuf_old.msg_qbytes;
asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
	struct msg_queue *msq;
	struct msq_setbuf setbuf;
	struct kern_ipc_perm *ipcp;

	if (msqid < 0 || cmd < 0)

	version = ipc_parse_version(&cmd);

		struct msginfo msginfo;

		/* We must not return kernel stack data.
		 * Due to padding, it is not enough
		 * to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = msg_ctlmni;
		msginfo.msgmax = msg_ctlmax;
		msginfo.msgmnb = msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;

		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids.in_use;
			msginfo.msgmap = atomic_read(&msg_hdrs);
			msginfo.msgtql = atomic_read(&msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = msg_ids.max_id;

		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
		return (max_id < 0) ? 0 : max_id;
		struct msqid64_ds tbuf;

		if (cmd == MSG_STAT && msqid >= msg_ids.entries->size)

		memset(&tbuf, 0, sizeof(tbuf));

		msq = msg_lock(msqid);

		if (cmd == MSG_STAT) {
			success_return = msg_buildid(msqid, msq->q_perm.seq);
			if (msg_checkid(msq, msqid))

		if (ipcperms(&msq->q_perm, S_IRUGO))

		err = security_msg_queue_msgctl(msq, cmd);

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime  = msq->q_stime;
		tbuf.msg_rtime  = msq->q_rtime;
		tbuf.msg_ctime  = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum   = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid  = msq->q_lspid;
		tbuf.msg_lrpid  = msq->q_lrpid;

		if (copy_msqid_to_user(buf, &tbuf, version))
		return success_return;
		if (copy_msqid_from_user(&setbuf, buf, version))

	msq = msg_lock(msqid);

	if (msg_checkid(msq, msqid))

	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
		/* We _could_ check for CAP_CHOWN above, but we don't */

	err = security_msg_queue_msgctl(msq, cmd);

		if (setbuf.qbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE))

		msq->q_qbytes = setbuf.qbytes;

		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
			(S_IRWXUGO & setbuf.mode);
		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);

		freeque(msq, msqid);
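/*
 * Illustrative only, not part of the original file: user-space view of the
 * msgctl() commands handled above.  "qid" is a queue id obtained from
 * msgget(); the qbytes value is an arbitrary example.
 *
 *	struct msqid_ds ds;
 *
 *	msgctl(qid, IPC_STAT, &ds);	// fills ds via copy_msqid_to_user()
 *	ds.msg_qbytes = 32768;		// needs CAP_SYS_RESOURCE if above msg_ctlmnb
 *	msgctl(qid, IPC_SET, &ds);	// may wake sleeping senders/receivers, see above
 *	msgctl(qid, IPC_RMID, NULL);	// destroys the queue via freeque()
 */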
static int testmsg(struct msg_msg *msg, long type, int mode)
		case SEARCH_LESSEQUAL:
			if (msg->m_type <= type)
		case SEARCH_EQUAL:
			if (msg->m_type == type)
		case SEARCH_NOTEQUAL:
			if (msg->m_type != type)

static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) {
			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				wake_up_process(msr->r_tsk);
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msq->q_lrpid = msr->r_tsk->pid;
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
	struct msg_queue *msq;

	if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0)
	if (get_user(mtype, &msgp->mtype))

	msg = load_msg(msgp->mtext, msgsz);

	msq = msg_lock(msqid);

	if (msg_checkid(msq, msqid))
		goto out_unlock_free;

		if (ipcperms(&msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			goto out_unlock_free;

		ipc_lock_by_ptr(&msq->q_perm);

		if (msq->q_perm.deleted) {
			goto out_unlock_free;

		if (signal_pending(current)) {
			goto out_unlock_free;

	msq->q_lspid = current->tgid;
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		atomic_add(msgsz, &msg_bytes);
		atomic_inc(&msg_hdrs);
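/*
 * Illustrative only, not part of the original file: the matching user-space
 * call for the send path above.  "qid" is a queue id from msgget(); the
 * message type and text are example values.
 *
 *	struct { long mtype; char mtext[32]; } m = { 1, "hello" };
 *
 *	if (msgsnd(qid, &m, sizeof(m.mtext), IPC_NOWAIT) < 0)
 *		perror("msgsnd");  (EAGAIN here corresponds to the "queue full" case above)
 */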
static inline int convert_mode(long *msgtyp, int msgflg)
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get message with lowest type that is <= abs(msgtyp).
	 */
		return SEARCH_LESSEQUAL;
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
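/*
 * Illustrative only, not part of the original file: how the user-space
 * msgrcv() msgtyp argument maps onto the search modes used above.  "qid"
 * and the type values are arbitrary examples.
 *
 *	struct { long mtype; char mtext[32]; } m;
 *
 *	msgrcv(qid, &m, sizeof(m.mtext), 0, 0);           // first message of any type
 *	msgrcv(qid, &m, sizeof(m.mtext), 5, 0);           // SEARCH_EQUAL: first of type 5
 *	msgrcv(qid, &m, sizeof(m.mtext), -5, 0);          // SEARCH_LESSEQUAL: lowest type <= 5
 *	msgrcv(qid, &m, sizeof(m.mtext), 5, MSG_EXCEPT);  // SEARCH_NOTEQUAL: first of type != 5
 */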
asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
			   long msgtyp, int msgflg)
	struct msg_queue *msq;

	if (msqid < 0 || (long) msgsz < 0)
	mode = convert_mode(&msgtyp, msgflg);

	msq = msg_lock(msqid);

	msg = ERR_PTR(-EIDRM);
	if (msg_checkid(msq, msqid))

		struct msg_receiver msr_d;
		struct list_head *tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(&msq->q_perm, S_IRUGO))

		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;

			walk_msg = list_entry(tmp, struct msg_msg, m_list);
			if (testmsg(walk_msg, msgtyp, mode) &&
			    !security_msg_queue_msgrcv(msq, walk_msg, current, msgtyp, mode)) {
				if (mode == SEARCH_LESSEQUAL && walk_msg->m_type != 1) {
					msgtyp = walk_msg->m_type - 1;

			/* Found a suitable message. Unlink it from the queue. */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);

			list_del(&msg->m_list);
			msq->q_rtime = get_seconds();
			msq->q_lrpid = current->tgid;
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &msg_bytes);
			atomic_dec(&msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);

		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);

		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;

		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		/* Lockless receive, part 1:
		 * Disable preemption.  We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and the spin_lock() inside ipc_lock_by_ptr().
		 */

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(); see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			msg = (struct msg_msg *)msr_d.r_msg;

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {

		/* Lockless receive, part 3:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);

		/* Lockless receive, part 4:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);

	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	if (put_user(msg->m_type, &msgp->mtype) ||
	    store_msg(msgp->mtext, msg, msgsz)) {
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
	len += sprintf(buffer, "       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n");

	for (i = 0; i <= msg_ids.max_id; i++) {
		struct msg_queue *msq;

			if (!vx_check(msq->q_perm.xid, VX_IDENT)) {

			len += sprintf(buffer + len, "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
				msg_buildid(i, msq->q_perm.seq),

			if (pos > offset + length)

	*start = buffer + (offset - begin);
	len -= (offset - begin);
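/*
 * Illustrative only, not part of the original file: the resulting
 * /proc/sysvipc/msg output is the header line printed above followed by one
 * line per queue that passes the vx_check().  Example with made-up values:
 *
 *        key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime
 *       4660          0   600           0          0     0     0  1000  1000  1000  1000          0          0          0
 */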