xref: /openbmc/linux/ipc/msg.c (revision 1da177e4)
1 /*
2  * linux/ipc/msg.c
3  * Copyright (C) 1992 Krishna Balasubramanian
4  *
5  * Removed all the remaining kerneld mess
6  * Catch the -EFAULT stuff properly
7  * Use GFP_KERNEL for messages as in 1.2
8  * Fixed up the unchecked user space derefs
9  * Copyright (C) 1998 Alan Cox & Andi Kleen
10  *
11  * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
12  *
13  * mostly rewritten, threaded and wake-one semantics added
14  * MSGMAX limit removed, sysctl's added
15  * (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
16  */
17 
18 #include <linux/config.h>
19 #include <linux/slab.h>
20 #include <linux/msg.h>
21 #include <linux/spinlock.h>
22 #include <linux/init.h>
23 #include <linux/proc_fs.h>
24 #include <linux/list.h>
25 #include <linux/security.h>
26 #include <linux/sched.h>
27 #include <linux/syscalls.h>
28 #include <linux/audit.h>
29 #include <asm/current.h>
30 #include <asm/uaccess.h>
31 #include "util.h"
32 
/* sysctl: runtime-tunable limits for SysV message queues */
int msg_ctlmax = MSGMAX;	/* max size in bytes of a single message */
int msg_ctlmnb = MSGMNB;	/* default max number of bytes on one queue */
int msg_ctlmni = MSGMNI;	/* max number of message queue identifiers */
37 
/* one msg_receiver structure for each sleeping receiver */
struct msg_receiver {
	struct list_head r_list;	/* link in msq->q_receivers */
	struct task_struct* r_tsk;	/* sleeping task to wake */

	int r_mode;			/* SEARCH_* matching mode */
	long r_msgtype;			/* requested message type */
	long r_maxsize;			/* largest message size accepted */

	/* Handshake slot for the lockless receive path: set to NULL while
	 * a wakeup is in flight, then (after an smp_mb()) to the message
	 * pointer or an ERR_PTR() error.  volatile because the receiver
	 * polls it without holding the queue lock. */
	struct msg_msg* volatile r_msg;
};
49 
/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head list;		/* link in msq->q_senders; list.next is
					 * set to NULL by ss_wakeup(kill=1) so
					 * ss_del() knows the list is defunct */
	struct task_struct* tsk;	/* sleeping task to wake */
};
55 
/* msgrcv() matching modes, derived from msgtyp/msgflg by convert_mode() */
#define SEARCH_ANY		1	/* msgtyp == 0: take first message */
#define SEARCH_EQUAL		2	/* msgtyp > 0: first of that type */
#define SEARCH_NOTEQUAL		3	/* MSG_EXCEPT: first of another type */
#define SEARCH_LESSEQUAL	4	/* msgtyp < 0: lowest type <= |msgtyp| */
60 
/* global accounting: total payload bytes and message headers, all queues */
static atomic_t msg_bytes = ATOMIC_INIT(0);
static atomic_t msg_hdrs = ATOMIC_INIT(0);

/* identifier set covering every message queue in the system */
static struct ipc_ids msg_ids;

/* thin wrappers mapping the generic ipc_* helpers onto message queues */
#define msg_lock(id)	((struct msg_queue*)ipc_lock(&msg_ids,id))
#define msg_unlock(msq)	ipc_unlock(&(msq)->q_perm)
#define msg_rmid(id)	((struct msg_queue*)ipc_rmid(&msg_ids,id))
#define msg_checkid(msq, msgid)	\
	ipc_checkid(&msg_ids,&msq->q_perm,msgid)
#define msg_buildid(id, seq) \
	ipc_buildid(&msg_ids, id, seq)
73 
/* forward declarations; definitions appear further down in this file */
static void freeque (struct msg_queue *msq, int id);
static int newque (key_t key, int msgflg);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
#endif
79 
/*
 * Boot-time initialisation: set up the message queue identifier set and
 * register the read-only /proc/sysvipc/msg entry.
 */
void __init msg_init (void)
{
	ipc_init_ids(&msg_ids,msg_ctlmni);

#ifdef CONFIG_PROC_FS
	create_proc_read_entry("sysvipc/msg", 0, NULL, sysvipc_msg_read_proc, NULL);
#endif
}
88 
/*
 * Create a new message queue for 'key'.  Called with msg_ids.sem held.
 * Returns the full (index,seq) queue identifier on success, or a
 * negative errno.
 */
static int newque (key_t key, int msgflg)
{
	int id;
	int retval;
	struct msg_queue *msq;

	msq  = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	/* only the permission bits of msgflg are recorded */
	msq->q_perm.mode = (msgflg & S_IRWXUGO);
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq);
		return retval;
	}

	/* on success the queue is returned locked (hence the msg_unlock
	 * below); -1 means the id table is full */
	id = ipc_addid(&msg_ids, &msq->q_perm, msg_ctlmni);
	if(id == -1) {
		security_msg_queue_free(msq);
		ipc_rcu_putref(msq);
		return -ENOSPC;
	}

	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);
	msg_unlock(msq);

	return msg_buildid(id,msq->q_perm.seq);
}
128 
/*
 * Queue the current task on the queue's sender wait list.  The task
 * state is set to TASK_INTERRUPTIBLE before the entry is published so a
 * concurrent wakeup cannot be lost.  Caller holds the queue lock.
 */
static inline void ss_add(struct msg_queue* msq, struct msg_sender* mss)
{
	mss->tsk=current;
	current->state=TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list,&msq->q_senders);
}
135 
/*
 * Unlink a sender from the wait list — unless ss_wakeup(kill=1) already
 * marked the entry dead (list.next == NULL) because the whole queue is
 * being torn down, in which case the list must not be touched.
 */
static inline void ss_del(struct msg_sender* mss)
{
	if(mss->list.next != NULL)
		list_del(&mss->list);
}
141 
/*
 * Wake every sender sleeping on list h.  With kill != 0 (queue removal)
 * each entry's list.next is set to NULL first, telling ss_del() not to
 * touch the now-defunct list.  tmp is advanced before the wakeup since
 * the woken task may free its msg_sender (stack entry) immediately.
 */
static void ss_wakeup(struct list_head* h, int kill)
{
	struct list_head *tmp;

	tmp = h->next;
	while (tmp != h) {
		struct msg_sender* mss;

		mss = list_entry(tmp,struct msg_sender,list);
		tmp = tmp->next;
		if(kill)
			mss->list.next=NULL;
		wake_up_process(mss->tsk);
	}
}
157 
/*
 * Fail every sleeping receiver on the queue with error 'res'.
 * Wakeup handshake: r_msg is set to NULL before wake_up_process() and to
 * ERR_PTR(res) after it, separated by smp_mb().  The receiver's lockless
 * poll treats NULL as "wakeup in flight" and spins until the final value
 * becomes visible.  Caller holds the queue lock.
 */
static void expunge_all(struct msg_queue* msq, int res)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver* msr;

		msr = list_entry(tmp,struct msg_receiver,r_list);
		tmp = tmp->next;
		msr->r_msg = NULL;
		wake_up_process(msr->r_tsk);
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}
174 /*
175  * freeque() wakes up waiters on the sender and receiver waiting queue,
176  * removes the message queue from message queue ID
177  * array, and cleans up all the messages associated with this queue.
178  *
 * msg_ids.sem and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.sem remains locked on exit.
181  */
static void freeque (struct msg_queue *msq, int id)
{
	struct list_head *tmp;

	/* fail all waiters: receivers get -EIDRM; senders are woken with
	 * their wait-list entries marked dead (kill=1) */
	expunge_all(msq,-EIDRM);
	ss_wakeup(&msq->q_senders,1);
	/* drop the queue from the id array, then release its spinlock;
	 * msg_ids.sem (held by caller) still covers the teardown below */
	msq = msg_rmid(id);
	msg_unlock(msq);

	tmp = msq->q_messages.next;
	while(tmp != &msq->q_messages) {
		struct msg_msg* msg = list_entry(tmp,struct msg_msg,m_list);
		tmp = tmp->next;
		atomic_dec(&msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &msg_bytes);
	security_msg_queue_free(msq);
	ipc_rcu_putref(msq);
}
202 
/*
 * msgget(2): find or create the message queue for 'key'.
 * Returns a queue identifier on success or a negative errno.
 */
asmlinkage long sys_msgget (key_t key, int msgflg)
{
	int id, ret = -EPERM;
	struct msg_queue *msq;

	down(&msg_ids.sem);
	if (key == IPC_PRIVATE)
		ret = newque(key, msgflg);
	else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */
		if (!(msgflg & IPC_CREAT))
			ret = -ENOENT;
		else
			ret = newque(key, msgflg);
	} else if (msgflg & IPC_CREAT && msgflg & IPC_EXCL) {
		/* key exists but the caller demanded exclusive creation */
		ret = -EEXIST;
	} else {
		/* existing queue: check access and build the full id.
		 * ipc_findkey() returned a live index, so msg_lock() must
		 * succeed while we still hold msg_ids.sem */
		msq = msg_lock(id);
		if(msq==NULL)
			BUG();
		if (ipcperms(&msq->q_perm, msgflg))
			ret = -EACCES;
		else {
			int qid = msg_buildid(id, msq->q_perm.seq);
		    	ret = security_msg_queue_associate(msq, msgflg);
			if (!ret)
				ret = qid;
		}
		msg_unlock(msq);
	}
	up(&msg_ids.sem);
	return ret;
}
235 
/*
 * Copy queue status out to userspace in either the new (IPC_64) or the
 * legacy (IPC_OLD) msqid_ds layout.  Returns nonzero on failure, as
 * with copy_to_user().
 */
static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user (buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct msqid_ds out;

		/* zero the whole struct first so padding bytes cannot leak
		 * kernel stack contents to userspace */
		memset(&out,0,sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime		= in->msg_stime;
		out.msg_rtime		= in->msg_rtime;
		out.msg_ctime		= in->msg_ctime;

		/* the old ABI counters are unsigned short: clamp them, and
		 * also report the full values in the companion long fields */
		if(in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes	= USHRT_MAX;
		else
			out.msg_cbytes	= in->msg_cbytes;
		out.msg_lcbytes		= in->msg_cbytes;

		if(in->msg_qnum > USHRT_MAX)
			out.msg_qnum	= USHRT_MAX;
		else
			out.msg_qnum	= in->msg_qnum;

		if(in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes	= USHRT_MAX;
		else
			out.msg_qbytes	= in->msg_qbytes;
		out.msg_lqbytes		= in->msg_qbytes;

		out.msg_lspid		= in->msg_lspid;
		out.msg_lrpid		= in->msg_lrpid;

		return copy_to_user (buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
279 
/* decoded IPC_SET request, produced by copy_msqid_from_user() */
struct msq_setbuf {
	unsigned long	qbytes;		/* new queue byte limit */
	uid_t		uid;		/* new owner uid */
	gid_t		gid;		/* new owner gid */
	mode_t		mode;		/* new permission bits */
};
286 
/*
 * Decode an IPC_SET request from userspace (new or legacy layout) into
 * *out.  Returns 0 on success, -EFAULT on copy failure, -EINVAL for an
 * unknown ABI version.
 */
static inline unsigned long copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct msqid64_ds tbuf;

		if (copy_from_user (&tbuf, buf, sizeof (tbuf)))
			return -EFAULT;

		out->qbytes		= tbuf.msg_qbytes;
		out->uid		= tbuf.msg_perm.uid;
		out->gid		= tbuf.msg_perm.gid;
		out->mode		= tbuf.msg_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct msqid_ds tbuf_old;

		if (copy_from_user (&tbuf_old, buf, sizeof (tbuf_old)))
			return -EFAULT;

		out->uid		= tbuf_old.msg_perm.uid;
		out->gid		= tbuf_old.msg_perm.gid;
		out->mode		= tbuf_old.msg_perm.mode;

		/* legacy ABI: a zero short msg_qbytes means "take the
		 * value from the long msg_lqbytes field instead" */
		if(tbuf_old.msg_qbytes == 0)
			out->qbytes	= tbuf_old.msg_lqbytes;
		else
			out->qbytes	= tbuf_old.msg_qbytes;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
326 
/*
 * msgctl(2): queue information, status, IPC_SET and IPC_RMID.
 * IPC_INFO/MSG_INFO/MSG_STAT/IPC_STAT are handled inside the first
 * switch and return directly; IPC_SET/IPC_RMID fall through to the
 * section below it, which runs with msg_ids.sem and the queue lock held.
 */
asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
{
	int err, version;
	struct msg_queue *msq;
	struct msq_setbuf setbuf;
	struct kern_ipc_perm *ipcp;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	/* strips the IPC_64 flag out of cmd, returns the ABI version */
	version = ipc_parse_version(&cmd);

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;
		if (!buf)
			return -EFAULT;
		/* We must not return kernel stack data:
		 * due to padding it is not enough to set all member
		 * fields, hence the memset below.
		 */

		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo,0,sizeof(msginfo));
		msginfo.msgmni = msg_ctlmni;
		msginfo.msgmax = msg_ctlmax;
		msginfo.msgmnb = msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down(&msg_ids.sem);
		if (cmd == MSG_INFO) {
			/* MSG_INFO reports live usage ... */
			msginfo.msgpool = msg_ids.in_use;
			msginfo.msgmap = atomic_read(&msg_hdrs);
			msginfo.msgtql = atomic_read(&msg_bytes);
		} else {
			/* ... IPC_INFO reports the compile-time constants */
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = msg_ids.max_id;
		up(&msg_ids.sem);
		if (copy_to_user (buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		/* the highest in-use index doubles as the return value */
		return (max_id < 0) ? 0: max_id;
	}
	case MSG_STAT:
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;
		if (!buf)
			return -EFAULT;
		/* MSG_STAT takes a raw index, IPC_STAT a full queue id */
		if(cmd == MSG_STAT && msqid >= msg_ids.entries->size)
			return -EINVAL;

		memset(&tbuf,0,sizeof(tbuf));

		msq = msg_lock(msqid);
		if (msq == NULL)
			return -EINVAL;

		if(cmd == MSG_STAT) {
			/* MSG_STAT returns the full id built from the index */
			success_return = msg_buildid(msqid, msq->q_perm.seq);
		} else {
			err = -EIDRM;
			if (msg_checkid(msq,msqid))
				goto out_unlock;
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms (&msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime  = msq->q_stime;
		tbuf.msg_rtime  = msq->q_rtime;
		tbuf.msg_ctime  = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum   = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid  = msq->q_lspid;
		tbuf.msg_lrpid  = msq->q_lrpid;
		msg_unlock(msq);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
		if (!buf)
			return -EFAULT;
		if (copy_msqid_from_user (&setbuf, buf, version))
			return -EFAULT;
		if ((err = audit_ipc_perms(setbuf.qbytes, setbuf.uid, setbuf.gid, setbuf.mode)))
			return err;
		break;
	case IPC_RMID:
		break;
	default:
		return  -EINVAL;
	}

	/* IPC_SET and IPC_RMID: take msg_ids.sem and the queue lock */
	down(&msg_ids.sem);
	msq = msg_lock(msqid);
	err=-EINVAL;
	if (msq == NULL)
		goto out_up;

	err = -EIDRM;
	if (msg_checkid(msq,msqid))
		goto out_unlock_up;
	ipcp = &msq->q_perm;
	err = -EPERM;
	/* only the creator, the owner, or a privileged task may proceed */
	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
	    /* We _could_ check for CAP_CHOWN above, but we don't */
		goto out_unlock_up;

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock_up;

	switch (cmd) {
	case IPC_SET:
	{
		err = -EPERM;
		/* raising the byte limit above msg_ctlmnb needs privilege */
		if (setbuf.qbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
			goto out_unlock_up;

		msq->q_qbytes = setbuf.qbytes;

		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
			(S_IRWXUGO & setbuf.mode);
		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq,-EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders,0);
		msg_unlock(msq);
		break;
	}
	case IPC_RMID:
		/* freeque() drops the queue lock itself */
		freeque (msq, msqid);
		break;
	}
	err = 0;
out_up:
	up(&msg_ids.sem);
	return err;
out_unlock_up:
	msg_unlock(msq);
	goto out_up;
out_unlock:
	msg_unlock(msq);
	return err;
}
498 
499 static int testmsg(struct msg_msg* msg,long type,int mode)
500 {
501 	switch(mode)
502 	{
503 		case SEARCH_ANY:
504 			return 1;
505 		case SEARCH_LESSEQUAL:
506 			if(msg->m_type <=type)
507 				return 1;
508 			break;
509 		case SEARCH_EQUAL:
510 			if(msg->m_type == type)
511 				return 1;
512 			break;
513 		case SEARCH_NOTEQUAL:
514 			if(msg->m_type != type)
515 				return 1;
516 			break;
517 	}
518 	return 0;
519 }
520 
/*
 * Try to hand 'msg' directly to a sleeping receiver, bypassing the
 * message list.  Returns 1 if a receiver took the message, 0 if the
 * caller must enqueue it.  A matching receiver whose buffer is too
 * small gets -E2BIG and the scan continues with the next receiver.
 * The r_msg handshake (NULL, wakeup, smp_mb(), final value) mirrors
 * expunge_all().  Caller holds the queue lock.
 */
static inline int pipelined_send(struct msg_queue* msq, struct msg_msg* msg)
{
	struct list_head* tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver* msr;
		msr = list_entry(tmp,struct msg_receiver,r_list);
		tmp = tmp->next;
		if(testmsg(msg,msr->r_msgtype,msr->r_mode) &&
		   !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) {
			list_del(&msr->r_list);
			if(msr->r_maxsize < msg->m_ts) {
				/* receiver's buffer too small: fail it with
				 * -E2BIG and keep looking for another */
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = msr->r_tsk->pid;
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = msg;
				return 1;
			}
		}
	}
	return 0;
}
551 
/*
 * msgsnd(2): copy a message from userspace and place it on the queue,
 * sleeping until there is room unless IPC_NOWAIT is set.  Returns 0 on
 * success or a negative errno.
 */
asmlinkage long sys_msgsnd (int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	long mtype;
	int err;

	if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	if (mtype < 1)
		return -EINVAL;

	/* copy the payload in before taking any locks */
	msg = load_msg(msgp->mtext, msgsz);
	if(IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock(msqid);
	err=-EINVAL;
	if(msq==NULL)
		goto out_free;

	err= -EIDRM;
	if (msg_checkid(msq,msqid))
		goto out_unlock_free;

	for (;;) {
		struct msg_sender s;

		/* permissions are rechecked on every pass: the queue may
		 * have been changed by IPC_SET while we slept */
		err=-EACCES;
		if (ipcperms(&msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		/* room for both the bytes and one more message header? */
		if(msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if(msgflg&IPC_NOWAIT) {
			err=-EAGAIN;
			goto out_unlock_free;
		}
		/* sleep on the sender list; hold an extra reference so the
		 * queue memory survives a concurrent IPC_RMID */
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err=-ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = current->tgid;
	msq->q_stime = get_seconds();

	if(!pipelined_send(msq,msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list,&msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz,&msg_bytes);
		atomic_inc(&msg_hdrs);
	}

	err = 0;
	msg = NULL;	/* ownership passed to the queue or a receiver */

out_unlock_free:
	msg_unlock(msq);
out_free:
	if(msg!=NULL)
		free_msg(msg);
	return err;
}
644 
645 static inline int convert_mode(long* msgtyp, int msgflg)
646 {
647 	/*
648 	 *  find message of correct type.
649 	 *  msgtyp = 0 => get first.
650 	 *  msgtyp > 0 => get first message of matching type.
651 	 *  msgtyp < 0 => get message with least type must be < abs(msgtype).
652 	 */
653 	if(*msgtyp==0)
654 		return SEARCH_ANY;
655 	if(*msgtyp<0) {
656 		*msgtyp=-(*msgtyp);
657 		return SEARCH_LESSEQUAL;
658 	}
659 	if(msgflg & MSG_EXCEPT)
660 		return SEARCH_NOTEQUAL;
661 	return SEARCH_EQUAL;
662 }
663 
/*
 * msgrcv(2): receive a message of the requested type, sleeping if none
 * is available and IPC_NOWAIT is not set.  On success the payload is
 * copied to msgp and the number of mtext bytes stored is returned;
 * otherwise a negative errno.
 */
asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz,
			    long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp,msgflg);

	msq = msg_lock(msqid);
	if(msq==NULL)
		return -EINVAL;

	msg = ERR_PTR(-EIDRM);
	if (msg_checkid(msq,msqid))
		goto out_unlock;

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head* tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms (&msq->q_perm, S_IRUGO))
			goto out_unlock;

		/* scan the queue for a message matching msgtyp/mode */
		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;
			walk_msg = list_entry(tmp,struct msg_msg,m_list);
			if(testmsg(walk_msg,msgtyp,mode) &&
			   !security_msg_queue_msgrcv(msq, walk_msg, current, msgtyp, mode)) {
				msg = walk_msg;
				if(mode == SEARCH_LESSEQUAL && walk_msg->m_type != 1) {
					/* keep scanning for a still lower
					 * type; 1 is the minimum so we can
					 * stop early on it.  (The two
					 * msg=walk_msg assignments below
					 * are redundant with the one above,
					 * but harmless.) */
					msg=walk_msg;
					msgtyp=walk_msg->m_type-1;
				} else {
					msg=walk_msg;
					break;
				}
			}
			tmp = tmp->next;
		}
		if(!IS_ERR(msg)) {
			/* Found a suitable message. Unlink it from the queue. */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = current->tgid;
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts,&msg_bytes);
			atomic_dec(&msg_hdrs);
			/* freed space may unblock sleeping senders */
			ss_wakeup(&msq->q_senders,0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		/* publish our msg_receiver (on the stack) and sleep; the
		 * sender or expunge_all() will fill in r_msg */
		list_add_tail(&msr_d.r_list,&msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if(msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		 else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption.  We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg*) msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg*) msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if(msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/* Lockless receive, part 4:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/* Lockless receive, part 5:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg*)msr_d.r_msg;
		if(msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
       		return PTR_ERR(msg);

	/* truncate to the caller's buffer (MSG_NOERROR path) and copy the
	 * type and payload out to userspace */
	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	if (put_user (msg->m_type, &msgp->mtype) ||
	    store_msg(msgp->mtext, msg, msgsz)) {
		    msgsz = -EFAULT;
	}
	free_msg(msg);
	return msgsz;
}
809 
810 #ifdef CONFIG_PROC_FS
/*
 * Legacy /proc read handler for /proc/sysvipc/msg: formats one header
 * line plus one line per existing queue into 'buffer', honouring the
 * classic offset/length/start windowing protocol of proc read_proc
 * callbacks.
 *
 * NOTE(review): 'pos' is advanced by the *cumulative* 'len' on each
 * iteration, so once output starts pos overcounts; this only makes the
 * pos > offset + length early exit conservative — confirm before
 * restructuring.
 */
static int sysvipc_msg_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t begin = 0;
	int i, len = 0;

	down(&msg_ids.sem);
	len += sprintf(buffer, "       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n");

	for(i = 0; i <= msg_ids.max_id; i++) {
		struct msg_queue * msq;
		msq = msg_lock(i);
		if(msq != NULL) {
			len += sprintf(buffer + len, "%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
				msq->q_perm.key,
				msg_buildid(i,msq->q_perm.seq),
				msq->q_perm.mode,
				msq->q_cbytes,
				msq->q_qnum,
				msq->q_lspid,
				msq->q_lrpid,
				msq->q_perm.uid,
				msq->q_perm.gid,
				msq->q_perm.cuid,
				msq->q_perm.cgid,
				msq->q_stime,
				msq->q_rtime,
				msq->q_ctime);
			msg_unlock(msq);

			pos += len;
			/* still before the requested window: discard */
			if(pos < offset) {
				len = 0;
				begin = pos;
			}
			/* past the window: stop without setting *eof */
			if(pos > offset + length)
				goto done;
		}

	}
	*eof = 1;
done:
	up(&msg_ids.sem);
	/* clip the result to the [offset, offset+length) window */
	*start = buffer + (offset - begin);
	len -= (offset - begin);
	if(len > length)
		len = length;
	if(len < 0)
		len = 0;
	return len;
}
862 #endif
863