/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"

/* sysctl: */
int msg_ctlmax = MSGMAX;
int msg_ctlmnb = MSGMNB;
int msg_ctlmni = MSGMNI;

/* one msg_receiver structure for each sleeping receiver */
struct msg_receiver {
	struct list_head r_list;
	struct task_struct* r_tsk;

	int r_mode;
	long r_msgtype;
	long r_maxsize;

	struct msg_msg* volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head list;
	struct task_struct* tsk;
};

#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4

static atomic_t msg_bytes = ATOMIC_INIT(0);
static atomic_t msg_hdrs = ATOMIC_INIT(0);

static struct ipc_ids msg_ids;

#define msg_lock(id)	((struct msg_queue*)ipc_lock(&msg_ids,id))
#define msg_unlock(msq)	ipc_unlock(&(msq)->q_perm)
#define msg_rmid(id)	((struct msg_queue*)ipc_rmid(&msg_ids,id))
#define msg_checkid(msq, msgid)	\
	ipc_checkid(&msg_ids,&msq->q_perm,msgid)
#define msg_buildid(id, seq) \
	ipc_buildid(&msg_ids, id, seq)

static void freeque (struct msg_queue *msq, int id);
static int newque (key_t key, int msgflg);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif

void __init msg_init (void)
{
	ipc_init_ids(&msg_ids,msg_ctlmni);
	ipc_init_proc_interface("sysvipc/msg",
				"       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
				&msg_ids,
				sysvipc_msg_proc_show);
}

static int newque (key_t key, int msgflg)
{
	int id;
	int retval;
	struct msg_queue *msq;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = (msgflg & S_IRWXUGO);
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq);
		return retval;
	}

	id = ipc_addid(&msg_ids, &msq->q_perm, msg_ctlmni);
	if(id == -1) {
		security_msg_queue_free(msq);
		ipc_rcu_putref(msq);
		return -ENOSPC;
	}

	msq->q_id = msg_buildid(id,msq->q_perm.seq);
	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);
	msg_unlock(msq);

	return msq->q_id;
}
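/*
 * Senders that must block queue themselves on q_senders via ss_add()
 * and are woken through ss_wakeup().  With kill set (queue removal),
 * ss_wakeup() clears list.next rather than unlinking each entry;
 * ss_del() checks for that and only unlinks entries still on a list.
 */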
static inline void ss_add(struct msg_queue* msq, struct msg_sender* mss)
{
	mss->tsk=current;
	current->state=TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list,&msq->q_senders);
}

static inline void ss_del(struct msg_sender* mss)
{
	if(mss->list.next != NULL)
		list_del(&mss->list);
}

static void ss_wakeup(struct list_head* h, int kill)
{
	struct list_head *tmp;

	tmp = h->next;
	while (tmp != h) {
		struct msg_sender* mss;

		mss = list_entry(tmp,struct msg_sender,list);
		tmp = tmp->next;
		if(kill)
			mss->list.next=NULL;
		wake_up_process(mss->tsk);
	}
}

static void expunge_all(struct msg_queue* msq, int res)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver* msr;

		msr = list_entry(tmp,struct msg_receiver,r_list);
		tmp = tmp->next;
		msr->r_msg = NULL;
		wake_up_process(msr->r_tsk);
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}

/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from the message queue ID array, and cleans
 * up all the messages associated with this queue.
 *
 * msg_ids.sem and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.sem remains locked on exit.
 */
static void freeque (struct msg_queue *msq, int id)
{
	struct list_head *tmp;

	expunge_all(msq,-EIDRM);
	ss_wakeup(&msq->q_senders,1);
	msq = msg_rmid(id);
	msg_unlock(msq);

	tmp = msq->q_messages.next;
	while(tmp != &msq->q_messages) {
		struct msg_msg* msg = list_entry(tmp,struct msg_msg,m_list);
		tmp = tmp->next;
		atomic_dec(&msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &msg_bytes);
	security_msg_queue_free(msq);
	ipc_rcu_putref(msq);
}

asmlinkage long sys_msgget (key_t key, int msgflg)
{
	int id, ret = -EPERM;
	struct msg_queue *msq;

	down(&msg_ids.sem);
	if (key == IPC_PRIVATE)
		ret = newque(key, msgflg);
	else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */
		if (!(msgflg & IPC_CREAT))
			ret = -ENOENT;
		else
			ret = newque(key, msgflg);
	} else if (msgflg & IPC_CREAT && msgflg & IPC_EXCL) {
		ret = -EEXIST;
	} else {
		msq = msg_lock(id);
		if(msq==NULL)
			BUG();
		if (ipcperms(&msq->q_perm, msgflg))
			ret = -EACCES;
		else {
			int qid = msg_buildid(id, msq->q_perm.seq);
			ret = security_msg_queue_associate(msq, msgflg);
			if (!ret)
				ret = qid;
		}
		msg_unlock(msq);
	}
	up(&msg_ids.sem);
	return ret;
}
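/*
 * Userspace sketch (for illustration only): sys_msgget() above is
 * reached through the msgget(2) wrapper.  A minimal caller, using the
 * standard <sys/ipc.h>/<sys/msg.h> interfaces:
 *
 *	#include <stdio.h>
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *
 *	int main(void)
 *	{
 *		int qid = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
 *		if (qid < 0) {
 *			perror("msgget");
 *			return 1;
 *		}
 *		printf("queue id: %d\n", qid);
 *		return 0;
 *	}
 *
 * IPC_PRIVATE always allocates a fresh queue via newque(); a non-private
 * key is looked up first and created only if IPC_CREAT is set.
 */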
static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user (buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out,0,sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if(in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if(in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if(in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user (buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

struct msq_setbuf {
	unsigned long	qbytes;
	uid_t		uid;
	gid_t		gid;
	mode_t		mode;
};

static inline unsigned long copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	{
		struct msqid64_ds tbuf;

		if (copy_from_user (&tbuf, buf, sizeof (tbuf)))
			return -EFAULT;

		out->qbytes = tbuf.msg_qbytes;
		out->uid = tbuf.msg_perm.uid;
		out->gid = tbuf.msg_perm.gid;
		out->mode = tbuf.msg_perm.mode;

		return 0;
	}
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user (&tbuf_old, buf, sizeof (tbuf_old)))
			return -EFAULT;

		out->uid = tbuf_old.msg_perm.uid;
		out->gid = tbuf_old.msg_perm.gid;
		out->mode = tbuf_old.msg_perm.mode;

		if(tbuf_old.msg_qbytes == 0)
			out->qbytes = tbuf_old.msg_lqbytes;
		else
			out->qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
{
	int err, version;
	struct msg_queue *msq;
	struct msq_setbuf setbuf;
	struct kern_ipc_perm *ipcp;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;
		if (!buf)
			return -EFAULT;
		/* We must not return kernel stack data:
		 * due to padding, it's not enough
		 * to set all member fields.
		 */

		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo,0,sizeof(msginfo));
		msginfo.msgmni = msg_ctlmni;
		msginfo.msgmax = msg_ctlmax;
		msginfo.msgmnb = msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down(&msg_ids.sem);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids.in_use;
			msginfo.msgmap = atomic_read(&msg_hdrs);
			msginfo.msgtql = atomic_read(&msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = msg_ids.max_id;
		up(&msg_ids.sem);
		if (copy_to_user (buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
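	/* MSG_STAT differs from IPC_STAT: it takes an index into the id
	 * array (as ipcs(8) does when walking all queues) and returns the
	 * full queue id on success, while IPC_STAT takes a proper queue
	 * id and returns 0.
	 */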
	case MSG_STAT:
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;
		if (!buf)
			return -EFAULT;
		if(cmd == MSG_STAT && msqid >= msg_ids.entries->size)
			return -EINVAL;

		memset(&tbuf,0,sizeof(tbuf));

		msq = msg_lock(msqid);
		if (msq == NULL)
			return -EINVAL;

		if(cmd == MSG_STAT) {
			success_return = msg_buildid(msqid, msq->q_perm.seq);
		} else {
			err = -EIDRM;
			if (msg_checkid(msq,msqid))
				goto out_unlock;
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms (&msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime  = msq->q_stime;
		tbuf.msg_rtime  = msq->q_rtime;
		tbuf.msg_ctime  = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum   = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid  = msq->q_lspid;
		tbuf.msg_lrpid  = msq->q_lrpid;
		msg_unlock(msq);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
		if (!buf)
			return -EFAULT;
		if (copy_msqid_from_user (&setbuf, buf, version))
			return -EFAULT;
		if ((err = audit_ipc_perms(setbuf.qbytes, setbuf.uid, setbuf.gid, setbuf.mode)))
			return err;
		break;
	case IPC_RMID:
		break;
	default:
		return -EINVAL;
	}

	down(&msg_ids.sem);
	msq = msg_lock(msqid);
	err = -EINVAL;
	if (msq == NULL)
		goto out_up;

	err = -EIDRM;
	if (msg_checkid(msq,msqid))
		goto out_unlock_up;
	ipcp = &msq->q_perm;
	err = -EPERM;
	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
	    /* We _could_ check for CAP_CHOWN above, but we don't */
		goto out_unlock_up;

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock_up;

	switch (cmd) {
	case IPC_SET:
	{
		err = -EPERM;
		if (setbuf.qbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
			goto out_unlock_up;

		msq->q_qbytes = setbuf.qbytes;

		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
			(S_IRWXUGO & setbuf.mode);
		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq,-EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders,0);
		msg_unlock(msq);
		break;
	}
	case IPC_RMID:
		freeque (msq, msqid);
		break;
	}
	err = 0;
out_up:
	up(&msg_ids.sem);
	return err;
out_unlock_up:
	msg_unlock(msq);
	goto out_up;
out_unlock:
	msg_unlock(msq);
	return err;
}
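/*
 * Userspace sketch (for illustration only): a typical IPC_STAT/IPC_SET
 * round trip against sys_msgctl() above, doubling the queue byte limit.
 * grow_queue() is a hypothetical helper name.  Note that raising
 * msg_qbytes beyond msg_ctlmnb requires CAP_SYS_RESOURCE.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *
 *	int grow_queue(int qid)
 *	{
 *		struct msqid_ds ds;
 *
 *		if (msgctl(qid, IPC_STAT, &ds) < 0)
 *			return -1;
 *		ds.msg_qbytes *= 2;
 *		return msgctl(qid, IPC_SET, &ds);
 *	}
 */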
static int testmsg(struct msg_msg* msg,long type,int mode)
{
	switch(mode)
	{
		case SEARCH_ANY:
			return 1;
		case SEARCH_LESSEQUAL:
			if(msg->m_type <= type)
				return 1;
			break;
		case SEARCH_EQUAL:
			if(msg->m_type == type)
				return 1;
			break;
		case SEARCH_NOTEQUAL:
			if(msg->m_type != type)
				return 1;
			break;
	}
	return 0;
}

static inline int pipelined_send(struct msg_queue* msq, struct msg_msg* msg)
{
	struct list_head* tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver* msr;
		msr = list_entry(tmp,struct msg_receiver,r_list);
		tmp = tmp->next;
		if(testmsg(msg,msr->r_msgtype,msr->r_mode) &&
		   !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) {
			list_del(&msr->r_list);
			if(msr->r_maxsize < msg->m_ts) {
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = msr->r_tsk->pid;
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = msg;
				return 1;
			}
		}
	}
	return 0;
}

asmlinkage long sys_msgsnd (int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	long mtype;
	int err;

	if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(msgp->mtext, msgsz);
	if(IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock(msqid);
	err = -EINVAL;
	if(msq==NULL)
		goto out_free;

	err = -EIDRM;
	if (msg_checkid(msq,msqid))
		goto out_unlock_free;

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if(msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if(msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = current->tgid;
	msq->q_stime = get_seconds();

	if(!pipelined_send(msq,msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list,&msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz,&msg_bytes);
		atomic_inc(&msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if(msg!=NULL)
		free_msg(msg);
	return err;
}
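/*
 * Userspace sketch (for illustration only): feeding sys_msgsnd() above
 * through the msgsnd(2) wrapper.  The buffer must begin with a long
 * mtype >= 1; msgsz counts the payload only.  struct req and
 * send_hello() are hypothetical example names.
 *
 *	#include <string.h>
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *
 *	struct req {
 *		long mtype;
 *		char mtext[64];
 *	};
 *
 *	int send_hello(int qid)
 *	{
 *		struct req r = { .mtype = 1 };
 *
 *		strcpy(r.mtext, "hello");
 *		return msgsnd(qid, &r, strlen(r.mtext) + 1, IPC_NOWAIT);
 *	}
 *
 * Without IPC_NOWAIT the caller sleeps in the ss_add()/schedule() loop
 * above until the queue has room or a signal arrives.
 */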
static inline int convert_mode(long* msgtyp, int msgflg)
{
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get message with least type which is <= abs(msgtyp).
	 */
	if(*msgtyp == 0)
		return SEARCH_ANY;
	if(*msgtyp < 0) {
		*msgtyp = -(*msgtyp);
		return SEARCH_LESSEQUAL;
	}
	if(msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}

asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz,
			    long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp,msgflg);

	msq = msg_lock(msqid);
	if(msq==NULL)
		return -EINVAL;

	msg = ERR_PTR(-EIDRM);
	if (msg_checkid(msq,msqid))
		goto out_unlock;

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head* tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms (&msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;
			walk_msg = list_entry(tmp,struct msg_msg,m_list);
			if(testmsg(walk_msg,msgtyp,mode) &&
			   !security_msg_queue_msgrcv(msq, walk_msg, current, msgtyp, mode)) {
				msg = walk_msg;
				if(mode == SEARCH_LESSEQUAL && walk_msg->m_type != 1) {
					msgtyp = walk_msg->m_type - 1;
				} else {
					break;
				}
			}
			tmp = tmp->next;
		}
		if(!IS_ERR(msg)) {
			/* Found a suitable message. Unlink it from the queue. */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = current->tgid;
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts,&msg_bytes);
			atomic_dec(&msg_hdrs);
			ss_wakeup(&msq->q_senders,0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list,&msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if(msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg*) msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg*) msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if(msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/* Lockless receive, part 4:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/* Lockless receive, part 5:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg*)msr_d.r_msg;
		if(msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	if (put_user (msg->m_type, &msgp->mtype) ||
	    store_msg(msgp->mtext, msg, msgsz)) {
		msgsz = -EFAULT;
	}
	free_msg(msg);
	return msgsz;
}
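/*
 * Userspace sketch (for illustration only): the msgtyp argument selects
 * the search mode via convert_mode() above: 0 takes the first message,
 * msgtyp > 0 the first message of that type (or of any other type with
 * MSG_EXCEPT), and msgtyp < 0 the message with the lowest type that is
 * <= abs(msgtyp).  struct req matches the hypothetical sender layout
 * and recv_low_prio() is a made-up name:
 *
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *
 *	struct req {
 *		long mtype;
 *		char mtext[64];
 *	};
 *
 *	ssize_t recv_low_prio(int qid, struct req *r)
 *	{
 *		return msgrcv(qid, r, sizeof(r->mtext), -5, MSG_NOERROR);
 *	}
 *
 * MSG_NOERROR truncates an oversized message instead of failing with
 * -E2BIG; msgrcv() returns the number of mtext bytes copied.
 */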
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct msg_queue *msq = it;

	return seq_printf(s,
			"%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			msq->q_perm.uid,
			msq->q_perm.gid,
			msq->q_perm.cuid,
			msq->q_perm.cgid,
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif