/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"

/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	struct msg_msg		*volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
};

#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4

static struct ipc_ids init_msg_ids;

#define msg_ids(ns)	(*((ns)->ids[IPC_MSG_IDS]))

#define msg_unlock(msq)		ipc_unlock(&(msq)->q_perm)
#define msg_buildid(id, seq)	ipc_buildid(id, seq)

static void freeque(struct ipc_namespace *, struct msg_queue *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif

static void __msg_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
{
	ns->ids[IPC_MSG_IDS] = ids;
	ns->msg_ctlmax = MSGMAX;
	ns->msg_ctlmnb = MSGMNB;
	ns->msg_ctlmni = MSGMNI;
	atomic_set(&ns->msg_bytes, 0);
	atomic_set(&ns->msg_hdrs, 0);
	ipc_init_ids(ids);
}

int msg_init_ns(struct ipc_namespace *ns)
{
	struct ipc_ids *ids;

	ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
	if (ids == NULL)
		return -ENOMEM;

	__msg_init_ns(ns, ids);
	return 0;
}

void msg_exit_ns(struct ipc_namespace *ns)
{
	struct msg_queue *msq;
	int next_id;
	int total, in_use;

	down_write(&msg_ids(ns).rw_mutex);

	in_use = msg_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		msq = idr_find(&msg_ids(ns).ipcs_idr, next_id);
		if (msq == NULL)
			continue;
		ipc_lock_by_ptr(&msq->q_perm);
		freeque(ns, msq);
		total++;
	}

	up_write(&msg_ids(ns).rw_mutex);

	kfree(ns->ids[IPC_MSG_IDS]);
	ns->ids[IPC_MSG_IDS] = NULL;
}

void __init msg_init(void)
{
	__msg_init_ns(&init_ipc_ns, &init_msg_ids);
	ipc_init_proc_interface("sysvipc/msg",
				"       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
				IPC_MSG_IDS, sysvipc_msg_proc_show);
}

/*
 * This routine is called in the paths where the rw_mutex is held to protect
 * access to the idr tree.
 */
static inline struct msg_queue *msg_lock_check_down(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check_down(&msg_ids(ns), id);

	return container_of(ipcp, struct msg_queue, q_perm);
}

/*
 * msg_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
	ipc_rmid(&msg_ids(ns), &s->q_perm);
}

/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rw_mutex held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
	struct msg_queue *msq;
	int id, retval;
	key_t key = params->key;
	int msgflg = params->flg;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq);
		return retval;
	}

	/*
	 * ipc_addid() locks msq
	 */
	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (id < 0) {
		security_msg_queue_free(msq);
		ipc_rcu_putref(msq);
		return id;
	}

	msq->q_perm.id = msg_buildid(id, msq->q_perm.seq);
	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);

	msg_unlock(msq);

	return msq->q_perm.id;
}

static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);
}

static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}

static void ss_wakeup(struct list_head *h, int kill)
{
	struct list_head *tmp;

	tmp = h->next;
	while (tmp != h) {
		struct msg_sender *mss;

		mss = list_entry(tmp, struct msg_sender, list);
		tmp = tmp->next;
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}

static void expunge_all(struct msg_queue *msq, int res)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		msr->r_msg = NULL;
		wake_up_process(msr->r_tsk);
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}
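
/*
 * Note on the wake-up protocol used by expunge_all() (and by
 * pipelined_send() further down): r_msg is first set to NULL, the receiver
 * task is woken, and only after the memory barrier is the final value (a
 * message pointer or an ERR_PTR) stored.  The sleeping receiver in
 * do_msgrcv() spins until r_msg becomes non-NULL again, so it never
 * observes a half-completed hand-off.
 */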

/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct msg_queue *msq)
{
	struct list_head *tmp;

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msg_rmid(ns, msq);
	msg_unlock(msq);

	tmp = msq->q_messages.next;
	while (tmp != &msq->q_messages) {
		struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);

		tmp = tmp->next;
		atomic_dec(&ns->msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &ns->msg_bytes);
	security_msg_queue_free(msq);
	ipc_rcu_putref(msq);
}

/*
 * Called with msg_ids.rw_mutex and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	return security_msg_queue_associate(msq, msgflg);
}

asmlinkage long sys_msgget(key_t key, int msgflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops msg_ops;
	struct ipc_params msg_params;

	ns = current->nsproxy->ipc_ns;

	msg_ops.getnew = newque;
	msg_ops.associate = msg_security;
	msg_ops.more_checks = NULL;

	msg_params.key = key;
	msg_params.flg = msgflg;

	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}

static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

struct msq_setbuf {
	unsigned long	qbytes;
	uid_t		uid;
	gid_t		gid;
	mode_t		mode;
};

static inline unsigned long
copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	{
		struct msqid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->qbytes = tbuf.msg_qbytes;
		out->uid = tbuf.msg_perm.uid;
		out->gid = tbuf.msg_perm.gid;
		out->mode = tbuf.msg_perm.mode;

		return 0;
	}
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid = tbuf_old.msg_perm.uid;
		out->gid = tbuf_old.msg_perm.gid;
		out->mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->qbytes = tbuf_old.msg_lqbytes;
		else
			out->qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
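
/*
 * The IPC_OLD cases above exist for the legacy struct msqid_ds ABI, whose
 * byte and message counters are only 16 bits wide: values are clamped to
 * USHRT_MAX on the way out, while the "l" (long) fields carry the full
 * values where the old layout provides them.
 */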

asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
{
	struct kern_ipc_perm *ipcp;
	struct msq_setbuf uninitialized_var(setbuf);
	struct msg_queue *msq;
	int err, version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;
		/*
		 * We must not return kernel stack data:
		 * due to padding, it's not enough
		 * to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = ns->msg_ctlmni;
		msginfo.msgmax = ns->msg_ctlmax;
		msginfo.msgmnb = ns->msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down_read(&msg_ids(ns).rw_mutex);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids(ns).in_use;
			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
			msginfo.msgtql = atomic_read(&ns->msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = ipc_get_maxid(&msg_ids(ns));
		up_read(&msg_ids(ns).rw_mutex);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;

		if (cmd == MSG_STAT) {
			msq = msg_lock(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = msq->q_perm.id;
		} else {
			msq = msg_lock_check(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		msg_unlock(msq);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
		if (!buf)
			return -EFAULT;
		if (copy_msqid_from_user(&setbuf, buf, version))
			return -EFAULT;
		break;
	case IPC_RMID:
		break;
	default:
		return -EINVAL;
	}

	down_write(&msg_ids(ns).rw_mutex);
	msq = msg_lock_check_down(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_up;
	}

	ipcp = &msq->q_perm;

	err = audit_ipc_obj(ipcp);
	if (err)
		goto out_unlock_up;
	if (cmd == IPC_SET) {
		err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid,
					 setbuf.mode);
		if (err)
			goto out_unlock_up;
	}

	err = -EPERM;
	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
		/* We _could_ check for CAP_CHOWN above, but we don't */
		goto out_unlock_up;

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock_up;

	switch (cmd) {
	case IPC_SET:
	{
		err = -EPERM;
		if (setbuf.qbytes > ns->msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
			goto out_unlock_up;

		msq->q_qbytes = setbuf.qbytes;

		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
			     (S_IRWXUGO & setbuf.mode);
		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		msg_unlock(msq);
		break;
	}
	case IPC_RMID:
		freeque(ns, msq);
		break;
	}
	err = 0;
out_up:
	up_write(&msg_ids(ns).rw_mutex);
	return err;
out_unlock_up:
	msg_unlock(msq);
	goto out_up;
out_unlock:
	msg_unlock(msq);
	return err;
}

static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch(mode)
	{
	case SEARCH_ANY:
		return 1;
	case SEARCH_LESSEQUAL:
		if (msg->m_type <= type)
			return 1;
		break;
	case SEARCH_EQUAL:
		if (msg->m_type == type)
			return 1;
		break;
	case SEARCH_NOTEQUAL:
		if (msg->m_type != type)
			return 1;
		break;
	}
	return 0;
}

/*
 * Try to hand the message directly to a sleeping receiver instead of
 * queueing it; returns 1 if a receiver accepted the message.
 */
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = msg;

				return 1;
			}
		}
	}
	return 0;
}

long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_free;
	}

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}
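
	/*
	 * There is now room in the queue: record the sender, then either
	 * hand the message directly to a waiting receiver or append it.
	 */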
	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}

asmlinkage long
sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}

static inline int convert_mode(long *msgtyp, int msgflg)
{
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get message with least type which is <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}

long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
		size_t msgsz, long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;
	struct ipc_namespace *ns;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp, msgflg);
	ns = current->nsproxy->ipc_ns;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq))
		return PTR_ERR(msq);

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head *tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;

			walk_msg = list_entry(tmp, struct msg_msg, m_list);
			if (testmsg(walk_msg, msgtyp, mode) &&
			    !security_msg_queue_msgrcv(msq, walk_msg, current,
						       msgtyp, mode)) {

				msg = walk_msg;
				if (mode == SEARCH_LESSEQUAL &&
						walk_msg->m_type != 1) {
					msg = walk_msg;
					msgtyp = walk_msg->m_type - 1;
				} else {
					msg = walk_msg;
					break;
				}
			}
			tmp = tmp->next;
		}
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &ns->msg_bytes);
			atomic_dec(&ns->msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption.  We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/* Lockless receive, part 4:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/* Lockless receive, part 5:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	*pmtype = msg->m_type;
	if (store_msg(mtext, msg, msgsz))
		msgsz = -EFAULT;

	free_msg(msg);

	return msgsz;
}

asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
			   long msgtyp, int msgflg)
{
	long err, mtype;

	err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
	if (err < 0)
		goto out;

	if (put_user(mtype, &msgp->mtype))
		err = -EFAULT;
out:
	return err;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct msg_queue *msq = it;

	return seq_printf(s,
			"%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_perm.id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			msq->q_perm.uid,
			msq->q_perm.gid,
			msq->q_perm.cuid,
			msq->q_perm.cgid,
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif
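
/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * syscalls above back the usual SysV message queue calls, roughly
 *
 *	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
 *	struct { long mtype; char mtext[64]; } buf = { 1, "hello" };
 *	msgsnd(id, &buf, sizeof(buf.mtext), 0);
 *	msgrcv(id, &buf, sizeof(buf.mtext), 0, 0);
 *	msgctl(id, IPC_RMID, NULL);
 *
 * Error handling is omitted; see msgget(2), msgsnd(2), msgrcv(2) and
 * msgctl(2) for the full interface.
 */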