/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

struct mqueue_fs_context {
	struct ipc_namespace	*ipc_ns;
};

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct rb_node *msg_tree_rightmost;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	bool rightmost = true;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;

	if (rightmost)
		info->msg_tree_rightmost = &leaf->rb_node;

	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}
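
/*
 * A minimal sketch of the structure msg_insert() builds (illustrative,
 * not kernel code): one posix_msg_tree_node per priority in use, keyed
 * by priority, with FIFO order preserved inside each node's msg_list.
 * Assuming three sends with priorities 5, 1 and 5, in that order:
 *
 *	node(prio 5): msg_list = [msg1, msg3]	<- msg_tree_rightmost
 *	      /
 *	node(prio 1): msg_list = [msg2]
 *
 * msg_get() below can therefore take the highest priority in O(1) via
 * msg_tree_rightmost, while equal-priority messages are delivered in
 * the order they were sent.
 */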

static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
				  struct mqueue_inode_info *info)
{
	struct rb_node *node = &leaf->rb_node;

	if (info->msg_tree_rightmost == node)
		info->msg_tree_rightmost = rb_prev(node);

	rb_erase(node, &info->msg_tree);
	if (info->node_cache) {
		kfree(leaf);
	} else {
		info->node_cache = leaf;
	}
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	/*
	 * During insert, low priorities go to the left and high to the
	 * right.  On receive, we want the highest priorities first, so
	 * walk all the way to the right.
	 */
	parent = info->msg_tree_rightmost;
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		msg_tree_erase(leaf, info);
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			msg_tree_erase(leaf, info);
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->msg_tree_rightmost = NULL;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size.  That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume the worst case,
		 * which means min(mq_maxmsg, max_priorities) *
		 * sizeof(struct posix_msg_tree_node).
		 */
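
		/*
		 * A worked example of the accounting below (a sketch with
		 * illustrative figures; the struct sizes vary by
		 * architecture and config): with mq_maxmsg = 10 and
		 * mq_msgsize = 8192, and assuming sizeof(struct msg_msg)
		 * == 48 and sizeof(struct posix_msg_tree_node) == 40,
		 *
		 *	mq_treesize = 10 * 48 + min(10, MQ_PRIO_MAX) * 40
		 *		    = 480 + 400 = 880 bytes
		 *	mq_bytes    = 10 * 8192 + 880 = 82800 bytes
		 *
		 * and those 82800 bytes are charged against the creating
		 * user's RLIMIT_MSGQUEUE for the lifetime of the queue.
		 */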
		ret = -EINVAL;
		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
			goto out_inode;
		if (capable(CAP_SYS_RESOURCE)) {
			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
				goto out_inode;
		} else {
			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
			    info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
				goto out_inode;
		}
		ret = -EOVERFLOW;
		/* check for overflow */
		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
			goto out_inode;
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);
		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
		if (mq_bytes + mq_treesize < mq_bytes)
			goto out_inode;
		mq_bytes += mq_treesize;
		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	put_ipc_ns(ctx->ipc_ns);
	kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx;

	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
	fc->fs_private = ctx;
	fc->ops = &mqueue_fs_context_ops;
	return 0;
}

static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
	struct mqueue_fs_context *ctx;
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	ctx = fc->fs_private;
	put_ipc_ns(ctx->ipc_ns);
	ctx->ipc_ns = get_ipc_ns(ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

	mnt = fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_free_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg, *nmsg;
	LIST_HEAD(tmp_msg);

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		list_add_tail(&msg->m_list, &tmp_msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
		list_del(&msg->m_list);
		free_msg(msg);
	}

	user = info->user;
	if (user) {
		unsigned long mq_bytes, mq_treesize;

		/* Total amount of bytes accounted for the mqueue */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode;
	struct mq_attr *attr = arg;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				umode_t mode, bool excl)
{
	return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine handles a read(2) on a queue file.  Rather than
 * implementing some flavour of mq_receive() here, we only allow
 * reading the queue size and notification info: the only values that
 * are interesting from the user's point of view and that aren't
 * accessible through the standard mq_* routines.
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}
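
/*
 * Given the format string above, a read of a queue file yields a single
 * line like the following (a sketch with illustrative values; NOTIFY:2
 * corresponds to SIGEV_THREAD):
 *
 *	QSIZE:129        NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 *
 * which userspace can see with a plain cat(1) on a file under the
 * mqueue mount, e.g. "cat /dev/mqueue/myqueue".
 */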

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	__poll_t retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = EPOLLIN | EPOLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts the current task to sleep.  Caller must hold the queue lock;
 * the lock is no longer held on return.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * The next function exists only to keep sys_mq_timedsend() from getting
 * too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * Notification is invoked when a process is registered, no
	 * process is waiting synchronously for a message, AND the state
	 * of the queue changed from empty to not empty.  At this point
	 * we are sure that no one is waiting synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct kernel_siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			clear_siginfo(&sig_i);
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
			   struct timespec64 *ts)
{
	if (get_timespec64(ts, u_abs_timeout))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int prepare_open(struct dentry *dentry, int oflag, int ro,
			umode_t mode, struct filename *name,
			struct mq_attr *attr)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if (d_really_is_negative(dentry)) {
		if (!(oflag & O_CREAT))
			return -ENOENT;
		if (ro)
			return ro;
		audit_inode_parent_hidden(name, dentry->d_parent);
		return vfs_mkobj(dentry, mode & ~current_umask(),
				  mqueue_create_attr, attr);
	}
	/* it already existed */
	audit_inode(name, dentry, 0);
	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
		return -EEXIST;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return -EINVAL;
	acc = oflag2acc[oflag & O_ACCMODE];
	return inode_permission(d_inode(dentry), acc);
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	struct filename *name;
	struct path path;
	int fd, error;
	int ro;

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);
	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
	if (!error) {
		struct file *file = dentry_open(&path, oflag, current_cred());
		if (!IS_ERR(file))
			fd_install(fd, file);
		else
			error = PTR_ERR(file);
	}
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;
	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}
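
/*
 * A minimal userspace sketch of the syscalls implemented in this file
 * (illustrative only; error handling is elided and the queue name
 * "/test" is an arbitrary example):
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 64 };
 *	mqd_t mq = mq_open("/test", O_RDWR | O_CREAT, 0600, &attr);
 *	char buf[64];
 *	unsigned int prio;
 *
 *	mq_send(mq, "hello", 6, 1);                <- do_mq_timedsend()
 *	mq_receive(mq, buf, sizeof(buf), &prio);   <- do_mq_timedreceive()
 *	mq_close(mq);
 *	mq_unlink("/test");                        <- sys_mq_unlink()
 */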

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers.  A sender checks that list before adding the new
 * message into the message array.  If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.  The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY.  Now the receiver can continue.
 * - Wake up the process after the lock is dropped.  Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	wake_q_add(wake_q, receiver->task);
	/*
	 * Rely on the implicit cmpxchg barrier from wake_q_add such
	 * that we can ensure that updating receiver->state is the last
	 * write operation: As once set, the receiver can continue,
	 * and if we don't have the reference count from the wake_q,
	 * yet, at that point we can later have a use-after-free
	 * condition and bogus wakeup.
	 */
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (we hold a free slot for
 * sure). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	list_del(&sender->list);
	wake_q_add(wake_q, sender->task);
	sender->state = STATE_READY;
}

static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
		size_t msg_len, unsigned int msg_prio,
		struct timespec64 *ts)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}
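
/*
 * Note the asymmetry in the EMSGSIZE checks: do_mq_timedsend() above
 * rejects messages longer than mq_msgsize, while do_mq_timedreceive()
 * below rejects receive buffers shorter than mq_msgsize, since the
 * caller must always be able to take the largest possible message.
 * For a queue created with mq_msgsize = 64, for instance, a 65-byte
 * send and a receive into a 63-byte buffer both fail with -EMSGSIZE.
 */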

static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
		size_t msg_len, unsigned int __user *u_msg_prio,
		struct timespec64 *ts)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}

/*
 * Note: a request to deregister (with NULL as the notification pointer)
 * from a caller that is not the current owner of the notification is
 * silently ignored.  This case isn't explicitly defined in POSIX.
 */
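
/*
 * For SIGEV_THREAD, the kernel side implements only the wakeup plumbing:
 * userspace (typically the C library) passes a netlink socket fd in
 * sigev_signo and a NOTIFY_COOKIE_LEN byte cookie in
 * sigev_value.sival_ptr.  The cookie is attached to that socket below
 * and sent back with its last byte set to NOTIFY_WOKENUP (see
 * __do_notify()) when a message arrives on the empty queue, at which
 * point the library spawns the notification thread.
 */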
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc)
				return -ENOMEM;

			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto free_skb;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				goto free_skb;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret)
				return ret;
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else
free_skb:
		dev_kfree_skb(nc);

	return ret;
}

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}
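
/*
 * Only O_NONBLOCK can be changed through this path; any other bit in
 * mq_flags is rejected with -EINVAL above, and mq_maxmsg/mq_msgsize are
 * fixed at creation time.  A minimal userspace sketch (illustrative,
 * assuming mq is an open message queue descriptor):
 *
 *	struct mq_attr attr = { .mq_flags = O_NONBLOCK }, old;
 *	mq_setattr(mq, &attr, &old);	<- switch to non-blocking mode
 */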

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
		return -EFAULT;
	return 0;
}

#ifdef CONFIG_COMPAT

struct compat_mq_attr {
	compat_long_t mq_flags;      /* message queue flags		      */
	compat_long_t mq_maxmsg;     /* maximum number of messages	      */
	compat_long_t mq_msgsize;    /* maximum message size		      */
	compat_long_t mq_curmsgs;    /* number of messages currently queued  */
	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
			const struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	if (copy_from_user(&v, uattr, sizeof(*uattr)))
		return -EFAULT;

	memset(attr, 0, sizeof(*attr));
	attr->mq_flags = v.mq_flags;
	attr->mq_maxmsg = v.mq_maxmsg;
	attr->mq_msgsize = v.mq_msgsize;
	attr->mq_curmsgs = v.mq_curmsgs;
	return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
			struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	memset(&v, 0, sizeof(v));
	v.mq_flags = attr->mq_flags;
	v.mq_maxmsg = attr->mq_maxmsg;
	v.mq_msgsize = attr->mq_msgsize;
	v.mq_curmsgs = attr->mq_curmsgs;
	if (copy_to_user(uattr, &v, sizeof(*uattr)))
		return -EFAULT;
	return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
		       int, oflag, compat_mode_t, mode,
		       struct compat_mq_attr __user *, u_attr)
{
	struct mq_attr attr, *p = NULL;
	if (u_attr && oflag & O_CREAT) {
		p = &attr;
		if (get_compat_mq_attr(&attr, u_attr))
			return -EFAULT;
	}
	return do_mq_open(u_name, oflag, mode, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		       const struct compat_sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (get_compat_sigevent(&n, u_notification))
			return -EFAULT;
		if (n.sigev_notify == SIGEV_THREAD)
			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		       const struct compat_mq_attr __user *, u_mqstat,
		       struct compat_mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (get_compat_mq_attr(new, u_mqstat))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (put_compat_mq_attr(old, u_omqstat))
		return -EFAULT;
	return 0;
}
#endif

#ifdef CONFIG_COMPAT_32BIT_TIME
static int compat_prepare_timeout(const struct old_timespec32 __user *p,
				   struct timespec64 *ts)
{
	if (get_old_timespec32(ts, p))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
		const char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int, msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
		char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int __user *, u_msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
#endif

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.free_inode = mqueue_free_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static const struct fs_context_operations mqueue_fs_context_ops = {
	.free		= mqueue_fs_context_free,
	.get_tree	= mqueue_get_tree,
};

static struct file_system_type mqueue_fs_type = {
	.name			= "mqueue",
	.init_fs_context	= mqueue_init_fs_context,
	.kill_sb		= kill_litter_super,
	.fs_flags		= FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	struct vfsmount *m;

	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default  = DFLT_MSGSIZE;

	m = mq_create_mount(ns);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ns->mq_mnt = m;
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);