/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

struct mqueue_fs_context {
	struct ipc_namespace	*ipc_ns;
};

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

/*
 * Locking:
 *
 * Accesses to a message queue are synchronized by acquiring info->lock.
 *
 * There are two notable exceptions:
 * - The actual wakeup of a sleeping task is performed using the wake_q
 *   framework. info->lock is already released when wake_up_q is called.
 * - The exit codepaths after sleeping check ext_wait_queue->state without
 *   any locks. If it is STATE_READY, then the syscall is completed without
 *   acquiring info->lock.
 *
 * MQ_BARRIER:
 * To achieve proper release/acquire memory barrier pairing, the state is set
 * to STATE_READY with smp_store_release(), and it is read with READ_ONCE()
 * followed by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe()
 * is used.
 *
 * This prevents the following races:
 *
 * 1) With the simple wake_q_add(), the task could be gone already before
 *    the increase of the reference happens
 * Thread A
 *				Thread B
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				wake_q_add(A)
 *				if (cmpxchg()) // success
 *				   ->state = STATE_READY (reordered)
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * sysret to user space
 * sys_exit()
 *				get_task_struct() // UaF
 *
 * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
 * the smp_store_release() that does ->state = STATE_READY.
 *
 * 2) Without proper _release/_acquire barriers, the woken up task
 *    could read stale data
 *
 * Thread A
 *				Thread B
 * do_mq_timedreceive
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				state = STATE_READY;
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * msg_ptr = wait.msg;		// Access to stale data!
 *				receiver->msg = message; (reordered)
 *
 * Solution: use _release and _acquire barriers.
 *
 * 3) There is intentionally no barrier when setting current->state
 *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
 *    release memory barrier, and the wakeup is triggered when holding
 *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
 *    acquire memory barrier.
 */
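
/*
 * Editorial sketch (not part of the implementation): the MQ_BARRIER
 * pairing above, reduced to its essentials. The waker publishes the
 * message and then the state with a release store; a sleeper that
 * observes STATE_READY without taking info->lock must issue an acquire
 * barrier before touching the message:
 *
 *	waker (info->lock held)		sleeper (lockless exit path)
 *	get_task_struct(this->task);
 *	this->msg = message;
 *	smp_store_release(&this->state,
 *			  STATE_READY);
 *	wake_q_add_safe(wake_q,
 *			this->task);
 *					if (READ_ONCE(wait.state) ==
 *					    STATE_READY) {
 *						smp_acquire__after_ctrl_dep();
 *						msg = wait.msg; // not stale
 *					}
 */
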
struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct rb_node *msg_tree_rightmost;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	bool rightmost = true;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;

	if (rightmost)
		info->msg_tree_rightmost = &leaf->rb_node;

	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
				  struct mqueue_inode_info *info)
{
	struct rb_node *node = &leaf->rb_node;

	if (info->msg_tree_rightmost == node)
		info->msg_tree_rightmost = rb_prev(node);

	rb_erase(node, &info->msg_tree);
	if (info->node_cache)
		kfree(leaf);
	else
		info->node_cache = leaf;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	/*
	 * During insert, low priorities go to the left and high to the
	 * right.  On receive, we want the highest priorities first, so
	 * walk all the way to the right.
	 */
	parent = info->msg_tree_rightmost;
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		msg_tree_erase(leaf, info);
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			msg_tree_erase(leaf, info);
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}
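
/*
 * Illustrative example (editorial): after sending four messages with
 * priorities 5, 1, 5, 1 (in that order), the tree holds two
 * posix_msg_tree_node buckets:
 *
 *	prio 5: msg0 -> msg2	(info->msg_tree_rightmost points here)
 *	prio 1: msg1 -> msg3
 *
 * msg_insert() appends to the tail of a bucket and msg_get() takes from
 * the head of the rightmost bucket, so the receive order is msg0, msg2,
 * msg1, msg3: highest priority first, FIFO within a priority, as POSIX
 * requires.
 */
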
static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->msg_tree_rightmost = NULL;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */

		ret = -EINVAL;
		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
			goto out_inode;
		if (capable(CAP_SYS_RESOURCE)) {
			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
				goto out_inode;
		} else {
			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
			    info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
				goto out_inode;
		}
		ret = -EOVERFLOW;
		/* check for overflow */
		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
			goto out_inode;
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);
		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
		if (mq_bytes + mq_treesize < mq_bytes)
			goto out_inode;
		mq_bytes += mq_treesize;
		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}
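
/*
 * Worked example (editorial) of the accounting above, assuming a 64-bit
 * build where sizeof(struct msg_msg) == 48 and
 * sizeof(struct posix_msg_tree_node) == 56 (both sizes vary by config):
 * a queue created with mq_maxmsg = 10 and mq_msgsize = 8192 charges
 *
 *	mq_treesize = 10 * 48 + min(10, MQ_PRIO_MAX) * 56
 *		    = 480 + 560 = 1040 bytes
 *	mq_bytes    = 10 * 8192 + 1040 = 82960 bytes
 *
 * against the creating user's RLIMIT_MSGQUEUE, whether or not any
 * message is ever queued.
 */
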
static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	put_ipc_ns(ctx->ipc_ns);
	kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx;

	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
	fc->fs_private = ctx;
	fc->ops = &mqueue_fs_context_ops;
	return 0;
}

static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
	struct mqueue_fs_context *ctx;
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	ctx = fc->fs_private;
	put_ipc_ns(ctx->ipc_ns);
	ctx->ipc_ns = get_ipc_ns(ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

	mnt = fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_free_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg, *nmsg;
	LIST_HEAD(tmp_msg);

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		list_add_tail(&msg->m_list, &tmp_msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
		list_del(&msg->m_list);
		free_msg(msg);
	}

	user = info->user;
	if (user) {
		unsigned long mq_bytes, mq_treesize;

		/* Total amount of bytes accounted for the mqueue */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode;
	struct mq_attr *attr = arg;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
			 umode_t mode, bool excl)
{
	return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine handles a read(2) on a queue file. Rather than
 * implementing some flavour of mq_receive here, we expose only the
 * queue size and the notification info: the only values that are
 * interesting from the user's point of view and not accessible through
 * the standard mq_* routines.
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
		 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
		 info->qsize,
		 info->notify_owner ? info->notify.sigev_notify : 0,
		 (info->notify_owner &&
		  info->notify.sigev_notify == SIGEV_SIGNAL) ?
			info->notify.sigev_signo : 0,
		 pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				      strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}
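
/*
 * Example (editorial): with the queue filesystem mounted in the usual
 * place, reading a queue file yields one fixed-format line, e.g.
 *
 *	$ cat /dev/mqueue/myqueue
 *	QSIZE:129     NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 *
 * QSIZE is the byte total of all queued messages; the NOTIFY* fields
 * are meaningful only while some process is registered for
 * notification on the queue.
 */
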
static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	__poll_t retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = EPOLLIN | EPOLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
		   struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		/* memory barrier not required, we hold info->lock */
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (READ_ONCE(ewp->state) == STATE_READY) {
			/* see MQ_BARRIER for purpose/pairing */
			smp_acquire__after_ctrl_dep();
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);

		/* we hold info->lock, so no memory barrier required */
		if (READ_ONCE(ewp->state) == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}
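
/*
 * Editorial note on set_cookie() and the notify skb: for SIGEV_THREAD
 * the kernel does not start a thread itself. Userspace (glibc, in
 * practice) passes a netlink socket fd in sigev_signo and a
 * NOTIFY_COOKIE_LEN buffer in sigev_value (see do_mq_notify() below);
 * __do_notify() and remove_notification() stamp the last byte of that
 * buffer with NOTIFY_WOKENUP or NOTIFY_REMOVED and send it back on the
 * socket, where the library's helper thread acts on it.
 */
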
/*
 * This function exists only to keep sys_mq_timedsend() from growing
 * too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is a registered process and there is no
	 * process waiting synchronously for a message AND the state of
	 * the queue changed from empty to not empty. Here we are sure
	 * that no one is waiting synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct kernel_siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			clear_siginfo(&sig_i);
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
			   struct timespec64 *ts)
{
	if (get_timespec64(ts, u_abs_timeout))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int prepare_open(struct dentry *dentry, int oflag, int ro,
			umode_t mode, struct filename *name,
			struct mq_attr *attr)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if (d_really_is_negative(dentry)) {
		if (!(oflag & O_CREAT))
			return -ENOENT;
		if (ro)
			return ro;
		audit_inode_parent_hidden(name, dentry->d_parent);
		return vfs_mkobj(dentry, mode & ~current_umask(),
				 mqueue_create_attr, attr);
	}
	/* it already existed */
	audit_inode(name, dentry, 0);
	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
		return -EEXIST;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return -EINVAL;
	acc = oflag2acc[oflag & O_ACCMODE];
	return inode_permission(d_inode(dentry), acc);
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	struct filename *name;
	struct path path;
	int fd, error;
	int ro;

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);
	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
	if (!error) {
		struct file *file = dentry_open(&path, oflag, current_cred());
		if (!IS_ERR(file))
			fd_install(fd, file);
		else
			error = PTR_ERR(file);
	}
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}
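
/*
 * Userspace view (editorial sketch; the queue name is hypothetical):
 * the path above implements what a typical caller reaches via libc:
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 8192 };
 *	mqd_t mq = mq_open("/myqueue", O_CREAT | O_RDWR, 0600, &attr);
 *	...
 *	mq_close(mq);		// just close(2) on the descriptor
 *	mq_unlink("/myqueue");	// removes the name, see sys_mq_unlink
 *
 * The returned mqd_t is an ordinary file descriptor (created with
 * O_CLOEXEC above) referring to an inode on the mqueue filesystem.
 */
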
SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;
	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

static inline void __pipelined_op(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct ext_wait_queue *this)
{
	list_del(&this->list);
	get_task_struct(this->task);

	/* see MQ_BARRIER for purpose/pairing */
	smp_store_release(&this->state, STATE_READY);
	wake_q_add_safe(wake_q, this->task);
}

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	__pipelined_op(wake_q, info, receiver);
}

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
 * its message and insert it into the queue (we are guaranteed one free
 * slot). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	__pipelined_op(wake_q, info, sender);
}

static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
		size_t msg_len, unsigned int msg_prio,
		struct timespec64 *ts)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;

			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}
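
/*
 * Userspace view (editorial sketch): a blocking send with a two-second
 * absolute timeout, as handled by the function above:
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 2;
 *	if (mq_timedsend(mq, buf, len, prio, &ts) == -1 &&
 *	    errno == ETIMEDOUT)
 *		...	// the queue stayed full for the whole two seconds
 *
 * Note the timeout is an absolute CLOCK_REALTIME value, matching
 * schedule_hrtimeout_range_clock() in wq_sleep().
 */
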
static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
		size_t msg_len, unsigned int __user *u_msg_prio,
		struct timespec64 *ts)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;

			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
		    store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}
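
/*
 * Userspace view (editorial sketch): the receive buffer must be at
 * least mq_msgsize bytes, or the call above fails with EMSGSIZE:
 *
 *	struct mq_attr attr;
 *	mq_getattr(mq, &attr);
 *	char *buf = malloc(attr.mq_msgsize);
 *	unsigned int prio;
 *	ssize_t n = mq_receive(mq, buf, attr.mq_msgsize, &prio);
 *
 * On success, n is the length of the received message and prio its
 * priority.
 */
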
SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}

/*
 * Notes: a request to deregister (with a NULL pointer) from a caller
 * that is not the current owner of the notification is silently
 * ignored. POSIX does not explicitly define this case.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc)
				return -ENOMEM;

			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto free_skb;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				goto free_skb;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret)
				return ret;
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else
free_skb:
		dev_kfree_skb(nc);

	return ret;
}
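
/*
 * Userspace view (editorial sketch): registering for a signal when the
 * queue goes from empty to non-empty, as handled above:
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	if (mq_notify(mq, &sev) == -1 && errno == EBUSY)
 *		...	// another process is already registered
 *
 * mq_notify(mq, NULL) deregisters; only the current owner's
 * deregistration has any effect (see the note above do_mq_notify()).
 */
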
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}
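
/*
 * Userspace view (editorial sketch): as enforced above, mq_setattr()
 * can only toggle O_NONBLOCK; mq_maxmsg and mq_msgsize are fixed at
 * queue creation:
 *
 *	struct mq_attr newattr = { .mq_flags = O_NONBLOCK }, oldattr;
 *	mq_setattr(mq, &newattr, &oldattr);	// switch to non-blocking
 *	...
 *	mq_setattr(mq, &oldattr, NULL);		// restore previous flags
 */
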
SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
		return -EFAULT;
	return 0;
}

#ifdef CONFIG_COMPAT

struct compat_mq_attr {
	compat_long_t mq_flags;      /* message queue flags */
	compat_long_t mq_maxmsg;     /* maximum number of messages */
	compat_long_t mq_msgsize;    /* maximum message size */
	compat_long_t mq_curmsgs;    /* number of messages currently queued */
	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
			const struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	if (copy_from_user(&v, uattr, sizeof(*uattr)))
		return -EFAULT;

	memset(attr, 0, sizeof(*attr));
	attr->mq_flags = v.mq_flags;
	attr->mq_maxmsg = v.mq_maxmsg;
	attr->mq_msgsize = v.mq_msgsize;
	attr->mq_curmsgs = v.mq_curmsgs;
	return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
			struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	memset(&v, 0, sizeof(v));
	v.mq_flags = attr->mq_flags;
	v.mq_maxmsg = attr->mq_maxmsg;
	v.mq_msgsize = attr->mq_msgsize;
	v.mq_curmsgs = attr->mq_curmsgs;
	if (copy_to_user(uattr, &v, sizeof(*uattr)))
		return -EFAULT;
	return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
		       int, oflag, compat_mode_t, mode,
		       struct compat_mq_attr __user *, u_attr)
{
	struct mq_attr attr, *p = NULL;
	if (u_attr && oflag & O_CREAT) {
		p = &attr;
		if (get_compat_mq_attr(&attr, u_attr))
			return -EFAULT;
	}
	return do_mq_open(u_name, oflag, mode, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		       const struct compat_sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (get_compat_sigevent(&n, u_notification))
			return -EFAULT;
		if (n.sigev_notify == SIGEV_THREAD)
			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		       const struct compat_mq_attr __user *, u_mqstat,
		       struct compat_mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (get_compat_mq_attr(new, u_mqstat))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (put_compat_mq_attr(old, u_omqstat))
		return -EFAULT;
	return 0;
}
#endif

#ifdef CONFIG_COMPAT_32BIT_TIME
static int compat_prepare_timeout(const struct old_timespec32 __user *p,
				  struct timespec64 *ts)
{
	if (get_old_timespec32(ts, p))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
		const char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int, msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
		char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int __user *, u_msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
#endif

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.free_inode = mqueue_free_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static const struct fs_context_operations mqueue_fs_context_ops = {
	.free = mqueue_fs_context_free,
	.get_tree = mqueue_get_tree,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.init_fs_context = mqueue_init_fs_context,
	.kill_sb = kill_litter_super,
	.fs_flags = FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	struct vfsmount *m;

	ns->mq_queues_count = 0;
	ns->mq_queues_max = DFLT_QUEUESMAX;
	ns->mq_msg_max = DFLT_MSGMAX;
	ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
	ns->mq_msg_default = DFLT_MSG;
	ns->mq_msgsize_default = DFLT_MSGSIZE;

	m = mq_create_mount(ns);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ns->mq_mnt = m;
	return 0;
}
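
/*
 * Editorial note: the DFLT_* values above are per-namespace defaults;
 * on the host they are adjustable through the sysctls registered by
 * mq_register_sysctl_table(), conventionally visible as
 * /proc/sys/fs/mqueue/{queues_max,msg_max,msgsize_max,msg_default,
 * msgsize_default}.
 */
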
void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);