/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak  (golbi@mat.uni.torun.pl)
 *                          Michal Wronski        (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas         (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul        (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson         (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

struct mqueue_fs_context {
	struct ipc_namespace	*ipc_ns;
};

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

/*
 * Locking:
 *
 * Accesses to a message queue are synchronized by acquiring info->lock.
 *
 * There are two notable exceptions:
 * - The actual wakeup of a sleeping task is performed using the wake_q
 *   framework. info->lock is already released when wake_up_q is called.
 * - The exit codepaths after sleeping check ext_wait_queue->state without
 *   any locks. If it is STATE_READY, then the syscall is completed without
 *   acquiring info->lock.
 *
 * MQ_BARRIER:
 * To achieve proper release/acquire memory barrier pairing, the state is set
 * to STATE_READY with smp_store_release(), and it is read with READ_ONCE
 * followed by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe()
 * is used.
 *
 * This prevents the following races:
 *
 * 1) With the simple wake_q_add(), the task could be gone already before
 *    the increase of the reference happens
 * Thread A
 *				Thread B
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				wake_q_add(A)
 *				if (cmpxchg()) // success
 *				   ->state = STATE_READY (reordered)
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * sysret to user space
 * sys_exit()
 *				get_task_struct() // UaF
 *
 * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
 * the smp_store_release() that does ->state = STATE_READY.
 *
 * 2) Without proper _release/_acquire barriers, the woken up task
 *    could read stale data
 *
 * Thread A
 *				Thread B
 * do_mq_timedreceive
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				state = STATE_READY;
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * msg_ptr = wait.msg;		// Access to stale data!
 *				receiver->msg = message; (reordered)
 *
 * Solution: use _release and _acquire barriers.
 *
 * 3) There is intentionally no barrier when setting current->state
 *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
 *    release memory barrier, and the wakeup is triggered when holding
 *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
 *    acquire memory barrier.
 */
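
/*
 * Editor's sketch (not part of the original source): the pairing described
 * above reduces to the following pattern, using names from this file.
 *
 * Waker, called under info->lock (see __pipelined_op()):
 *	get_task_struct(this->task);
 *	smp_store_release(&this->state, STATE_READY);
 *	wake_q_add_safe(wake_q, this->task);
 *
 * Sleeper, lockless exit path (see wq_sleep()):
 *	if (READ_ONCE(ewp->state) == STATE_READY) {
 *		smp_acquire__after_ctrl_dep();
 *		return 0;	// ewp->msg is guaranteed to be visible here
 *	}
 */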

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct rb_node *msg_tree_rightmost;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	bool rightmost = true;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;

	if (rightmost)
		info->msg_tree_rightmost = &leaf->rb_node;

	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}
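
/*
 * Editor's illustration (not in the original source): after sends at
 * priorities 0, 5 and 5, the tree above holds two leaf nodes, keyed 0 and
 * 5; the priority-5 leaf carries both of its messages in FIFO order on its
 * msg_list. msg_get() below always starts from msg_tree_rightmost, so
 * messages are delivered highest priority first, and oldest first within a
 * priority.
 */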

static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
				  struct mqueue_inode_info *info)
{
	struct rb_node *node = &leaf->rb_node;

	if (info->msg_tree_rightmost == node)
		info->msg_tree_rightmost = rb_prev(node);

	rb_erase(node, &info->msg_tree);
	if (info->node_cache) {
		kfree(leaf);
	} else {
		info->node_cache = leaf;
	}
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	/*
	 * During insert, low priorities go to the left and high to the
	 * right. On receive, we want the highest priorities first, so
	 * walk all the way to the right.
	 */
	parent = info->msg_tree_rightmost;
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		msg_tree_erase(leaf, info);
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			msg_tree_erase(leaf, info);
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->msg_tree_rightmost = NULL;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns. We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities. However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */
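
		/*
		 * Editor's worked example (illustrative only; both structure
		 * sizes vary by architecture and kernel version): with
		 * mq_maxmsg = 10 and mq_msgsize = 8192, assuming
		 * sizeof(struct msg_msg) == 48 and
		 * sizeof(struct posix_msg_tree_node) == 56:
		 *
		 *	mq_treesize = 10 * 48 + min(10, MQ_PRIO_MAX) * 56
		 *		    = 1040
		 *	mq_bytes    = 10 * 8192 + 1040 = 82960
		 *
		 * mq_bytes is what gets charged against RLIMIT_MSGQUEUE
		 * below.
		 */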

		ret = -EINVAL;
		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
			goto out_inode;
		if (capable(CAP_SYS_RESOURCE)) {
			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
				goto out_inode;
		} else {
			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
			    info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
				goto out_inode;
		}
		ret = -EOVERFLOW;
		/* check for overflow */
		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
			goto out_inode;
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);
		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
		if (mq_bytes + mq_treesize < mq_bytes)
			goto out_inode;
		mq_bytes += mq_treesize;
		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	put_ipc_ns(ctx->ipc_ns);
	kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx;

	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
	fc->fs_private = ctx;
	fc->ops = &mqueue_fs_context_ops;
	return 0;
}

static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
	struct mqueue_fs_context *ctx;
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	ctx = fc->fs_private;
	put_ipc_ns(ctx->ipc_ns);
	ctx->ipc_ns = get_ipc_ns(ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

	mnt = fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_free_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg, *nmsg;
	LIST_HEAD(tmp_msg);

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		list_add_tail(&msg->m_list, &tmp_msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
		list_del(&msg->m_list);
		free_msg(msg);
	}

	user = info->user;
	if (user) {
		unsigned long mq_bytes, mq_treesize;

		/* Total amount of bytes accounted for the mqueue */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode;
	struct mq_attr *attr = arg;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
			 umode_t mode, bool excl)
{
	return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This is the routine for a read(2) on a queue file. To avoid implementing
 * some form of mq_receive here, we allow reading only the queue size and
 * the notification info (the only values that are interesting from the
 * user's point of view and aren't accessible through the standard
 * routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
		 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
		 info->qsize,
		 info->notify_owner ? info->notify.sigev_notify : 0,
		 (info->notify_owner &&
		  info->notify.sigev_notify == SIGEV_SIGNAL) ?
			info->notify.sigev_signo : 0,
		 pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				      strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime =
						current_time(file_inode(filp));
	return ret;
}
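
/*
 * Editor's example (not in the original source): with the filesystem
 * mounted at /dev/mqueue and a queue named /testq, reading the file
 *
 *	$ cat /dev/mqueue/testq
 *	QSIZE:129    NOTIFY:2    SIGNO:0    NOTIFY_PID:8260
 *
 * shows the summary produced by mqueue_read_file() above (the values are
 * illustrative).
 */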

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	__poll_t retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = EPOLLIN | EPOLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}
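
/*
 * Editor's note (not in the original source): because an mqd_t is a file
 * descriptor on Linux, it can be watched with select/poll/epoll (a
 * non-portable extension). Per the handler above, EPOLLIN is reported
 * while the queue is non-empty and EPOLLOUT while it has free space.
 */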

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
		   struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		/* memory barrier not required, we hold info->lock */
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (READ_ONCE(ewp->state) == STATE_READY) {
			/* see MQ_BARRIER for purpose/pairing */
			smp_acquire__after_ctrl_dep();
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);

		/* we hold info->lock, so no memory barrier required */
		if (READ_ONCE(ewp->state) == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to keep sys_mq_timedsend() from getting
 * too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is a registered process and there is no process
	 * waiting synchronously for a message AND the state of the queue
	 * changed from empty to not empty. Here we are sure that no one is
	 * waiting synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct kernel_siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			clear_siginfo(&sig_i);
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
			   struct timespec64 *ts)
{
	if (get_timespec64(ts, u_abs_timeout))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int prepare_open(struct dentry *dentry, int oflag, int ro,
			umode_t mode, struct filename *name,
			struct mq_attr *attr)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if (d_really_is_negative(dentry)) {
		if (!(oflag & O_CREAT))
			return -ENOENT;
		if (ro)
			return ro;
		audit_inode_parent_hidden(name, dentry->d_parent);
		return vfs_mkobj(dentry, mode & ~current_umask(),
				 mqueue_create_attr, attr);
	}
	/* it already existed */
	audit_inode(name, dentry, 0);
	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
		return -EEXIST;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return -EINVAL;
	acc = oflag2acc[oflag & O_ACCMODE];
	return inode_permission(d_inode(dentry), acc);
}
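
/*
 * Editor's note (not in the original source): do_mq_open() below is what
 * backs mq_open(3). A typical userspace call such as
 *
 *	mqd_t q = mq_open("/testq", O_CREAT | O_RDWR, 0600, NULL);
 *
 * arrives here with the leading '/' already stripped by the C library
 * (glibc passes name + 1), so the lookup below is for "testq" relative to
 * the mqueue mount root.
 */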

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	struct filename *name;
	struct path path;
	int fd, error;
	int ro;

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);
	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
	if (!error) {
		struct file *file = dentry_open(&path, oflag, current_cred());
		if (!IS_ERR(file))
			fd_install(fd, file);
		else
			error = PTR_ERR(file);
	}
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;
	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

static inline void __pipelined_op(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct ext_wait_queue *this)
{
	list_del(&this->list);
	get_task_struct(this->task);

	/* see MQ_BARRIER for purpose/pairing */
	smp_store_release(&this->state, STATE_READY);
	wake_q_add_safe(wake_q, this->task);
}

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	__pipelined_op(wake_q, info, receiver);
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (we are guaranteed one
 * free slot). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	__pipelined_op(wake_q, info, sender);
}

static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
		size_t msg_len, unsigned int msg_prio,
		struct timespec64 *ts)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
1091 */ 1092 if (!info->node_cache) 1093 new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); 1094 1095 spin_lock(&info->lock); 1096 1097 if (!info->node_cache && new_leaf) { 1098 /* Save our speculative allocation into the cache */ 1099 INIT_LIST_HEAD(&new_leaf->msg_list); 1100 info->node_cache = new_leaf; 1101 new_leaf = NULL; 1102 } else { 1103 kfree(new_leaf); 1104 } 1105 1106 if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) { 1107 if (f.file->f_flags & O_NONBLOCK) { 1108 ret = -EAGAIN; 1109 } else { 1110 wait.task = current; 1111 wait.msg = (void *) msg_ptr; 1112 1113 /* memory barrier not required, we hold info->lock */ 1114 WRITE_ONCE(wait.state, STATE_NONE); 1115 ret = wq_sleep(info, SEND, timeout, &wait); 1116 /* 1117 * wq_sleep must be called with info->lock held, and 1118 * returns with the lock released 1119 */ 1120 goto out_free; 1121 } 1122 } else { 1123 receiver = wq_get_first_waiter(info, RECV); 1124 if (receiver) { 1125 pipelined_send(&wake_q, info, msg_ptr, receiver); 1126 } else { 1127 /* adds message to the queue */ 1128 ret = msg_insert(msg_ptr, info); 1129 if (ret) 1130 goto out_unlock; 1131 __do_notify(info); 1132 } 1133 inode->i_atime = inode->i_mtime = inode->i_ctime = 1134 current_time(inode); 1135 } 1136 out_unlock: 1137 spin_unlock(&info->lock); 1138 wake_up_q(&wake_q); 1139 out_free: 1140 if (ret) 1141 free_msg(msg_ptr); 1142 out_fput: 1143 fdput(f); 1144 out: 1145 return ret; 1146 } 1147 1148 static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, 1149 size_t msg_len, unsigned int __user *u_msg_prio, 1150 struct timespec64 *ts) 1151 { 1152 ssize_t ret; 1153 struct msg_msg *msg_ptr; 1154 struct fd f; 1155 struct inode *inode; 1156 struct mqueue_inode_info *info; 1157 struct ext_wait_queue wait; 1158 ktime_t expires, *timeout = NULL; 1159 struct posix_msg_tree_node *new_leaf = NULL; 1160 1161 if (ts) { 1162 expires = timespec64_to_ktime(*ts); 1163 timeout = &expires; 1164 } 1165 1166 audit_mq_sendrecv(mqdes, msg_len, 0, ts); 1167 1168 f = fdget(mqdes); 1169 if (unlikely(!f.file)) { 1170 ret = -EBADF; 1171 goto out; 1172 } 1173 1174 inode = file_inode(f.file); 1175 if (unlikely(f.file->f_op != &mqueue_file_operations)) { 1176 ret = -EBADF; 1177 goto out_fput; 1178 } 1179 info = MQUEUE_I(inode); 1180 audit_file(f.file); 1181 1182 if (unlikely(!(f.file->f_mode & FMODE_READ))) { 1183 ret = -EBADF; 1184 goto out_fput; 1185 } 1186 1187 /* checks if buffer is big enough */ 1188 if (unlikely(msg_len < info->attr.mq_msgsize)) { 1189 ret = -EMSGSIZE; 1190 goto out_fput; 1191 } 1192 1193 /* 1194 * msg_insert really wants us to have a valid, spare node struct so 1195 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will 1196 * fall back to that if necessary. 
1197 */ 1198 if (!info->node_cache) 1199 new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); 1200 1201 spin_lock(&info->lock); 1202 1203 if (!info->node_cache && new_leaf) { 1204 /* Save our speculative allocation into the cache */ 1205 INIT_LIST_HEAD(&new_leaf->msg_list); 1206 info->node_cache = new_leaf; 1207 } else { 1208 kfree(new_leaf); 1209 } 1210 1211 if (info->attr.mq_curmsgs == 0) { 1212 if (f.file->f_flags & O_NONBLOCK) { 1213 spin_unlock(&info->lock); 1214 ret = -EAGAIN; 1215 } else { 1216 wait.task = current; 1217 1218 /* memory barrier not required, we hold info->lock */ 1219 WRITE_ONCE(wait.state, STATE_NONE); 1220 ret = wq_sleep(info, RECV, timeout, &wait); 1221 msg_ptr = wait.msg; 1222 } 1223 } else { 1224 DEFINE_WAKE_Q(wake_q); 1225 1226 msg_ptr = msg_get(info); 1227 1228 inode->i_atime = inode->i_mtime = inode->i_ctime = 1229 current_time(inode); 1230 1231 /* There is now free space in queue. */ 1232 pipelined_receive(&wake_q, info); 1233 spin_unlock(&info->lock); 1234 wake_up_q(&wake_q); 1235 ret = 0; 1236 } 1237 if (ret == 0) { 1238 ret = msg_ptr->m_ts; 1239 1240 if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) || 1241 store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) { 1242 ret = -EFAULT; 1243 } 1244 free_msg(msg_ptr); 1245 } 1246 out_fput: 1247 fdput(f); 1248 out: 1249 return ret; 1250 } 1251 1252 SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, 1253 size_t, msg_len, unsigned int, msg_prio, 1254 const struct __kernel_timespec __user *, u_abs_timeout) 1255 { 1256 struct timespec64 ts, *p = NULL; 1257 if (u_abs_timeout) { 1258 int res = prepare_timeout(u_abs_timeout, &ts); 1259 if (res) 1260 return res; 1261 p = &ts; 1262 } 1263 return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p); 1264 } 1265 1266 SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, 1267 size_t, msg_len, unsigned int __user *, u_msg_prio, 1268 const struct __kernel_timespec __user *, u_abs_timeout) 1269 { 1270 struct timespec64 ts, *p = NULL; 1271 if (u_abs_timeout) { 1272 int res = prepare_timeout(u_abs_timeout, &ts); 1273 if (res) 1274 return res; 1275 p = &ts; 1276 } 1277 return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p); 1278 } 1279 1280 /* 1281 * Notes: the case when user wants us to deregister (with NULL as pointer) 1282 * and he isn't currently owner of notification, will be silently discarded. 1283 * It isn't explicitly defined in the POSIX. 1284 */ 1285 static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification) 1286 { 1287 int ret; 1288 struct fd f; 1289 struct sock *sock; 1290 struct inode *inode; 1291 struct mqueue_inode_info *info; 1292 struct sk_buff *nc; 1293 1294 audit_mq_notify(mqdes, notification); 1295 1296 nc = NULL; 1297 sock = NULL; 1298 if (notification != NULL) { 1299 if (unlikely(notification->sigev_notify != SIGEV_NONE && 1300 notification->sigev_notify != SIGEV_SIGNAL && 1301 notification->sigev_notify != SIGEV_THREAD)) 1302 return -EINVAL; 1303 if (notification->sigev_notify == SIGEV_SIGNAL && 1304 !valid_signal(notification->sigev_signo)) { 1305 return -EINVAL; 1306 } 1307 if (notification->sigev_notify == SIGEV_THREAD) { 1308 long timeo; 1309 1310 /* create the notify skb */ 1311 nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); 1312 if (!nc) 1313 return -ENOMEM; 1314 1315 if (copy_from_user(nc->data, 1316 notification->sigev_value.sival_ptr, 1317 NOTIFY_COOKIE_LEN)) { 1318 ret = -EFAULT; 1319 goto free_skb; 1320 } 1321 1322 /* TODO: add a header? 

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}

/*
 * Notes: the case when a user wants us to deregister (with a NULL pointer)
 * and is not the current owner of the notification will be silently
 * discarded. This behavior is not explicitly defined by POSIX.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc)
				return -ENOMEM;

			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto free_skb;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				goto free_skb;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret)
				return ret;
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else
free_skb:
		dev_kfree_skb(nc);

	return ret;
}

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}
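
/*
 * Editor's note (not in the original source): as enforced above, only the
 * O_NONBLOCK flag can be changed after creation; mq_maxmsg and mq_msgsize
 * are fixed when the queue is made. From userspace, toggling non-blocking
 * mode looks like:
 *
 *	struct mq_attr attr = { .mq_flags = O_NONBLOCK };
 *	mq_setattr(q, &attr, NULL);	// other fields are ignored on set
 */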

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
		return -EFAULT;
	return 0;
}

#ifdef CONFIG_COMPAT

struct compat_mq_attr {
	compat_long_t mq_flags;      /* message queue flags		      */
	compat_long_t mq_maxmsg;     /* maximum number of messages	      */
	compat_long_t mq_msgsize;    /* maximum message size		      */
	compat_long_t mq_curmsgs;    /* number of messages currently queued  */
	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
			const struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	if (copy_from_user(&v, uattr, sizeof(*uattr)))
		return -EFAULT;

	memset(attr, 0, sizeof(*attr));
	attr->mq_flags = v.mq_flags;
	attr->mq_maxmsg = v.mq_maxmsg;
	attr->mq_msgsize = v.mq_msgsize;
	attr->mq_curmsgs = v.mq_curmsgs;
	return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
			struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	memset(&v, 0, sizeof(v));
	v.mq_flags = attr->mq_flags;
	v.mq_maxmsg = attr->mq_maxmsg;
	v.mq_msgsize = attr->mq_msgsize;
	v.mq_curmsgs = attr->mq_curmsgs;
	if (copy_to_user(uattr, &v, sizeof(*uattr)))
		return -EFAULT;
	return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
		       int, oflag, compat_mode_t, mode,
		       struct compat_mq_attr __user *, u_attr)
{
	struct mq_attr attr, *p = NULL;
	if (u_attr && oflag & O_CREAT) {
		p = &attr;
		if (get_compat_mq_attr(&attr, u_attr))
			return -EFAULT;
	}
	return do_mq_open(u_name, oflag, mode, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		       const struct compat_sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (get_compat_sigevent(&n, u_notification))
			return -EFAULT;
		if (n.sigev_notify == SIGEV_THREAD)
			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		       const struct compat_mq_attr __user *, u_mqstat,
		       struct compat_mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (get_compat_mq_attr(new, u_mqstat))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (put_compat_mq_attr(old, u_omqstat))
		return -EFAULT;
	return 0;
}
#endif

#ifdef CONFIG_COMPAT_32BIT_TIME
static int compat_prepare_timeout(const struct old_timespec32 __user *p,
				  struct timespec64 *ts)
{
	if (get_old_timespec32(ts, p))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
		const char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int, msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
		char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int __user *, u_msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
#endif

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.free_inode = mqueue_free_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static const struct fs_context_operations mqueue_fs_context_ops = {
	.free		= mqueue_fs_context_free,
	.get_tree	= mqueue_get_tree,
};

static struct file_system_type mqueue_fs_type = {
	.name			= "mqueue",
	.init_fs_context	= mqueue_init_fs_context,
	.kill_sb		= kill_litter_super,
	.fs_flags		= FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	struct vfsmount *m;

	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default  = DFLT_MSGSIZE;

	m = mq_create_mount(ns);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ns->mq_mnt = m;
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);
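
/*
 * Editor's usage note (not in the original source): the "mqueue"
 * filesystem registered above is conventionally mounted with
 *
 *	mount -t mqueue none /dev/mqueue
 *
 * after which each queue appears as a file under the mount point and can
 * be listed, read (see mqueue_read_file()) and unlinked with the usual
 * tools.
 */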