1 /* 2 * POSIX message queues filesystem for Linux. 3 * 4 * Copyright (C) 2003,2004 Krzysztof Benedyczak (golbi@mat.uni.torun.pl) 5 * Michal Wronski (michal.wronski@gmail.com) 6 * 7 * Spinlocks: Mohamed Abbas (abbas.mohamed@intel.com) 8 * Lockless receive & send, fd based notify: 9 * Manfred Spraul (manfred@colorfullife.com) 10 * 11 * Audit: George Wilson (ltcgcw@us.ibm.com) 12 * 13 * This file is released under the GPL. 14 */ 15 16 #include <linux/capability.h> 17 #include <linux/init.h> 18 #include <linux/pagemap.h> 19 #include <linux/file.h> 20 #include <linux/mount.h> 21 #include <linux/namei.h> 22 #include <linux/sysctl.h> 23 #include <linux/poll.h> 24 #include <linux/mqueue.h> 25 #include <linux/msg.h> 26 #include <linux/skbuff.h> 27 #include <linux/vmalloc.h> 28 #include <linux/netlink.h> 29 #include <linux/syscalls.h> 30 #include <linux/audit.h> 31 #include <linux/signal.h> 32 #include <linux/mutex.h> 33 #include <linux/nsproxy.h> 34 #include <linux/pid.h> 35 #include <linux/ipc_namespace.h> 36 #include <linux/user_namespace.h> 37 #include <linux/slab.h> 38 39 #include <net/sock.h> 40 #include "util.h" 41 42 #define MQUEUE_MAGIC 0x19800202 43 #define DIRENT_SIZE 20 44 #define FILENT_SIZE 80 45 46 #define SEND 0 47 #define RECV 1 48 49 #define STATE_NONE 0 50 #define STATE_PENDING 1 51 #define STATE_READY 2 52 53 struct posix_msg_tree_node { 54 struct rb_node rb_node; 55 struct list_head msg_list; 56 int priority; 57 }; 58 59 struct ext_wait_queue { /* queue of sleeping tasks */ 60 struct task_struct *task; 61 struct list_head list; 62 struct msg_msg *msg; /* ptr of loaded message */ 63 int state; /* one of STATE_* values */ 64 }; 65 66 struct mqueue_inode_info { 67 spinlock_t lock; 68 struct inode vfs_inode; 69 wait_queue_head_t wait_q; 70 71 struct rb_root msg_tree; 72 struct posix_msg_tree_node *node_cache; 73 struct mq_attr attr; 74 75 struct sigevent notify; 76 struct pid* notify_owner; 77 struct user_namespace *notify_user_ns; 78 struct user_struct *user; /* user who created, for accounting */ 79 struct sock *notify_sock; 80 struct sk_buff *notify_cookie; 81 82 /* for tasks waiting for free space and messages, respectively */ 83 struct ext_wait_queue e_wait_q[2]; 84 85 unsigned long qsize; /* size of queue in memory (sum of all msgs) */ 86 }; 87 88 static const struct inode_operations mqueue_dir_inode_operations; 89 static const struct file_operations mqueue_file_operations; 90 static const struct super_operations mqueue_super_ops; 91 static void remove_notification(struct mqueue_inode_info *info); 92 93 static struct kmem_cache *mqueue_inode_cachep; 94 95 static struct ctl_table_header * mq_sysctl_table; 96 97 static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode) 98 { 99 return container_of(inode, struct mqueue_inode_info, vfs_inode); 100 } 101 102 /* 103 * This routine should be called with the mq_lock held. 104 */ 105 static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode) 106 { 107 return get_ipc_ns(inode->i_sb->s_fs_info); 108 } 109 110 static struct ipc_namespace *get_ns_from_inode(struct inode *inode) 111 { 112 struct ipc_namespace *ns; 113 114 spin_lock(&mq_lock); 115 ns = __get_ns_from_inode(inode); 116 spin_unlock(&mq_lock); 117 return ns; 118 } 119 120 /* Auxiliary functions to manipulate messages' list */ 121 static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) 122 { 123 struct rb_node **p, *parent = NULL; 124 struct posix_msg_tree_node *leaf; 125 126 p = &info->msg_tree.rb_node; 127 while (*p) { 128 parent = *p; 129 leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); 130 131 if (likely(leaf->priority == msg->m_type)) 132 goto insert_msg; 133 else if (msg->m_type < leaf->priority) 134 p = &(*p)->rb_left; 135 else 136 p = &(*p)->rb_right; 137 } 138 if (info->node_cache) { 139 leaf = info->node_cache; 140 info->node_cache = NULL; 141 } else { 142 leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC); 143 if (!leaf) 144 return -ENOMEM; 145 rb_init_node(&leaf->rb_node); 146 INIT_LIST_HEAD(&leaf->msg_list); 147 info->qsize += sizeof(*leaf); 148 } 149 leaf->priority = msg->m_type; 150 rb_link_node(&leaf->rb_node, parent, p); 151 rb_insert_color(&leaf->rb_node, &info->msg_tree); 152 insert_msg: 153 info->attr.mq_curmsgs++; 154 info->qsize += msg->m_ts; 155 list_add_tail(&msg->m_list, &leaf->msg_list); 156 return 0; 157 } 158 159 static inline struct msg_msg *msg_get(struct mqueue_inode_info *info) 160 { 161 struct rb_node **p, *parent = NULL; 162 struct posix_msg_tree_node *leaf; 163 struct msg_msg *msg; 164 165 try_again: 166 p = &info->msg_tree.rb_node; 167 while (*p) { 168 parent = *p; 169 /* 170 * During insert, low priorities go to the left and high to the 171 * right. On receive, we want the highest priorities first, so 172 * walk all the way to the right. 173 */ 174 p = &(*p)->rb_right; 175 } 176 if (!parent) { 177 if (info->attr.mq_curmsgs) { 178 pr_warn_once("Inconsistency in POSIX message queue, " 179 "no tree element, but supposedly messages " 180 "should exist!\n"); 181 info->attr.mq_curmsgs = 0; 182 } 183 return NULL; 184 } 185 leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); 186 if (unlikely(list_empty(&leaf->msg_list))) { 187 pr_warn_once("Inconsistency in POSIX message queue, " 188 "empty leaf node but we haven't implemented " 189 "lazy leaf delete!\n"); 190 rb_erase(&leaf->rb_node, &info->msg_tree); 191 if (info->node_cache) { 192 info->qsize -= sizeof(*leaf); 193 kfree(leaf); 194 } else { 195 info->node_cache = leaf; 196 } 197 goto try_again; 198 } else { 199 msg = list_first_entry(&leaf->msg_list, 200 struct msg_msg, m_list); 201 list_del(&msg->m_list); 202 if (list_empty(&leaf->msg_list)) { 203 rb_erase(&leaf->rb_node, &info->msg_tree); 204 if (info->node_cache) { 205 info->qsize -= sizeof(*leaf); 206 kfree(leaf); 207 } else { 208 info->node_cache = leaf; 209 } 210 } 211 } 212 info->attr.mq_curmsgs--; 213 info->qsize -= msg->m_ts; 214 return msg; 215 } 216 217 static struct inode *mqueue_get_inode(struct super_block *sb, 218 struct ipc_namespace *ipc_ns, umode_t mode, 219 struct mq_attr *attr) 220 { 221 struct user_struct *u = current_user(); 222 struct inode *inode; 223 int ret = -ENOMEM; 224 225 inode = new_inode(sb); 226 if (!inode) 227 goto err; 228 229 inode->i_ino = get_next_ino(); 230 inode->i_mode = mode; 231 inode->i_uid = current_fsuid(); 232 inode->i_gid = current_fsgid(); 233 inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME; 234 235 if (S_ISREG(mode)) { 236 struct mqueue_inode_info *info; 237 unsigned long mq_bytes, mq_treesize; 238 239 inode->i_fop = &mqueue_file_operations; 240 inode->i_size = FILENT_SIZE; 241 /* mqueue specific info */ 242 info = MQUEUE_I(inode); 243 spin_lock_init(&info->lock); 244 init_waitqueue_head(&info->wait_q); 245 INIT_LIST_HEAD(&info->e_wait_q[0].list); 246 INIT_LIST_HEAD(&info->e_wait_q[1].list); 247 info->notify_owner = NULL; 248 info->notify_user_ns = NULL; 249 info->qsize = 0; 250 info->user = NULL; /* set when all is ok */ 251 info->msg_tree = RB_ROOT; 252 info->node_cache = NULL; 253 memset(&info->attr, 0, sizeof(info->attr)); 254 info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max, 255 ipc_ns->mq_msg_default); 256 info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, 257 ipc_ns->mq_msgsize_default); 258 if (attr) { 259 info->attr.mq_maxmsg = attr->mq_maxmsg; 260 info->attr.mq_msgsize = attr->mq_msgsize; 261 } 262 /* 263 * We used to allocate a static array of pointers and account 264 * the size of that array as well as one msg_msg struct per 265 * possible message into the queue size. That's no longer 266 * accurate as the queue is now an rbtree and will grow and 267 * shrink depending on usage patterns. We can, however, still 268 * account one msg_msg struct per message, but the nodes are 269 * allocated depending on priority usage, and most programs 270 * only use one, or a handful, of priorities. However, since 271 * this is pinned memory, we need to assume worst case, so 272 * that means the min(mq_maxmsg, max_priorities) * struct 273 * posix_msg_tree_node. 274 */ 275 mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + 276 min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * 277 sizeof(struct posix_msg_tree_node); 278 279 mq_bytes = mq_treesize + (info->attr.mq_maxmsg * 280 info->attr.mq_msgsize); 281 282 spin_lock(&mq_lock); 283 if (u->mq_bytes + mq_bytes < u->mq_bytes || 284 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) { 285 spin_unlock(&mq_lock); 286 /* mqueue_evict_inode() releases info->messages */ 287 ret = -EMFILE; 288 goto out_inode; 289 } 290 u->mq_bytes += mq_bytes; 291 spin_unlock(&mq_lock); 292 293 /* all is ok */ 294 info->user = get_uid(u); 295 } else if (S_ISDIR(mode)) { 296 inc_nlink(inode); 297 /* Some things misbehave if size == 0 on a directory */ 298 inode->i_size = 2 * DIRENT_SIZE; 299 inode->i_op = &mqueue_dir_inode_operations; 300 inode->i_fop = &simple_dir_operations; 301 } 302 303 return inode; 304 out_inode: 305 iput(inode); 306 err: 307 return ERR_PTR(ret); 308 } 309 310 static int mqueue_fill_super(struct super_block *sb, void *data, int silent) 311 { 312 struct inode *inode; 313 struct ipc_namespace *ns = data; 314 315 sb->s_blocksize = PAGE_CACHE_SIZE; 316 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 317 sb->s_magic = MQUEUE_MAGIC; 318 sb->s_op = &mqueue_super_ops; 319 320 inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL); 321 if (IS_ERR(inode)) 322 return PTR_ERR(inode); 323 324 sb->s_root = d_make_root(inode); 325 if (!sb->s_root) 326 return -ENOMEM; 327 return 0; 328 } 329 330 static struct dentry *mqueue_mount(struct file_system_type *fs_type, 331 int flags, const char *dev_name, 332 void *data) 333 { 334 if (!(flags & MS_KERNMOUNT)) 335 data = current->nsproxy->ipc_ns; 336 return mount_ns(fs_type, flags, data, mqueue_fill_super); 337 } 338 339 static void init_once(void *foo) 340 { 341 struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; 342 343 inode_init_once(&p->vfs_inode); 344 } 345 346 static struct inode *mqueue_alloc_inode(struct super_block *sb) 347 { 348 struct mqueue_inode_info *ei; 349 350 ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL); 351 if (!ei) 352 return NULL; 353 return &ei->vfs_inode; 354 } 355 356 static void mqueue_i_callback(struct rcu_head *head) 357 { 358 struct inode *inode = container_of(head, struct inode, i_rcu); 359 kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode)); 360 } 361 362 static void mqueue_destroy_inode(struct inode *inode) 363 { 364 call_rcu(&inode->i_rcu, mqueue_i_callback); 365 } 366 367 static void mqueue_evict_inode(struct inode *inode) 368 { 369 struct mqueue_inode_info *info; 370 struct user_struct *user; 371 unsigned long mq_bytes, mq_treesize; 372 struct ipc_namespace *ipc_ns; 373 struct msg_msg *msg; 374 375 clear_inode(inode); 376 377 if (S_ISDIR(inode->i_mode)) 378 return; 379 380 ipc_ns = get_ns_from_inode(inode); 381 info = MQUEUE_I(inode); 382 spin_lock(&info->lock); 383 while ((msg = msg_get(info)) != NULL) 384 free_msg(msg); 385 kfree(info->node_cache); 386 spin_unlock(&info->lock); 387 388 /* Total amount of bytes accounted for the mqueue */ 389 mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + 390 min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * 391 sizeof(struct posix_msg_tree_node); 392 393 mq_bytes = mq_treesize + (info->attr.mq_maxmsg * 394 info->attr.mq_msgsize); 395 396 user = info->user; 397 if (user) { 398 spin_lock(&mq_lock); 399 user->mq_bytes -= mq_bytes; 400 /* 401 * get_ns_from_inode() ensures that the 402 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns 403 * to which we now hold a reference, or it is NULL. 404 * We can't put it here under mq_lock, though. 405 */ 406 if (ipc_ns) 407 ipc_ns->mq_queues_count--; 408 spin_unlock(&mq_lock); 409 free_uid(user); 410 } 411 if (ipc_ns) 412 put_ipc_ns(ipc_ns); 413 } 414 415 static int mqueue_create(struct inode *dir, struct dentry *dentry, 416 umode_t mode, bool excl) 417 { 418 struct inode *inode; 419 struct mq_attr *attr = dentry->d_fsdata; 420 int error; 421 struct ipc_namespace *ipc_ns; 422 423 spin_lock(&mq_lock); 424 ipc_ns = __get_ns_from_inode(dir); 425 if (!ipc_ns) { 426 error = -EACCES; 427 goto out_unlock; 428 } 429 if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX || 430 (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && 431 !capable(CAP_SYS_RESOURCE))) { 432 error = -ENOSPC; 433 goto out_unlock; 434 } 435 ipc_ns->mq_queues_count++; 436 spin_unlock(&mq_lock); 437 438 inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr); 439 if (IS_ERR(inode)) { 440 error = PTR_ERR(inode); 441 spin_lock(&mq_lock); 442 ipc_ns->mq_queues_count--; 443 goto out_unlock; 444 } 445 446 put_ipc_ns(ipc_ns); 447 dir->i_size += DIRENT_SIZE; 448 dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; 449 450 d_instantiate(dentry, inode); 451 dget(dentry); 452 return 0; 453 out_unlock: 454 spin_unlock(&mq_lock); 455 if (ipc_ns) 456 put_ipc_ns(ipc_ns); 457 return error; 458 } 459 460 static int mqueue_unlink(struct inode *dir, struct dentry *dentry) 461 { 462 struct inode *inode = dentry->d_inode; 463 464 dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; 465 dir->i_size -= DIRENT_SIZE; 466 drop_nlink(inode); 467 dput(dentry); 468 return 0; 469 } 470 471 /* 472 * This is routine for system read from queue file. 473 * To avoid mess with doing here some sort of mq_receive we allow 474 * to read only queue size & notification info (the only values 475 * that are interesting from user point of view and aren't accessible 476 * through std routines) 477 */ 478 static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, 479 size_t count, loff_t *off) 480 { 481 struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); 482 char buffer[FILENT_SIZE]; 483 ssize_t ret; 484 485 spin_lock(&info->lock); 486 snprintf(buffer, sizeof(buffer), 487 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n", 488 info->qsize, 489 info->notify_owner ? info->notify.sigev_notify : 0, 490 (info->notify_owner && 491 info->notify.sigev_notify == SIGEV_SIGNAL) ? 492 info->notify.sigev_signo : 0, 493 pid_vnr(info->notify_owner)); 494 spin_unlock(&info->lock); 495 buffer[sizeof(buffer)-1] = '\0'; 496 497 ret = simple_read_from_buffer(u_data, count, off, buffer, 498 strlen(buffer)); 499 if (ret <= 0) 500 return ret; 501 502 filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME; 503 return ret; 504 } 505 506 static int mqueue_flush_file(struct file *filp, fl_owner_t id) 507 { 508 struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); 509 510 spin_lock(&info->lock); 511 if (task_tgid(current) == info->notify_owner) 512 remove_notification(info); 513 514 spin_unlock(&info->lock); 515 return 0; 516 } 517 518 static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab) 519 { 520 struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); 521 int retval = 0; 522 523 poll_wait(filp, &info->wait_q, poll_tab); 524 525 spin_lock(&info->lock); 526 if (info->attr.mq_curmsgs) 527 retval = POLLIN | POLLRDNORM; 528 529 if (info->attr.mq_curmsgs < info->attr.mq_maxmsg) 530 retval |= POLLOUT | POLLWRNORM; 531 spin_unlock(&info->lock); 532 533 return retval; 534 } 535 536 /* Adds current to info->e_wait_q[sr] before element with smaller prio */ 537 static void wq_add(struct mqueue_inode_info *info, int sr, 538 struct ext_wait_queue *ewp) 539 { 540 struct ext_wait_queue *walk; 541 542 ewp->task = current; 543 544 list_for_each_entry(walk, &info->e_wait_q[sr].list, list) { 545 if (walk->task->static_prio <= current->static_prio) { 546 list_add_tail(&ewp->list, &walk->list); 547 return; 548 } 549 } 550 list_add_tail(&ewp->list, &info->e_wait_q[sr].list); 551 } 552 553 /* 554 * Puts current task to sleep. Caller must hold queue lock. After return 555 * lock isn't held. 556 * sr: SEND or RECV 557 */ 558 static int wq_sleep(struct mqueue_inode_info *info, int sr, 559 ktime_t *timeout, struct ext_wait_queue *ewp) 560 { 561 int retval; 562 signed long time; 563 564 wq_add(info, sr, ewp); 565 566 for (;;) { 567 set_current_state(TASK_INTERRUPTIBLE); 568 569 spin_unlock(&info->lock); 570 time = schedule_hrtimeout_range_clock(timeout, 0, 571 HRTIMER_MODE_ABS, CLOCK_REALTIME); 572 573 while (ewp->state == STATE_PENDING) 574 cpu_relax(); 575 576 if (ewp->state == STATE_READY) { 577 retval = 0; 578 goto out; 579 } 580 spin_lock(&info->lock); 581 if (ewp->state == STATE_READY) { 582 retval = 0; 583 goto out_unlock; 584 } 585 if (signal_pending(current)) { 586 retval = -ERESTARTSYS; 587 break; 588 } 589 if (time == 0) { 590 retval = -ETIMEDOUT; 591 break; 592 } 593 } 594 list_del(&ewp->list); 595 out_unlock: 596 spin_unlock(&info->lock); 597 out: 598 return retval; 599 } 600 601 /* 602 * Returns waiting task that should be serviced first or NULL if none exists 603 */ 604 static struct ext_wait_queue *wq_get_first_waiter( 605 struct mqueue_inode_info *info, int sr) 606 { 607 struct list_head *ptr; 608 609 ptr = info->e_wait_q[sr].list.prev; 610 if (ptr == &info->e_wait_q[sr].list) 611 return NULL; 612 return list_entry(ptr, struct ext_wait_queue, list); 613 } 614 615 616 static inline void set_cookie(struct sk_buff *skb, char code) 617 { 618 ((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code; 619 } 620 621 /* 622 * The next function is only to split too long sys_mq_timedsend 623 */ 624 static void __do_notify(struct mqueue_inode_info *info) 625 { 626 /* notification 627 * invoked when there is registered process and there isn't process 628 * waiting synchronously for message AND state of queue changed from 629 * empty to not empty. Here we are sure that no one is waiting 630 * synchronously. */ 631 if (info->notify_owner && 632 info->attr.mq_curmsgs == 1) { 633 struct siginfo sig_i; 634 switch (info->notify.sigev_notify) { 635 case SIGEV_NONE: 636 break; 637 case SIGEV_SIGNAL: 638 /* sends signal */ 639 640 sig_i.si_signo = info->notify.sigev_signo; 641 sig_i.si_errno = 0; 642 sig_i.si_code = SI_MESGQ; 643 sig_i.si_value = info->notify.sigev_value; 644 /* map current pid/uid into info->owner's namespaces */ 645 rcu_read_lock(); 646 sig_i.si_pid = task_tgid_nr_ns(current, 647 ns_of_pid(info->notify_owner)); 648 sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid()); 649 rcu_read_unlock(); 650 651 kill_pid_info(info->notify.sigev_signo, 652 &sig_i, info->notify_owner); 653 break; 654 case SIGEV_THREAD: 655 set_cookie(info->notify_cookie, NOTIFY_WOKENUP); 656 netlink_sendskb(info->notify_sock, info->notify_cookie); 657 break; 658 } 659 /* after notification unregisters process */ 660 put_pid(info->notify_owner); 661 put_user_ns(info->notify_user_ns); 662 info->notify_owner = NULL; 663 info->notify_user_ns = NULL; 664 } 665 wake_up(&info->wait_q); 666 } 667 668 static int prepare_timeout(const struct timespec __user *u_abs_timeout, 669 ktime_t *expires, struct timespec *ts) 670 { 671 if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec))) 672 return -EFAULT; 673 if (!timespec_valid(ts)) 674 return -EINVAL; 675 676 *expires = timespec_to_ktime(*ts); 677 return 0; 678 } 679 680 static void remove_notification(struct mqueue_inode_info *info) 681 { 682 if (info->notify_owner != NULL && 683 info->notify.sigev_notify == SIGEV_THREAD) { 684 set_cookie(info->notify_cookie, NOTIFY_REMOVED); 685 netlink_sendskb(info->notify_sock, info->notify_cookie); 686 } 687 put_pid(info->notify_owner); 688 put_user_ns(info->notify_user_ns); 689 info->notify_owner = NULL; 690 info->notify_user_ns = NULL; 691 } 692 693 static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr) 694 { 695 int mq_treesize; 696 unsigned long total_size; 697 698 if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0) 699 return -EINVAL; 700 if (capable(CAP_SYS_RESOURCE)) { 701 if (attr->mq_maxmsg > HARD_MSGMAX || 702 attr->mq_msgsize > HARD_MSGSIZEMAX) 703 return -EINVAL; 704 } else { 705 if (attr->mq_maxmsg > ipc_ns->mq_msg_max || 706 attr->mq_msgsize > ipc_ns->mq_msgsize_max) 707 return -EINVAL; 708 } 709 /* check for overflow */ 710 if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg) 711 return -EOVERFLOW; 712 mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) + 713 min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) * 714 sizeof(struct posix_msg_tree_node); 715 total_size = attr->mq_maxmsg * attr->mq_msgsize; 716 if (total_size + mq_treesize < total_size) 717 return -EOVERFLOW; 718 return 0; 719 } 720 721 /* 722 * Invoked when creating a new queue via sys_mq_open 723 */ 724 static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, 725 struct path *path, int oflag, umode_t mode, 726 struct mq_attr *attr) 727 { 728 const struct cred *cred = current_cred(); 729 int ret; 730 731 if (attr) { 732 ret = mq_attr_ok(ipc_ns, attr); 733 if (ret) 734 return ERR_PTR(ret); 735 /* store for use during create */ 736 path->dentry->d_fsdata = attr; 737 } else { 738 struct mq_attr def_attr; 739 740 def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max, 741 ipc_ns->mq_msg_default); 742 def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, 743 ipc_ns->mq_msgsize_default); 744 ret = mq_attr_ok(ipc_ns, &def_attr); 745 if (ret) 746 return ERR_PTR(ret); 747 } 748 749 mode &= ~current_umask(); 750 ret = vfs_create(dir, path->dentry, mode, true); 751 path->dentry->d_fsdata = NULL; 752 if (ret) 753 return ERR_PTR(ret); 754 return dentry_open(path, oflag, cred); 755 } 756 757 /* Opens existing queue */ 758 static struct file *do_open(struct path *path, int oflag) 759 { 760 static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, 761 MAY_READ | MAY_WRITE }; 762 int acc; 763 if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) 764 return ERR_PTR(-EINVAL); 765 acc = oflag2acc[oflag & O_ACCMODE]; 766 if (inode_permission(path->dentry->d_inode, acc)) 767 return ERR_PTR(-EACCES); 768 return dentry_open(path, oflag, current_cred()); 769 } 770 771 SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, 772 struct mq_attr __user *, u_attr) 773 { 774 struct path path; 775 struct file *filp; 776 char *name; 777 struct mq_attr attr; 778 int fd, error; 779 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; 780 struct vfsmount *mnt = ipc_ns->mq_mnt; 781 struct dentry *root = mnt->mnt_root; 782 int ro; 783 784 if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr))) 785 return -EFAULT; 786 787 audit_mq_open(oflag, mode, u_attr ? &attr : NULL); 788 789 if (IS_ERR(name = getname(u_name))) 790 return PTR_ERR(name); 791 792 fd = get_unused_fd_flags(O_CLOEXEC); 793 if (fd < 0) 794 goto out_putname; 795 796 ro = mnt_want_write(mnt); /* we'll drop it in any case */ 797 error = 0; 798 mutex_lock(&root->d_inode->i_mutex); 799 path.dentry = lookup_one_len(name, root, strlen(name)); 800 if (IS_ERR(path.dentry)) { 801 error = PTR_ERR(path.dentry); 802 goto out_putfd; 803 } 804 path.mnt = mntget(mnt); 805 806 if (oflag & O_CREAT) { 807 if (path.dentry->d_inode) { /* entry already exists */ 808 audit_inode(name, path.dentry); 809 if (oflag & O_EXCL) { 810 error = -EEXIST; 811 goto out; 812 } 813 filp = do_open(&path, oflag); 814 } else { 815 if (ro) { 816 error = ro; 817 goto out; 818 } 819 filp = do_create(ipc_ns, root->d_inode, 820 &path, oflag, mode, 821 u_attr ? &attr : NULL); 822 } 823 } else { 824 if (!path.dentry->d_inode) { 825 error = -ENOENT; 826 goto out; 827 } 828 audit_inode(name, path.dentry); 829 filp = do_open(&path, oflag); 830 } 831 832 if (!IS_ERR(filp)) 833 fd_install(fd, filp); 834 else 835 error = PTR_ERR(filp); 836 out: 837 path_put(&path); 838 out_putfd: 839 if (error) { 840 put_unused_fd(fd); 841 fd = error; 842 } 843 mutex_unlock(&root->d_inode->i_mutex); 844 mnt_drop_write(mnt); 845 out_putname: 846 putname(name); 847 return fd; 848 } 849 850 SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) 851 { 852 int err; 853 char *name; 854 struct dentry *dentry; 855 struct inode *inode = NULL; 856 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; 857 struct vfsmount *mnt = ipc_ns->mq_mnt; 858 859 name = getname(u_name); 860 if (IS_ERR(name)) 861 return PTR_ERR(name); 862 863 err = mnt_want_write(mnt); 864 if (err) 865 goto out_name; 866 mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT); 867 dentry = lookup_one_len(name, mnt->mnt_root, strlen(name)); 868 if (IS_ERR(dentry)) { 869 err = PTR_ERR(dentry); 870 goto out_unlock; 871 } 872 873 inode = dentry->d_inode; 874 if (!inode) { 875 err = -ENOENT; 876 } else { 877 ihold(inode); 878 err = vfs_unlink(dentry->d_parent->d_inode, dentry); 879 } 880 dput(dentry); 881 882 out_unlock: 883 mutex_unlock(&mnt->mnt_root->d_inode->i_mutex); 884 if (inode) 885 iput(inode); 886 mnt_drop_write(mnt); 887 out_name: 888 putname(name); 889 890 return err; 891 } 892 893 /* Pipelined send and receive functions. 894 * 895 * If a receiver finds no waiting message, then it registers itself in the 896 * list of waiting receivers. A sender checks that list before adding the new 897 * message into the message array. If there is a waiting receiver, then it 898 * bypasses the message array and directly hands the message over to the 899 * receiver. 900 * The receiver accepts the message and returns without grabbing the queue 901 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers 902 * are necessary. The same algorithm is used for sysv semaphores, see 903 * ipc/sem.c for more details. 904 * 905 * The same algorithm is used for senders. 906 */ 907 908 /* pipelined_send() - send a message directly to the task waiting in 909 * sys_mq_timedreceive() (without inserting message into a queue). 910 */ 911 static inline void pipelined_send(struct mqueue_inode_info *info, 912 struct msg_msg *message, 913 struct ext_wait_queue *receiver) 914 { 915 receiver->msg = message; 916 list_del(&receiver->list); 917 receiver->state = STATE_PENDING; 918 wake_up_process(receiver->task); 919 smp_wmb(); 920 receiver->state = STATE_READY; 921 } 922 923 /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() 924 * gets its message and put to the queue (we have one free place for sure). */ 925 static inline void pipelined_receive(struct mqueue_inode_info *info) 926 { 927 struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND); 928 929 if (!sender) { 930 /* for poll */ 931 wake_up_interruptible(&info->wait_q); 932 return; 933 } 934 if (msg_insert(sender->msg, info)) 935 return; 936 list_del(&sender->list); 937 sender->state = STATE_PENDING; 938 wake_up_process(sender->task); 939 smp_wmb(); 940 sender->state = STATE_READY; 941 } 942 943 SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, 944 size_t, msg_len, unsigned int, msg_prio, 945 const struct timespec __user *, u_abs_timeout) 946 { 947 struct file *filp; 948 struct inode *inode; 949 struct ext_wait_queue wait; 950 struct ext_wait_queue *receiver; 951 struct msg_msg *msg_ptr; 952 struct mqueue_inode_info *info; 953 ktime_t expires, *timeout = NULL; 954 struct timespec ts; 955 struct posix_msg_tree_node *new_leaf = NULL; 956 int ret = 0; 957 958 if (u_abs_timeout) { 959 int res = prepare_timeout(u_abs_timeout, &expires, &ts); 960 if (res) 961 return res; 962 timeout = &expires; 963 } 964 965 if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX)) 966 return -EINVAL; 967 968 audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL); 969 970 filp = fget(mqdes); 971 if (unlikely(!filp)) { 972 ret = -EBADF; 973 goto out; 974 } 975 976 inode = filp->f_path.dentry->d_inode; 977 if (unlikely(filp->f_op != &mqueue_file_operations)) { 978 ret = -EBADF; 979 goto out_fput; 980 } 981 info = MQUEUE_I(inode); 982 audit_inode(NULL, filp->f_path.dentry); 983 984 if (unlikely(!(filp->f_mode & FMODE_WRITE))) { 985 ret = -EBADF; 986 goto out_fput; 987 } 988 989 if (unlikely(msg_len > info->attr.mq_msgsize)) { 990 ret = -EMSGSIZE; 991 goto out_fput; 992 } 993 994 /* First try to allocate memory, before doing anything with 995 * existing queues. */ 996 msg_ptr = load_msg(u_msg_ptr, msg_len); 997 if (IS_ERR(msg_ptr)) { 998 ret = PTR_ERR(msg_ptr); 999 goto out_fput; 1000 } 1001 msg_ptr->m_ts = msg_len; 1002 msg_ptr->m_type = msg_prio; 1003 1004 /* 1005 * msg_insert really wants us to have a valid, spare node struct so 1006 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will 1007 * fall back to that if necessary. 1008 */ 1009 if (!info->node_cache) 1010 new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); 1011 1012 spin_lock(&info->lock); 1013 1014 if (!info->node_cache && new_leaf) { 1015 /* Save our speculative allocation into the cache */ 1016 rb_init_node(&new_leaf->rb_node); 1017 INIT_LIST_HEAD(&new_leaf->msg_list); 1018 info->node_cache = new_leaf; 1019 info->qsize += sizeof(*new_leaf); 1020 new_leaf = NULL; 1021 } else { 1022 kfree(new_leaf); 1023 } 1024 1025 if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) { 1026 if (filp->f_flags & O_NONBLOCK) { 1027 ret = -EAGAIN; 1028 } else { 1029 wait.task = current; 1030 wait.msg = (void *) msg_ptr; 1031 wait.state = STATE_NONE; 1032 ret = wq_sleep(info, SEND, timeout, &wait); 1033 /* 1034 * wq_sleep must be called with info->lock held, and 1035 * returns with the lock released 1036 */ 1037 goto out_free; 1038 } 1039 } else { 1040 receiver = wq_get_first_waiter(info, RECV); 1041 if (receiver) { 1042 pipelined_send(info, msg_ptr, receiver); 1043 } else { 1044 /* adds message to the queue */ 1045 ret = msg_insert(msg_ptr, info); 1046 if (ret) 1047 goto out_unlock; 1048 __do_notify(info); 1049 } 1050 inode->i_atime = inode->i_mtime = inode->i_ctime = 1051 CURRENT_TIME; 1052 } 1053 out_unlock: 1054 spin_unlock(&info->lock); 1055 out_free: 1056 if (ret) 1057 free_msg(msg_ptr); 1058 out_fput: 1059 fput(filp); 1060 out: 1061 return ret; 1062 } 1063 1064 SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, 1065 size_t, msg_len, unsigned int __user *, u_msg_prio, 1066 const struct timespec __user *, u_abs_timeout) 1067 { 1068 ssize_t ret; 1069 struct msg_msg *msg_ptr; 1070 struct file *filp; 1071 struct inode *inode; 1072 struct mqueue_inode_info *info; 1073 struct ext_wait_queue wait; 1074 ktime_t expires, *timeout = NULL; 1075 struct timespec ts; 1076 struct posix_msg_tree_node *new_leaf = NULL; 1077 1078 if (u_abs_timeout) { 1079 int res = prepare_timeout(u_abs_timeout, &expires, &ts); 1080 if (res) 1081 return res; 1082 timeout = &expires; 1083 } 1084 1085 audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL); 1086 1087 filp = fget(mqdes); 1088 if (unlikely(!filp)) { 1089 ret = -EBADF; 1090 goto out; 1091 } 1092 1093 inode = filp->f_path.dentry->d_inode; 1094 if (unlikely(filp->f_op != &mqueue_file_operations)) { 1095 ret = -EBADF; 1096 goto out_fput; 1097 } 1098 info = MQUEUE_I(inode); 1099 audit_inode(NULL, filp->f_path.dentry); 1100 1101 if (unlikely(!(filp->f_mode & FMODE_READ))) { 1102 ret = -EBADF; 1103 goto out_fput; 1104 } 1105 1106 /* checks if buffer is big enough */ 1107 if (unlikely(msg_len < info->attr.mq_msgsize)) { 1108 ret = -EMSGSIZE; 1109 goto out_fput; 1110 } 1111 1112 /* 1113 * msg_insert really wants us to have a valid, spare node struct so 1114 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will 1115 * fall back to that if necessary. 1116 */ 1117 if (!info->node_cache) 1118 new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); 1119 1120 spin_lock(&info->lock); 1121 1122 if (!info->node_cache && new_leaf) { 1123 /* Save our speculative allocation into the cache */ 1124 rb_init_node(&new_leaf->rb_node); 1125 INIT_LIST_HEAD(&new_leaf->msg_list); 1126 info->node_cache = new_leaf; 1127 info->qsize += sizeof(*new_leaf); 1128 } else { 1129 kfree(new_leaf); 1130 } 1131 1132 if (info->attr.mq_curmsgs == 0) { 1133 if (filp->f_flags & O_NONBLOCK) { 1134 spin_unlock(&info->lock); 1135 ret = -EAGAIN; 1136 } else { 1137 wait.task = current; 1138 wait.state = STATE_NONE; 1139 ret = wq_sleep(info, RECV, timeout, &wait); 1140 msg_ptr = wait.msg; 1141 } 1142 } else { 1143 msg_ptr = msg_get(info); 1144 1145 inode->i_atime = inode->i_mtime = inode->i_ctime = 1146 CURRENT_TIME; 1147 1148 /* There is now free space in queue. */ 1149 pipelined_receive(info); 1150 spin_unlock(&info->lock); 1151 ret = 0; 1152 } 1153 if (ret == 0) { 1154 ret = msg_ptr->m_ts; 1155 1156 if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) || 1157 store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) { 1158 ret = -EFAULT; 1159 } 1160 free_msg(msg_ptr); 1161 } 1162 out_fput: 1163 fput(filp); 1164 out: 1165 return ret; 1166 } 1167 1168 /* 1169 * Notes: the case when user wants us to deregister (with NULL as pointer) 1170 * and he isn't currently owner of notification, will be silently discarded. 1171 * It isn't explicitly defined in the POSIX. 1172 */ 1173 SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, 1174 const struct sigevent __user *, u_notification) 1175 { 1176 int ret; 1177 struct file *filp; 1178 struct sock *sock; 1179 struct inode *inode; 1180 struct sigevent notification; 1181 struct mqueue_inode_info *info; 1182 struct sk_buff *nc; 1183 1184 if (u_notification) { 1185 if (copy_from_user(¬ification, u_notification, 1186 sizeof(struct sigevent))) 1187 return -EFAULT; 1188 } 1189 1190 audit_mq_notify(mqdes, u_notification ? ¬ification : NULL); 1191 1192 nc = NULL; 1193 sock = NULL; 1194 if (u_notification != NULL) { 1195 if (unlikely(notification.sigev_notify != SIGEV_NONE && 1196 notification.sigev_notify != SIGEV_SIGNAL && 1197 notification.sigev_notify != SIGEV_THREAD)) 1198 return -EINVAL; 1199 if (notification.sigev_notify == SIGEV_SIGNAL && 1200 !valid_signal(notification.sigev_signo)) { 1201 return -EINVAL; 1202 } 1203 if (notification.sigev_notify == SIGEV_THREAD) { 1204 long timeo; 1205 1206 /* create the notify skb */ 1207 nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); 1208 if (!nc) { 1209 ret = -ENOMEM; 1210 goto out; 1211 } 1212 if (copy_from_user(nc->data, 1213 notification.sigev_value.sival_ptr, 1214 NOTIFY_COOKIE_LEN)) { 1215 ret = -EFAULT; 1216 goto out; 1217 } 1218 1219 /* TODO: add a header? */ 1220 skb_put(nc, NOTIFY_COOKIE_LEN); 1221 /* and attach it to the socket */ 1222 retry: 1223 filp = fget(notification.sigev_signo); 1224 if (!filp) { 1225 ret = -EBADF; 1226 goto out; 1227 } 1228 sock = netlink_getsockbyfilp(filp); 1229 fput(filp); 1230 if (IS_ERR(sock)) { 1231 ret = PTR_ERR(sock); 1232 sock = NULL; 1233 goto out; 1234 } 1235 1236 timeo = MAX_SCHEDULE_TIMEOUT; 1237 ret = netlink_attachskb(sock, nc, &timeo, NULL); 1238 if (ret == 1) 1239 goto retry; 1240 if (ret) { 1241 sock = NULL; 1242 nc = NULL; 1243 goto out; 1244 } 1245 } 1246 } 1247 1248 filp = fget(mqdes); 1249 if (!filp) { 1250 ret = -EBADF; 1251 goto out; 1252 } 1253 1254 inode = filp->f_path.dentry->d_inode; 1255 if (unlikely(filp->f_op != &mqueue_file_operations)) { 1256 ret = -EBADF; 1257 goto out_fput; 1258 } 1259 info = MQUEUE_I(inode); 1260 1261 ret = 0; 1262 spin_lock(&info->lock); 1263 if (u_notification == NULL) { 1264 if (info->notify_owner == task_tgid(current)) { 1265 remove_notification(info); 1266 inode->i_atime = inode->i_ctime = CURRENT_TIME; 1267 } 1268 } else if (info->notify_owner != NULL) { 1269 ret = -EBUSY; 1270 } else { 1271 switch (notification.sigev_notify) { 1272 case SIGEV_NONE: 1273 info->notify.sigev_notify = SIGEV_NONE; 1274 break; 1275 case SIGEV_THREAD: 1276 info->notify_sock = sock; 1277 info->notify_cookie = nc; 1278 sock = NULL; 1279 nc = NULL; 1280 info->notify.sigev_notify = SIGEV_THREAD; 1281 break; 1282 case SIGEV_SIGNAL: 1283 info->notify.sigev_signo = notification.sigev_signo; 1284 info->notify.sigev_value = notification.sigev_value; 1285 info->notify.sigev_notify = SIGEV_SIGNAL; 1286 break; 1287 } 1288 1289 info->notify_owner = get_pid(task_tgid(current)); 1290 info->notify_user_ns = get_user_ns(current_user_ns()); 1291 inode->i_atime = inode->i_ctime = CURRENT_TIME; 1292 } 1293 spin_unlock(&info->lock); 1294 out_fput: 1295 fput(filp); 1296 out: 1297 if (sock) { 1298 netlink_detachskb(sock, nc); 1299 } else if (nc) { 1300 dev_kfree_skb(nc); 1301 } 1302 return ret; 1303 } 1304 1305 SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, 1306 const struct mq_attr __user *, u_mqstat, 1307 struct mq_attr __user *, u_omqstat) 1308 { 1309 int ret; 1310 struct mq_attr mqstat, omqstat; 1311 struct file *filp; 1312 struct inode *inode; 1313 struct mqueue_inode_info *info; 1314 1315 if (u_mqstat != NULL) { 1316 if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr))) 1317 return -EFAULT; 1318 if (mqstat.mq_flags & (~O_NONBLOCK)) 1319 return -EINVAL; 1320 } 1321 1322 filp = fget(mqdes); 1323 if (!filp) { 1324 ret = -EBADF; 1325 goto out; 1326 } 1327 1328 inode = filp->f_path.dentry->d_inode; 1329 if (unlikely(filp->f_op != &mqueue_file_operations)) { 1330 ret = -EBADF; 1331 goto out_fput; 1332 } 1333 info = MQUEUE_I(inode); 1334 1335 spin_lock(&info->lock); 1336 1337 omqstat = info->attr; 1338 omqstat.mq_flags = filp->f_flags & O_NONBLOCK; 1339 if (u_mqstat) { 1340 audit_mq_getsetattr(mqdes, &mqstat); 1341 spin_lock(&filp->f_lock); 1342 if (mqstat.mq_flags & O_NONBLOCK) 1343 filp->f_flags |= O_NONBLOCK; 1344 else 1345 filp->f_flags &= ~O_NONBLOCK; 1346 spin_unlock(&filp->f_lock); 1347 1348 inode->i_atime = inode->i_ctime = CURRENT_TIME; 1349 } 1350 1351 spin_unlock(&info->lock); 1352 1353 ret = 0; 1354 if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat, 1355 sizeof(struct mq_attr))) 1356 ret = -EFAULT; 1357 1358 out_fput: 1359 fput(filp); 1360 out: 1361 return ret; 1362 } 1363 1364 static const struct inode_operations mqueue_dir_inode_operations = { 1365 .lookup = simple_lookup, 1366 .create = mqueue_create, 1367 .unlink = mqueue_unlink, 1368 }; 1369 1370 static const struct file_operations mqueue_file_operations = { 1371 .flush = mqueue_flush_file, 1372 .poll = mqueue_poll_file, 1373 .read = mqueue_read_file, 1374 .llseek = default_llseek, 1375 }; 1376 1377 static const struct super_operations mqueue_super_ops = { 1378 .alloc_inode = mqueue_alloc_inode, 1379 .destroy_inode = mqueue_destroy_inode, 1380 .evict_inode = mqueue_evict_inode, 1381 .statfs = simple_statfs, 1382 }; 1383 1384 static struct file_system_type mqueue_fs_type = { 1385 .name = "mqueue", 1386 .mount = mqueue_mount, 1387 .kill_sb = kill_litter_super, 1388 }; 1389 1390 int mq_init_ns(struct ipc_namespace *ns) 1391 { 1392 ns->mq_queues_count = 0; 1393 ns->mq_queues_max = DFLT_QUEUESMAX; 1394 ns->mq_msg_max = DFLT_MSGMAX; 1395 ns->mq_msgsize_max = DFLT_MSGSIZEMAX; 1396 ns->mq_msg_default = DFLT_MSG; 1397 ns->mq_msgsize_default = DFLT_MSGSIZE; 1398 1399 ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns); 1400 if (IS_ERR(ns->mq_mnt)) { 1401 int err = PTR_ERR(ns->mq_mnt); 1402 ns->mq_mnt = NULL; 1403 return err; 1404 } 1405 return 0; 1406 } 1407 1408 void mq_clear_sbinfo(struct ipc_namespace *ns) 1409 { 1410 ns->mq_mnt->mnt_sb->s_fs_info = NULL; 1411 } 1412 1413 void mq_put_mnt(struct ipc_namespace *ns) 1414 { 1415 kern_unmount(ns->mq_mnt); 1416 } 1417 1418 static int __init init_mqueue_fs(void) 1419 { 1420 int error; 1421 1422 mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache", 1423 sizeof(struct mqueue_inode_info), 0, 1424 SLAB_HWCACHE_ALIGN, init_once); 1425 if (mqueue_inode_cachep == NULL) 1426 return -ENOMEM; 1427 1428 /* ignore failures - they are not fatal */ 1429 mq_sysctl_table = mq_register_sysctl_table(); 1430 1431 error = register_filesystem(&mqueue_fs_type); 1432 if (error) 1433 goto out_sysctl; 1434 1435 spin_lock_init(&mq_lock); 1436 1437 error = mq_init_ns(&init_ipc_ns); 1438 if (error) 1439 goto out_filesystem; 1440 1441 return 0; 1442 1443 out_filesystem: 1444 unregister_filesystem(&mqueue_fs_type); 1445 out_sysctl: 1446 if (mq_sysctl_table) 1447 unregister_sysctl_table(mq_sysctl_table); 1448 kmem_cache_destroy(mqueue_inode_cachep); 1449 return error; 1450 } 1451 1452 __initcall(init_mqueue_fs); 1453