/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct shmid_kernel *shp = ipc_rcu_to_struct(p);

	security_shm_free(shp);
	ipc_rcu_free(head);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
	fput(shm_file);
	ipc_rcu_putref(shp, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_creator != current)
		return 0;

	/*
	 * Mark it as orphaned to destroy the segment when
	 * kernel.shm_rmid_forced is changed.
	 * It is a noop if the following shm_may_destroy() returns true.
	 */
	shp->shm_creator = NULL;

	/*
	 * Don't even try to destroy it. If shm_rmid_forced = 0 and IPC_RMID
	 * is not set, it shouldn't be deleted here.
	 */
	if (!ns->shm_rmid_forced)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}


void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;

	if (shm_ids(ns).in_use == 0)
		return;

	/* Destroy all already created segments, but not mapped yet */
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
	up_write(&shm_ids(ns).rwsem);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;
	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */

static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp, ipc_rcu_free);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	ipc_rcu_putref(shp, shm_rcu_free);
	return error;
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				  struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
			 unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}

static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct shmid_kernel *shp;

	/* preliminary security checks for *_INFO */
	if (cmd == IPC_INFO || cmd == SHM_INFO) {
		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;
	}

	switch (cmd) {
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rwsem);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rwsem);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		rcu_read_lock();
		if (cmd == SHM_STAT) {
			shp = shm_obtain_object(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_obtain_object_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		rcu_read_unlock();

		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		return shmctl_nolock(ns, shmid, cmd, version, buf);
	case IPC_RMID:
	case IPC_SET:
		return shmctl_down(ns, shmid, cmd, buf, version);
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		rcu_read_lock();
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock1;
		}

		audit_ipc_obj(&(shp->shm_perm));
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock1;

		ipc_lock_object(&shp->shm_perm);
		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();
			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid)) {
				err = -EPERM;
				goto out_unlock0;
			}
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
				err = -EPERM;
				goto out_unlock0;
			}
		}

		shm_file = shp->shm_file;

		/* check if shm_destroy() is tearing down shp */
		if (shm_file == NULL) {
			err = -EIDRM;
			goto out_unlock0;
		}

		if (is_file_hugepages(shm_file))
			goto out_unlock0;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock0;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock0;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		ipc_unlock_object(&shp->shm_perm);
		rcu_read_unlock();
		shmem_unlock_mapping(shm_file->f_mapping);

		fput(shm_file);
		return err;
	}
	default:
		return -EINVAL;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
	      unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (shmlba - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(shmlba - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (shp->shm_file == NULL) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		path_put(&path);
		goto out_nattch;
	}

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		path_put(&path);
		goto out_nattch;
	}

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {


			size = file_inode(vma->vm_file)->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given */
	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d  %4o " SIZE_SPEC " %5u %5u "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
			  SIZE_SPEC " " SIZE_SPEC "\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  from_kuid_munged(user_ns, shp->shm_perm.uid),
			  from_kgid_munged(user_ns, shp->shm_perm.gid),
			  from_kuid_munged(user_ns, shp->shm_perm.cuid),
			  from_kgid_munged(user_ns, shp->shm_perm.cgid),
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim,
			  rss * PAGE_SIZE,
			  swp * PAGE_SIZE);
}
#endif