/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	spin_lock(&ipcp->shm_perm.lock);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(file_inode(shp->shm_file)->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_creator != current)
		return 0;

	/*
	 * Mark it as orphaned to destroy the segment when
	 * kernel.shm_rmid_forced is changed.
	 * It is noop if the following shm_may_destroy() returns true.
	 */
	shp->shm_creator = NULL;

	/*
	 * Don't even try to destroy it.  If shm_rmid_forced=0 and IPC_RMID
	 * is not set, it shouldn't be deleted here.
	 */
	if (!ns->shm_rmid_forced)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rw_mutex);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rw_mutex);
}


void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;

	if (shm_ids(ns).in_use == 0)
		return;

	/* Destroy all already created segments, but not mapped yet */
	down_write(&shm_ids(ns).rw_mutex);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
	up_write(&shm_ids(ns).rw_mutex);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;
	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};
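
/*
 * Same file operations as above, but ->get_unmapped_area is always set:
 * hugetlbfs mappings must be aligned to the huge page size even on MMU
 * systems.  The address of this structure is also what
 * is_file_shm_hugepages() compares against.
 */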
static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */

static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT)
						& SHM_HUGE_MASK);
		size_t hugesize;

		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				&shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;
	shm_unlock(shp);
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
			       &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock;
	switch (cmd) {
	case IPC_RMID:
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
	}
out_unlock:
	shm_unlock(shp);
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		audit_ipc_obj(&(shp->shm_perm));

		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();
			err = -EPERM;
			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid))
				goto out_unlock;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		shm_unlock(shp);
		shmem_unlock_mapping(shm_file->f_mapping);
		fput(shm_file);
		goto out;
	}
	case IPC_RMID:
	case IPC_SET:
		err = shmctl_down(ns, shmid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	shm_unlock(shp);
out:
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
	      unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (shmlba - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(shmlba - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_free;

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	path_put(&path);
	goto out_nattch;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: it searches for a vma that is backed by shm and that
	 *   starts at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {


			size = file_inode(vma->vm_file)->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given */
	retval = -EINVAL;
	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d %4o " SIZE_SPEC " %5u %5u "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
			  SIZE_SPEC " " SIZE_SPEC "\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  from_kuid_munged(user_ns, shp->shm_perm.uid),
			  from_kgid_munged(user_ns, shp->shm_perm.gid),
			  from_kuid_munged(user_ns, shp->shm_perm.cuid),
			  from_kgid_munged(user_ns, shp->shm_perm.cgid),
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim,
			  rss * PAGE_SIZE,
			  swp * PAGE_SIZE);
}
#endif
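
/*
 * Illustrative sketch (not part of the kernel source): a minimal userspace
 * sequence that exercises the syscalls implemented above.  shmget() lands in
 * newseg(), shmat() goes through do_shmat(), shmdt() through the shmdt()
 * handler, and IPC_RMID through shmctl_down().  The key, size and error
 * handling below are assumptions chosen only for the example.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *	#include <string.h>
 *
 *	int example(void)
 *	{
 *		int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *		char *p;
 *
 *		if (id < 0)
 *			return -1;
 *		p = shmat(id, NULL, 0);
 *		if (p == (void *)-1)
 *			return -1;
 *		strcpy(p, "hello");
 *		shmdt(p);
 *		shmctl(id, IPC_RMID, NULL);
 *		return 0;
 *	}
 */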