/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}
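
/*
 * Illustrative note (not part of the original file): do_shm_rmid() below
 * implements the deferred-destroy semantics of IPC_RMID. A hedged
 * userspace sketch of what that means:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *	shmctl(id, IPC_RMID, NULL);	// marked SHM_DEST, key hidden
 *	// memory at p is still usable here; the segment is only
 *	// destroyed once the last attach goes away:
 *	shmdt(p);			// nattch drops to 0 -> shm_destroy()
 */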
/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

void __init shm_init(void)
{
	shm_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
						shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}
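
/*
 * Illustrative note (not part of the original file): shm_open() is the
 * vm_ops->open callback, so every duplication of an attached VMA bumps
 * shm_nattch. A hedged userspace sketch:
 *
 *	void *p = shmat(id, NULL, 0);	// nattch == 1
 *	if (fork() == 0) {
 *		// child inherited the mapping: nattch == 2
 *	}
 */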
/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;
	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, datasync);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
};

static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}
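
/*
 * Illustrative note (not part of the original file): shm_vm_ops below is a
 * thin wrapper that layers SysV attach bookkeeping over the vm_ops of the
 * backing file (shmem or hugetlbfs). The pattern, sketched generically:
 *
 *	static int wrapper_fault(struct vm_area_struct *vma,
 *				 struct vm_fault *vmf)
 *	{
 *		do_bookkeeping(vma);			   // hypothetical
 *		return underlying_vm_ops->fault(vma, vmf); // delegate
 *	}
 *
 * shm_mmap() saved the underlying vm_ops in sfd->vm_ops before replacing
 * vma->vm_ops, which is what makes the delegation in shm_fault() possible.
 */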
static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */

static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	int acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, size, acctflag,
					&shp->mlock_user, HUGETLB_SHMFS_INODE);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;
	shm_unlock(shp);
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}
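
/*
 * Illustrative note (not part of the original file): newseg() charges whole
 * pages against ns->shm_tot, rounding the byte size up. Worked example,
 * assuming PAGE_SIZE == 4096 (PAGE_SHIFT == 12):
 *
 *	size = 5000
 *	numpages = (5000 + 4096 - 1) >> 12 = 9095 >> 12 = 2
 *
 * so a 5000-byte request consumes two pages of the shmall quota.
 */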
/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
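
/*
 * Illustrative note (not part of the original file): shm_more_checks()
 * above is what makes attaching to an existing key size-checked. A hedged
 * userspace sketch:
 *
 *	int id = shmget(0x1234, 4096, IPC_CREAT | 0600); // creates 4 KiB
 *	...
 *	id = shmget(0x1234, 8192, 0600); // fails with EINVAL: the existing
 *					 // segment is smaller than requested
 */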
/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = shp->shm_file->f_path.dentry->d_inode;

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(&shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock;
	switch (cmd) {
	case IPC_RMID:
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&shmid64.shm_perm, ipcp);
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
	}
out_unlock:
	shm_unlock(shp);
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	return err;
}
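
/*
 * Illustrative note (not part of the original file): IPC_SET, handled by
 * shmctl_down() above, only honours uid/gid/mode from the user buffer.
 * A hedged userspace sketch:
 *
 *	struct shmid_ds ds;
 *	shmctl(id, IPC_STAT, &ds);	// read current state
 *	ds.shm_perm.mode = 0600;	// restrict to owner
 *	shmctl(id, IPC_SET, &ds);	// also refreshes shm_ctime
 */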
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *uninitialized_var(shm_file);

		lru_add_drain_all();  /* drain pagevecs to lru lists */

		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		audit_ipc_obj(&(shp->shm_perm));

		if (!capable(CAP_IPC_LOCK)) {
			uid_t euid = current_euid();
			err = -EPERM;
			if (euid != shp->shm_perm.uid &&
			    euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	case IPC_SET:
		err = shmctl_down(ns, shmid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	shm_unlock(shp);
out:
	return err;
}
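
/*
 * Illustrative note (not part of the original file): SHM_LOCK, handled in
 * shmctl() above, pins a shmem-backed segment so its pages are not swapped
 * out. Without CAP_IPC_LOCK the caller must own the segment and have a
 * nonzero RLIMIT_MEMLOCK. A hedged userspace sketch:
 *
 *	if (shmctl(id, SHM_LOCK, NULL) == 0) {
 *		...		// pages stay resident
 *		shmctl(id, SHM_UNLOCK, NULL);
 *	}
 */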
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(&shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	if (!file)
		goto out_free;

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;
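
	/*
	 * Illustrative note (not part of the original file): what follows
	 * performs the actual mapping under mmap_sem. With a caller-chosen
	 * address and no SHM_REMAP, the range must be completely free; e.g.
	 * shmat(id, (void *)0x40000000, 0) fails with EINVAL if any existing
	 * VMA intersects [0x40000000, 0x40000000 + size).
	 */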
	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap(file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);

	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	path_put(&path);
	goto out_nattch;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
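
/*
 * Illustrative note (not part of the original file): the attach/detach
 * round trip from userspace. A hedged sketch:
 *
 *	void *p = shmat(id, NULL, SHM_RDONLY);	// read-only mapping
 *	if (p != (void *)-1) {
 *		...
 *		shmdt(p);	// unmap; shm_close() drops shm_nattch and
 *				// destroys the segment if SHM_DEST is set
 *	}
 */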
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given */
	retval = -EINVAL;
	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d  %4o " SIZE_SPEC " %5u %5u "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
			  SIZE_SPEC " " SIZE_SPEC "\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim,
			  rss * PAGE_SIZE,
			  swp * PAGE_SIZE);
}
#endif
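
/*
 * Illustrative note (not part of the original file): sysvipc_shm_proc_show()
 * above emits one line per segment in /proc/sysvipc/shm, under the header
 * registered in shm_init(). Reading it from userspace is just:
 *
 *	$ cat /proc/sysvipc/shm
 *	       key      shmid perms       size  cpid  lpid nattch ...
 *
 * The rss and swap columns are reported in bytes (pages * PAGE_SIZE), as
 * computed by shm_add_rss_swap().
 */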