/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch){
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

void __init shm_init (void)
{
	shm_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/shm",
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n",
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
						shp->mlock_user);
	fput (shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file * file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;
	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, datasync);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
};

static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */

static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
	struct file * file;
	char name[13];
	int id;
	int acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf (name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, size, acctflag,
					&shp->mlock_user, HUGETLB_SHMFS_INODE);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;
	shm_unlock(shp);
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if(in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;
		struct inode *inode;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		inode = shp->shm_file->f_path.dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			struct hstate *h = hstate_file(shp->shm_file);
			*rss += pages_per_huge_page(h) * mapping->nrpages;
		} else {
#ifdef CONFIG_SHMEM
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
#else
			*rss += inode->i_mapping->nrpages;
#endif
		}

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(&shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock;
	switch (cmd) {
	case IPC_RMID:
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&shmid64.shm_perm, ipcp);
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
	}
out_unlock:
	shm_unlock(shp);
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if(copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if(err<0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if(copy_shmid_to_user (buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *uninitialized_var(shm_file);

		lru_add_drain_all();  /* drain pagevecs to lru lists */

		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		audit_ipc_obj(&(shp->shm_perm));

		if (!capable(CAP_IPC_LOCK)) {
			uid_t euid = current_euid();
			err = -EPERM;
			if (euid != shp->shm_perm.uid &&
			    euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if(cmd==SHM_LOCK) {
			struct user_struct *user = current_user();
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	case IPC_SET:
		err = shmctl_down(ns, shmid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	shm_unlock(shp);
out:
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file * file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(&shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	if (!file)
		goto out_free;

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap (file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);

	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	path_put(&path);
	goto out_nattch;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {


			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given */
	retval = -EINVAL;
	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d %4o " SIZE_SPEC " %5u %5u "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif
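
/*
 * Editor's note: a minimal userspace sketch of how the System V calls
 * implemented above are typically driven.  It is illustrative only, not part
 * of this file or of the kernel build (kept inside this comment so the file
 * compiles unchanged); the key 0x1234, the 4 KiB size, and the function name
 * shm_example() are arbitrary placeholders.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *	#include <string.h>
 *
 *	int shm_example(void)
 *	{
 *		int id;
 *		char *p;
 *
 *		id = shmget(0x1234, 4096, IPC_CREAT | 0600);	// newseg()
 *		if (id < 0)
 *			return -1;
 *
 *		p = shmat(id, NULL, 0);				// do_shmat()
 *		if (p == (char *)-1)
 *			return -1;
 *		strcpy(p, "hello");
 *
 *		shmdt(p);					// sys_shmdt()
 *		shmctl(id, IPC_RMID, NULL);			// shmctl_down()
 *		return 0;
 *	}
 */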