/*
 * linux/ipc/util.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Sep 1997 - Call suser() last after "normal" permission checks so we
 *            get BSD style process accounting right.
 *            Occurs in several places in the IPC code.
 *            Chris Evans, <chris@ferret.lmh.ox.ac.uk>
 * Nov 1999 - ipc helper functions, unified SMP locking
 *            Manfred Spraul <manfred@colorfullife.com>
 * Oct 2002 - One lock per IPC id. RCU ipc_free for lock-free grow_ary().
 *            Mingming Cao <cmm@us.ibm.com>
 * Mar 2006 - support for audit of ipc object properties
 *            Dustin Kirkland <dustin.kirkland@us.ibm.com>
 * Jun 2006 - namespaces support
 *            OpenVZ, SWsoft Inc.
 *            Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/msg.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/highuid.h>
#include <linux/security.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/audit.h>
#include <linux/nsproxy.h>

#include <asm/unistd.h>

#include "util.h"

struct ipc_proc_iface {
	const char *path;
	const char *header;
	int ids;
	int (*show)(struct seq_file *, void *);
};

struct ipc_namespace init_ipc_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(2),
	},
};

#ifdef CONFIG_IPC_NS
static struct ipc_namespace *clone_ipc_ns(struct ipc_namespace *old_ns)
{
	int err;
	struct ipc_namespace *ns;

	err = -ENOMEM;
	ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL);
	if (ns == NULL)
		goto err_mem;

	err = sem_init_ns(ns);
	if (err)
		goto err_sem;
	err = msg_init_ns(ns);
	if (err)
		goto err_msg;
	err = shm_init_ns(ns);
	if (err)
		goto err_shm;

	kref_init(&ns->kref);
	return ns;

err_shm:
	msg_exit_ns(ns);
err_msg:
	sem_exit_ns(ns);
err_sem:
	kfree(ns);
err_mem:
	return ERR_PTR(err);
}

struct ipc_namespace *copy_ipcs(unsigned long flags, struct ipc_namespace *ns)
{
	struct ipc_namespace *new_ns;

	BUG_ON(!ns);
	get_ipc_ns(ns);

	if (!(flags & CLONE_NEWIPC))
		return ns;

	new_ns = clone_ipc_ns(ns);

	put_ipc_ns(ns);
	return new_ns;
}

void free_ipc_ns(struct kref *kref)
{
	struct ipc_namespace *ns;

	ns = container_of(kref, struct ipc_namespace, kref);
	sem_exit_ns(ns);
	msg_exit_ns(ns);
	shm_exit_ns(ns);
	kfree(ns);
}
#else
struct ipc_namespace *copy_ipcs(unsigned long flags, struct ipc_namespace *ns)
{
	if (flags & CLONE_NEWIPC)
		return ERR_PTR(-EINVAL);
	return ns;
}
#endif

/**
 * ipc_init - initialise IPC subsystem
 *
 * The various System V IPC resources (semaphores, messages and shared
 * memory) are initialised.
 */

static int __init ipc_init(void)
{
	sem_init();
	msg_init();
	shm_init();
	return 0;
}
__initcall(ipc_init);
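/*
 * Illustrative sketch (not part of the original file): how a caller
 * such as copy_namespaces() is expected to use copy_ipcs() above.
 * Without CLONE_NEWIPC the parent's namespace comes back with its
 * refcount raised; with CLONE_NEWIPC a fresh namespace is cloned and
 * the extra reference on the parent is dropped again inside
 * copy_ipcs().  "example_fork_ipc" and "parent_ns" are hypothetical.
 */
#if 0
static struct ipc_namespace *example_fork_ipc(unsigned long clone_flags,
					      struct ipc_namespace *parent_ns)
{
	struct ipc_namespace *ns = copy_ipcs(clone_flags, parent_ns);

	if (IS_ERR(ns))		/* clone_ipc_ns() failed, e.g. -ENOMEM */
		return ns;
	/* ns is either parent_ns (ref +1) or a brand new namespace */
	return ns;
}
#endif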
/**
 * ipc_init_ids - initialise IPC identifiers
 * @ids: Identifier set
 * @size: Number of identifiers
 *
 * Given a size for the ipc identifier range (limited below IPCMNI)
 * set up the sequence range to use then allocate and initialise the
 * array itself.
 */

void __ipc_init ipc_init_ids(struct ipc_ids* ids, int size)
{
	int i;

	mutex_init(&ids->mutex);

	if (size > IPCMNI)
		size = IPCMNI;
	ids->in_use = 0;
	ids->max_id = -1;
	ids->seq = 0;
	{
		int seq_limit = INT_MAX/SEQ_MULTIPLIER;
		if (seq_limit > USHRT_MAX)
			ids->seq_max = USHRT_MAX;
		else
			ids->seq_max = seq_limit;
	}

	ids->entries = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*size +
				     sizeof(struct ipc_id_ary));

	if (ids->entries == NULL) {
		printk(KERN_ERR "ipc_init_ids() failed, ipc service disabled.\n");
		size = 0;
		ids->entries = &ids->nullentry;
	}
	ids->entries->size = size;
	for (i = 0; i < size; i++)
		ids->entries->p[i] = NULL;
}

#ifdef CONFIG_PROC_FS
static const struct file_operations sysvipc_proc_fops;

/**
 * ipc_init_proc_interface - Create a proc interface for sysipc types using a seq_file interface.
 * @path: Path in procfs
 * @header: Banner to be printed at the beginning of the file.
 * @ids: ipc id table to iterate.
 * @show: show routine.
 */
void __init ipc_init_proc_interface(const char *path, const char *header,
		int ids, int (*show)(struct seq_file *, void *))
{
	struct proc_dir_entry *pde;
	struct ipc_proc_iface *iface;

	iface = kmalloc(sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return;
	iface->path	= path;
	iface->header	= header;
	iface->ids	= ids;
	iface->show	= show;

	pde = create_proc_entry(path,
				S_IRUGO,	/* world readable */
				NULL		/* parent dir */);
	if (pde) {
		pde->data = iface;
		pde->proc_fops = &sysvipc_proc_fops;
	} else {
		kfree(iface);
	}
}
#endif

/**
 * ipc_findkey - find a key in an ipc identifier set
 * @ids: Identifier set
 * @key: The key to find
 *
 * Requires ipc_ids.mutex locked.
 * Returns the identifier if found or -1 if not.
 */

int ipc_findkey(struct ipc_ids* ids, key_t key)
{
	int id;
	struct kern_ipc_perm* p;
	int max_id = ids->max_id;

	/*
	 * rcu_dereference() is not needed here
	 * since ipc_ids.mutex is held
	 */
	for (id = 0; id <= max_id; id++) {
		p = ids->entries->p[id];
		if (p == NULL)
			continue;
		if (key == p->key)
			return id;
	}
	return -1;
}

/*
 * Requires ipc_ids.mutex locked
 */
static int grow_ary(struct ipc_ids* ids, int newsize)
{
	struct ipc_id_ary* new;
	struct ipc_id_ary* old;
	int i;
	int size = ids->entries->size;

	if (newsize > IPCMNI)
		newsize = IPCMNI;
	if (newsize <= size)
		return newsize;

	new = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*newsize +
			    sizeof(struct ipc_id_ary));
	if (new == NULL)
		return size;
	new->size = newsize;
	memcpy(new->p, ids->entries->p, sizeof(struct kern_ipc_perm *)*size);
	for (i = size; i < newsize; i++)
		new->p[i] = NULL;
	old = ids->entries;

	/*
	 * Use rcu_assign_pointer() to make sure the memcpyed contents
	 * of the new array are visible before the new array becomes visible.
	 */
	rcu_assign_pointer(ids->entries, new);

	__ipc_fini_ids(ids, old);
	return newsize;
}
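/*
 * Illustrative sketch (not part of the original file): how the sysv
 * *get() paths combine ipc_findkey() above with ipc_addid() below,
 * always under ipc_ids.mutex.  Security hooks, IPC_CREAT/IPC_EXCL
 * handling and id building are omitted; "example_get" is hypothetical.
 */
#if 0
static int example_get(struct ipc_ids *ids, struct kern_ipc_perm *new,
		       key_t key, int size)
{
	int id;

	mutex_lock(&ids->mutex);
	id = ipc_findkey(ids, key);		/* existing key? */
	if (id == -1) {
		new->key = key;
		id = ipc_addid(ids, new, size);	/* locked on success */
		if (id != -1)
			ipc_unlock(new);	/* drop lock + rcu_read_lock */
	}
	mutex_unlock(&ids->mutex);
	return id;
}
#endif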
/**
 * ipc_addid - add an IPC identifier
 * @ids: IPC identifier set
 * @new: new IPC permission set
 * @size: new size limit for the id array
 *
 * Add an entry 'new' to the IPC arrays. The permissions object is
 * initialised and the first free entry is set up and the id assigned
 * is returned. The list is returned in a locked state on success.
 * On failure the list is not locked and -1 is returned.
 *
 * Called with ipc_ids.mutex held.
 */

int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
{
	int id;

	size = grow_ary(ids, size);

	/*
	 * rcu_dereference() is not needed here since
	 * ipc_ids.mutex is held
	 */
	for (id = 0; id < size; id++) {
		if (ids->entries->p[id] == NULL)
			goto found;
	}
	return -1;
found:
	ids->in_use++;
	if (id > ids->max_id)
		ids->max_id = id;

	new->cuid = new->uid = current->euid;
	new->gid = new->cgid = current->egid;

	new->seq = ids->seq++;
	if (ids->seq > ids->seq_max)
		ids->seq = 0;

	spin_lock_init(&new->lock);
	new->deleted = 0;
	rcu_read_lock();
	spin_lock(&new->lock);
	ids->entries->p[id] = new;
	return id;
}

/**
 * ipc_rmid - remove an IPC identifier
 * @ids: identifier set
 * @id: Identifier to remove
 *
 * The identifier must be valid, and in use. The kernel will panic if
 * fed an invalid identifier. The entry is removed and internal
 * variables recomputed. The object associated with the identifier
 * is returned.
 * ipc_ids.mutex and the spinlock for this ID are held before this
 * function is called, and remain locked on exit.
 */

struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
{
	struct kern_ipc_perm* p;
	int lid = id % SEQ_MULTIPLIER;
	BUG_ON(lid >= ids->entries->size);

	/*
	 * do not need a rcu_dereference() here to force ordering
	 * on Alpha, since the ipc_ids.mutex is held.
	 */
	p = ids->entries->p[lid];
	ids->entries->p[lid] = NULL;
	BUG_ON(p == NULL);
	ids->in_use--;

	if (lid == ids->max_id) {
		do {
			lid--;
			if (lid == -1)
				break;
		} while (ids->entries->p[lid] == NULL);
		ids->max_id = lid;
	}
	p->deleted = 1;
	return p;
}

/**
 * ipc_alloc - allocate ipc space
 * @size: size desired
 *
 * Allocate memory from the appropriate pools and return a pointer to it.
 * NULL is returned if the allocation fails.
 */

void* ipc_alloc(int size)
{
	void* out;
	if (size > PAGE_SIZE)
		out = vmalloc(size);
	else
		out = kmalloc(size, GFP_KERNEL);
	return out;
}

/**
 * ipc_free - free ipc space
 * @ptr: pointer returned by ipc_alloc
 * @size: size of block
 *
 * Free a block created with ipc_alloc(). The caller must know the size
 * used in the allocation call.
 */

void ipc_free(void* ptr, int size)
{
	if (size > PAGE_SIZE)
		vfree(ptr);
	else
		kfree(ptr);
}
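/*
 * Illustrative sketch (not part of the original file): ipc_free()
 * dispatches on the same size that was passed to ipc_alloc(), so the
 * caller must remember it -- a mismatched size may select the wrong
 * allocator (vfree() vs. kfree()).  "example_scratch" is hypothetical.
 */
#if 0
static int example_scratch(int nitems)
{
	int size = nitems * sizeof(int);
	void *buf = ipc_alloc(size);

	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	ipc_free(buf, size);		/* must match the allocation size */
	return 0;
}
#endif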
/*
 * rcu allocations:
 * There are three headers that are prepended to the actual allocation:
 * - during use: ipc_rcu_hdr.
 * - during the rcu grace period: ipc_rcu_grace.
 * - [only if vmalloc]: ipc_rcu_sched.
 * Their lifetime doesn't overlap, thus the headers share the same memory.
 * Unlike a normal union, they are right-aligned, thus some container_of
 * forward/backward casting is necessary:
 */
struct ipc_rcu_hdr
{
	int refcount;
	int is_vmalloc;
	void *data[0];
};


struct ipc_rcu_grace
{
	struct rcu_head rcu;
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};

struct ipc_rcu_sched
{
	struct work_struct work;
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};

#define HDRLEN_KMALLOC	(sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \
				sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr))
#define HDRLEN_VMALLOC	(sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \
				sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC)

static inline int rcu_use_vmalloc(int size)
{
	/* Too big for a single page? */
	if (HDRLEN_KMALLOC + size > PAGE_SIZE)
		return 1;
	return 0;
}

/**
 * ipc_rcu_alloc - allocate ipc and rcu space
 * @size: size desired
 *
 * Allocate memory for the rcu header structure + the object.
 * Returns the pointer to the object.
 * NULL is returned if the allocation fails.
 */

void* ipc_rcu_alloc(int size)
{
	void* out;
	/*
	 * We prepend the allocation with the rcu struct, and
	 * workqueue if necessary (for vmalloc).
	 */
	if (rcu_use_vmalloc(size)) {
		out = vmalloc(HDRLEN_VMALLOC + size);
		if (out) {
			out += HDRLEN_VMALLOC;
			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
		}
	} else {
		out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
		if (out) {
			out += HDRLEN_KMALLOC;
			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
		}
	}

	return out;
}

void ipc_rcu_getref(void *ptr)
{
	container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
}

static void ipc_do_vfree(struct work_struct *work)
{
	vfree(container_of(work, struct ipc_rcu_sched, work));
}

/**
 * ipc_schedule_free - free ipc + rcu space
 * @head: RCU callback structure for queued work
 *
 * Since the RCU callback is run from bh (softirq) context, where
 * vfree() must not be called, defer the vfree() to schedule_work().
 */
static void ipc_schedule_free(struct rcu_head *head)
{
	struct ipc_rcu_grace *grace =
		container_of(head, struct ipc_rcu_grace, rcu);
	struct ipc_rcu_sched *sched =
		container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]);

	INIT_WORK(&sched->work, ipc_do_vfree);
	schedule_work(&sched->work);
}

/**
 * ipc_immediate_free - free ipc + rcu space
 * @head: RCU callback structure that contains pointer to be freed
 *
 * Free from the RCU callback context.
 */
static void ipc_immediate_free(struct rcu_head *head)
{
	struct ipc_rcu_grace *free =
		container_of(head, struct ipc_rcu_grace, rcu);
	kfree(free);
}

void ipc_rcu_putref(void *ptr)
{
	if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
		return;

	if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
				ipc_schedule_free);
	} else {
		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
				ipc_immediate_free);
	}
}
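/*
 * Illustrative sketch (not part of the original file): the pin pattern
 * used by callers (e.g. the semaphore code) that must drop the per-id
 * lock to sleep while keeping the object alive.  It assumes "perm" is
 * the start of an object returned by ipc_rcu_alloc(); the refcount is
 * non-atomic, so getref/putref run under the object's spinlock.
 */
#if 0
static void example_pin_across_sleep(struct kern_ipc_perm *perm)
{
	ipc_rcu_getref(perm);		/* pin: refcount++ */
	ipc_unlock(perm);
	/* ... sleeping work, e.g. a GFP_KERNEL allocation ... */
	ipc_lock_by_ptr(perm);
	ipc_rcu_putref(perm);		/* unpin; freed via RCU at zero */
	if (perm->deleted) {		/* raced with ipc_rmid() */
		ipc_unlock(perm);
		return;
	}
	/* ... continue using the object ... */
	ipc_unlock(perm);
}
#endif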
/**
 * ipcperms - check IPC permissions
 * @ipcp: IPC permission set
 * @flag: desired permission set.
 *
 * Check user, group, other permissions for access
 * to ipc resources. Return 0 if allowed.
 */

int ipcperms (struct kern_ipc_perm *ipcp, short flag)
{	/* flag will most probably be 0 or S_...UGO from <linux/stat.h> */
	int requested_mode, granted_mode, err;

	if (unlikely((err = audit_ipc_obj(ipcp))))
		return err;
	requested_mode = (flag >> 6) | (flag >> 3) | flag;
	granted_mode = ipcp->mode;
	if (current->euid == ipcp->cuid || current->euid == ipcp->uid)
		granted_mode >>= 6;
	else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
		granted_mode >>= 3;
	/* is there some bit set in requested_mode but not in granted_mode? */
	if ((requested_mode & ~granted_mode & 0007) &&
	    !capable(CAP_IPC_OWNER))
		return -1;

	return security_ipc_permission(ipcp, flag);
}

/*
 * Functions to convert between the kern_ipc_perm structure and the
 * old/new ipc_perm structures
 */

/**
 * kernel_to_ipc64_perm - convert kernel ipc permissions to user
 * @in: kernel permissions
 * @out: new style IPC permissions
 *
 * Turn the kernel object @in into a set of permissions descriptions
 * for returning to userspace (@out).
 */

void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
{
	out->key	= in->key;
	out->uid	= in->uid;
	out->gid	= in->gid;
	out->cuid	= in->cuid;
	out->cgid	= in->cgid;
	out->mode	= in->mode;
	out->seq	= in->seq;
}

/**
 * ipc64_perm_to_ipc_perm - convert new ipc permissions to old
 * @in: new style IPC permissions
 * @out: old style IPC permissions
 *
 * Turn the new style permissions object @in into a compatibility
 * object and store it into the @out pointer.
 */

void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
{
	out->key	= in->key;
	SET_UID(out->uid, in->uid);
	SET_GID(out->gid, in->gid);
	SET_UID(out->cuid, in->cuid);
	SET_GID(out->cgid, in->cgid);
	out->mode	= in->mode;
	out->seq	= in->seq;
}
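/*
 * Illustrative sketch (not part of the original file): how the two
 * conversion helpers above are combined when an old-ABI *ctl() call
 * must report permissions.  The kernel object is first expanded to the
 * new-style ipc64_perm, then narrowed to the legacy ipc_perm, where
 * SET_UID()/SET_GID() truncate the ids to the old narrow fields on
 * architectures that need it.  "example_fill_old_perm" is hypothetical.
 */
#if 0
static void example_fill_old_perm(struct kern_ipc_perm *kp,
				  struct ipc_perm *old)
{
	struct ipc64_perm p64;

	kernel_to_ipc64_perm(kp, &p64);		/* kernel -> new style */
	ipc64_perm_to_ipc_perm(&p64, old);	/* new -> old compat */
}
#endif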
/*
 * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get()
 * is called with shm_ids.mutex locked. Since grow_ary() is also called with
 * shm_ids.mutex held (for shared memory), there is no need to add read
 * barriers here to guarantee that the writes in grow_ary() are seen in
 * order here (for Alpha).
 *
 * However, ipc_get() itself does not necessarily require ipc_ids.mutex
 * to be held. So if in the future ipc_get() is used by other places
 * without ipc_ids.mutex held, then ipc_get() needs read memory barriers
 * as ipc_lock() does.
 */
struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
{
	struct kern_ipc_perm* out;
	int lid = id % SEQ_MULTIPLIER;
	if (lid >= ids->entries->size)
		return NULL;
	out = ids->entries->p[lid];
	return out;
}

struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
{
	struct kern_ipc_perm* out;
	int lid = id % SEQ_MULTIPLIER;
	struct ipc_id_ary* entries;

	rcu_read_lock();
	entries = rcu_dereference(ids->entries);
	if (lid >= entries->size) {
		rcu_read_unlock();
		return NULL;
	}
	out = entries->p[lid];
	if (out == NULL) {
		rcu_read_unlock();
		return NULL;
	}
	spin_lock(&out->lock);

	/* ipc_rmid() may have already freed the ID while ipc_lock
	 * was spinning: here verify that the structure is still valid
	 */
	if (out->deleted) {
		spin_unlock(&out->lock);
		rcu_read_unlock();
		return NULL;
	}
	return out;
}

void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
{
	rcu_read_lock();
	spin_lock(&perm->lock);
}

void ipc_unlock(struct kern_ipc_perm* perm)
{
	spin_unlock(&perm->lock);
	rcu_read_unlock();
}

int ipc_buildid(struct ipc_ids* ids, int id, int seq)
{
	return SEQ_MULTIPLIER*seq + id;
}

int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid)
{
	if (uid/SEQ_MULTIPLIER != ipcp->seq)
		return 1;
	return 0;
}
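/*
 * Illustrative sketch (not part of the original file): the user-visible
 * id built by ipc_buildid() encodes both an array slot and a sequence
 * number:
 *
 *	id   = seq * SEQ_MULTIPLIER + slot
 *	slot = id % SEQ_MULTIPLIER,  seq = id / SEQ_MULTIPLIER
 *
 * With SEQ_MULTIPLIER == IPCMNI (32768 in this tree), seq 3 and slot 5
 * give id 3*32768 + 5 == 98309.  A typical caller looks up the slot
 * with ipc_lock() and then rejects stale ids whose slot has been reused
 * via ipc_checkid().  "example_lookup" is hypothetical:
 */
#if 0
static struct kern_ipc_perm *example_lookup(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *perm = ipc_lock(ids, id);

	if (perm == NULL)
		return NULL;
	if (ipc_checkid(ids, perm, id)) {	/* sequence mismatch */
		ipc_unlock(perm);
		return NULL;
	}
	return perm;			/* still locked; caller unlocks */
}
#endif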
#ifdef __ARCH_WANT_IPC_PARSE_VERSION

/**
 * ipc_parse_version - IPC call version
 * @cmd: pointer to command
 *
 * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
 * The @cmd value is turned from an encoded command and version into
 * just the command code.
 */

int ipc_parse_version (int *cmd)
{
	if (*cmd & IPC_64) {
		*cmd ^= IPC_64;
		return IPC_64;
	} else {
		return IPC_OLD;
	}
}

#endif /* __ARCH_WANT_IPC_PARSE_VERSION */

#ifdef CONFIG_PROC_FS
struct ipc_proc_iter {
	struct ipc_namespace *ns;
	struct ipc_proc_iface *iface;
};

static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
{
	struct ipc_proc_iter *iter = s->private;
	struct ipc_proc_iface *iface = iter->iface;
	struct kern_ipc_perm *ipc = it;
	loff_t p;
	struct ipc_ids *ids;

	ids = iter->ns->ids[iface->ids];

	/* If we had an ipc id locked before, unlock it */
	if (ipc && ipc != SEQ_START_TOKEN)
		ipc_unlock(ipc);

	/*
	 * p = *pos - 1 (because id 0 starts at position 1)
	 *          + 1 (because we increment the position by one)
	 */
	for (p = *pos; p <= ids->max_id; p++) {
		if ((ipc = ipc_lock(ids, p)) != NULL) {
			*pos = p + 1;
			return ipc;
		}
	}

	/* Out of range - return NULL to terminate iteration */
	return NULL;
}

/*
 * File positions: pos 0 -> header, pos n -> ipc id + 1.
 * SeqFile iterator: iterator value is a locked ipc pointer or SEQ_START_TOKEN.
 */
static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
{
	struct ipc_proc_iter *iter = s->private;
	struct ipc_proc_iface *iface = iter->iface;
	struct kern_ipc_perm *ipc;
	loff_t p;
	struct ipc_ids *ids;

	ids = iter->ns->ids[iface->ids];

	/*
	 * Take the lock - this will be released by the corresponding
	 * call to stop().
	 */
	mutex_lock(&ids->mutex);

	/* pos < 0 is invalid */
	if (*pos < 0)
		return NULL;

	/* pos == 0 means header */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	/* Find the (pos-1)th ipc */
	for (p = *pos - 1; p <= ids->max_id; p++) {
		if ((ipc = ipc_lock(ids, p)) != NULL) {
			*pos = p + 1;
			return ipc;
		}
	}
	return NULL;
}

static void sysvipc_proc_stop(struct seq_file *s, void *it)
{
	struct kern_ipc_perm *ipc = it;
	struct ipc_proc_iter *iter = s->private;
	struct ipc_proc_iface *iface = iter->iface;
	struct ipc_ids *ids;

	/* If we had a locked segment, release it */
	if (ipc && ipc != SEQ_START_TOKEN)
		ipc_unlock(ipc);

	ids = iter->ns->ids[iface->ids];
	/* Release the lock we took in start() */
	mutex_unlock(&ids->mutex);
}

static int sysvipc_proc_show(struct seq_file *s, void *it)
{
	struct ipc_proc_iter *iter = s->private;
	struct ipc_proc_iface *iface = iter->iface;

	if (it == SEQ_START_TOKEN)
		return seq_puts(s, iface->header);

	return iface->show(s, it);
}

static struct seq_operations sysvipc_proc_seqops = {
	.start = sysvipc_proc_start,
	.stop  = sysvipc_proc_stop,
	.next  = sysvipc_proc_next,
	.show  = sysvipc_proc_show,
};

static int sysvipc_proc_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *seq;
	struct ipc_proc_iter *iter;

	ret = -ENOMEM;
	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		goto out;

	ret = seq_open(file, &sysvipc_proc_seqops);
	if (ret)
		goto out_kfree;

	seq = file->private_data;
	seq->private = iter;

	iter->iface = PDE(inode)->data;
	iter->ns = get_ipc_ns(current->nsproxy->ipc_ns);
out:
	return ret;
out_kfree:
	kfree(iter);
	goto out;
}

static int sysvipc_proc_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ipc_proc_iter *iter = seq->private;
	put_ipc_ns(iter->ns);
	return seq_release_private(inode, file);
}

static const struct file_operations sysvipc_proc_fops = {
	.open    = sysvipc_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = sysvipc_proc_release,
};
#endif /* CONFIG_PROC_FS */
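/*
 * Illustrative sketch (not part of the original file): a show()
 * callback as registered through ipc_init_proc_interface().  The
 * iterator hands it an ipc object that is already locked by
 * sysvipc_proc_start()/next(), so the callback may dereference it but
 * must not unlock it or sleep.  "sysvipc_example_proc_show" and the
 * output format are hypothetical.
 */
#if 0
static int sysvipc_example_proc_show(struct seq_file *s, void *it)
{
	struct kern_ipc_perm *perm = it;	/* locked by the iterator */

	return seq_printf(s, "%10d %10d %10o\n",
			  perm->key, perm->uid, perm->mode);
}
#endif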