/*
 * linux/ipc/util.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Sep 1997 - Call suser() last after "normal" permission checks so we
 *            get BSD style process accounting right.
 *            Occurs in several places in the IPC code.
 *            Chris Evans, <chris@ferret.lmh.ox.ac.uk>
 * Nov 1999 - ipc helper functions, unified SMP locking
 *            Manfred Spraul <manfred@colorfullife.com>
 * Oct 2002 - One lock per IPC id. RCU ipc_free for lock-free grow_ary().
 *            Mingming Cao <cmm@us.ibm.com>
 * Mar 2006 - support for audit of ipc object properties
 *            Dustin Kirkland <dustin.kirkland@us.ibm.com>
 * Jun 2006 - namespaces support
 *            OpenVZ, SWsoft Inc.
 *            Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/msg.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/highuid.h>
#include <linux/security.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/audit.h>
#include <linux/nsproxy.h>
#include <linux/rwsem.h>
#include <linux/ipc_namespace.h>

#include <asm/unistd.h>

#include "util.h"

struct ipc_proc_iface {
	const char *path;
	const char *header;
	int ids;
	int (*show)(struct seq_file *, void *);
};

struct ipc_namespace init_ipc_ns = {
	.kref = {
		.refcount = ATOMIC_INIT(2),
	},
};

/**
 * ipc_init - initialise IPC subsystem
 *
 * The various System V IPC resources (semaphores, messages and shared
 * memory) are initialised.
 */

static int __init ipc_init(void)
{
	sem_init();
	msg_init();
	shm_init();
	return 0;
}
__initcall(ipc_init);

/**
 * ipc_init_ids - initialise IPC identifiers
 * @ids: Identifier set
 *
 * Set up the sequence range to use for the ipc identifier range (limited
 * below IPCMNI) then initialise the ids idr.
 */

void ipc_init_ids(struct ipc_ids *ids)
{
	init_rwsem(&ids->rw_mutex);

	ids->in_use = 0;
	ids->seq = 0;
	{
		int seq_limit = INT_MAX/SEQ_MULTIPLIER;
		if (seq_limit > USHRT_MAX)
			ids->seq_max = USHRT_MAX;
		else
			ids->seq_max = seq_limit;
	}

	idr_init(&ids->ipcs_idr);
}
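
/*
 * Worked example for the limits set up above (assuming SEQ_MULTIPLIER is
 * defined as IPCMNI, i.e. 32768, in util.h for this tree): INT_MAX / 32768
 * is 65535, which does not exceed USHRT_MAX, so seq_max ends up as 65535.
 * The user-visible id of an object is later derived from both the idr slot
 * and the sequence counter (roughly seq * SEQ_MULTIPLIER + idr index, see
 * ipc_buildid() in util.h), so a recycled slot does not immediately hand
 * out the same id again.
 */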

#ifdef CONFIG_PROC_FS
static const struct file_operations sysvipc_proc_fops;
/**
 * ipc_init_proc_interface - Create a proc interface for sysipc types using a seq_file interface.
 * @path: Path in procfs
 * @header: Banner to be printed at the beginning of the file.
 * @ids: ipc id table to iterate.
 * @show: show routine.
 */
void __init ipc_init_proc_interface(const char *path, const char *header,
		int ids, int (*show)(struct seq_file *, void *))
{
	struct proc_dir_entry *pde;
	struct ipc_proc_iface *iface;

	iface = kmalloc(sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return;
	iface->path = path;
	iface->header = header;
	iface->ids = ids;
	iface->show = show;

	pde = create_proc_entry(path,
				S_IRUGO,	/* world readable */
				NULL		/* parent dir */);
	if (pde) {
		pde->data = iface;
		pde->proc_fops = &sysvipc_proc_fops;
	} else {
		kfree(iface);
	}
}
#endif

/**
 * ipc_findkey - find a key in an ipc identifier set
 * @ids: Identifier set
 * @key: The key to find
 *
 * Requires ipc_ids.rw_mutex locked.
 * Returns the LOCKED pointer to the ipc structure if found or NULL
 * if not.
 * If the key is found, the returned pointer refers to the owning ipc
 * structure.
 */

static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
{
	struct kern_ipc_perm *ipc;
	int next_id;
	int total;

	for (total = 0, next_id = 0; total < ids->in_use; next_id++) {
		ipc = idr_find(&ids->ipcs_idr, next_id);

		if (ipc == NULL)
			continue;

		if (ipc->key != key) {
			total++;
			continue;
		}

		ipc_lock_by_ptr(ipc);
		return ipc;
	}

	return NULL;
}

/**
 * ipc_get_maxid - get the last assigned id
 * @ids: IPC identifier set
 *
 * Called with ipc_ids.rw_mutex held.
 */

int ipc_get_maxid(struct ipc_ids *ids)
{
	struct kern_ipc_perm *ipc;
	int max_id = -1;
	int total, id;

	if (ids->in_use == 0)
		return -1;

	if (ids->in_use == IPCMNI)
		return IPCMNI - 1;

	/* Look for the last assigned id */
	total = 0;
	for (id = 0; id < IPCMNI && total < ids->in_use; id++) {
		ipc = idr_find(&ids->ipcs_idr, id);
		if (ipc != NULL) {
			max_id = id;
			total++;
		}
	}
	return max_id;
}

/**
 * ipc_addid - add an IPC identifier
 * @ids: IPC identifier set
 * @new: new IPC permission set
 * @size: limit for the number of used ids
 *
 * Add an entry 'new' to the IPC ids idr. The permissions object is
 * initialised, the first free entry is set up, and the assigned id is
 * returned. The 'new' entry is returned in a locked state on success.
 * On failure the entry is not locked and a negative error code is returned.
 *
 * Called with ipc_ids.rw_mutex held as a writer.
 */

int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
{
	int id, err;

	if (size > IPCMNI)
		size = IPCMNI;

	if (ids->in_use >= size)
		return -ENOSPC;

	err = idr_get_new(&ids->ipcs_idr, new, &id);
	if (err)
		return err;

	ids->in_use++;

	new->cuid = new->uid = current->euid;
	new->gid = new->cgid = current->egid;

	new->seq = ids->seq++;
	if (ids->seq > ids->seq_max)
		ids->seq = 0;

	spin_lock_init(&new->lock);
	new->deleted = 0;
	rcu_read_lock();
	spin_lock(&new->lock);
	return id;
}

/**
 * ipcget_new - create a new ipc object
 * @ns: namespace
 * @ids: IPC identifier set
 * @ops: the actual creation routine to call
 * @params: its parameters
 *
 * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
 * when the key is IPC_PRIVATE.
 */
static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
		struct ipc_ops *ops, struct ipc_params *params)
{
	int err;
retry:
	err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);

	if (!err)
		return -ENOMEM;

	down_write(&ids->rw_mutex);
	err = ops->getnew(ns, params);
	up_write(&ids->rw_mutex);

	if (err == -EAGAIN)
		goto retry;

	return err;
}
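
/*
 * Note on the retry loop in ipcget_new(): idr_get_new(), reached via
 * ops->getnew() and ipc_addid(), returns -EAGAIN when the memory
 * preallocated by idr_pre_get() has already been consumed (for instance
 * by a concurrent insertion), so the preallocate-then-insert sequence is
 * simply restarted.
 */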

/**
 * ipc_check_perms - check security and permissions for an IPC
 * @ipcp: ipc permission set
 * @ops: the actual security routine to call
 * @params: its parameters
 *
 * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
 * when the key is not IPC_PRIVATE and that key already exists in the
 * ids IDR.
 *
 * On success, the IPC id is returned.
 *
 * It is called with ipc_ids.rw_mutex and ipcp->lock held.
 */
static int ipc_check_perms(struct kern_ipc_perm *ipcp, struct ipc_ops *ops,
			struct ipc_params *params)
{
	int err;

	if (ipcperms(ipcp, params->flg))
		err = -EACCES;
	else {
		err = ops->associate(ipcp, params->flg);
		if (!err)
			err = ipcp->id;
	}

	return err;
}

/**
 * ipcget_public - get an ipc object or create a new one
 * @ns: namespace
 * @ids: IPC identifier set
 * @ops: the actual creation routine to call
 * @params: its parameters
 *
 * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
 * when the key is not IPC_PRIVATE.
 * It adds a new entry if the key is not found and does some permission
 * / security checks if the key is found.
 *
 * On success, the ipc id is returned.
 */
static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
		struct ipc_ops *ops, struct ipc_params *params)
{
	struct kern_ipc_perm *ipcp;
	int flg = params->flg;
	int err;
retry:
	err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);

	/*
	 * Take the lock as a writer since we are potentially going to add
	 * a new entry + read locks are not "upgradable"
	 */
	down_write(&ids->rw_mutex);
	ipcp = ipc_findkey(ids, params->key);
	if (ipcp == NULL) {
		/* key not used */
		if (!(flg & IPC_CREAT))
			err = -ENOENT;
		else if (!err)
			err = -ENOMEM;
		else
			err = ops->getnew(ns, params);
	} else {
		/* ipc object has been locked by ipc_findkey() */

		if (flg & IPC_CREAT && flg & IPC_EXCL)
			err = -EEXIST;
		else {
			err = 0;
			if (ops->more_checks)
				err = ops->more_checks(ipcp, params);
			if (!err)
				/*
				 * ipc_check_perms returns the IPC id on
				 * success
				 */
				err = ipc_check_perms(ipcp, ops, params);
		}
		ipc_unlock(ipcp);
	}
	up_write(&ids->rw_mutex);

	if (err == -EAGAIN)
		goto retry;

	return err;
}

/**
 * ipc_rmid - remove an IPC identifier
 * @ids: IPC identifier set
 * @ipcp: ipc perm structure containing the identifier to remove
 *
 * ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
 * before this function is called, and remain locked on the exit.
 */

void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
{
	int lid = ipcid_to_idx(ipcp->id);

	idr_remove(&ids->ipcs_idr, lid);

	ids->in_use--;

	ipcp->deleted = 1;

	return;
}

/**
 * ipc_alloc - allocate ipc space
 * @size: size desired
 *
 * Allocate memory from the appropriate pools and return a pointer to it.
 * NULL is returned if the allocation fails.
 */

void *ipc_alloc(int size)
{
	void *out;
	if (size > PAGE_SIZE)
		out = vmalloc(size);
	else
		out = kmalloc(size, GFP_KERNEL);
	return out;
}

/**
 * ipc_free - free ipc space
 * @ptr: pointer returned by ipc_alloc
 * @size: size of block
 *
 * Free a block created with ipc_alloc(). The caller must know the size
 * used in the allocation call.
 */

void ipc_free(void *ptr, int size)
{
	if (size > PAGE_SIZE)
		vfree(ptr);
	else
		kfree(ptr);
}

/*
 * rcu allocations:
 * There are three headers that are prepended to the actual allocation:
 * - during use: ipc_rcu_hdr.
 * - during the rcu grace period: ipc_rcu_grace.
 * - [only if vmalloc]: ipc_rcu_sched.
 * Their lifetime doesn't overlap, thus the headers share the same memory.
 * Unlike a normal union, they are right-aligned, thus some container_of
 * forward/backward casting is necessary:
 */
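/*
 * An illustrative sketch of the resulting layout (derived from the structs
 * and HDRLEN_* macros below; exact sizes are architecture dependent):
 *
 *	|<---------- HDRLEN_KMALLOC / HDRLEN_VMALLOC ---------->|
 *	| padding | live header, ending at its data[0] member    | object ...
 *	                                                          ^
 *	                           pointer returned by ipc_rcu_alloc()
 *
 * Whichever header is currently live (ipc_rcu_hdr, ipc_rcu_grace or
 * ipc_rcu_sched) is placed so that its data[] member coincides with the
 * start of the object, which is what the container_of() casts rely on.
 */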
struct ipc_rcu_hdr
{
	int refcount;
	int is_vmalloc;
	void *data[0];
};


struct ipc_rcu_grace
{
	struct rcu_head rcu;
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};

struct ipc_rcu_sched
{
	struct work_struct work;
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};

#define HDRLEN_KMALLOC	(sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \
				sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr))
#define HDRLEN_VMALLOC	(sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \
				sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC)

static inline int rcu_use_vmalloc(int size)
{
	/* Too big for a single page? */
	if (HDRLEN_KMALLOC + size > PAGE_SIZE)
		return 1;
	return 0;
}

/**
 * ipc_rcu_alloc - allocate ipc and rcu space
 * @size: size desired
 *
 * Allocate memory for the rcu header structure + the object.
 * Returns the pointer to the object.
 * NULL is returned if the allocation fails.
 */

void *ipc_rcu_alloc(int size)
{
	void *out;
	/*
	 * We prepend the allocation with the rcu struct, and
	 * workqueue if necessary (for vmalloc).
	 */
	if (rcu_use_vmalloc(size)) {
		out = vmalloc(HDRLEN_VMALLOC + size);
		if (out) {
			out += HDRLEN_VMALLOC;
			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
		}
	} else {
		out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
		if (out) {
			out += HDRLEN_KMALLOC;
			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
		}
	}

	return out;
}
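
/*
 * Illustrative lifecycle of the helpers above and below (a sketch in the
 * style of callers such as ipc/sem.c; the sem_array type is just an
 * example):
 *
 *	struct sem_array *sma = ipc_rcu_alloc(size);	refcount is now 1
 *	if (!sma)
 *		return -ENOMEM;
 *	...
 *	ipc_rcu_getref(sma);	take an extra reference, e.g. across a sleep
 *	...
 *	ipc_rcu_putref(sma);	drop the extra reference again
 *	...
 *	ipc_rcu_putref(sma);	final put: freed after an RCU grace period
 */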

void ipc_rcu_getref(void *ptr)
{
	container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
}

static void ipc_do_vfree(struct work_struct *work)
{
	vfree(container_of(work, struct ipc_rcu_sched, work));
}

/**
 * ipc_schedule_free - free ipc + rcu space
 * @head: RCU callback structure for queued work
 *
 * Since the RCU callback is run from bh (softirq) context,
 * we need to defer the vfree to schedule_work().
 */
static void ipc_schedule_free(struct rcu_head *head)
{
	struct ipc_rcu_grace *grace;
	struct ipc_rcu_sched *sched;

	grace = container_of(head, struct ipc_rcu_grace, rcu);
	sched = container_of(&(grace->data[0]), struct ipc_rcu_sched,
				data[0]);

	INIT_WORK(&sched->work, ipc_do_vfree);
	schedule_work(&sched->work);
}

/**
 * ipc_immediate_free - free ipc + rcu space
 * @head: RCU callback structure that contains pointer to be freed
 *
 * Free from the RCU callback context.
 */
static void ipc_immediate_free(struct rcu_head *head)
{
	struct ipc_rcu_grace *free =
		container_of(head, struct ipc_rcu_grace, rcu);
	kfree(free);
}

void ipc_rcu_putref(void *ptr)
{
	if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
		return;

	if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
				ipc_schedule_free);
	} else {
		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
				ipc_immediate_free);
	}
}

/**
 * ipcperms - check IPC permissions
 * @ipcp: IPC permission set
 * @flag: desired permission set.
 *
 * Check user, group and other permissions for access
 * to ipc resources. Returns 0 if allowed.
 */

int ipcperms(struct kern_ipc_perm *ipcp, short flag)
{	/* flag will most probably be 0 or S_...UGO from <linux/stat.h> */
	int requested_mode, granted_mode, err;

	if (unlikely((err = audit_ipc_obj(ipcp))))
		return err;
	requested_mode = (flag >> 6) | (flag >> 3) | flag;
	granted_mode = ipcp->mode;
	if (current->euid == ipcp->cuid || current->euid == ipcp->uid)
		granted_mode >>= 6;
	else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
		granted_mode >>= 3;
	/* is there some bit set in requested_mode but not in granted_mode? */
	if ((requested_mode & ~granted_mode & 0007) &&
	    !capable(CAP_IPC_OWNER))
		return -1;

	return security_ipc_permission(ipcp, flag);
}
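
/*
 * Worked example for ipcperms() (illustrative values only): for an object
 * with mode 0640 and a caller that is in the object's group but is not the
 * owner, granted_mode >>= 3 leaves read (04) in the low three bits.
 * Requesting read only (flag == S_IRUGO) puts 04 in the low bits of
 * requested_mode, so requested_mode & ~granted_mode & 0007 == 0 and the
 * final decision is left to security_ipc_permission(). Requesting
 * read+write (S_IRUGO | S_IWUGO) leaves the write bit uncovered and the
 * call fails with -1 unless the caller has CAP_IPC_OWNER.
 */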

/*
 * Functions to convert between the kern_ipc_perm structure and the
 * old/new ipc_perm structures
 */

/**
 * kernel_to_ipc64_perm - convert kernel ipc permissions to user
 * @in: kernel permissions
 * @out: new style IPC permissions
 *
 * Turn the kernel object @in into a set of permissions descriptions
 * for returning to userspace (@out).
 */

void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out)
{
	out->key = in->key;
	out->uid = in->uid;
	out->gid = in->gid;
	out->cuid = in->cuid;
	out->cgid = in->cgid;
	out->mode = in->mode;
	out->seq = in->seq;
}

/**
 * ipc64_perm_to_ipc_perm - convert new ipc permissions to old
 * @in: new style IPC permissions
 * @out: old style IPC permissions
 *
 * Turn the new style permissions object @in into a compatibility
 * object and store it into the @out pointer.
 */

void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out)
{
	out->key = in->key;
	SET_UID(out->uid, in->uid);
	SET_GID(out->gid, in->gid);
	SET_UID(out->cuid, in->cuid);
	SET_GID(out->cgid, in->cgid);
	out->mode = in->mode;
	out->seq = in->seq;
}

/**
 * ipc_lock - Lock an ipc structure without rw_mutex held
 * @ids: IPC identifier set
 * @id: ipc id to look for
 *
 * Look for an id in the ipc ids idr and lock the associated ipc object.
 *
 * The ipc object is locked on exit.
 *
 * This is the routine that should be called when the rw_mutex is not already
 * held, i.e. idr tree not protected: it protects the idr tree in read mode
 * during the idr_find().
 */

struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *out;
	int lid = ipcid_to_idx(id);

	down_read(&ids->rw_mutex);

	rcu_read_lock();
	out = idr_find(&ids->ipcs_idr, lid);
	if (out == NULL) {
		rcu_read_unlock();
		up_read(&ids->rw_mutex);
		return ERR_PTR(-EINVAL);
	}

	up_read(&ids->rw_mutex);

	spin_lock(&out->lock);

	/* ipc_rmid() may have already freed the ID while ipc_lock
	 * was spinning: here verify that the structure is still valid
	 */
	if (out->deleted) {
		spin_unlock(&out->lock);
		rcu_read_unlock();
		return ERR_PTR(-EINVAL);
	}

	return out;
}

/**
 * ipc_lock_down - Lock an ipc structure with rw_mutex held
 * @ids: IPC identifier set
 * @id: ipc id to look for
 *
 * Look for an id in the ipc ids idr and lock the associated ipc object.
 *
 * The ipc object is locked on exit.
 *
 * This is the routine that should be called when the rw_mutex is already
 * held, i.e. idr tree protected.
 */

struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *out;
	int lid = ipcid_to_idx(id);

	rcu_read_lock();
	out = idr_find(&ids->ipcs_idr, lid);
	if (out == NULL) {
		rcu_read_unlock();
		return ERR_PTR(-EINVAL);
	}

	spin_lock(&out->lock);

	/*
	 * No need to verify that the structure is still valid since the
	 * rw_mutex is held.
	 */
	return out;
}

struct kern_ipc_perm *ipc_lock_check_down(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *out;

	out = ipc_lock_down(ids, id);
	if (IS_ERR(out))
		return out;

	if (ipc_checkid(out, id)) {
		ipc_unlock(out);
		return ERR_PTR(-EIDRM);
	}

	return out;
}

struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *out;

	out = ipc_lock(ids, id);
	if (IS_ERR(out))
		return out;

	if (ipc_checkid(out, id)) {
		ipc_unlock(out);
		return ERR_PTR(-EIDRM);
	}

	return out;
}

/**
 * ipcget - Common sys_*get() code
 * @ns: namespace
 * @ids: IPC identifier set
 * @ops: operations to be called on ipc object creation, permission checks
 *       and further checks
 * @params: the parameters needed by the previous operations.
 *
 * Common routine called by sys_msgget(), sys_semget() and sys_shmget().
 */
int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
			struct ipc_ops *ops, struct ipc_params *params)
{
	if (params->key == IPC_PRIVATE)
		return ipcget_new(ns, ids, ops, params);
	else
		return ipcget_public(ns, ids, ops, params);
}
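
/*
 * Sketch of a typical caller, following the shape of sys_msgget() in
 * ipc/msg.c (shown only to illustrate how the ops table is wired up;
 * see the real call sites for the authoritative versions):
 *
 *	struct ipc_ops msg_ops;
 *	struct ipc_params msg_params;
 *
 *	msg_ops.getnew = newque;
 *	msg_ops.associate = msg_security;
 *	msg_ops.more_checks = NULL;
 *
 *	msg_params.key = key;
 *	msg_params.flg = msgflg;
 *
 *	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
 */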

#ifdef __ARCH_WANT_IPC_PARSE_VERSION


/**
 * ipc_parse_version - IPC call version
 * @cmd: pointer to command
 *
 * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
 * The @cmd value, which encodes both the command and the version,
 * is reduced to just the command code.
 */

int ipc_parse_version(int *cmd)
{
	if (*cmd & IPC_64) {
		*cmd ^= IPC_64;
		return IPC_64;
	} else {
		return IPC_OLD;
	}
}

#endif /* __ARCH_WANT_IPC_PARSE_VERSION */

#ifdef CONFIG_PROC_FS
struct ipc_proc_iter {
	struct ipc_namespace *ns;
	struct ipc_proc_iface *iface;
};

/*
 * Find and lock the ipc structure at the first occupied position >= pos.
 */
static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
					loff_t *new_pos)
{
	struct kern_ipc_perm *ipc;
	int total, id;

	total = 0;
	for (id = 0; id < pos && total < ids->in_use; id++) {
		ipc = idr_find(&ids->ipcs_idr, id);
		if (ipc != NULL)
			total++;
	}

	if (total >= ids->in_use)
		return NULL;

	for ( ; pos < IPCMNI; pos++) {
		ipc = idr_find(&ids->ipcs_idr, pos);
		if (ipc != NULL) {
			*new_pos = pos + 1;
			ipc_lock_by_ptr(ipc);
			return ipc;
		}
	}

	/* Out of range - return NULL to terminate iteration */
	return NULL;
}

static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
{
	struct ipc_proc_iter *iter = s->private;
	struct ipc_proc_iface *iface = iter->iface;
	struct kern_ipc_perm *ipc = it;

	/* If we had an ipc id locked before, unlock it */
	if (ipc && ipc != SEQ_START_TOKEN)
		ipc_unlock(ipc);

	return sysvipc_find_ipc(&iter->ns->ids[iface->ids], *pos, pos);
}

/*
 * File positions: pos 0 -> header, pos n -> ipc id = n - 1.
 * SeqFile iterator: the iterator value is either the locked ipc pointer
 * or SEQ_START_TOKEN.
 */
static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
{
	struct ipc_proc_iter *iter = s->private;
	struct ipc_proc_iface *iface = iter->iface;
	struct ipc_ids *ids;

	ids = &iter->ns->ids[iface->ids];

	/*
	 * Take the lock - this will be released by the corresponding
	 * call to stop().
	 */
	down_read(&ids->rw_mutex);

	/* pos < 0 is invalid */
	if (*pos < 0)
		return NULL;

	/* pos == 0 means header */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	/* Find the (pos-1)th ipc */
	return sysvipc_find_ipc(ids, *pos - 1, pos);
}
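
/*
 * Example of the position mapping used above: a read that resumes at
 * *pos == 3 locks and shows the first object found at idr index >= 2,
 * whereas *pos == 0 re-emits the header line (SEQ_START_TOKEN).
 */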

static void sysvipc_proc_stop(struct seq_file *s, void *it)
{
	struct kern_ipc_perm *ipc = it;
	struct ipc_proc_iter *iter = s->private;
	struct ipc_proc_iface *iface = iter->iface;
	struct ipc_ids *ids;

	/* If we had a locked structure, release it */
	if (ipc && ipc != SEQ_START_TOKEN)
		ipc_unlock(ipc);

	ids = &iter->ns->ids[iface->ids];
	/* Release the lock we took in start() */
	up_read(&ids->rw_mutex);
}

static int sysvipc_proc_show(struct seq_file *s, void *it)
{
	struct ipc_proc_iter *iter = s->private;
	struct ipc_proc_iface *iface = iter->iface;

	if (it == SEQ_START_TOKEN)
		return seq_puts(s, iface->header);

	return iface->show(s, it);
}

static struct seq_operations sysvipc_proc_seqops = {
	.start = sysvipc_proc_start,
	.stop  = sysvipc_proc_stop,
	.next  = sysvipc_proc_next,
	.show  = sysvipc_proc_show,
};

static int sysvipc_proc_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *seq;
	struct ipc_proc_iter *iter;

	ret = -ENOMEM;
	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		goto out;

	ret = seq_open(file, &sysvipc_proc_seqops);
	if (ret)
		goto out_kfree;

	seq = file->private_data;
	seq->private = iter;

	iter->iface = PDE(inode)->data;
	iter->ns = get_ipc_ns(current->nsproxy->ipc_ns);
out:
	return ret;
out_kfree:
	kfree(iter);
	goto out;
}

static int sysvipc_proc_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ipc_proc_iter *iter = seq->private;
	put_ipc_ns(iter->ns);
	return seq_release_private(inode, file);
}

static const struct file_operations sysvipc_proc_fops = {
	.open    = sysvipc_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = sysvipc_proc_release,
};
#endif /* CONFIG_PROC_FS */