/*
 * proc/fs/generic.c --- generic routines for the proc-fs
 *
 * This file contains generic proc-fs routines for handling
 * directories and files.
 *
 * Copyright (C) 1991, 1992 Linus Torvalds.
 * Copyright (C) 1997 Theodore Ts'o
 */

#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/namei.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <asm/uaccess.h>

#include "internal.h"

DEFINE_SPINLOCK(proc_subdir_lock);

static int proc_match(unsigned int len, const char *name, struct proc_dir_entry *de)
{
        if (de->namelen != len)
                return 0;
        return !memcmp(name, de->name, len);
}

/* buffer size is one page but our output routines use some slack for overruns */
#define PROC_BLOCK_SIZE (PAGE_SIZE - 1024)

static ssize_t
__proc_file_read(struct file *file, char __user *buf, size_t nbytes,
                 loff_t *ppos)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        char *page;
        ssize_t retval = 0;
        int eof = 0;
        ssize_t n, count;
        char *start;
        struct proc_dir_entry *dp;
        unsigned long long pos;

        /*
         * Gaah, please just use "seq_file" instead. The legacy /proc
         * interfaces cut loff_t down to off_t for reads, and ignore
         * the offset entirely for writes..
         */
        pos = *ppos;
        if (pos > MAX_NON_LFS)
                return 0;
        if (nbytes > MAX_NON_LFS - pos)
                nbytes = MAX_NON_LFS - pos;

        dp = PDE(inode);
        if (!(page = (char *) __get_free_page(GFP_TEMPORARY)))
                return -ENOMEM;

        while ((nbytes > 0) && !eof) {
                count = min_t(size_t, PROC_BLOCK_SIZE, nbytes);

                start = NULL;
                if (dp->read_proc) {
                        /*
                         * How to be a proc read function
                         * ------------------------------
                         * Prototype:
                         *    int f(char *buffer, char **start, off_t offset,
                         *          int count, int *peof, void *dat)
                         *
                         * Assume that the buffer is "count" bytes in size.
                         *
                         * If you know you have supplied all the data you
                         * have, set *peof.
                         *
                         * You have three ways to return data:
                         * 0) Leave *start = NULL.  (This is the default.)
                         *    Put the data of the requested offset at that
                         *    offset within the buffer.  Return the number (n)
                         *    of bytes there are from the beginning of the
                         *    buffer up to the last byte of data.  If the
                         *    number of supplied bytes (= n - offset) is
                         *    greater than zero and you didn't signal eof
                         *    and the reader is prepared to take more data
                         *    you will be called again with the requested
                         *    offset advanced by the number of bytes
                         *    absorbed.  This interface is useful for files
                         *    no larger than the buffer.
                         * 1) Set *start = an unsigned long value less than
                         *    the buffer address but greater than zero.
                         *    Put the data of the requested offset at the
                         *    beginning of the buffer.  Return the number of
                         *    bytes of data placed there.  If this number is
                         *    greater than zero and you didn't signal eof
                         *    and the reader is prepared to take more data
                         *    you will be called again with the requested
                         *    offset advanced by *start.  This interface is
                         *    useful when you have a large file consisting
                         *    of a series of blocks which you want to count
                         *    and return as wholes.
                         *    (Hack by Paul.Russell@rustcorp.com.au)
                         * 2) Set *start = an address within the buffer.
                         *    Put the data of the requested offset at *start.
                         *    Return the number of bytes of data placed there.
                         *    If this number is greater than zero and you
                         *    didn't signal eof and the reader is prepared to
                         *    take more data you will be called again with the
                         *    requested offset advanced by the number of bytes
                         *    absorbed.
                         */
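                        /*
                         * As a rough illustration of method 0 (the name
                         * foo_read_proc is hypothetical, not a symbol
                         * defined anywhere in this file), a small
                         * read-only file could be served with:
                         *
                         *    static int foo_read_proc(char *buffer, char **start,
                         *                             off_t offset, int count,
                         *                             int *peof, void *data)
                         *    {
                         *            int len = sprintf(buffer, "hello\n");
                         *            *peof = 1;
                         *            return len;
                         *    }
                         *
                         * The loop below then copies buffer + offset out to
                         * user space and advances the file position by the
                         * number of bytes absorbed.
                         */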
                        n = dp->read_proc(page, &start, *ppos,
                                          count, &eof, dp->data);
                } else
                        break;

                if (n == 0)   /* end of file */
                        break;
                if (n < 0) {  /* error */
                        if (retval == 0)
                                retval = n;
                        break;
                }

                if (start == NULL) {
                        if (n > PAGE_SIZE) {
                                printk(KERN_ERR
                                       "proc_file_read: Apparent buffer overflow!\n");
                                n = PAGE_SIZE;
                        }
                        n -= *ppos;
                        if (n <= 0)
                                break;
                        if (n > count)
                                n = count;
                        start = page + *ppos;
                } else if (start < page) {
                        if (n > PAGE_SIZE) {
                                printk(KERN_ERR
                                       "proc_file_read: Apparent buffer overflow!\n");
                                n = PAGE_SIZE;
                        }
                        if (n > count) {
                                /*
                                 * Don't reduce n because doing so might
                                 * cut off part of a data block.
                                 */
                                printk(KERN_WARNING
                                       "proc_file_read: Read count exceeded\n");
                        }
                } else /* start >= page */ {
                        unsigned long startoff = (unsigned long)(start - page);
                        if (n > (PAGE_SIZE - startoff)) {
                                printk(KERN_ERR
                                       "proc_file_read: Apparent buffer overflow!\n");
                                n = PAGE_SIZE - startoff;
                        }
                        if (n > count)
                                n = count;
                }

                n -= copy_to_user(buf, start < page ? page : start, n);
                if (n == 0) {
                        if (retval == 0)
                                retval = -EFAULT;
                        break;
                }

                *ppos += start < page ? (unsigned long)start : n;
                nbytes -= n;
                buf += n;
                retval += n;
        }
        free_page((unsigned long) page);
        return retval;
}

static ssize_t
proc_file_read(struct file *file, char __user *buf, size_t nbytes,
               loff_t *ppos)
{
        struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
        ssize_t rv = -EIO;

        spin_lock(&pde->pde_unload_lock);
        if (!pde->proc_fops) {
                spin_unlock(&pde->pde_unload_lock);
                return rv;
        }
        pde->pde_users++;
        spin_unlock(&pde->pde_unload_lock);

        rv = __proc_file_read(file, buf, nbytes, ppos);

        pde_users_dec(pde);
        return rv;
}

static ssize_t
proc_file_write(struct file *file, const char __user *buffer,
                size_t count, loff_t *ppos)
{
        struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
        ssize_t rv = -EIO;

        if (pde->write_proc) {
                spin_lock(&pde->pde_unload_lock);
                if (!pde->proc_fops) {
                        spin_unlock(&pde->pde_unload_lock);
                        return rv;
                }
                pde->pde_users++;
                spin_unlock(&pde->pde_unload_lock);

                /* FIXME: does this routine need ppos? probably... */
                rv = pde->write_proc(file, buffer, count, pde->data);
                pde_users_dec(pde);
        }
        return rv;
}
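/*
 * The write side is symmetric.  As a sketch only (bar_write_proc is a
 * hypothetical handler, not defined in this file), a write_proc callback
 * might look like:
 *
 *      static int bar_write_proc(struct file *file, const char __user *buffer,
 *                                unsigned long count, void *data)
 *      {
 *              char kbuf[16];
 *
 *              if (count >= sizeof(kbuf))
 *                      return -EINVAL;
 *              if (copy_from_user(kbuf, buffer, count))
 *                      return -EFAULT;
 *              kbuf[count] = '\0';
 *              ... parse kbuf and act on the private data cookie ...
 *              return count;
 *      }
 *
 * As the FIXME above notes, the file offset is not passed through, so a
 * handler sees every write as starting at offset zero.
 */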

static loff_t
proc_file_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t retval = -EINVAL;
        switch (orig) {
        case 1:
                offset += file->f_pos;
                /* fallthrough */
        case 0:
                if (offset < 0 || offset > MAX_NON_LFS)
                        break;
                file->f_pos = retval = offset;
        }
        return retval;
}

static const struct file_operations proc_file_operations = {
        .llseek         = proc_file_lseek,
        .read           = proc_file_read,
        .write          = proc_file_write,
};

static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
{
        struct inode *inode = dentry->d_inode;
        struct proc_dir_entry *de = PDE(inode);
        int error;

        error = inode_change_ok(inode, iattr);
        if (error)
                return error;

        if ((iattr->ia_valid & ATTR_SIZE) &&
            iattr->ia_size != i_size_read(inode)) {
                error = vmtruncate(inode, iattr->ia_size);
                if (error)
                        return error;
        }

        setattr_copy(inode, iattr);
        mark_inode_dirty(inode);

        de->uid = inode->i_uid;
        de->gid = inode->i_gid;
        de->mode = inode->i_mode;
        return 0;
}

static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry,
                        struct kstat *stat)
{
        struct inode *inode = dentry->d_inode;
        struct proc_dir_entry *de = PROC_I(inode)->pde;
        if (de && de->nlink)
                inode->i_nlink = de->nlink;

        generic_fillattr(inode, stat);
        return 0;
}

static const struct inode_operations proc_file_inode_operations = {
        .setattr        = proc_notify_change,
};

/*
 * This function parses a name such as "tty/driver/serial", and
 * returns the struct proc_dir_entry for "/proc/tty/driver", and
 * returns "serial" in residual.
 */
static int __xlate_proc_name(const char *name, struct proc_dir_entry **ret,
                             const char **residual)
{
        const char *cp = name, *next;
        struct proc_dir_entry *de;
        unsigned int len;

        de = *ret;
        if (!de)
                de = &proc_root;

        while (1) {
                next = strchr(cp, '/');
                if (!next)
                        break;

                len = next - cp;
                for (de = de->subdir; de ; de = de->next) {
                        if (proc_match(len, cp, de))
                                break;
                }
                if (!de) {
                        WARN(1, "name '%s'\n", name);
                        return -ENOENT;
                }
                cp += len + 1;
        }
        *residual = cp;
        *ret = de;
        return 0;
}

static int xlate_proc_name(const char *name, struct proc_dir_entry **ret,
                           const char **residual)
{
        int rv;

        spin_lock(&proc_subdir_lock);
        rv = __xlate_proc_name(name, ret, residual);
        spin_unlock(&proc_subdir_lock);
        return rv;
}

static DEFINE_IDA(proc_inum_ida);
static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */

#define PROC_DYNAMIC_FIRST 0xF0000000U

/*
 * Return an inode number between PROC_DYNAMIC_FIRST and
 * 0xffffffff, or zero on failure.
 */
static unsigned int get_inode_number(void)
{
        unsigned int i;
        int error;

retry:
        if (ida_pre_get(&proc_inum_ida, GFP_KERNEL) == 0)
                return 0;

        spin_lock(&proc_inum_lock);
        error = ida_get_new(&proc_inum_ida, &i);
        spin_unlock(&proc_inum_lock);
        if (error == -EAGAIN)
                goto retry;
        else if (error)
                return 0;

        if (i > UINT_MAX - PROC_DYNAMIC_FIRST) {
                spin_lock(&proc_inum_lock);
                ida_remove(&proc_inum_ida, i);
                spin_unlock(&proc_inum_lock);
                return 0;
        }
        return PROC_DYNAMIC_FIRST + i;
}

static void release_inode_number(unsigned int inum)
{
        spin_lock(&proc_inum_lock);
        ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
        spin_unlock(&proc_inum_lock);
}

static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        nd_set_link(nd, PDE(dentry->d_inode)->data);
        return NULL;
}

static const struct inode_operations proc_link_inode_operations = {
        .readlink       = generic_readlink,
        .follow_link    = proc_follow_link,
};

/*
 * As some entries in /proc are volatile, we want to
 * get rid of unused dentries.  This could be made
 * smarter: we could keep a "volatile" flag in the
 * inode to indicate which ones to keep.
 */
static int proc_delete_dentry(const struct dentry *dentry)
{
        return 1;
}

static const struct dentry_operations proc_dentry_operations =
{
        .d_delete       = proc_delete_dentry,
};

/*
 * Don't create negative dentries here, return -ENOENT by hand
 * instead.
 */
struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
                struct dentry *dentry)
{
        struct inode *inode = NULL;
        int error = -ENOENT;

        spin_lock(&proc_subdir_lock);
        for (de = de->subdir; de ; de = de->next) {
                if (de->namelen != dentry->d_name.len)
                        continue;
                if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
                        pde_get(de);
                        spin_unlock(&proc_subdir_lock);
                        error = -EINVAL;
                        inode = proc_get_inode(dir->i_sb, de);
                        goto out_unlock;
                }
        }
        spin_unlock(&proc_subdir_lock);
out_unlock:

        if (inode) {
                d_set_d_op(dentry, &proc_dentry_operations);
                d_add(dentry, inode);
                return NULL;
        }
        if (de)
                pde_put(de);
        return ERR_PTR(error);
}

struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
                struct nameidata *nd)
{
        return proc_lookup_de(PDE(dir), dir, dentry);
}

/*
 * This returns non-zero if at EOF, so that the /proc
 * root directory can use this and check if it should
 * continue with the <pid> entries..
 *
 * Note that the VFS-layer doesn't care about the return
 * value of the readdir() call, as long as it's non-negative
 * for success..
 */
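/*
 * Directory offset layout used below: f_pos 0 is ".", f_pos 1 is "..",
 * and f_pos >= 2 indexes into the subdir list, which is why the default
 * case first skips "i - 2" entries before calling filldir.
 */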
int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
                filldir_t filldir)
{
        unsigned int ino;
        int i;
        struct inode *inode = filp->f_path.dentry->d_inode;
        int ret = 0;

        ino = inode->i_ino;
        i = filp->f_pos;
        switch (i) {
        case 0:
                if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
                        goto out;
                i++;
                filp->f_pos++;
                /* fall through */
        case 1:
                if (filldir(dirent, "..", 2, i,
                            parent_ino(filp->f_path.dentry),
                            DT_DIR) < 0)
                        goto out;
                i++;
                filp->f_pos++;
                /* fall through */
        default:
                spin_lock(&proc_subdir_lock);
                de = de->subdir;
                i -= 2;
                for (;;) {
                        if (!de) {
                                ret = 1;
                                spin_unlock(&proc_subdir_lock);
                                goto out;
                        }
                        if (!i)
                                break;
                        de = de->next;
                        i--;
                }

                do {
                        struct proc_dir_entry *next;

                        /* filldir passes info to user space */
                        pde_get(de);
                        spin_unlock(&proc_subdir_lock);
                        if (filldir(dirent, de->name, de->namelen, filp->f_pos,
                                    de->low_ino, de->mode >> 12) < 0) {
                                pde_put(de);
                                goto out;
                        }
                        spin_lock(&proc_subdir_lock);
                        filp->f_pos++;
                        next = de->next;
                        pde_put(de);
                        de = next;
                } while (de);
                spin_unlock(&proc_subdir_lock);
        }
        ret = 1;
out:
        return ret;
}

int proc_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
        struct inode *inode = filp->f_path.dentry->d_inode;

        return proc_readdir_de(PDE(inode), filp, dirent, filldir);
}

/*
 * These are the generic /proc directory operations. They
 * use the in-memory "struct proc_dir_entry" tree to parse
 * the /proc directory.
 */
static const struct file_operations proc_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .readdir        = proc_readdir,
};

/*
 * proc directories can do almost nothing..
 */
static const struct inode_operations proc_dir_inode_operations = {
        .lookup         = proc_lookup,
        .getattr        = proc_getattr,
        .setattr        = proc_notify_change,
};

static int proc_register(struct proc_dir_entry *dir, struct proc_dir_entry *dp)
{
        unsigned int i;
        struct proc_dir_entry *tmp;

        i = get_inode_number();
        if (i == 0)
                return -EAGAIN;
        dp->low_ino = i;

        if (S_ISDIR(dp->mode)) {
                if (dp->proc_iops == NULL) {
                        dp->proc_fops = &proc_dir_operations;
                        dp->proc_iops = &proc_dir_inode_operations;
                }
                dir->nlink++;
        } else if (S_ISLNK(dp->mode)) {
                if (dp->proc_iops == NULL)
                        dp->proc_iops = &proc_link_inode_operations;
        } else if (S_ISREG(dp->mode)) {
                if (dp->proc_fops == NULL)
                        dp->proc_fops = &proc_file_operations;
                if (dp->proc_iops == NULL)
                        dp->proc_iops = &proc_file_inode_operations;
        }

        spin_lock(&proc_subdir_lock);

        for (tmp = dir->subdir; tmp; tmp = tmp->next)
                if (strcmp(tmp->name, dp->name) == 0) {
                        WARN(1, KERN_WARNING "proc_dir_entry '%s/%s' already registered\n",
                                dir->name, dp->name);
                        break;
                }

        dp->next = dir->subdir;
        dp->parent = dir;
        dir->subdir = dp;
        spin_unlock(&proc_subdir_lock);

        return 0;
}

static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
                                            const char *name,
                                            mode_t mode,
                                            nlink_t nlink)
{
        struct proc_dir_entry *ent = NULL;
        const char *fn = name;
        unsigned int len;

        /* make sure name is valid */
        if (!name || !strlen(name))
                goto out;

        if (xlate_proc_name(name, parent, &fn) != 0)
                goto out;

        /* At this point there must not be any '/' characters beyond *fn */
        if (strchr(fn, '/'))
                goto out;

        len = strlen(fn);

        ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
        if (!ent)
                goto out;

        memset(ent, 0, sizeof(struct proc_dir_entry));
        memcpy(((char *) ent) + sizeof(struct proc_dir_entry), fn, len + 1);
        ent->name = ((char *) ent) + sizeof(*ent);
        ent->namelen = len;
        ent->mode = mode;
        ent->nlink = nlink;
        atomic_set(&ent->count, 1);
        ent->pde_users = 0;
        spin_lock_init(&ent->pde_unload_lock);
        ent->pde_unload_completion = NULL;
        INIT_LIST_HEAD(&ent->pde_openers);
out:
        return ent;
}

struct proc_dir_entry *proc_symlink(const char *name,
                struct proc_dir_entry *parent, const char *dest)
{
        struct proc_dir_entry *ent;

        ent = __proc_create(&parent, name,
                            (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO), 1);

        if (ent) {
                ent->data = kmalloc((ent->size = strlen(dest)) + 1, GFP_KERNEL);
                if (ent->data) {
                        strcpy((char *)ent->data, dest);
                        if (proc_register(parent, ent) < 0) {
                                kfree(ent->data);
                                kfree(ent);
                                ent = NULL;
                        }
                } else {
                        kfree(ent);
                        ent = NULL;
                }
        }
        return ent;
}
EXPORT_SYMBOL(proc_symlink);

struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
                struct proc_dir_entry *parent)
{
        struct proc_dir_entry *ent;

        ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
        if (ent) {
                if (proc_register(parent, ent) < 0) {
                        kfree(ent);
                        ent = NULL;
                }
        }
        return ent;
}
EXPORT_SYMBOL(proc_mkdir_mode);

struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
                struct proc_dir_entry *parent)
{
        struct proc_dir_entry *ent;

        ent = __proc_create(&parent, name, S_IFDIR | S_IRUGO | S_IXUGO, 2);
        if (ent) {
                ent->data = net;
                if (proc_register(parent, ent) < 0) {
                        kfree(ent);
                        ent = NULL;
                }
        }
        return ent;
}
EXPORT_SYMBOL_GPL(proc_net_mkdir);

struct proc_dir_entry *proc_mkdir(const char *name,
                struct proc_dir_entry *parent)
{
        return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent);
}
EXPORT_SYMBOL(proc_mkdir);

struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
                                         struct proc_dir_entry *parent)
{
        struct proc_dir_entry *ent;
        nlink_t nlink;

        if (S_ISDIR(mode)) {
                if ((mode & S_IALLUGO) == 0)
                        mode |= S_IRUGO | S_IXUGO;
                nlink = 2;
        } else {
                if ((mode & S_IFMT) == 0)
                        mode |= S_IFREG;
                if ((mode & S_IALLUGO) == 0)
                        mode |= S_IRUGO;
                nlink = 1;
        }

        ent = __proc_create(&parent, name, mode, nlink);
        if (ent) {
                if (proc_register(parent, ent) < 0) {
                        kfree(ent);
                        ent = NULL;
                }
        }
        return ent;
}
EXPORT_SYMBOL(create_proc_entry);

struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
                                        struct proc_dir_entry *parent,
                                        const struct file_operations *proc_fops,
                                        void *data)
{
        struct proc_dir_entry *pde;
        nlink_t nlink;

        if (S_ISDIR(mode)) {
                if ((mode & S_IALLUGO) == 0)
                        mode |= S_IRUGO | S_IXUGO;
                nlink = 2;
        } else {
                if ((mode & S_IFMT) == 0)
                        mode |= S_IFREG;
                if ((mode & S_IALLUGO) == 0)
                        mode |= S_IRUGO;
                nlink = 1;
        }

        pde = __proc_create(&parent, name, mode, nlink);
        if (!pde)
                goto out;
        pde->proc_fops = proc_fops;
        pde->data = data;
        if (proc_register(parent, pde) < 0)
                goto out_free;
        return pde;
out_free:
        kfree(pde);
out:
        return NULL;
}
EXPORT_SYMBOL(proc_create_data);

static void free_proc_entry(struct proc_dir_entry *de)
{
        release_inode_number(de->low_ino);

        if (S_ISLNK(de->mode))
                kfree(de->data);
        kfree(de);
}

void pde_put(struct proc_dir_entry *pde)
{
        if (atomic_dec_and_test(&pde->count))
                free_proc_entry(pde);
}

/*
 * Remove a /proc entry and free it if it's not currently in use.
 */
void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
{
        struct proc_dir_entry **p;
        struct proc_dir_entry *de = NULL;
        const char *fn = name;
        unsigned int len;

        spin_lock(&proc_subdir_lock);
        if (__xlate_proc_name(name, &parent, &fn) != 0) {
                spin_unlock(&proc_subdir_lock);
                return;
        }
        len = strlen(fn);

        for (p = &parent->subdir; *p; p = &(*p)->next) {
                if (proc_match(len, fn, *p)) {
                        de = *p;
                        *p = de->next;
                        de->next = NULL;
                        break;
                }
        }
        spin_unlock(&proc_subdir_lock);
        if (!de) {
                WARN(1, "name '%s'\n", name);
                return;
        }

        spin_lock(&de->pde_unload_lock);
        /*
         * Stop accepting new callers into module. If you're
         * dynamically allocating ->proc_fops, save a pointer somewhere.
         */
        de->proc_fops = NULL;
        /* Wait until all existing callers into module are done. */
        if (de->pde_users > 0) {
                DECLARE_COMPLETION_ONSTACK(c);

                if (!de->pde_unload_completion)
                        de->pde_unload_completion = &c;

                spin_unlock(&de->pde_unload_lock);

                wait_for_completion(de->pde_unload_completion);

                spin_lock(&de->pde_unload_lock);
        }

        while (!list_empty(&de->pde_openers)) {
                struct pde_opener *pdeo;

                pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
                list_del(&pdeo->lh);
                spin_unlock(&de->pde_unload_lock);
                pdeo->release(pdeo->inode, pdeo->file);
                kfree(pdeo);
                spin_lock(&de->pde_unload_lock);
        }
        spin_unlock(&de->pde_unload_lock);

        if (S_ISDIR(de->mode))
                parent->nlink--;
        de->nlink = 0;
        WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory "
                        "'%s/%s', leaking at least '%s'\n", __func__,
                        de->parent->name, de->name, de->subdir->name);
        pde_put(de);
}
EXPORT_SYMBOL(remove_proc_entry);
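/*
 * For reference, a typical caller pairs proc_create_data() with
 * remove_proc_entry().  The sketch below is purely illustrative; the
 * foo_* names are hypothetical and not defined anywhere in procfs:
 *
 *      static const struct file_operations foo_proc_fops = {
 *              .owner          = THIS_MODULE,
 *              .open           = foo_proc_open,
 *              .read           = seq_read,
 *              .llseek         = seq_lseek,
 *              .release        = single_release,
 *      };
 *
 *      pde = proc_create_data("foo", S_IRUGO, NULL, &foo_proc_fops, foo_data);
 *      if (!pde)
 *              return -ENOMEM;
 *      ...
 *      remove_proc_entry("foo", NULL);
 *
 * proc_create_data() makes the entry visible as soon as proc_register()
 * succeeds, and remove_proc_entry() unhooks it and waits for callers
 * still inside the module, as implemented above.
 */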