1 /* 2 * proc/fs/generic.c --- generic routines for the proc-fs 3 * 4 * This file contains generic proc-fs routines for handling 5 * directories and files. 6 * 7 * Copyright (C) 1991, 1992 Linus Torvalds. 8 * Copyright (C) 1997 Theodore Ts'o 9 */ 10 11 #include <linux/errno.h> 12 #include <linux/time.h> 13 #include <linux/proc_fs.h> 14 #include <linux/stat.h> 15 #include <linux/module.h> 16 #include <linux/mount.h> 17 #include <linux/smp_lock.h> 18 #include <linux/init.h> 19 #include <linux/idr.h> 20 #include <linux/namei.h> 21 #include <linux/bitops.h> 22 #include <linux/spinlock.h> 23 #include <linux/completion.h> 24 #include <asm/uaccess.h> 25 26 #include "internal.h" 27 28 DEFINE_SPINLOCK(proc_subdir_lock); 29 30 static int proc_match(int len, const char *name, struct proc_dir_entry *de) 31 { 32 if (de->namelen != len) 33 return 0; 34 return !memcmp(name, de->name, len); 35 } 36 37 /* buffer size is one page but our output routines use some slack for overruns */ 38 #define PROC_BLOCK_SIZE (PAGE_SIZE - 1024) 39 40 static ssize_t 41 proc_file_read(struct file *file, char __user *buf, size_t nbytes, 42 loff_t *ppos) 43 { 44 struct inode * inode = file->f_path.dentry->d_inode; 45 char *page; 46 ssize_t retval=0; 47 int eof=0; 48 ssize_t n, count; 49 char *start; 50 struct proc_dir_entry * dp; 51 unsigned long long pos; 52 53 /* 54 * Gaah, please just use "seq_file" instead. The legacy /proc 55 * interfaces cut loff_t down to off_t for reads, and ignore 56 * the offset entirely for writes.. 57 */ 58 pos = *ppos; 59 if (pos > MAX_NON_LFS) 60 return 0; 61 if (nbytes > MAX_NON_LFS - pos) 62 nbytes = MAX_NON_LFS - pos; 63 64 dp = PDE(inode); 65 if (!(page = (char*) __get_free_page(GFP_TEMPORARY))) 66 return -ENOMEM; 67 68 while ((nbytes > 0) && !eof) { 69 count = min_t(size_t, PROC_BLOCK_SIZE, nbytes); 70 71 start = NULL; 72 if (dp->read_proc) { 73 /* 74 * How to be a proc read function 75 * ------------------------------ 76 * Prototype: 77 * int f(char *buffer, char **start, off_t offset, 78 * int count, int *peof, void *dat) 79 * 80 * Assume that the buffer is "count" bytes in size. 81 * 82 * If you know you have supplied all the data you 83 * have, set *peof. 84 * 85 * You have three ways to return data: 86 * 0) Leave *start = NULL. (This is the default.) 87 * Put the data of the requested offset at that 88 * offset within the buffer. Return the number (n) 89 * of bytes there are from the beginning of the 90 * buffer up to the last byte of data. If the 91 * number of supplied bytes (= n - offset) is 92 * greater than zero and you didn't signal eof 93 * and the reader is prepared to take more data 94 * you will be called again with the requested 95 * offset advanced by the number of bytes 96 * absorbed. This interface is useful for files 97 * no larger than the buffer. 98 * 1) Set *start = an unsigned long value less than 99 * the buffer address but greater than zero. 100 * Put the data of the requested offset at the 101 * beginning of the buffer. Return the number of 102 * bytes of data placed there. If this number is 103 * greater than zero and you didn't signal eof 104 * and the reader is prepared to take more data 105 * you will be called again with the requested 106 * offset advanced by *start. This interface is 107 * useful when you have a large file consisting 108 * of a series of blocks which you want to count 109 * and return as wholes. 110 * (Hack by Paul.Russell@rustcorp.com.au) 111 * 2) Set *start = an address within the buffer. 112 * Put the data of the requested offset at *start. 113 * Return the number of bytes of data placed there. 114 * If this number is greater than zero and you 115 * didn't signal eof and the reader is prepared to 116 * take more data you will be called again with the 117 * requested offset advanced by the number of bytes 118 * absorbed. 119 */ 120 n = dp->read_proc(page, &start, *ppos, 121 count, &eof, dp->data); 122 } else 123 break; 124 125 if (n == 0) /* end of file */ 126 break; 127 if (n < 0) { /* error */ 128 if (retval == 0) 129 retval = n; 130 break; 131 } 132 133 if (start == NULL) { 134 if (n > PAGE_SIZE) { 135 printk(KERN_ERR 136 "proc_file_read: Apparent buffer overflow!\n"); 137 n = PAGE_SIZE; 138 } 139 n -= *ppos; 140 if (n <= 0) 141 break; 142 if (n > count) 143 n = count; 144 start = page + *ppos; 145 } else if (start < page) { 146 if (n > PAGE_SIZE) { 147 printk(KERN_ERR 148 "proc_file_read: Apparent buffer overflow!\n"); 149 n = PAGE_SIZE; 150 } 151 if (n > count) { 152 /* 153 * Don't reduce n because doing so might 154 * cut off part of a data block. 155 */ 156 printk(KERN_WARNING 157 "proc_file_read: Read count exceeded\n"); 158 } 159 } else /* start >= page */ { 160 unsigned long startoff = (unsigned long)(start - page); 161 if (n > (PAGE_SIZE - startoff)) { 162 printk(KERN_ERR 163 "proc_file_read: Apparent buffer overflow!\n"); 164 n = PAGE_SIZE - startoff; 165 } 166 if (n > count) 167 n = count; 168 } 169 170 n -= copy_to_user(buf, start < page ? page : start, n); 171 if (n == 0) { 172 if (retval == 0) 173 retval = -EFAULT; 174 break; 175 } 176 177 *ppos += start < page ? (unsigned long)start : n; 178 nbytes -= n; 179 buf += n; 180 retval += n; 181 } 182 free_page((unsigned long) page); 183 return retval; 184 } 185 186 static ssize_t 187 proc_file_write(struct file *file, const char __user *buffer, 188 size_t count, loff_t *ppos) 189 { 190 struct inode *inode = file->f_path.dentry->d_inode; 191 struct proc_dir_entry * dp; 192 193 dp = PDE(inode); 194 195 if (!dp->write_proc) 196 return -EIO; 197 198 /* FIXME: does this routine need ppos? probably... */ 199 return dp->write_proc(file, buffer, count, dp->data); 200 } 201 202 203 static loff_t 204 proc_file_lseek(struct file *file, loff_t offset, int orig) 205 { 206 loff_t retval = -EINVAL; 207 switch (orig) { 208 case 1: 209 offset += file->f_pos; 210 /* fallthrough */ 211 case 0: 212 if (offset < 0 || offset > MAX_NON_LFS) 213 break; 214 file->f_pos = retval = offset; 215 } 216 return retval; 217 } 218 219 static const struct file_operations proc_file_operations = { 220 .llseek = proc_file_lseek, 221 .read = proc_file_read, 222 .write = proc_file_write, 223 }; 224 225 static int proc_notify_change(struct dentry *dentry, struct iattr *iattr) 226 { 227 struct inode *inode = dentry->d_inode; 228 struct proc_dir_entry *de = PDE(inode); 229 int error; 230 231 error = inode_change_ok(inode, iattr); 232 if (error) 233 goto out; 234 235 error = inode_setattr(inode, iattr); 236 if (error) 237 goto out; 238 239 de->uid = inode->i_uid; 240 de->gid = inode->i_gid; 241 de->mode = inode->i_mode; 242 out: 243 return error; 244 } 245 246 static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry, 247 struct kstat *stat) 248 { 249 struct inode *inode = dentry->d_inode; 250 struct proc_dir_entry *de = PROC_I(inode)->pde; 251 if (de && de->nlink) 252 inode->i_nlink = de->nlink; 253 254 generic_fillattr(inode, stat); 255 return 0; 256 } 257 258 static const struct inode_operations proc_file_inode_operations = { 259 .setattr = proc_notify_change, 260 }; 261 262 /* 263 * This function parses a name such as "tty/driver/serial", and 264 * returns the struct proc_dir_entry for "/proc/tty/driver", and 265 * returns "serial" in residual. 266 */ 267 static int xlate_proc_name(const char *name, 268 struct proc_dir_entry **ret, const char **residual) 269 { 270 const char *cp = name, *next; 271 struct proc_dir_entry *de; 272 int len; 273 int rtn = 0; 274 275 de = *ret; 276 if (!de) 277 de = &proc_root; 278 279 spin_lock(&proc_subdir_lock); 280 while (1) { 281 next = strchr(cp, '/'); 282 if (!next) 283 break; 284 285 len = next - cp; 286 for (de = de->subdir; de ; de = de->next) { 287 if (proc_match(len, cp, de)) 288 break; 289 } 290 if (!de) { 291 rtn = -ENOENT; 292 goto out; 293 } 294 cp += len + 1; 295 } 296 *residual = cp; 297 *ret = de; 298 out: 299 spin_unlock(&proc_subdir_lock); 300 return rtn; 301 } 302 303 static DEFINE_IDA(proc_inum_ida); 304 static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */ 305 306 #define PROC_DYNAMIC_FIRST 0xF0000000U 307 308 /* 309 * Return an inode number between PROC_DYNAMIC_FIRST and 310 * 0xffffffff, or zero on failure. 311 */ 312 static unsigned int get_inode_number(void) 313 { 314 unsigned int i; 315 int error; 316 317 retry: 318 if (ida_pre_get(&proc_inum_ida, GFP_KERNEL) == 0) 319 return 0; 320 321 spin_lock(&proc_inum_lock); 322 error = ida_get_new(&proc_inum_ida, &i); 323 spin_unlock(&proc_inum_lock); 324 if (error == -EAGAIN) 325 goto retry; 326 else if (error) 327 return 0; 328 329 if (i > UINT_MAX - PROC_DYNAMIC_FIRST) { 330 spin_lock(&proc_inum_lock); 331 ida_remove(&proc_inum_ida, i); 332 spin_unlock(&proc_inum_lock); 333 return 0; 334 } 335 return PROC_DYNAMIC_FIRST + i; 336 } 337 338 static void release_inode_number(unsigned int inum) 339 { 340 spin_lock(&proc_inum_lock); 341 ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST); 342 spin_unlock(&proc_inum_lock); 343 } 344 345 static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd) 346 { 347 nd_set_link(nd, PDE(dentry->d_inode)->data); 348 return NULL; 349 } 350 351 static const struct inode_operations proc_link_inode_operations = { 352 .readlink = generic_readlink, 353 .follow_link = proc_follow_link, 354 }; 355 356 /* 357 * As some entries in /proc are volatile, we want to 358 * get rid of unused dentries. This could be made 359 * smarter: we could keep a "volatile" flag in the 360 * inode to indicate which ones to keep. 361 */ 362 static int proc_delete_dentry(struct dentry * dentry) 363 { 364 return 1; 365 } 366 367 static struct dentry_operations proc_dentry_operations = 368 { 369 .d_delete = proc_delete_dentry, 370 }; 371 372 /* 373 * Don't create negative dentries here, return -ENOENT by hand 374 * instead. 375 */ 376 struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir, 377 struct dentry *dentry) 378 { 379 struct inode *inode = NULL; 380 int error = -ENOENT; 381 382 lock_kernel(); 383 spin_lock(&proc_subdir_lock); 384 for (de = de->subdir; de ; de = de->next) { 385 if (de->namelen != dentry->d_name.len) 386 continue; 387 if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { 388 unsigned int ino; 389 390 ino = de->low_ino; 391 de_get(de); 392 spin_unlock(&proc_subdir_lock); 393 error = -EINVAL; 394 inode = proc_get_inode(dir->i_sb, ino, de); 395 goto out_unlock; 396 } 397 } 398 spin_unlock(&proc_subdir_lock); 399 out_unlock: 400 unlock_kernel(); 401 402 if (inode) { 403 dentry->d_op = &proc_dentry_operations; 404 d_add(dentry, inode); 405 return NULL; 406 } 407 if (de) 408 de_put(de); 409 return ERR_PTR(error); 410 } 411 412 struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry, 413 struct nameidata *nd) 414 { 415 return proc_lookup_de(PDE(dir), dir, dentry); 416 } 417 418 /* 419 * This returns non-zero if at EOF, so that the /proc 420 * root directory can use this and check if it should 421 * continue with the <pid> entries.. 422 * 423 * Note that the VFS-layer doesn't care about the return 424 * value of the readdir() call, as long as it's non-negative 425 * for success.. 426 */ 427 int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent, 428 filldir_t filldir) 429 { 430 unsigned int ino; 431 int i; 432 struct inode *inode = filp->f_path.dentry->d_inode; 433 int ret = 0; 434 435 lock_kernel(); 436 437 ino = inode->i_ino; 438 i = filp->f_pos; 439 switch (i) { 440 case 0: 441 if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) 442 goto out; 443 i++; 444 filp->f_pos++; 445 /* fall through */ 446 case 1: 447 if (filldir(dirent, "..", 2, i, 448 parent_ino(filp->f_path.dentry), 449 DT_DIR) < 0) 450 goto out; 451 i++; 452 filp->f_pos++; 453 /* fall through */ 454 default: 455 spin_lock(&proc_subdir_lock); 456 de = de->subdir; 457 i -= 2; 458 for (;;) { 459 if (!de) { 460 ret = 1; 461 spin_unlock(&proc_subdir_lock); 462 goto out; 463 } 464 if (!i) 465 break; 466 de = de->next; 467 i--; 468 } 469 470 do { 471 struct proc_dir_entry *next; 472 473 /* filldir passes info to user space */ 474 de_get(de); 475 spin_unlock(&proc_subdir_lock); 476 if (filldir(dirent, de->name, de->namelen, filp->f_pos, 477 de->low_ino, de->mode >> 12) < 0) { 478 de_put(de); 479 goto out; 480 } 481 spin_lock(&proc_subdir_lock); 482 filp->f_pos++; 483 next = de->next; 484 de_put(de); 485 de = next; 486 } while (de); 487 spin_unlock(&proc_subdir_lock); 488 } 489 ret = 1; 490 out: unlock_kernel(); 491 return ret; 492 } 493 494 int proc_readdir(struct file *filp, void *dirent, filldir_t filldir) 495 { 496 struct inode *inode = filp->f_path.dentry->d_inode; 497 498 return proc_readdir_de(PDE(inode), filp, dirent, filldir); 499 } 500 501 /* 502 * These are the generic /proc directory operations. They 503 * use the in-memory "struct proc_dir_entry" tree to parse 504 * the /proc directory. 505 */ 506 static const struct file_operations proc_dir_operations = { 507 .read = generic_read_dir, 508 .readdir = proc_readdir, 509 }; 510 511 /* 512 * proc directories can do almost nothing.. 513 */ 514 static const struct inode_operations proc_dir_inode_operations = { 515 .lookup = proc_lookup, 516 .getattr = proc_getattr, 517 .setattr = proc_notify_change, 518 }; 519 520 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp) 521 { 522 unsigned int i; 523 struct proc_dir_entry *tmp; 524 525 i = get_inode_number(); 526 if (i == 0) 527 return -EAGAIN; 528 dp->low_ino = i; 529 530 if (S_ISDIR(dp->mode)) { 531 if (dp->proc_iops == NULL) { 532 dp->proc_fops = &proc_dir_operations; 533 dp->proc_iops = &proc_dir_inode_operations; 534 } 535 dir->nlink++; 536 } else if (S_ISLNK(dp->mode)) { 537 if (dp->proc_iops == NULL) 538 dp->proc_iops = &proc_link_inode_operations; 539 } else if (S_ISREG(dp->mode)) { 540 if (dp->proc_fops == NULL) 541 dp->proc_fops = &proc_file_operations; 542 if (dp->proc_iops == NULL) 543 dp->proc_iops = &proc_file_inode_operations; 544 } 545 546 spin_lock(&proc_subdir_lock); 547 548 for (tmp = dir->subdir; tmp; tmp = tmp->next) 549 if (strcmp(tmp->name, dp->name) == 0) { 550 printk(KERN_WARNING "proc_dir_entry '%s/%s' already registered\n", 551 dir->name, dp->name); 552 dump_stack(); 553 break; 554 } 555 556 dp->next = dir->subdir; 557 dp->parent = dir; 558 dir->subdir = dp; 559 spin_unlock(&proc_subdir_lock); 560 561 return 0; 562 } 563 564 static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent, 565 const char *name, 566 mode_t mode, 567 nlink_t nlink) 568 { 569 struct proc_dir_entry *ent = NULL; 570 const char *fn = name; 571 int len; 572 573 /* make sure name is valid */ 574 if (!name || !strlen(name)) goto out; 575 576 if (xlate_proc_name(name, parent, &fn) != 0) 577 goto out; 578 579 /* At this point there must not be any '/' characters beyond *fn */ 580 if (strchr(fn, '/')) 581 goto out; 582 583 len = strlen(fn); 584 585 ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL); 586 if (!ent) goto out; 587 588 memset(ent, 0, sizeof(struct proc_dir_entry)); 589 memcpy(((char *) ent) + sizeof(struct proc_dir_entry), fn, len + 1); 590 ent->name = ((char *) ent) + sizeof(*ent); 591 ent->namelen = len; 592 ent->mode = mode; 593 ent->nlink = nlink; 594 atomic_set(&ent->count, 1); 595 ent->pde_users = 0; 596 spin_lock_init(&ent->pde_unload_lock); 597 ent->pde_unload_completion = NULL; 598 INIT_LIST_HEAD(&ent->pde_openers); 599 out: 600 return ent; 601 } 602 603 struct proc_dir_entry *proc_symlink(const char *name, 604 struct proc_dir_entry *parent, const char *dest) 605 { 606 struct proc_dir_entry *ent; 607 608 ent = __proc_create(&parent, name, 609 (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1); 610 611 if (ent) { 612 ent->data = kmalloc((ent->size=strlen(dest))+1, GFP_KERNEL); 613 if (ent->data) { 614 strcpy((char*)ent->data,dest); 615 if (proc_register(parent, ent) < 0) { 616 kfree(ent->data); 617 kfree(ent); 618 ent = NULL; 619 } 620 } else { 621 kfree(ent); 622 ent = NULL; 623 } 624 } 625 return ent; 626 } 627 628 struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode, 629 struct proc_dir_entry *parent) 630 { 631 struct proc_dir_entry *ent; 632 633 ent = __proc_create(&parent, name, S_IFDIR | mode, 2); 634 if (ent) { 635 if (proc_register(parent, ent) < 0) { 636 kfree(ent); 637 ent = NULL; 638 } 639 } 640 return ent; 641 } 642 643 struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name, 644 struct proc_dir_entry *parent) 645 { 646 struct proc_dir_entry *ent; 647 648 ent = __proc_create(&parent, name, S_IFDIR | S_IRUGO | S_IXUGO, 2); 649 if (ent) { 650 ent->data = net; 651 if (proc_register(parent, ent) < 0) { 652 kfree(ent); 653 ent = NULL; 654 } 655 } 656 return ent; 657 } 658 EXPORT_SYMBOL_GPL(proc_net_mkdir); 659 660 struct proc_dir_entry *proc_mkdir(const char *name, 661 struct proc_dir_entry *parent) 662 { 663 return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent); 664 } 665 666 struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, 667 struct proc_dir_entry *parent) 668 { 669 struct proc_dir_entry *ent; 670 nlink_t nlink; 671 672 if (S_ISDIR(mode)) { 673 if ((mode & S_IALLUGO) == 0) 674 mode |= S_IRUGO | S_IXUGO; 675 nlink = 2; 676 } else { 677 if ((mode & S_IFMT) == 0) 678 mode |= S_IFREG; 679 if ((mode & S_IALLUGO) == 0) 680 mode |= S_IRUGO; 681 nlink = 1; 682 } 683 684 ent = __proc_create(&parent, name, mode, nlink); 685 if (ent) { 686 if (proc_register(parent, ent) < 0) { 687 kfree(ent); 688 ent = NULL; 689 } 690 } 691 return ent; 692 } 693 694 struct proc_dir_entry *proc_create_data(const char *name, mode_t mode, 695 struct proc_dir_entry *parent, 696 const struct file_operations *proc_fops, 697 void *data) 698 { 699 struct proc_dir_entry *pde; 700 nlink_t nlink; 701 702 if (S_ISDIR(mode)) { 703 if ((mode & S_IALLUGO) == 0) 704 mode |= S_IRUGO | S_IXUGO; 705 nlink = 2; 706 } else { 707 if ((mode & S_IFMT) == 0) 708 mode |= S_IFREG; 709 if ((mode & S_IALLUGO) == 0) 710 mode |= S_IRUGO; 711 nlink = 1; 712 } 713 714 pde = __proc_create(&parent, name, mode, nlink); 715 if (!pde) 716 goto out; 717 pde->proc_fops = proc_fops; 718 pde->data = data; 719 if (proc_register(parent, pde) < 0) 720 goto out_free; 721 return pde; 722 out_free: 723 kfree(pde); 724 out: 725 return NULL; 726 } 727 728 void free_proc_entry(struct proc_dir_entry *de) 729 { 730 unsigned int ino = de->low_ino; 731 732 if (ino < PROC_DYNAMIC_FIRST) 733 return; 734 735 release_inode_number(ino); 736 737 if (S_ISLNK(de->mode)) 738 kfree(de->data); 739 kfree(de); 740 } 741 742 /* 743 * Remove a /proc entry and free it if it's not currently in use. 744 */ 745 void remove_proc_entry(const char *name, struct proc_dir_entry *parent) 746 { 747 struct proc_dir_entry **p; 748 struct proc_dir_entry *de = NULL; 749 const char *fn = name; 750 int len; 751 752 if (xlate_proc_name(name, &parent, &fn) != 0) 753 return; 754 len = strlen(fn); 755 756 spin_lock(&proc_subdir_lock); 757 for (p = &parent->subdir; *p; p=&(*p)->next ) { 758 if (proc_match(len, fn, *p)) { 759 de = *p; 760 *p = de->next; 761 de->next = NULL; 762 break; 763 } 764 } 765 spin_unlock(&proc_subdir_lock); 766 if (!de) 767 return; 768 769 spin_lock(&de->pde_unload_lock); 770 /* 771 * Stop accepting new callers into module. If you're 772 * dynamically allocating ->proc_fops, save a pointer somewhere. 773 */ 774 de->proc_fops = NULL; 775 /* Wait until all existing callers into module are done. */ 776 if (de->pde_users > 0) { 777 DECLARE_COMPLETION_ONSTACK(c); 778 779 if (!de->pde_unload_completion) 780 de->pde_unload_completion = &c; 781 782 spin_unlock(&de->pde_unload_lock); 783 784 wait_for_completion(de->pde_unload_completion); 785 786 goto continue_removing; 787 } 788 spin_unlock(&de->pde_unload_lock); 789 790 continue_removing: 791 spin_lock(&de->pde_unload_lock); 792 while (!list_empty(&de->pde_openers)) { 793 struct pde_opener *pdeo; 794 795 pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh); 796 list_del(&pdeo->lh); 797 spin_unlock(&de->pde_unload_lock); 798 pdeo->release(pdeo->inode, pdeo->file); 799 kfree(pdeo); 800 spin_lock(&de->pde_unload_lock); 801 } 802 spin_unlock(&de->pde_unload_lock); 803 804 if (S_ISDIR(de->mode)) 805 parent->nlink--; 806 de->nlink = 0; 807 WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory " 808 "'%s/%s', leaking at least '%s'\n", __func__, 809 de->parent->name, de->name, de->subdir->name); 810 if (atomic_dec_and_test(&de->count)) 811 free_proc_entry(de); 812 } 813