// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/proc/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/cache.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/pid_namespace.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/completion.h>
#include <linux/poll.h>
#include <linux/printk.h>
#include <linux/file.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/bug.h>

#include <linux/uaccess.h>

#include "internal.h"

static void proc_evict_inode(struct inode *inode)
{
	struct proc_dir_entry *de;
	struct ctl_table_header *head;
	struct proc_inode *ei = PROC_I(inode);

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);

	/* Stop tracking associated processes */
	if (ei->pid) {
		proc_pid_evict_inode(ei);
		ei->pid = NULL;
	}

	/* Let go of any associated proc directory entry */
	de = ei->pde;
	if (de) {
		pde_put(de);
		ei->pde = NULL;
	}

	head = ei->sysctl;
	if (head) {
		RCU_INIT_POINTER(ei->sysctl, NULL);
		proc_sys_evict_inode(inode, head);
	}
}

static struct kmem_cache *proc_inode_cachep __ro_after_init;
static struct kmem_cache *pde_opener_cache __ro_after_init;

static struct inode *proc_alloc_inode(struct super_block *sb)
{
	struct proc_inode *ei;

	ei = kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	ei->pid = NULL;
	ei->fd = 0;
	ei->op.proc_get_link = NULL;
	ei->pde = NULL;
	ei->sysctl = NULL;
	ei->sysctl_entry = NULL;
	INIT_HLIST_NODE(&ei->sibling_inodes);
	ei->ns_ops = NULL;
	return &ei->vfs_inode;
}

static void proc_free_inode(struct inode *inode)
{
	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
}

static void init_once(void *foo)
{
	struct proc_inode *ei = (struct proc_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}

void __init proc_init_kmemcache(void)
{
	proc_inode_cachep = kmem_cache_create("proc_inode_cache",
					      sizeof(struct proc_inode),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD|SLAB_ACCOUNT|
						  SLAB_PANIC),
					      init_once);
	pde_opener_cache =
		kmem_cache_create("pde_opener", sizeof(struct pde_opener), 0,
				  SLAB_ACCOUNT|SLAB_PANIC, NULL);
	proc_dir_entry_cache = kmem_cache_create_usercopy(
		"proc_dir_entry", SIZEOF_PDE, 0, SLAB_PANIC,
		offsetof(struct proc_dir_entry, inline_name),
		SIZEOF_PDE_INLINE_NAME, NULL);
	BUILD_BUG_ON(sizeof(struct proc_dir_entry) >= SIZEOF_PDE);
}

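/*
 * Remove every proc inode on the @inodes sibling list and invalidate its
 * dentries.  Each inode's superblock is pinned with an s_active reference
 * (reused while consecutive inodes come from the same mount) so that the
 * RCU read lock can be dropped around d_invalidate()/iput().
 */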
void proc_invalidate_siblings_dcache(struct hlist_head *inodes, spinlock_t *lock)
{
	struct inode *inode;
	struct proc_inode *ei;
	struct hlist_node *node;
	struct super_block *old_sb = NULL;

	rcu_read_lock();
	for (;;) {
		struct super_block *sb;
		node = hlist_first_rcu(inodes);
		if (!node)
			break;
		ei = hlist_entry(node, struct proc_inode, sibling_inodes);
		spin_lock(lock);
		hlist_del_init_rcu(&ei->sibling_inodes);
		spin_unlock(lock);

		inode = &ei->vfs_inode;
		sb = inode->i_sb;
		if ((sb != old_sb) && !atomic_inc_not_zero(&sb->s_active))
			continue;
		inode = igrab(inode);
		rcu_read_unlock();
		if (sb != old_sb) {
			if (old_sb)
				deactivate_super(old_sb);
			old_sb = sb;
		}
		if (unlikely(!inode)) {
			rcu_read_lock();
			continue;
		}

		if (S_ISDIR(inode->i_mode)) {
			struct dentry *dir = d_find_any_alias(inode);
			if (dir) {
				d_invalidate(dir);
				dput(dir);
			}
		} else {
			struct dentry *dentry;
			while ((dentry = d_find_alias(inode))) {
				d_invalidate(dentry);
				dput(dentry);
			}
		}
		iput(inode);

		rcu_read_lock();
	}
	rcu_read_unlock();
	if (old_sb)
		deactivate_super(old_sb);
}

static inline const char *hidepid2str(enum proc_hidepid v)
{
	switch (v) {
	case HIDEPID_OFF: return "off";
	case HIDEPID_NO_ACCESS: return "noaccess";
	case HIDEPID_INVISIBLE: return "invisible";
	case HIDEPID_NOT_PTRACEABLE: return "ptraceable";
	}
	WARN_ONCE(1, "bad hide_pid value: %d\n", v);
	return "unknown";
}

static int proc_show_options(struct seq_file *seq, struct dentry *root)
{
	struct proc_fs_info *fs_info = proc_sb_info(root->d_sb);

	if (!gid_eq(fs_info->pid_gid, GLOBAL_ROOT_GID))
		seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, fs_info->pid_gid));
	if (fs_info->hide_pid != HIDEPID_OFF)
		seq_printf(seq, ",hidepid=%s", hidepid2str(fs_info->hide_pid));
	if (fs_info->pidonly != PROC_PIDONLY_OFF)
		seq_printf(seq, ",subset=pid");

	return 0;
}

const struct super_operations proc_sops = {
	.alloc_inode	= proc_alloc_inode,
	.free_inode	= proc_free_inode,
	.drop_inode	= generic_delete_inode,
	.evict_inode	= proc_evict_inode,
	.statfs		= simple_statfs,
	.show_options	= proc_show_options,
};

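/*
 * pde->in_use counts callers currently inside one of the entry's ->proc_*
 * hooks.  proc_entry_rundown() adds BIAS, driving the count negative so
 * that use_pde()'s atomic_inc_unless_negative() refuses new users, then
 * waits until the last pre-existing user brings the count back to exactly
 * BIAS and completes ->pde_unload_completion.
 */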
enum {BIAS = -1U<<31};

static inline int use_pde(struct proc_dir_entry *pde)
{
	return likely(atomic_inc_unless_negative(&pde->in_use));
}

static void unuse_pde(struct proc_dir_entry *pde)
{
	if (unlikely(atomic_dec_return(&pde->in_use) == BIAS))
		complete(pde->pde_unload_completion);
}

/* pde is locked on entry, unlocked on exit */
static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
	__releases(&pde->pde_unload_lock)
{
	/*
	 * close() (proc_reg_release()) can't delete an entry and proceed:
	 * ->release hook needs to be available at the right moment.
	 *
	 * rmmod (remove_proc_entry() et al) can't delete an entry and proceed:
	 * "struct file" needs to be available at the right moment.
	 *
	 * Therefore, first process to enter this function does ->release() and
	 * signals its completion to the other process which does nothing.
	 */
	if (pdeo->closing) {
		/* somebody else is doing that, just wait */
		DECLARE_COMPLETION_ONSTACK(c);
		pdeo->c = &c;
		spin_unlock(&pde->pde_unload_lock);
		wait_for_completion(&c);
	} else {
		struct file *file;
		struct completion *c;

		pdeo->closing = true;
		spin_unlock(&pde->pde_unload_lock);
		file = pdeo->file;
		pde->proc_ops->proc_release(file_inode(file), file);
		spin_lock(&pde->pde_unload_lock);
		/* After ->release. */
		list_del(&pdeo->lh);
		c = pdeo->c;
		spin_unlock(&pde->pde_unload_lock);
		if (unlikely(c))
			complete(c);
		kmem_cache_free(pde_opener_cache, pdeo);
	}
}

void proc_entry_rundown(struct proc_dir_entry *de)
{
	DECLARE_COMPLETION_ONSTACK(c);
	/* Wait until all existing callers into module are done. */
	de->pde_unload_completion = &c;
	if (atomic_add_return(BIAS, &de->in_use) != BIAS)
		wait_for_completion(&c);

	/* ->pde_openers list can't grow from now on. */

	spin_lock(&de->pde_unload_lock);
	while (!list_empty(&de->pde_openers)) {
		struct pde_opener *pdeo;
		pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
		close_pdeo(de, pdeo);
		spin_lock(&de->pde_unload_lock);
	}
	spin_unlock(&de->pde_unload_lock);
}

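/*
 * Wrappers around the ->proc_* hooks.  A permanent entry can never be
 * removed, so its hook is called directly; for everything else the entry
 * is pinned with use_pde()/unuse_pde() around the call so that
 * remove_proc_entry() cannot tear it down while the hook is running.
 */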
static loff_t pde_lseek(struct proc_dir_entry *pde, struct file *file, loff_t offset, int whence)
{
	typeof_member(struct proc_ops, proc_lseek) lseek;

	lseek = pde->proc_ops->proc_lseek;
	if (!lseek)
		lseek = default_llseek;
	return lseek(file, offset, whence);
}

static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	loff_t rv = -EINVAL;

	if (pde_is_permanent(pde)) {
		return pde_lseek(pde, file, offset, whence);
	} else if (use_pde(pde)) {
		rv = pde_lseek(pde, file, offset, whence);
		unuse_pde(pde);
	}
	return rv;
}

static ssize_t proc_reg_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct proc_dir_entry *pde = PDE(file_inode(iocb->ki_filp));
	ssize_t ret;

	if (pde_is_permanent(pde))
		return pde->proc_ops->proc_read_iter(iocb, iter);

	if (!use_pde(pde))
		return -EIO;
	ret = pde->proc_ops->proc_read_iter(iocb, iter);
	unuse_pde(pde);
	return ret;
}

static ssize_t pde_read(struct proc_dir_entry *pde, struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	typeof_member(struct proc_ops, proc_read) read;

	read = pde->proc_ops->proc_read;
	if (read)
		return read(file, buf, count, ppos);
	return -EIO;
}

static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	ssize_t rv = -EIO;

	if (pde_is_permanent(pde)) {
		return pde_read(pde, file, buf, count, ppos);
	} else if (use_pde(pde)) {
		rv = pde_read(pde, file, buf, count, ppos);
		unuse_pde(pde);
	}
	return rv;
}

static ssize_t pde_write(struct proc_dir_entry *pde, struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	typeof_member(struct proc_ops, proc_write) write;

	write = pde->proc_ops->proc_write;
	if (write)
		return write(file, buf, count, ppos);
	return -EIO;
}

static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	ssize_t rv = -EIO;

	if (pde_is_permanent(pde)) {
		return pde_write(pde, file, buf, count, ppos);
	} else if (use_pde(pde)) {
		rv = pde_write(pde, file, buf, count, ppos);
		unuse_pde(pde);
	}
	return rv;
}

static __poll_t pde_poll(struct proc_dir_entry *pde, struct file *file, struct poll_table_struct *pts)
{
	typeof_member(struct proc_ops, proc_poll) poll;

	poll = pde->proc_ops->proc_poll;
	if (poll)
		return poll(file, pts);
	return DEFAULT_POLLMASK;
}

static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	__poll_t rv = DEFAULT_POLLMASK;

	if (pde_is_permanent(pde)) {
		return pde_poll(pde, file, pts);
	} else if (use_pde(pde)) {
		rv = pde_poll(pde, file, pts);
		unuse_pde(pde);
	}
	return rv;
}

static long pde_ioctl(struct proc_dir_entry *pde, struct file *file, unsigned int cmd, unsigned long arg)
{
	typeof_member(struct proc_ops, proc_ioctl) ioctl;

	ioctl = pde->proc_ops->proc_ioctl;
	if (ioctl)
		return ioctl(file, cmd, arg);
	return -ENOTTY;
}

static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	long rv = -ENOTTY;

	if (pde_is_permanent(pde)) {
		return pde_ioctl(pde, file, cmd, arg);
	} else if (use_pde(pde)) {
		rv = pde_ioctl(pde, file, cmd, arg);
		unuse_pde(pde);
	}
	return rv;
}

#ifdef CONFIG_COMPAT
static long pde_compat_ioctl(struct proc_dir_entry *pde, struct file *file, unsigned int cmd, unsigned long arg)
{
	typeof_member(struct proc_ops, proc_compat_ioctl) compat_ioctl;

	compat_ioctl = pde->proc_ops->proc_compat_ioctl;
	if (compat_ioctl)
		return compat_ioctl(file, cmd, arg);
	return -ENOTTY;
}

static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	long rv = -ENOTTY;
	if (pde_is_permanent(pde)) {
		return pde_compat_ioctl(pde, file, cmd, arg);
	} else if (use_pde(pde)) {
		rv = pde_compat_ioctl(pde, file, cmd, arg);
		unuse_pde(pde);
	}
	return rv;
}
#endif

static int pde_mmap(struct proc_dir_entry *pde, struct file *file, struct vm_area_struct *vma)
{
	typeof_member(struct proc_ops, proc_mmap) mmap;

	mmap = pde->proc_ops->proc_mmap;
	if (mmap)
		return mmap(file, vma);
	return -EIO;
}

static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	int rv = -EIO;

	if (pde_is_permanent(pde)) {
		return pde_mmap(pde, file, vma);
	} else if (use_pde(pde)) {
		rv = pde_mmap(pde, file, vma);
		unuse_pde(pde);
	}
	return rv;
}

static unsigned long
pde_get_unmapped_area(struct proc_dir_entry *pde, struct file *file, unsigned long orig_addr,
			   unsigned long len, unsigned long pgoff,
			   unsigned long flags)
{
	typeof_member(struct proc_ops, proc_get_unmapped_area) get_area;

	get_area = pde->proc_ops->proc_get_unmapped_area;
#ifdef CONFIG_MMU
	if (!get_area)
		get_area = current->mm->get_unmapped_area;
#endif
	if (get_area)
		return get_area(file, orig_addr, len, pgoff, flags);
	return orig_addr;
}

static unsigned long
proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
			   unsigned long len, unsigned long pgoff,
			   unsigned long flags)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	unsigned long rv = -EIO;

	if (pde_is_permanent(pde)) {
		return pde_get_unmapped_area(pde, file, orig_addr, len, pgoff, flags);
	} else if (use_pde(pde)) {
		rv = pde_get_unmapped_area(pde, file, orig_addr, len, pgoff, flags);
		unuse_pde(pde);
	}
	return rv;
}

static int proc_reg_open(struct inode *inode, struct file *file)
{
	struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb);
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	typeof_member(struct proc_ops, proc_open) open;
	typeof_member(struct proc_ops, proc_release) release;
	struct pde_opener *pdeo;

	if (pde_is_permanent(pde)) {
		open = pde->proc_ops->proc_open;
		if (open)
			rv = open(inode, file);
		return rv;
	}

	if (fs_info->pidonly == PROC_PIDONLY_ON)
		return -ENOENT;

	/*
	 * Ensure that
	 * 1) PDE's ->release hook will be called no matter what
	 *    either normally by close()/->release, or forcefully by
	 *    rmmod/remove_proc_entry.
	 *
	 * 2) rmmod isn't blocked by opening file in /proc and sitting on
	 *    the descriptor (including "rmmod foo </proc/foo" scenario).
	 *
	 * Save every "struct file" with custom ->release hook.
	 */
	if (!use_pde(pde))
		return -ENOENT;

	release = pde->proc_ops->proc_release;
	if (release) {
		pdeo = kmem_cache_alloc(pde_opener_cache, GFP_KERNEL);
		if (!pdeo) {
			rv = -ENOMEM;
			goto out_unuse;
		}
	}

	open = pde->proc_ops->proc_open;
	if (open)
		rv = open(inode, file);

	if (release) {
		if (rv == 0) {
			/* To know what to release. */
			pdeo->file = file;
			pdeo->closing = false;
			pdeo->c = NULL;
			spin_lock(&pde->pde_unload_lock);
			list_add(&pdeo->lh, &pde->pde_openers);
			spin_unlock(&pde->pde_unload_lock);
		} else
			kmem_cache_free(pde_opener_cache, pdeo);
	}

out_unuse:
	unuse_pde(pde);
	return rv;
}

static int proc_reg_release(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	struct pde_opener *pdeo;

	if (pde_is_permanent(pde)) {
		typeof_member(struct proc_ops, proc_release) release;

		release = pde->proc_ops->proc_release;
		if (release) {
			return release(inode, file);
		}
		return 0;
	}

	spin_lock(&pde->pde_unload_lock);
	list_for_each_entry(pdeo, &pde->pde_openers, lh) {
		if (pdeo->file == file) {
			close_pdeo(pde, pdeo);
			return 0;
		}
	}
	spin_unlock(&pde->pde_unload_lock);
	return 0;
}

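/*
 * Four file_operations variants are kept: ->read vs. ->read_iter based,
 * each with or without a compat ioctl handler.  proc_get_inode() selects
 * the matching one from the hooks present in the entry's proc_ops.
 */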
static const struct file_operations proc_reg_file_ops = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.mmap		= proc_reg_mmap,
	.get_unmapped_area = proc_reg_get_unmapped_area,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};

static const struct file_operations proc_iter_file_ops = {
	.llseek		= proc_reg_llseek,
	.read_iter	= proc_reg_read_iter,
	.write		= proc_reg_write,
	.splice_read	= generic_file_splice_read,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.mmap		= proc_reg_mmap,
	.get_unmapped_area = proc_reg_get_unmapped_area,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};

#ifdef CONFIG_COMPAT
static const struct file_operations proc_reg_file_ops_compat = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.compat_ioctl	= proc_reg_compat_ioctl,
	.mmap		= proc_reg_mmap,
	.get_unmapped_area = proc_reg_get_unmapped_area,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};

static const struct file_operations proc_iter_file_ops_compat = {
	.llseek		= proc_reg_llseek,
	.read_iter	= proc_reg_read_iter,
	.splice_read	= generic_file_splice_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.compat_ioctl	= proc_reg_compat_ioctl,
	.mmap		= proc_reg_mmap,
	.get_unmapped_area = proc_reg_get_unmapped_area,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
#endif

static void proc_put_link(void *p)
{
	unuse_pde(p);
}

static const char *proc_get_link(struct dentry *dentry,
				 struct inode *inode,
				 struct delayed_call *done)
{
	struct proc_dir_entry *pde = PDE(inode);
	if (!use_pde(pde))
		return ERR_PTR(-EINVAL);
	set_delayed_call(done, proc_put_link, pde);
	return pde->data;
}

const struct inode_operations proc_link_inode_operations = {
	.get_link	= proc_get_link,
};

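/*
 * Fill in a fresh inode for @de.  The caller's reference on @de moves to
 * the new inode (and is dropped here if inode allocation fails); it is
 * released again by proc_evict_inode() via pde_put().
 */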
struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
{
	struct inode *inode = new_inode(sb);

	if (!inode) {
		pde_put(de);
		return NULL;
	}

	inode->i_ino = de->low_ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
	PROC_I(inode)->pde = de;
	if (is_empty_pde(de)) {
		make_empty_dir_inode(inode);
		return inode;
	}

	if (de->mode) {
		inode->i_mode = de->mode;
		inode->i_uid = de->uid;
		inode->i_gid = de->gid;
	}
	if (de->size)
		inode->i_size = de->size;
	if (de->nlink)
		set_nlink(inode, de->nlink);

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = de->proc_iops;
		if (de->proc_ops->proc_read_iter)
			inode->i_fop = &proc_iter_file_ops;
		else
			inode->i_fop = &proc_reg_file_ops;
#ifdef CONFIG_COMPAT
		if (de->proc_ops->proc_compat_ioctl) {
			if (de->proc_ops->proc_read_iter)
				inode->i_fop = &proc_iter_file_ops_compat;
			else
				inode->i_fop = &proc_reg_file_ops_compat;
		}
#endif
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = de->proc_iops;
		inode->i_fop = de->proc_dir_ops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = de->proc_iops;
		inode->i_fop = NULL;
	} else {
		BUG();
	}
	return inode;
}