1 /* 2 * linux/fs/namei.c 3 * 4 * Copyright (C) 1991, 1992 Linus Torvalds 5 */ 6 7 /* 8 * Some corrections by tytso. 9 */ 10 11 /* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname 12 * lookup logic. 13 */ 14 /* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture. 15 */ 16 17 #include <linux/init.h> 18 #include <linux/export.h> 19 #include <linux/kernel.h> 20 #include <linux/slab.h> 21 #include <linux/fs.h> 22 #include <linux/namei.h> 23 #include <linux/pagemap.h> 24 #include <linux/fsnotify.h> 25 #include <linux/personality.h> 26 #include <linux/security.h> 27 #include <linux/ima.h> 28 #include <linux/syscalls.h> 29 #include <linux/mount.h> 30 #include <linux/audit.h> 31 #include <linux/capability.h> 32 #include <linux/file.h> 33 #include <linux/fcntl.h> 34 #include <linux/device_cgroup.h> 35 #include <linux/fs_struct.h> 36 #include <linux/posix_acl.h> 37 #include <asm/uaccess.h> 38 39 #include "internal.h" 40 #include "mount.h" 41 42 /* [Feb-1997 T. Schoebel-Theuer] 43 * Fundamental changes in the pathname lookup mechanisms (namei) 44 * were necessary because of omirr. The reason is that omirr needs 45 * to know the _real_ pathname, not the user-supplied one, in case 46 * of symlinks (and also when transname replacements occur). 47 * 48 * The new code replaces the old recursive symlink resolution with 49 * an iterative one (in case of non-nested symlink chains). It does 50 * this with calls to <fs>_follow_link(). 51 * As a side effect, dir_namei(), _namei() and follow_link() are now 52 * replaced with a single function lookup_dentry() that can handle all 53 * the special cases of the former code. 54 * 55 * With the new dcache, the pathname is stored at each inode, at least as 56 * long as the refcount of the inode is positive. As a side effect, the 57 * size of the dcache depends on the inode cache and thus is dynamic. 58 * 59 * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink 60 * resolution to correspond with current state of the code. 61 * 62 * Note that the symlink resolution is not *completely* iterative. 63 * There is still a significant amount of tail- and mid- recursion in 64 * the algorithm. Also, note that <fs>_readlink() is not used in 65 * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink() 66 * may return different results than <fs>_follow_link(). Many virtual 67 * filesystems (including /proc) exhibit this behavior. 68 */ 69 70 /* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation: 71 * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL 72 * and the name already exists in form of a symlink, try to create the new 73 * name indicated by the symlink. The old code always complained that the 74 * name already exists, due to not following the symlink even if its target 75 * is nonexistent. The new semantics affects also mknod() and link() when 76 * the name is a symlink pointing to a non-existent name. 77 * 78 * I don't know which semantics is the right one, since I have no access 79 * to standards. But I found by trial that HP-UX 9.0 has the full "new" 80 * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the 81 * "old" one. Personally, I think the new semantics is much more logical. 82 * Note that "ln old new" where "new" is a symlink pointing to a non-existing 83 * file does succeed in both HP-UX and SunOs, but not in Solaris 84 * and in the old Linux semantics. 85 */ 86 87 /* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink 88 * semantics. 
See the comments in "open_namei" and "do_link" below. 89 * 90 * [10-Sep-98 Alan Modra] Another symlink change. 91 */ 92 93 /* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks: 94 * inside the path - always follow. 95 * in the last component in creation/removal/renaming - never follow. 96 * if LOOKUP_FOLLOW passed - follow. 97 * if the pathname has trailing slashes - follow. 98 * otherwise - don't follow. 99 * (applied in that order). 100 * 101 * [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT 102 * restored for 2.4. This is the last surviving part of old 4.2BSD bug. 103 * During the 2.4 we need to fix the userland stuff depending on it - 104 * hopefully we will be able to get rid of that wart in 2.5. So far only 105 * XEmacs seems to be relying on it... 106 */ 107 /* 108 * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland) 109 * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives 110 * any extra contention... 111 */ 112 113 /* In order to reduce some races, while at the same time doing additional 114 * checking and hopefully speeding things up, we copy filenames to the 115 * kernel data space before using them.. 116 * 117 * POSIX.1 2.4: an empty pathname is invalid (ENOENT). 118 * PATH_MAX includes the nul terminator --RR. 119 */ 120 void final_putname(struct filename *name) 121 { 122 if (name->separate) { 123 __putname(name->name); 124 kfree(name); 125 } else { 126 __putname(name); 127 } 128 } 129 130 #define EMBEDDED_NAME_MAX (PATH_MAX - sizeof(struct filename)) 131 132 static struct filename * 133 getname_flags(const char __user *filename, int flags, int *empty) 134 { 135 struct filename *result, *err; 136 int len; 137 long max; 138 char *kname; 139 140 result = audit_reusename(filename); 141 if (result) 142 return result; 143 144 result = __getname(); 145 if (unlikely(!result)) 146 return ERR_PTR(-ENOMEM); 147 148 /* 149 * First, try to embed the struct filename inside the names_cache 150 * allocation 151 */ 152 kname = (char *)result + sizeof(*result); 153 result->name = kname; 154 result->separate = false; 155 max = EMBEDDED_NAME_MAX; 156 157 recopy: 158 len = strncpy_from_user(kname, filename, max); 159 if (unlikely(len < 0)) { 160 err = ERR_PTR(len); 161 goto error; 162 } 163 164 /* 165 * Uh-oh. We have a name that's approaching PATH_MAX. Allocate a 166 * separate struct filename so we can dedicate the entire 167 * names_cache allocation for the pathname, and re-do the copy from 168 * userland. 169 */ 170 if (len == EMBEDDED_NAME_MAX && max == EMBEDDED_NAME_MAX) { 171 kname = (char *)result; 172 173 result = kzalloc(sizeof(*result), GFP_KERNEL); 174 if (!result) { 175 err = ERR_PTR(-ENOMEM); 176 result = (struct filename *)kname; 177 goto error; 178 } 179 result->name = kname; 180 result->separate = true; 181 max = PATH_MAX; 182 goto recopy; 183 } 184 185 /* The empty path is special. */ 186 if (unlikely(!len)) { 187 if (empty) 188 *empty = 1; 189 err = ERR_PTR(-ENOENT); 190 if (!(flags & LOOKUP_EMPTY)) 191 goto error; 192 } 193 194 err = ERR_PTR(-ENAMETOOLONG); 195 if (unlikely(len >= PATH_MAX)) 196 goto error; 197 198 result->uptr = filename; 199 result->aname = NULL; 200 audit_getname(result); 201 return result; 202 203 error: 204 final_putname(result); 205 return err; 206 } 207 208 struct filename * 209 getname(const char __user * filename) 210 { 211 return getname_flags(filename, 0, NULL); 212 } 213 214 /* 215 * The "getname_kernel()" interface doesn't do pathnames longer 216 * than EMBEDDED_NAME_MAX. 
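 * Longer names simply fail with -ENAMETOOLONG.
 *
 * A minimal usage sketch (illustrative only - the path is made up and
 * error handling is reduced to the bare minimum):
 *
 *	struct filename *name = getname_kernel("some/kernel/path");
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	...
 *	putname(name);
 *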
Deal with it - you're a kernel user. 217 */ 218 struct filename * 219 getname_kernel(const char * filename) 220 { 221 struct filename *result; 222 char *kname; 223 int len; 224 225 len = strlen(filename); 226 if (len >= EMBEDDED_NAME_MAX) 227 return ERR_PTR(-ENAMETOOLONG); 228 229 result = __getname(); 230 if (unlikely(!result)) 231 return ERR_PTR(-ENOMEM); 232 233 kname = (char *)result + sizeof(*result); 234 result->name = kname; 235 result->uptr = NULL; 236 result->aname = NULL; 237 result->separate = false; 238 239 strlcpy(kname, filename, EMBEDDED_NAME_MAX); 240 return result; 241 } 242 243 #ifdef CONFIG_AUDITSYSCALL 244 void putname(struct filename *name) 245 { 246 if (unlikely(!audit_dummy_context())) 247 return audit_putname(name); 248 final_putname(name); 249 } 250 #endif 251 252 static int check_acl(struct inode *inode, int mask) 253 { 254 #ifdef CONFIG_FS_POSIX_ACL 255 struct posix_acl *acl; 256 257 if (mask & MAY_NOT_BLOCK) { 258 acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS); 259 if (!acl) 260 return -EAGAIN; 261 /* no ->get_acl() calls in RCU mode... */ 262 if (acl == ACL_NOT_CACHED) 263 return -ECHILD; 264 return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK); 265 } 266 267 acl = get_acl(inode, ACL_TYPE_ACCESS); 268 if (IS_ERR(acl)) 269 return PTR_ERR(acl); 270 if (acl) { 271 int error = posix_acl_permission(inode, acl, mask); 272 posix_acl_release(acl); 273 return error; 274 } 275 #endif 276 277 return -EAGAIN; 278 } 279 280 /* 281 * This does the basic permission checking 282 */ 283 static int acl_permission_check(struct inode *inode, int mask) 284 { 285 unsigned int mode = inode->i_mode; 286 287 if (likely(uid_eq(current_fsuid(), inode->i_uid))) 288 mode >>= 6; 289 else { 290 if (IS_POSIXACL(inode) && (mode & S_IRWXG)) { 291 int error = check_acl(inode, mask); 292 if (error != -EAGAIN) 293 return error; 294 } 295 296 if (in_group_p(inode->i_gid)) 297 mode >>= 3; 298 } 299 300 /* 301 * If the DACs are ok we don't need any capability check. 302 */ 303 if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0) 304 return 0; 305 return -EACCES; 306 } 307 308 /** 309 * generic_permission - check for access rights on a Posix-like filesystem 310 * @inode: inode to check access rights for 311 * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...) 312 * 313 * Used to check for read/write/execute permissions on a file. 314 * We use "fsuid" for this, letting us set arbitrary permissions 315 * for filesystem access without changing the "normal" uids which 316 * are used for other things. 317 * 318 * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk 319 * request cannot be satisfied (eg. requires blocking or too much complexity). 320 * It would then be called again in ref-walk mode. 321 */ 322 int generic_permission(struct inode *inode, int mask) 323 { 324 int ret; 325 326 /* 327 * Do the basic permission checks. 328 */ 329 ret = acl_permission_check(inode, mask); 330 if (ret != -EACCES) 331 return ret; 332 333 if (S_ISDIR(inode->i_mode)) { 334 /* DACs are overridable for directories */ 335 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) 336 return 0; 337 if (!(mask & MAY_WRITE)) 338 if (capable_wrt_inode_uidgid(inode, 339 CAP_DAC_READ_SEARCH)) 340 return 0; 341 return -EACCES; 342 } 343 /* 344 * Read/write DACs are always overridable. 345 * Executable DACs are overridable when there is 346 * at least one exec bit set. 
347 */ 348 if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO)) 349 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) 350 return 0; 351 352 /* 353 * Searching includes executable on directories, else just read. 354 */ 355 mask &= MAY_READ | MAY_WRITE | MAY_EXEC; 356 if (mask == MAY_READ) 357 if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH)) 358 return 0; 359 360 return -EACCES; 361 } 362 EXPORT_SYMBOL(generic_permission); 363 364 /* 365 * We _really_ want to just do "generic_permission()" without 366 * even looking at the inode->i_op values. So we keep a cache 367 * flag in inode->i_opflags, that says "this has not special 368 * permission function, use the fast case". 369 */ 370 static inline int do_inode_permission(struct inode *inode, int mask) 371 { 372 if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) { 373 if (likely(inode->i_op->permission)) 374 return inode->i_op->permission(inode, mask); 375 376 /* This gets set once for the inode lifetime */ 377 spin_lock(&inode->i_lock); 378 inode->i_opflags |= IOP_FASTPERM; 379 spin_unlock(&inode->i_lock); 380 } 381 return generic_permission(inode, mask); 382 } 383 384 /** 385 * __inode_permission - Check for access rights to a given inode 386 * @inode: Inode to check permission on 387 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) 388 * 389 * Check for read/write/execute permissions on an inode. 390 * 391 * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask. 392 * 393 * This does not check for a read-only file system. You probably want 394 * inode_permission(). 395 */ 396 int __inode_permission(struct inode *inode, int mask) 397 { 398 int retval; 399 400 if (unlikely(mask & MAY_WRITE)) { 401 /* 402 * Nobody gets write access to an immutable file. 403 */ 404 if (IS_IMMUTABLE(inode)) 405 return -EACCES; 406 } 407 408 retval = do_inode_permission(inode, mask); 409 if (retval) 410 return retval; 411 412 retval = devcgroup_inode_permission(inode, mask); 413 if (retval) 414 return retval; 415 416 return security_inode_permission(inode, mask); 417 } 418 419 /** 420 * sb_permission - Check superblock-level permissions 421 * @sb: Superblock of inode to check permission on 422 * @inode: Inode to check permission on 423 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) 424 * 425 * Separate out file-system wide checks from inode-specific permission checks. 426 */ 427 static int sb_permission(struct super_block *sb, struct inode *inode, int mask) 428 { 429 if (unlikely(mask & MAY_WRITE)) { 430 umode_t mode = inode->i_mode; 431 432 /* Nobody gets write access to a read-only fs. */ 433 if ((sb->s_flags & MS_RDONLY) && 434 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) 435 return -EROFS; 436 } 437 return 0; 438 } 439 440 /** 441 * inode_permission - Check for access rights to a given inode 442 * @inode: Inode to check permission on 443 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) 444 * 445 * Check for read/write/execute permissions on an inode. We use fs[ug]id for 446 * this, letting us set arbitrary permissions for filesystem access without 447 * changing the "normal" UIDs which are used for other things. 448 * 449 * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask. 
450 */ 451 int inode_permission(struct inode *inode, int mask) 452 { 453 int retval; 454 455 retval = sb_permission(inode->i_sb, inode, mask); 456 if (retval) 457 return retval; 458 return __inode_permission(inode, mask); 459 } 460 EXPORT_SYMBOL(inode_permission); 461 462 /** 463 * path_get - get a reference to a path 464 * @path: path to get the reference to 465 * 466 * Given a path increment the reference count to the dentry and the vfsmount. 467 */ 468 void path_get(const struct path *path) 469 { 470 mntget(path->mnt); 471 dget(path->dentry); 472 } 473 EXPORT_SYMBOL(path_get); 474 475 /** 476 * path_put - put a reference to a path 477 * @path: path to put the reference to 478 * 479 * Given a path decrement the reference count to the dentry and the vfsmount. 480 */ 481 void path_put(const struct path *path) 482 { 483 dput(path->dentry); 484 mntput(path->mnt); 485 } 486 EXPORT_SYMBOL(path_put); 487 488 /* 489 * Path walking has 2 modes, rcu-walk and ref-walk (see 490 * Documentation/filesystems/path-lookup.txt). In situations when we can't 491 * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab 492 * normal reference counts on dentries and vfsmounts to transition to rcu-walk 493 * mode. Refcounts are grabbed at the last known good point before rcu-walk 494 * got stuck, so ref-walk may continue from there. If this is not successful 495 * (eg. a seqcount has changed), then failure is returned and it's up to caller 496 * to restart the path walk from the beginning in ref-walk mode. 497 */ 498 499 /** 500 * unlazy_walk - try to switch to ref-walk mode. 501 * @nd: nameidata pathwalk data 502 * @dentry: child of nd->path.dentry or NULL 503 * Returns: 0 on success, -ECHILD on failure 504 * 505 * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry 506 * for ref-walk mode. @dentry must be a path found by a do_lookup call on 507 * @nd or NULL. Must be called from rcu-walk context. 508 */ 509 static int unlazy_walk(struct nameidata *nd, struct dentry *dentry) 510 { 511 struct fs_struct *fs = current->fs; 512 struct dentry *parent = nd->path.dentry; 513 514 BUG_ON(!(nd->flags & LOOKUP_RCU)); 515 516 /* 517 * After legitimizing the bastards, terminate_walk() 518 * will do the right thing for non-RCU mode, and all our 519 * subsequent exit cases should rcu_read_unlock() 520 * before returning. Do vfsmount first; if dentry 521 * can't be legitimized, just set nd->path.dentry to NULL 522 * and rely on dput(NULL) being a no-op. 523 */ 524 if (!legitimize_mnt(nd->path.mnt, nd->m_seq)) 525 return -ECHILD; 526 nd->flags &= ~LOOKUP_RCU; 527 528 if (!lockref_get_not_dead(&parent->d_lockref)) { 529 nd->path.dentry = NULL; 530 goto out; 531 } 532 533 /* 534 * For a negative lookup, the lookup sequence point is the parents 535 * sequence point, and it only needs to revalidate the parent dentry. 536 * 537 * For a positive lookup, we need to move both the parent and the 538 * dentry from the RCU domain to be properly refcounted. And the 539 * sequence number in the dentry validates *both* dentry counters, 540 * since we checked the sequence number of the parent after we got 541 * the child sequence number. So we know the parent must still 542 * be valid if the child sequence number is still valid. 
543 */ 544 if (!dentry) { 545 if (read_seqcount_retry(&parent->d_seq, nd->seq)) 546 goto out; 547 BUG_ON(nd->inode != parent->d_inode); 548 } else { 549 if (!lockref_get_not_dead(&dentry->d_lockref)) 550 goto out; 551 if (read_seqcount_retry(&dentry->d_seq, nd->seq)) 552 goto drop_dentry; 553 } 554 555 /* 556 * Sequence counts matched. Now make sure that the root is 557 * still valid and get it if required. 558 */ 559 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { 560 spin_lock(&fs->lock); 561 if (nd->root.mnt != fs->root.mnt || nd->root.dentry != fs->root.dentry) 562 goto unlock_and_drop_dentry; 563 path_get(&nd->root); 564 spin_unlock(&fs->lock); 565 } 566 567 rcu_read_unlock(); 568 return 0; 569 570 unlock_and_drop_dentry: 571 spin_unlock(&fs->lock); 572 drop_dentry: 573 rcu_read_unlock(); 574 dput(dentry); 575 goto drop_root_mnt; 576 out: 577 rcu_read_unlock(); 578 drop_root_mnt: 579 if (!(nd->flags & LOOKUP_ROOT)) 580 nd->root.mnt = NULL; 581 return -ECHILD; 582 } 583 584 static inline int d_revalidate(struct dentry *dentry, unsigned int flags) 585 { 586 return dentry->d_op->d_revalidate(dentry, flags); 587 } 588 589 /** 590 * complete_walk - successful completion of path walk 591 * @nd: pointer nameidata 592 * 593 * If we had been in RCU mode, drop out of it and legitimize nd->path. 594 * Revalidate the final result, unless we'd already done that during 595 * the path walk or the filesystem doesn't ask for it. Return 0 on 596 * success, -error on failure. In case of failure caller does not 597 * need to drop nd->path. 598 */ 599 static int complete_walk(struct nameidata *nd) 600 { 601 struct dentry *dentry = nd->path.dentry; 602 int status; 603 604 if (nd->flags & LOOKUP_RCU) { 605 nd->flags &= ~LOOKUP_RCU; 606 if (!(nd->flags & LOOKUP_ROOT)) 607 nd->root.mnt = NULL; 608 609 if (!legitimize_mnt(nd->path.mnt, nd->m_seq)) { 610 rcu_read_unlock(); 611 return -ECHILD; 612 } 613 if (unlikely(!lockref_get_not_dead(&dentry->d_lockref))) { 614 rcu_read_unlock(); 615 mntput(nd->path.mnt); 616 return -ECHILD; 617 } 618 if (read_seqcount_retry(&dentry->d_seq, nd->seq)) { 619 rcu_read_unlock(); 620 dput(dentry); 621 mntput(nd->path.mnt); 622 return -ECHILD; 623 } 624 rcu_read_unlock(); 625 } 626 627 if (likely(!(nd->flags & LOOKUP_JUMPED))) 628 return 0; 629 630 if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE))) 631 return 0; 632 633 status = dentry->d_op->d_weak_revalidate(dentry, nd->flags); 634 if (status > 0) 635 return 0; 636 637 if (!status) 638 status = -ESTALE; 639 640 path_put(&nd->path); 641 return status; 642 } 643 644 static __always_inline void set_root(struct nameidata *nd) 645 { 646 if (!nd->root.mnt) 647 get_fs_root(current->fs, &nd->root); 648 } 649 650 static int link_path_walk(const char *, struct nameidata *); 651 652 static __always_inline void set_root_rcu(struct nameidata *nd) 653 { 654 if (!nd->root.mnt) { 655 struct fs_struct *fs = current->fs; 656 unsigned seq; 657 658 do { 659 seq = read_seqcount_begin(&fs->seq); 660 nd->root = fs->root; 661 nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq); 662 } while (read_seqcount_retry(&fs->seq, seq)); 663 } 664 } 665 666 static void path_put_conditional(struct path *path, struct nameidata *nd) 667 { 668 dput(path->dentry); 669 if (path->mnt != nd->path.mnt) 670 mntput(path->mnt); 671 } 672 673 static inline void path_to_nameidata(const struct path *path, 674 struct nameidata *nd) 675 { 676 if (!(nd->flags & LOOKUP_RCU)) { 677 dput(nd->path.dentry); 678 if (nd->path.mnt != path->mnt) 679 mntput(nd->path.mnt); 
680 } 681 nd->path.mnt = path->mnt; 682 nd->path.dentry = path->dentry; 683 } 684 685 /* 686 * Helper to directly jump to a known parsed path from ->follow_link, 687 * caller must have taken a reference to path beforehand. 688 */ 689 void nd_jump_link(struct nameidata *nd, struct path *path) 690 { 691 path_put(&nd->path); 692 693 nd->path = *path; 694 nd->inode = nd->path.dentry->d_inode; 695 nd->flags |= LOOKUP_JUMPED; 696 } 697 698 static inline void put_link(struct nameidata *nd, struct path *link, void *cookie) 699 { 700 struct inode *inode = link->dentry->d_inode; 701 if (inode->i_op->put_link) 702 inode->i_op->put_link(link->dentry, nd, cookie); 703 path_put(link); 704 } 705 706 int sysctl_protected_symlinks __read_mostly = 0; 707 int sysctl_protected_hardlinks __read_mostly = 0; 708 709 /** 710 * may_follow_link - Check symlink following for unsafe situations 711 * @link: The path of the symlink 712 * @nd: nameidata pathwalk data 713 * 714 * In the case of the sysctl_protected_symlinks sysctl being enabled, 715 * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is 716 * in a sticky world-writable directory. This is to protect privileged 717 * processes from failing races against path names that may change out 718 * from under them by way of other users creating malicious symlinks. 719 * It will permit symlinks to be followed only when outside a sticky 720 * world-writable directory, or when the uid of the symlink and follower 721 * match, or when the directory owner matches the symlink's owner. 722 * 723 * Returns 0 if following the symlink is allowed, -ve on error. 724 */ 725 static inline int may_follow_link(struct path *link, struct nameidata *nd) 726 { 727 const struct inode *inode; 728 const struct inode *parent; 729 730 if (!sysctl_protected_symlinks) 731 return 0; 732 733 /* Allowed if owner and follower match. */ 734 inode = link->dentry->d_inode; 735 if (uid_eq(current_cred()->fsuid, inode->i_uid)) 736 return 0; 737 738 /* Allowed if parent directory not sticky and world-writable. */ 739 parent = nd->path.dentry->d_inode; 740 if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH)) 741 return 0; 742 743 /* Allowed if parent directory and link owner match. */ 744 if (uid_eq(parent->i_uid, inode->i_uid)) 745 return 0; 746 747 audit_log_link_denied("follow_link", link); 748 path_put_conditional(link, nd); 749 path_put(&nd->path); 750 return -EACCES; 751 } 752 753 /** 754 * safe_hardlink_source - Check for safe hardlink conditions 755 * @inode: the source inode to hardlink from 756 * 757 * Return false if at least one of the following conditions: 758 * - inode is not a regular file 759 * - inode is setuid 760 * - inode is setgid and group-exec 761 * - access failure for read and write 762 * 763 * Otherwise returns true. 764 */ 765 static bool safe_hardlink_source(struct inode *inode) 766 { 767 umode_t mode = inode->i_mode; 768 769 /* Special files should not get pinned to the filesystem. */ 770 if (!S_ISREG(mode)) 771 return false; 772 773 /* Setuid files should not get pinned to the filesystem. */ 774 if (mode & S_ISUID) 775 return false; 776 777 /* Executable setgid files should not get pinned to the filesystem. */ 778 if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) 779 return false; 780 781 /* Hardlinking to unreadable or unwritable sources is dangerous. 
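 * The inode_permission() check below runs with the caller's fs[ug]id,
 * so the source only counts as safe if the would-be linker could
 * already open it for both reading and writing.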
*/ 782 if (inode_permission(inode, MAY_READ | MAY_WRITE)) 783 return false; 784 785 return true; 786 } 787 788 /** 789 * may_linkat - Check permissions for creating a hardlink 790 * @link: the source to hardlink from 791 * 792 * Block hardlink when all of: 793 * - sysctl_protected_hardlinks enabled 794 * - fsuid does not match inode 795 * - hardlink source is unsafe (see safe_hardlink_source() above) 796 * - not CAP_FOWNER 797 * 798 * Returns 0 if successful, -ve on error. 799 */ 800 static int may_linkat(struct path *link) 801 { 802 const struct cred *cred; 803 struct inode *inode; 804 805 if (!sysctl_protected_hardlinks) 806 return 0; 807 808 cred = current_cred(); 809 inode = link->dentry->d_inode; 810 811 /* Source inode owner (or CAP_FOWNER) can hardlink all they like, 812 * otherwise, it must be a safe source. 813 */ 814 if (uid_eq(cred->fsuid, inode->i_uid) || safe_hardlink_source(inode) || 815 capable(CAP_FOWNER)) 816 return 0; 817 818 audit_log_link_denied("linkat", link); 819 return -EPERM; 820 } 821 822 static __always_inline int 823 follow_link(struct path *link, struct nameidata *nd, void **p) 824 { 825 struct dentry *dentry = link->dentry; 826 int error; 827 char *s; 828 829 BUG_ON(nd->flags & LOOKUP_RCU); 830 831 if (link->mnt == nd->path.mnt) 832 mntget(link->mnt); 833 834 error = -ELOOP; 835 if (unlikely(current->total_link_count >= 40)) 836 goto out_put_nd_path; 837 838 cond_resched(); 839 current->total_link_count++; 840 841 touch_atime(link); 842 nd_set_link(nd, NULL); 843 844 error = security_inode_follow_link(link->dentry, nd); 845 if (error) 846 goto out_put_nd_path; 847 848 nd->last_type = LAST_BIND; 849 *p = dentry->d_inode->i_op->follow_link(dentry, nd); 850 error = PTR_ERR(*p); 851 if (IS_ERR(*p)) 852 goto out_put_nd_path; 853 854 error = 0; 855 s = nd_get_link(nd); 856 if (s) { 857 if (unlikely(IS_ERR(s))) { 858 path_put(&nd->path); 859 put_link(nd, link, *p); 860 return PTR_ERR(s); 861 } 862 if (*s == '/') { 863 set_root(nd); 864 path_put(&nd->path); 865 nd->path = nd->root; 866 path_get(&nd->root); 867 nd->flags |= LOOKUP_JUMPED; 868 } 869 nd->inode = nd->path.dentry->d_inode; 870 error = link_path_walk(s, nd); 871 if (unlikely(error)) 872 put_link(nd, link, *p); 873 } 874 875 return error; 876 877 out_put_nd_path: 878 *p = NULL; 879 path_put(&nd->path); 880 path_put(link); 881 return error; 882 } 883 884 static int follow_up_rcu(struct path *path) 885 { 886 struct mount *mnt = real_mount(path->mnt); 887 struct mount *parent; 888 struct dentry *mountpoint; 889 890 parent = mnt->mnt_parent; 891 if (&parent->mnt == path->mnt) 892 return 0; 893 mountpoint = mnt->mnt_mountpoint; 894 path->dentry = mountpoint; 895 path->mnt = &parent->mnt; 896 return 1; 897 } 898 899 /* 900 * follow_up - Find the mountpoint of path's vfsmount 901 * 902 * Given a path, find the mountpoint of its source file system. 903 * Replace @path with the path of the mountpoint in the parent mount. 904 * Up is towards /. 905 * 906 * Return 1 if we went up a level and 0 if we were already at the 907 * root. 
908 */ 909 int follow_up(struct path *path) 910 { 911 struct mount *mnt = real_mount(path->mnt); 912 struct mount *parent; 913 struct dentry *mountpoint; 914 915 read_seqlock_excl(&mount_lock); 916 parent = mnt->mnt_parent; 917 if (parent == mnt) { 918 read_sequnlock_excl(&mount_lock); 919 return 0; 920 } 921 mntget(&parent->mnt); 922 mountpoint = dget(mnt->mnt_mountpoint); 923 read_sequnlock_excl(&mount_lock); 924 dput(path->dentry); 925 path->dentry = mountpoint; 926 mntput(path->mnt); 927 path->mnt = &parent->mnt; 928 return 1; 929 } 930 EXPORT_SYMBOL(follow_up); 931 932 /* 933 * Perform an automount 934 * - return -EISDIR to tell follow_managed() to stop and return the path we 935 * were called with. 936 */ 937 static int follow_automount(struct path *path, unsigned flags, 938 bool *need_mntput) 939 { 940 struct vfsmount *mnt; 941 int err; 942 943 if (!path->dentry->d_op || !path->dentry->d_op->d_automount) 944 return -EREMOTE; 945 946 /* We don't want to mount if someone's just doing a stat - 947 * unless they're stat'ing a directory and appended a '/' to 948 * the name. 949 * 950 * We do, however, want to mount if someone wants to open or 951 * create a file of any type under the mountpoint, wants to 952 * traverse through the mountpoint or wants to open the 953 * mounted directory. Also, autofs may mark negative dentries 954 * as being automount points. These will need the attentions 955 * of the daemon to instantiate them before they can be used. 956 */ 957 if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | 958 LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) && 959 path->dentry->d_inode) 960 return -EISDIR; 961 962 current->total_link_count++; 963 if (current->total_link_count >= 40) 964 return -ELOOP; 965 966 mnt = path->dentry->d_op->d_automount(path); 967 if (IS_ERR(mnt)) { 968 /* 969 * The filesystem is allowed to return -EISDIR here to indicate 970 * it doesn't want to automount. For instance, autofs would do 971 * this so that its userspace daemon can mount on this dentry. 972 * 973 * However, we can only permit this if it's a terminal point in 974 * the path being looked up; if it wasn't then the remainder of 975 * the path is inaccessible and we should say so. 976 */ 977 if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_PARENT)) 978 return -EREMOTE; 979 return PTR_ERR(mnt); 980 } 981 982 if (!mnt) /* mount collision */ 983 return 0; 984 985 if (!*need_mntput) { 986 /* lock_mount() may release path->mnt on error */ 987 mntget(path->mnt); 988 *need_mntput = true; 989 } 990 err = finish_automount(mnt, path); 991 992 switch (err) { 993 case -EBUSY: 994 /* Someone else made a mount here whilst we were busy */ 995 return 0; 996 case 0: 997 path_put(path); 998 path->mnt = mnt; 999 path->dentry = dget(mnt->mnt_root); 1000 return 0; 1001 default: 1002 return err; 1003 } 1004 1005 } 1006 1007 /* 1008 * Handle a dentry that is managed in some way. 1009 * - Flagged for transit management (autofs) 1010 * - Flagged as mountpoint 1011 * - Flagged as automount point 1012 * 1013 * This may only be called in refwalk mode. 
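 *
 * Returns a negative error, or 0/1 where a positive value tells the
 * caller that path->mnt may have been replaced (lookup_fast() and
 * lookup_slow() then set LOOKUP_JUMPED).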
1014 * 1015 * Serialization is taken care of in namespace.c 1016 */ 1017 static int follow_managed(struct path *path, unsigned flags) 1018 { 1019 struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */ 1020 unsigned managed; 1021 bool need_mntput = false; 1022 int ret = 0; 1023 1024 /* Given that we're not holding a lock here, we retain the value in a 1025 * local variable for each dentry as we look at it so that we don't see 1026 * the components of that value change under us */ 1027 while (managed = ACCESS_ONCE(path->dentry->d_flags), 1028 managed &= DCACHE_MANAGED_DENTRY, 1029 unlikely(managed != 0)) { 1030 /* Allow the filesystem to manage the transit without i_mutex 1031 * being held. */ 1032 if (managed & DCACHE_MANAGE_TRANSIT) { 1033 BUG_ON(!path->dentry->d_op); 1034 BUG_ON(!path->dentry->d_op->d_manage); 1035 ret = path->dentry->d_op->d_manage(path->dentry, false); 1036 if (ret < 0) 1037 break; 1038 } 1039 1040 /* Transit to a mounted filesystem. */ 1041 if (managed & DCACHE_MOUNTED) { 1042 struct vfsmount *mounted = lookup_mnt(path); 1043 if (mounted) { 1044 dput(path->dentry); 1045 if (need_mntput) 1046 mntput(path->mnt); 1047 path->mnt = mounted; 1048 path->dentry = dget(mounted->mnt_root); 1049 need_mntput = true; 1050 continue; 1051 } 1052 1053 /* Something is mounted on this dentry in another 1054 * namespace and/or whatever was mounted there in this 1055 * namespace got unmounted before lookup_mnt() could 1056 * get it */ 1057 } 1058 1059 /* Handle an automount point */ 1060 if (managed & DCACHE_NEED_AUTOMOUNT) { 1061 ret = follow_automount(path, flags, &need_mntput); 1062 if (ret < 0) 1063 break; 1064 continue; 1065 } 1066 1067 /* We didn't change the current path point */ 1068 break; 1069 } 1070 1071 if (need_mntput && path->mnt == mnt) 1072 mntput(path->mnt); 1073 if (ret == -EISDIR) 1074 ret = 0; 1075 return ret < 0 ? ret : need_mntput; 1076 } 1077 1078 int follow_down_one(struct path *path) 1079 { 1080 struct vfsmount *mounted; 1081 1082 mounted = lookup_mnt(path); 1083 if (mounted) { 1084 dput(path->dentry); 1085 mntput(path->mnt); 1086 path->mnt = mounted; 1087 path->dentry = dget(mounted->mnt_root); 1088 return 1; 1089 } 1090 return 0; 1091 } 1092 EXPORT_SYMBOL(follow_down_one); 1093 1094 static inline int managed_dentry_rcu(struct dentry *dentry) 1095 { 1096 return (dentry->d_flags & DCACHE_MANAGE_TRANSIT) ? 1097 dentry->d_op->d_manage(dentry, true) : 0; 1098 } 1099 1100 /* 1101 * Try to skip to top of mountpoint pile in rcuwalk mode. Fail if 1102 * we meet a managed dentry that would need blocking. 1103 */ 1104 static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, 1105 struct inode **inode) 1106 { 1107 for (;;) { 1108 struct mount *mounted; 1109 /* 1110 * Don't forget we might have a non-mountpoint managed dentry 1111 * that wants to block transit. 1112 */ 1113 switch (managed_dentry_rcu(path->dentry)) { 1114 case -ECHILD: 1115 default: 1116 return false; 1117 case -EISDIR: 1118 return true; 1119 case 0: 1120 break; 1121 } 1122 1123 if (!d_mountpoint(path->dentry)) 1124 return !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT); 1125 1126 mounted = __lookup_mnt(path->mnt, path->dentry); 1127 if (!mounted) 1128 break; 1129 path->mnt = &mounted->mnt; 1130 path->dentry = mounted->mnt.mnt_root; 1131 nd->flags |= LOOKUP_JUMPED; 1132 nd->seq = read_seqcount_begin(&path->dentry->d_seq); 1133 /* 1134 * Update the inode too. 
We don't need to re-check the 1135 * dentry sequence number here after this d_inode read, 1136 * because a mount-point is always pinned. 1137 */ 1138 *inode = path->dentry->d_inode; 1139 } 1140 return read_seqretry(&mount_lock, nd->m_seq) && 1141 !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT); 1142 } 1143 1144 static int follow_dotdot_rcu(struct nameidata *nd) 1145 { 1146 set_root_rcu(nd); 1147 1148 while (1) { 1149 if (nd->path.dentry == nd->root.dentry && 1150 nd->path.mnt == nd->root.mnt) { 1151 break; 1152 } 1153 if (nd->path.dentry != nd->path.mnt->mnt_root) { 1154 struct dentry *old = nd->path.dentry; 1155 struct dentry *parent = old->d_parent; 1156 unsigned seq; 1157 1158 seq = read_seqcount_begin(&parent->d_seq); 1159 if (read_seqcount_retry(&old->d_seq, nd->seq)) 1160 goto failed; 1161 nd->path.dentry = parent; 1162 nd->seq = seq; 1163 break; 1164 } 1165 if (!follow_up_rcu(&nd->path)) 1166 break; 1167 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); 1168 } 1169 while (d_mountpoint(nd->path.dentry)) { 1170 struct mount *mounted; 1171 mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry); 1172 if (!mounted) 1173 break; 1174 nd->path.mnt = &mounted->mnt; 1175 nd->path.dentry = mounted->mnt.mnt_root; 1176 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); 1177 if (!read_seqretry(&mount_lock, nd->m_seq)) 1178 goto failed; 1179 } 1180 nd->inode = nd->path.dentry->d_inode; 1181 return 0; 1182 1183 failed: 1184 nd->flags &= ~LOOKUP_RCU; 1185 if (!(nd->flags & LOOKUP_ROOT)) 1186 nd->root.mnt = NULL; 1187 rcu_read_unlock(); 1188 return -ECHILD; 1189 } 1190 1191 /* 1192 * Follow down to the covering mount currently visible to userspace. At each 1193 * point, the filesystem owning that dentry may be queried as to whether the 1194 * caller is permitted to proceed or not. 1195 */ 1196 int follow_down(struct path *path) 1197 { 1198 unsigned managed; 1199 int ret; 1200 1201 while (managed = ACCESS_ONCE(path->dentry->d_flags), 1202 unlikely(managed & DCACHE_MANAGED_DENTRY)) { 1203 /* Allow the filesystem to manage the transit without i_mutex 1204 * being held. 1205 * 1206 * We indicate to the filesystem if someone is trying to mount 1207 * something here. This gives autofs the chance to deny anyone 1208 * other than its daemon the right to mount on its 1209 * superstructure. 1210 * 1211 * The filesystem may sleep at this point. 1212 */ 1213 if (managed & DCACHE_MANAGE_TRANSIT) { 1214 BUG_ON(!path->dentry->d_op); 1215 BUG_ON(!path->dentry->d_op->d_manage); 1216 ret = path->dentry->d_op->d_manage( 1217 path->dentry, false); 1218 if (ret < 0) 1219 return ret == -EISDIR ? 0 : ret; 1220 } 1221 1222 /* Transit to a mounted filesystem. 
*/ 1223 if (managed & DCACHE_MOUNTED) { 1224 struct vfsmount *mounted = lookup_mnt(path); 1225 if (!mounted) 1226 break; 1227 dput(path->dentry); 1228 mntput(path->mnt); 1229 path->mnt = mounted; 1230 path->dentry = dget(mounted->mnt_root); 1231 continue; 1232 } 1233 1234 /* Don't handle automount points here */ 1235 break; 1236 } 1237 return 0; 1238 } 1239 EXPORT_SYMBOL(follow_down); 1240 1241 /* 1242 * Skip to top of mountpoint pile in refwalk mode for follow_dotdot() 1243 */ 1244 static void follow_mount(struct path *path) 1245 { 1246 while (d_mountpoint(path->dentry)) { 1247 struct vfsmount *mounted = lookup_mnt(path); 1248 if (!mounted) 1249 break; 1250 dput(path->dentry); 1251 mntput(path->mnt); 1252 path->mnt = mounted; 1253 path->dentry = dget(mounted->mnt_root); 1254 } 1255 } 1256 1257 static void follow_dotdot(struct nameidata *nd) 1258 { 1259 set_root(nd); 1260 1261 while(1) { 1262 struct dentry *old = nd->path.dentry; 1263 1264 if (nd->path.dentry == nd->root.dentry && 1265 nd->path.mnt == nd->root.mnt) { 1266 break; 1267 } 1268 if (nd->path.dentry != nd->path.mnt->mnt_root) { 1269 /* rare case of legitimate dget_parent()... */ 1270 nd->path.dentry = dget_parent(nd->path.dentry); 1271 dput(old); 1272 break; 1273 } 1274 if (!follow_up(&nd->path)) 1275 break; 1276 } 1277 follow_mount(&nd->path); 1278 nd->inode = nd->path.dentry->d_inode; 1279 } 1280 1281 /* 1282 * This looks up the name in dcache, possibly revalidates the old dentry and 1283 * allocates a new one if not found or not valid. In the need_lookup argument 1284 * returns whether i_op->lookup is necessary. 1285 * 1286 * dir->d_inode->i_mutex must be held 1287 */ 1288 static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir, 1289 unsigned int flags, bool *need_lookup) 1290 { 1291 struct dentry *dentry; 1292 int error; 1293 1294 *need_lookup = false; 1295 dentry = d_lookup(dir, name); 1296 if (dentry) { 1297 if (dentry->d_flags & DCACHE_OP_REVALIDATE) { 1298 error = d_revalidate(dentry, flags); 1299 if (unlikely(error <= 0)) { 1300 if (error < 0) { 1301 dput(dentry); 1302 return ERR_PTR(error); 1303 } else if (!d_invalidate(dentry)) { 1304 dput(dentry); 1305 dentry = NULL; 1306 } 1307 } 1308 } 1309 } 1310 1311 if (!dentry) { 1312 dentry = d_alloc(dir, name); 1313 if (unlikely(!dentry)) 1314 return ERR_PTR(-ENOMEM); 1315 1316 *need_lookup = true; 1317 } 1318 return dentry; 1319 } 1320 1321 /* 1322 * Call i_op->lookup on the dentry. The dentry must be negative and 1323 * unhashed. 1324 * 1325 * dir->d_inode->i_mutex must be held 1326 */ 1327 static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry, 1328 unsigned int flags) 1329 { 1330 struct dentry *old; 1331 1332 /* Don't create child dentry for a dead directory. */ 1333 if (unlikely(IS_DEADDIR(dir))) { 1334 dput(dentry); 1335 return ERR_PTR(-ENOENT); 1336 } 1337 1338 old = dir->i_op->lookup(dir, dentry, flags); 1339 if (unlikely(old)) { 1340 dput(dentry); 1341 dentry = old; 1342 } 1343 return dentry; 1344 } 1345 1346 static struct dentry *__lookup_hash(struct qstr *name, 1347 struct dentry *base, unsigned int flags) 1348 { 1349 bool need_lookup; 1350 struct dentry *dentry; 1351 1352 dentry = lookup_dcache(name, base, flags, &need_lookup); 1353 if (!need_lookup) 1354 return dentry; 1355 1356 return lookup_real(base->d_inode, dentry, flags); 1357 } 1358 1359 /* 1360 * It's more convoluted than I'd like it to be, but... it's still fairly 1361 * small and for now I'd prefer to have fast path as straight as possible. 
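 * Return convention for lookup_fast(): 0 - found, *path and *inode are
 * set; 1 - fall back to lookup_slow(); negative - error, with -ECHILD
 * meaning "drop out of rcu-walk and retry".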
1362 * It _is_ time-critical. 1363 */ 1364 static int lookup_fast(struct nameidata *nd, 1365 struct path *path, struct inode **inode) 1366 { 1367 struct vfsmount *mnt = nd->path.mnt; 1368 struct dentry *dentry, *parent = nd->path.dentry; 1369 int need_reval = 1; 1370 int status = 1; 1371 int err; 1372 1373 /* 1374 * Rename seqlock is not required here because in the off chance 1375 * of a false negative due to a concurrent rename, we're going to 1376 * do the non-racy lookup, below. 1377 */ 1378 if (nd->flags & LOOKUP_RCU) { 1379 unsigned seq; 1380 dentry = __d_lookup_rcu(parent, &nd->last, &seq); 1381 if (!dentry) 1382 goto unlazy; 1383 1384 /* 1385 * This sequence count validates that the inode matches 1386 * the dentry name information from lookup. 1387 */ 1388 *inode = dentry->d_inode; 1389 if (read_seqcount_retry(&dentry->d_seq, seq)) 1390 return -ECHILD; 1391 1392 /* 1393 * This sequence count validates that the parent had no 1394 * changes while we did the lookup of the dentry above. 1395 * 1396 * The memory barrier in read_seqcount_begin of child is 1397 * enough, we can use __read_seqcount_retry here. 1398 */ 1399 if (__read_seqcount_retry(&parent->d_seq, nd->seq)) 1400 return -ECHILD; 1401 nd->seq = seq; 1402 1403 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) { 1404 status = d_revalidate(dentry, nd->flags); 1405 if (unlikely(status <= 0)) { 1406 if (status != -ECHILD) 1407 need_reval = 0; 1408 goto unlazy; 1409 } 1410 } 1411 path->mnt = mnt; 1412 path->dentry = dentry; 1413 if (likely(__follow_mount_rcu(nd, path, inode))) 1414 return 0; 1415 unlazy: 1416 if (unlazy_walk(nd, dentry)) 1417 return -ECHILD; 1418 } else { 1419 dentry = __d_lookup(parent, &nd->last); 1420 } 1421 1422 if (unlikely(!dentry)) 1423 goto need_lookup; 1424 1425 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval) 1426 status = d_revalidate(dentry, nd->flags); 1427 if (unlikely(status <= 0)) { 1428 if (status < 0) { 1429 dput(dentry); 1430 return status; 1431 } 1432 if (!d_invalidate(dentry)) { 1433 dput(dentry); 1434 goto need_lookup; 1435 } 1436 } 1437 1438 path->mnt = mnt; 1439 path->dentry = dentry; 1440 err = follow_managed(path, nd->flags); 1441 if (unlikely(err < 0)) { 1442 path_put_conditional(path, nd); 1443 return err; 1444 } 1445 if (err) 1446 nd->flags |= LOOKUP_JUMPED; 1447 *inode = path->dentry->d_inode; 1448 return 0; 1449 1450 need_lookup: 1451 return 1; 1452 } 1453 1454 /* Fast lookup failed, do it the slow way */ 1455 static int lookup_slow(struct nameidata *nd, struct path *path) 1456 { 1457 struct dentry *dentry, *parent; 1458 int err; 1459 1460 parent = nd->path.dentry; 1461 BUG_ON(nd->inode != parent->d_inode); 1462 1463 mutex_lock(&parent->d_inode->i_mutex); 1464 dentry = __lookup_hash(&nd->last, parent, nd->flags); 1465 mutex_unlock(&parent->d_inode->i_mutex); 1466 if (IS_ERR(dentry)) 1467 return PTR_ERR(dentry); 1468 path->mnt = nd->path.mnt; 1469 path->dentry = dentry; 1470 err = follow_managed(path, nd->flags); 1471 if (unlikely(err < 0)) { 1472 path_put_conditional(path, nd); 1473 return err; 1474 } 1475 if (err) 1476 nd->flags |= LOOKUP_JUMPED; 1477 return 0; 1478 } 1479 1480 static inline int may_lookup(struct nameidata *nd) 1481 { 1482 if (nd->flags & LOOKUP_RCU) { 1483 int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK); 1484 if (err != -ECHILD) 1485 return err; 1486 if (unlazy_walk(nd, NULL)) 1487 return -ECHILD; 1488 } 1489 return inode_permission(nd->inode, MAY_EXEC); 1490 } 1491 1492 static inline int handle_dots(struct nameidata *nd, int 
type) 1493 { 1494 if (type == LAST_DOTDOT) { 1495 if (nd->flags & LOOKUP_RCU) { 1496 if (follow_dotdot_rcu(nd)) 1497 return -ECHILD; 1498 } else 1499 follow_dotdot(nd); 1500 } 1501 return 0; 1502 } 1503 1504 static void terminate_walk(struct nameidata *nd) 1505 { 1506 if (!(nd->flags & LOOKUP_RCU)) { 1507 path_put(&nd->path); 1508 } else { 1509 nd->flags &= ~LOOKUP_RCU; 1510 if (!(nd->flags & LOOKUP_ROOT)) 1511 nd->root.mnt = NULL; 1512 rcu_read_unlock(); 1513 } 1514 } 1515 1516 /* 1517 * Do we need to follow links? We _really_ want to be able 1518 * to do this check without having to look at inode->i_op, 1519 * so we keep a cache of "no, this doesn't need follow_link" 1520 * for the common case. 1521 */ 1522 static inline int should_follow_link(struct dentry *dentry, int follow) 1523 { 1524 return unlikely(d_is_symlink(dentry)) ? follow : 0; 1525 } 1526 1527 static inline int walk_component(struct nameidata *nd, struct path *path, 1528 int follow) 1529 { 1530 struct inode *inode; 1531 int err; 1532 /* 1533 * "." and ".." are special - ".." especially so because it has 1534 * to be able to know about the current root directory and 1535 * parent relationships. 1536 */ 1537 if (unlikely(nd->last_type != LAST_NORM)) 1538 return handle_dots(nd, nd->last_type); 1539 err = lookup_fast(nd, path, &inode); 1540 if (unlikely(err)) { 1541 if (err < 0) 1542 goto out_err; 1543 1544 err = lookup_slow(nd, path); 1545 if (err < 0) 1546 goto out_err; 1547 1548 inode = path->dentry->d_inode; 1549 } 1550 err = -ENOENT; 1551 if (!inode || d_is_negative(path->dentry)) 1552 goto out_path_put; 1553 1554 if (should_follow_link(path->dentry, follow)) { 1555 if (nd->flags & LOOKUP_RCU) { 1556 if (unlikely(unlazy_walk(nd, path->dentry))) { 1557 err = -ECHILD; 1558 goto out_err; 1559 } 1560 } 1561 BUG_ON(inode != path->dentry->d_inode); 1562 return 1; 1563 } 1564 path_to_nameidata(path, nd); 1565 nd->inode = inode; 1566 return 0; 1567 1568 out_path_put: 1569 path_to_nameidata(path, nd); 1570 out_err: 1571 terminate_walk(nd); 1572 return err; 1573 } 1574 1575 /* 1576 * This limits recursive symlink follows to 8, while 1577 * limiting consecutive symlinks to 40. 1578 * 1579 * Without that kind of total limit, nasty chains of consecutive 1580 * symlinks can cause almost arbitrarily long lookups. 1581 */ 1582 static inline int nested_symlink(struct path *path, struct nameidata *nd) 1583 { 1584 int res; 1585 1586 if (unlikely(current->link_count >= MAX_NESTED_LINKS)) { 1587 path_put_conditional(path, nd); 1588 path_put(&nd->path); 1589 return -ELOOP; 1590 } 1591 BUG_ON(nd->depth >= MAX_NESTED_LINKS); 1592 1593 nd->depth++; 1594 current->link_count++; 1595 1596 do { 1597 struct path link = *path; 1598 void *cookie; 1599 1600 res = follow_link(&link, nd, &cookie); 1601 if (res) 1602 break; 1603 res = walk_component(nd, path, LOOKUP_FOLLOW); 1604 put_link(nd, &link, cookie); 1605 } while (res > 0); 1606 1607 current->link_count--; 1608 nd->depth--; 1609 return res; 1610 } 1611 1612 /* 1613 * We can do the critical dentry name comparison and hashing 1614 * operations one word at a time, but we are limited to: 1615 * 1616 * - Architectures with fast unaligned word accesses. We could 1617 * do a "get_unaligned()" if this helps and is sufficiently 1618 * fast. 1619 * 1620 * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we 1621 * do not trap on the (extremely unlikely) case of a page 1622 * crossing operation. 
1623 * 1624 * - Furthermore, we need an efficient 64-bit compile for the 1625 * 64-bit case in order to generate the "number of bytes in 1626 * the final mask". Again, that could be replaced with a 1627 * efficient population count instruction or similar. 1628 */ 1629 #ifdef CONFIG_DCACHE_WORD_ACCESS 1630 1631 #include <asm/word-at-a-time.h> 1632 1633 #ifdef CONFIG_64BIT 1634 1635 static inline unsigned int fold_hash(unsigned long hash) 1636 { 1637 hash += hash >> (8*sizeof(int)); 1638 return hash; 1639 } 1640 1641 #else /* 32-bit case */ 1642 1643 #define fold_hash(x) (x) 1644 1645 #endif 1646 1647 unsigned int full_name_hash(const unsigned char *name, unsigned int len) 1648 { 1649 unsigned long a, mask; 1650 unsigned long hash = 0; 1651 1652 for (;;) { 1653 a = load_unaligned_zeropad(name); 1654 if (len < sizeof(unsigned long)) 1655 break; 1656 hash += a; 1657 hash *= 9; 1658 name += sizeof(unsigned long); 1659 len -= sizeof(unsigned long); 1660 if (!len) 1661 goto done; 1662 } 1663 mask = bytemask_from_count(len); 1664 hash += mask & a; 1665 done: 1666 return fold_hash(hash); 1667 } 1668 EXPORT_SYMBOL(full_name_hash); 1669 1670 /* 1671 * Calculate the length and hash of the path component, and 1672 * return the length of the component; 1673 */ 1674 static inline unsigned long hash_name(const char *name, unsigned int *hashp) 1675 { 1676 unsigned long a, b, adata, bdata, mask, hash, len; 1677 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; 1678 1679 hash = a = 0; 1680 len = -sizeof(unsigned long); 1681 do { 1682 hash = (hash + a) * 9; 1683 len += sizeof(unsigned long); 1684 a = load_unaligned_zeropad(name+len); 1685 b = a ^ REPEAT_BYTE('/'); 1686 } while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants))); 1687 1688 adata = prep_zero_mask(a, adata, &constants); 1689 bdata = prep_zero_mask(b, bdata, &constants); 1690 1691 mask = create_zero_mask(adata | bdata); 1692 1693 hash += a & zero_bytemask(mask); 1694 *hashp = fold_hash(hash); 1695 1696 return len + find_zero(mask); 1697 } 1698 1699 #else 1700 1701 unsigned int full_name_hash(const unsigned char *name, unsigned int len) 1702 { 1703 unsigned long hash = init_name_hash(); 1704 while (len--) 1705 hash = partial_name_hash(*name++, hash); 1706 return end_name_hash(hash); 1707 } 1708 EXPORT_SYMBOL(full_name_hash); 1709 1710 /* 1711 * We know there's a real path component here of at least 1712 * one character. 1713 */ 1714 static inline unsigned long hash_name(const char *name, unsigned int *hashp) 1715 { 1716 unsigned long hash = init_name_hash(); 1717 unsigned long len = 0, c; 1718 1719 c = (unsigned char)*name; 1720 do { 1721 len++; 1722 hash = partial_name_hash(c, hash); 1723 c = (unsigned char)name[len]; 1724 } while (c && c != '/'); 1725 *hashp = end_name_hash(hash); 1726 return len; 1727 } 1728 1729 #endif 1730 1731 /* 1732 * Name resolution. 1733 * This is the basic name resolution function, turning a pathname into 1734 * the final dentry. We expect 'base' to be positive and a directory. 1735 * 1736 * Returns 0 and nd will have valid dentry and mnt on success. 1737 * Returns error and drops reference to input namei data on failure. 1738 */ 1739 static int link_path_walk(const char *name, struct nameidata *nd) 1740 { 1741 struct path next; 1742 int err; 1743 1744 while (*name=='/') 1745 name++; 1746 if (!*name) 1747 return 0; 1748 1749 /* At this point we know we have a real path component. 
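 * Each pass of the loop below pulls out one component: hash_name()
 * yields its length and hash, "." and ".." are classified as LAST_DOT /
 * LAST_DOTDOT, and for everything but the final component
 * walk_component() (plus nested_symlink() for symlinks) moves nd
 * onto it.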
*/ 1750 for(;;) { 1751 struct qstr this; 1752 long len; 1753 int type; 1754 1755 err = may_lookup(nd); 1756 if (err) 1757 break; 1758 1759 len = hash_name(name, &this.hash); 1760 this.name = name; 1761 this.len = len; 1762 1763 type = LAST_NORM; 1764 if (name[0] == '.') switch (len) { 1765 case 2: 1766 if (name[1] == '.') { 1767 type = LAST_DOTDOT; 1768 nd->flags |= LOOKUP_JUMPED; 1769 } 1770 break; 1771 case 1: 1772 type = LAST_DOT; 1773 } 1774 if (likely(type == LAST_NORM)) { 1775 struct dentry *parent = nd->path.dentry; 1776 nd->flags &= ~LOOKUP_JUMPED; 1777 if (unlikely(parent->d_flags & DCACHE_OP_HASH)) { 1778 err = parent->d_op->d_hash(parent, &this); 1779 if (err < 0) 1780 break; 1781 } 1782 } 1783 1784 nd->last = this; 1785 nd->last_type = type; 1786 1787 if (!name[len]) 1788 return 0; 1789 /* 1790 * If it wasn't NUL, we know it was '/'. Skip that 1791 * slash, and continue until no more slashes. 1792 */ 1793 do { 1794 len++; 1795 } while (unlikely(name[len] == '/')); 1796 if (!name[len]) 1797 return 0; 1798 1799 name += len; 1800 1801 err = walk_component(nd, &next, LOOKUP_FOLLOW); 1802 if (err < 0) 1803 return err; 1804 1805 if (err) { 1806 err = nested_symlink(&next, nd); 1807 if (err) 1808 return err; 1809 } 1810 if (!d_can_lookup(nd->path.dentry)) { 1811 err = -ENOTDIR; 1812 break; 1813 } 1814 } 1815 terminate_walk(nd); 1816 return err; 1817 } 1818 1819 static int path_init(int dfd, const char *name, unsigned int flags, 1820 struct nameidata *nd, struct file **fp) 1821 { 1822 int retval = 0; 1823 1824 nd->last_type = LAST_ROOT; /* if there are only slashes... */ 1825 nd->flags = flags | LOOKUP_JUMPED; 1826 nd->depth = 0; 1827 if (flags & LOOKUP_ROOT) { 1828 struct dentry *root = nd->root.dentry; 1829 struct inode *inode = root->d_inode; 1830 if (*name) { 1831 if (!d_can_lookup(root)) 1832 return -ENOTDIR; 1833 retval = inode_permission(inode, MAY_EXEC); 1834 if (retval) 1835 return retval; 1836 } 1837 nd->path = nd->root; 1838 nd->inode = inode; 1839 if (flags & LOOKUP_RCU) { 1840 rcu_read_lock(); 1841 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); 1842 nd->m_seq = read_seqbegin(&mount_lock); 1843 } else { 1844 path_get(&nd->path); 1845 } 1846 return 0; 1847 } 1848 1849 nd->root.mnt = NULL; 1850 1851 nd->m_seq = read_seqbegin(&mount_lock); 1852 if (*name=='/') { 1853 if (flags & LOOKUP_RCU) { 1854 rcu_read_lock(); 1855 set_root_rcu(nd); 1856 } else { 1857 set_root(nd); 1858 path_get(&nd->root); 1859 } 1860 nd->path = nd->root; 1861 } else if (dfd == AT_FDCWD) { 1862 if (flags & LOOKUP_RCU) { 1863 struct fs_struct *fs = current->fs; 1864 unsigned seq; 1865 1866 rcu_read_lock(); 1867 1868 do { 1869 seq = read_seqcount_begin(&fs->seq); 1870 nd->path = fs->pwd; 1871 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); 1872 } while (read_seqcount_retry(&fs->seq, seq)); 1873 } else { 1874 get_fs_pwd(current->fs, &nd->path); 1875 } 1876 } else { 1877 /* Caller must check execute permissions on the starting path component */ 1878 struct fd f = fdget_raw(dfd); 1879 struct dentry *dentry; 1880 1881 if (!f.file) 1882 return -EBADF; 1883 1884 dentry = f.file->f_path.dentry; 1885 1886 if (*name) { 1887 if (!d_can_lookup(dentry)) { 1888 fdput(f); 1889 return -ENOTDIR; 1890 } 1891 } 1892 1893 nd->path = f.file->f_path; 1894 if (flags & LOOKUP_RCU) { 1895 if (f.flags & FDPUT_FPUT) 1896 *fp = f.file; 1897 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); 1898 rcu_read_lock(); 1899 } else { 1900 path_get(&nd->path); 1901 fdput(f); 1902 } 1903 } 1904 1905 nd->inode = 
nd->path.dentry->d_inode; 1906 return 0; 1907 } 1908 1909 static inline int lookup_last(struct nameidata *nd, struct path *path) 1910 { 1911 if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len]) 1912 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY; 1913 1914 nd->flags &= ~LOOKUP_PARENT; 1915 return walk_component(nd, path, nd->flags & LOOKUP_FOLLOW); 1916 } 1917 1918 /* Returns 0 and nd will be valid on success; Retuns error, otherwise. */ 1919 static int path_lookupat(int dfd, const char *name, 1920 unsigned int flags, struct nameidata *nd) 1921 { 1922 struct file *base = NULL; 1923 struct path path; 1924 int err; 1925 1926 /* 1927 * Path walking is largely split up into 2 different synchronisation 1928 * schemes, rcu-walk and ref-walk (explained in 1929 * Documentation/filesystems/path-lookup.txt). These share much of the 1930 * path walk code, but some things particularly setup, cleanup, and 1931 * following mounts are sufficiently divergent that functions are 1932 * duplicated. Typically there is a function foo(), and its RCU 1933 * analogue, foo_rcu(). 1934 * 1935 * -ECHILD is the error number of choice (just to avoid clashes) that 1936 * is returned if some aspect of an rcu-walk fails. Such an error must 1937 * be handled by restarting a traditional ref-walk (which will always 1938 * be able to complete). 1939 */ 1940 err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base); 1941 1942 if (unlikely(err)) 1943 return err; 1944 1945 current->total_link_count = 0; 1946 err = link_path_walk(name, nd); 1947 1948 if (!err && !(flags & LOOKUP_PARENT)) { 1949 err = lookup_last(nd, &path); 1950 while (err > 0) { 1951 void *cookie; 1952 struct path link = path; 1953 err = may_follow_link(&link, nd); 1954 if (unlikely(err)) 1955 break; 1956 nd->flags |= LOOKUP_PARENT; 1957 err = follow_link(&link, nd, &cookie); 1958 if (err) 1959 break; 1960 err = lookup_last(nd, &path); 1961 put_link(nd, &link, cookie); 1962 } 1963 } 1964 1965 if (!err) 1966 err = complete_walk(nd); 1967 1968 if (!err && nd->flags & LOOKUP_DIRECTORY) { 1969 if (!d_can_lookup(nd->path.dentry)) { 1970 path_put(&nd->path); 1971 err = -ENOTDIR; 1972 } 1973 } 1974 1975 if (base) 1976 fput(base); 1977 1978 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { 1979 path_put(&nd->root); 1980 nd->root.mnt = NULL; 1981 } 1982 return err; 1983 } 1984 1985 static int filename_lookup(int dfd, struct filename *name, 1986 unsigned int flags, struct nameidata *nd) 1987 { 1988 int retval = path_lookupat(dfd, name->name, flags | LOOKUP_RCU, nd); 1989 if (unlikely(retval == -ECHILD)) 1990 retval = path_lookupat(dfd, name->name, flags, nd); 1991 if (unlikely(retval == -ESTALE)) 1992 retval = path_lookupat(dfd, name->name, 1993 flags | LOOKUP_REVAL, nd); 1994 1995 if (likely(!retval)) 1996 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT); 1997 return retval; 1998 } 1999 2000 static int do_path_lookup(int dfd, const char *name, 2001 unsigned int flags, struct nameidata *nd) 2002 { 2003 struct filename filename = { .name = name }; 2004 2005 return filename_lookup(dfd, &filename, flags, nd); 2006 } 2007 2008 /* does lookup, returns the object with parent locked */ 2009 struct dentry *kern_path_locked(const char *name, struct path *path) 2010 { 2011 struct nameidata nd; 2012 struct dentry *d; 2013 int err = do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, &nd); 2014 if (err) 2015 return ERR_PTR(err); 2016 if (nd.last_type != LAST_NORM) { 2017 path_put(&nd.path); 2018 return ERR_PTR(-EINVAL); 2019 } 2020 
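	/*
	 * nd.last is the final, LAST_NORM component; look it up under the
	 * parent's i_mutex, which is left held for the caller on success.
	 */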
mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); 2021 d = __lookup_hash(&nd.last, nd.path.dentry, 0); 2022 if (IS_ERR(d)) { 2023 mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 2024 path_put(&nd.path); 2025 return d; 2026 } 2027 *path = nd.path; 2028 return d; 2029 } 2030 2031 int kern_path(const char *name, unsigned int flags, struct path *path) 2032 { 2033 struct nameidata nd; 2034 int res = do_path_lookup(AT_FDCWD, name, flags, &nd); 2035 if (!res) 2036 *path = nd.path; 2037 return res; 2038 } 2039 EXPORT_SYMBOL(kern_path); 2040 2041 /** 2042 * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair 2043 * @dentry: pointer to dentry of the base directory 2044 * @mnt: pointer to vfs mount of the base directory 2045 * @name: pointer to file name 2046 * @flags: lookup flags 2047 * @path: pointer to struct path to fill 2048 */ 2049 int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, 2050 const char *name, unsigned int flags, 2051 struct path *path) 2052 { 2053 struct nameidata nd; 2054 int err; 2055 nd.root.dentry = dentry; 2056 nd.root.mnt = mnt; 2057 BUG_ON(flags & LOOKUP_PARENT); 2058 /* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */ 2059 err = do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, &nd); 2060 if (!err) 2061 *path = nd.path; 2062 return err; 2063 } 2064 EXPORT_SYMBOL(vfs_path_lookup); 2065 2066 /* 2067 * Restricted form of lookup. Doesn't follow links, single-component only, 2068 * needs parent already locked. Doesn't follow mounts. 2069 * SMP-safe. 2070 */ 2071 static struct dentry *lookup_hash(struct nameidata *nd) 2072 { 2073 return __lookup_hash(&nd->last, nd->path.dentry, nd->flags); 2074 } 2075 2076 /** 2077 * lookup_one_len - filesystem helper to lookup single pathname component 2078 * @name: pathname component to lookup 2079 * @base: base directory to lookup from 2080 * @len: maximum length @len should be interpreted to 2081 * 2082 * Note that this routine is purely a helper for filesystem usage and should 2083 * not be called by generic code. Also note that by using this function the 2084 * nameidata argument is passed to the filesystem methods and a filesystem 2085 * using this helper needs to be prepared for that. 2086 */ 2087 struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) 2088 { 2089 struct qstr this; 2090 unsigned int c; 2091 int err; 2092 2093 WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex)); 2094 2095 this.name = name; 2096 this.len = len; 2097 this.hash = full_name_hash(name, len); 2098 if (!len) 2099 return ERR_PTR(-EACCES); 2100 2101 if (unlikely(name[0] == '.')) { 2102 if (len < 2 || (len == 2 && name[1] == '.')) 2103 return ERR_PTR(-EACCES); 2104 } 2105 2106 while (len--) { 2107 c = *(const unsigned char *)name++; 2108 if (c == '/' || c == '\0') 2109 return ERR_PTR(-EACCES); 2110 } 2111 /* 2112 * See if the low-level filesystem might want 2113 * to use its own hash.. 
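	 * (a case-folding filesystem, for instance, may supply ->d_hash()
	 * so that different spellings of the same name hash alike)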
2114 */ 2115 if (base->d_flags & DCACHE_OP_HASH) { 2116 int err = base->d_op->d_hash(base, &this); 2117 if (err < 0) 2118 return ERR_PTR(err); 2119 } 2120 2121 err = inode_permission(base->d_inode, MAY_EXEC); 2122 if (err) 2123 return ERR_PTR(err); 2124 2125 return __lookup_hash(&this, base, 0); 2126 } 2127 EXPORT_SYMBOL(lookup_one_len); 2128 2129 int user_path_at_empty(int dfd, const char __user *name, unsigned flags, 2130 struct path *path, int *empty) 2131 { 2132 struct nameidata nd; 2133 struct filename *tmp = getname_flags(name, flags, empty); 2134 int err = PTR_ERR(tmp); 2135 if (!IS_ERR(tmp)) { 2136 2137 BUG_ON(flags & LOOKUP_PARENT); 2138 2139 err = filename_lookup(dfd, tmp, flags, &nd); 2140 putname(tmp); 2141 if (!err) 2142 *path = nd.path; 2143 } 2144 return err; 2145 } 2146 2147 int user_path_at(int dfd, const char __user *name, unsigned flags, 2148 struct path *path) 2149 { 2150 return user_path_at_empty(dfd, name, flags, path, NULL); 2151 } 2152 EXPORT_SYMBOL(user_path_at); 2153 2154 /* 2155 * NB: most callers don't do anything directly with the reference to the 2156 * to struct filename, but the nd->last pointer points into the name string 2157 * allocated by getname. So we must hold the reference to it until all 2158 * path-walking is complete. 2159 */ 2160 static struct filename * 2161 user_path_parent(int dfd, const char __user *path, struct nameidata *nd, 2162 unsigned int flags) 2163 { 2164 struct filename *s = getname(path); 2165 int error; 2166 2167 /* only LOOKUP_REVAL is allowed in extra flags */ 2168 flags &= LOOKUP_REVAL; 2169 2170 if (IS_ERR(s)) 2171 return s; 2172 2173 error = filename_lookup(dfd, s, flags | LOOKUP_PARENT, nd); 2174 if (error) { 2175 putname(s); 2176 return ERR_PTR(error); 2177 } 2178 2179 return s; 2180 } 2181 2182 /** 2183 * mountpoint_last - look up last component for umount 2184 * @nd: pathwalk nameidata - currently pointing at parent directory of "last" 2185 * @path: pointer to container for result 2186 * 2187 * This is a special lookup_last function just for umount. In this case, we 2188 * need to resolve the path without doing any revalidation. 2189 * 2190 * The nameidata should be the result of doing a LOOKUP_PARENT pathwalk. Since 2191 * mountpoints are always pinned in the dcache, their ancestors are too. Thus, 2192 * in almost all cases, this lookup will be served out of the dcache. The only 2193 * cases where it won't are if nd->last refers to a symlink or the path is 2194 * bogus and it doesn't exist. 2195 * 2196 * Returns: 2197 * -error: if there was an error during lookup. This includes -ENOENT if the 2198 * lookup found a negative dentry. The nd->path reference will also be 2199 * put in this case. 2200 * 2201 * 0: if we successfully resolved nd->path and found it to not to be a 2202 * symlink that needs to be followed. "path" will also be populated. 2203 * The nd->path reference will also be put. 2204 * 2205 * 1: if we successfully resolved nd->last and found it to be a symlink 2206 * that needs to be followed. "path" will be populated with the path 2207 * to the link, and nd->path will *not* be put. 
2208 */ 2209 static int 2210 mountpoint_last(struct nameidata *nd, struct path *path) 2211 { 2212 int error = 0; 2213 struct dentry *dentry; 2214 struct dentry *dir = nd->path.dentry; 2215 2216 /* If we're in rcuwalk, drop out of it to handle last component */ 2217 if (nd->flags & LOOKUP_RCU) { 2218 if (unlazy_walk(nd, NULL)) { 2219 error = -ECHILD; 2220 goto out; 2221 } 2222 } 2223 2224 nd->flags &= ~LOOKUP_PARENT; 2225 2226 if (unlikely(nd->last_type != LAST_NORM)) { 2227 error = handle_dots(nd, nd->last_type); 2228 if (error) 2229 goto out; 2230 dentry = dget(nd->path.dentry); 2231 goto done; 2232 } 2233 2234 mutex_lock(&dir->d_inode->i_mutex); 2235 dentry = d_lookup(dir, &nd->last); 2236 if (!dentry) { 2237 /* 2238 * No cached dentry. Mounted dentries are pinned in the cache, 2239 * so that means that this dentry is probably a symlink or the 2240 * path doesn't actually point to a mounted dentry. 2241 */ 2242 dentry = d_alloc(dir, &nd->last); 2243 if (!dentry) { 2244 error = -ENOMEM; 2245 mutex_unlock(&dir->d_inode->i_mutex); 2246 goto out; 2247 } 2248 dentry = lookup_real(dir->d_inode, dentry, nd->flags); 2249 error = PTR_ERR(dentry); 2250 if (IS_ERR(dentry)) { 2251 mutex_unlock(&dir->d_inode->i_mutex); 2252 goto out; 2253 } 2254 } 2255 mutex_unlock(&dir->d_inode->i_mutex); 2256 2257 done: 2258 if (!dentry->d_inode || d_is_negative(dentry)) { 2259 error = -ENOENT; 2260 dput(dentry); 2261 goto out; 2262 } 2263 path->dentry = dentry; 2264 path->mnt = nd->path.mnt; 2265 if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW)) 2266 return 1; 2267 mntget(path->mnt); 2268 follow_mount(path); 2269 error = 0; 2270 out: 2271 terminate_walk(nd); 2272 return error; 2273 } 2274 2275 /** 2276 * path_mountpoint - look up a path to be umounted 2277 * @dfd: directory file descriptor to start walk from 2278 * @name: full pathname to walk 2279 * @path: pointer to container for result 2280 * @flags: lookup flags 2281 * 2282 * Look up the given name, but don't attempt to revalidate the last component. 2283 * Returns 0 and "path" will be valid on success; Returns error otherwise. 
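 *
 * As with path_lookupat(), callers invoke this in RCU mode first and fall
 * back to ref-walk on -ECHILD, retrying with LOOKUP_REVAL on -ESTALE (see
 * filename_mountpoint() below).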
2284 */ 2285 static int 2286 path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags) 2287 { 2288 struct file *base = NULL; 2289 struct nameidata nd; 2290 int err; 2291 2292 err = path_init(dfd, name, flags | LOOKUP_PARENT, &nd, &base); 2293 if (unlikely(err)) 2294 return err; 2295 2296 current->total_link_count = 0; 2297 err = link_path_walk(name, &nd); 2298 if (err) 2299 goto out; 2300 2301 err = mountpoint_last(&nd, path); 2302 while (err > 0) { 2303 void *cookie; 2304 struct path link = *path; 2305 err = may_follow_link(&link, &nd); 2306 if (unlikely(err)) 2307 break; 2308 nd.flags |= LOOKUP_PARENT; 2309 err = follow_link(&link, &nd, &cookie); 2310 if (err) 2311 break; 2312 err = mountpoint_last(&nd, path); 2313 put_link(&nd, &link, cookie); 2314 } 2315 out: 2316 if (base) 2317 fput(base); 2318 2319 if (nd.root.mnt && !(nd.flags & LOOKUP_ROOT)) 2320 path_put(&nd.root); 2321 2322 return err; 2323 } 2324 2325 static int 2326 filename_mountpoint(int dfd, struct filename *s, struct path *path, 2327 unsigned int flags) 2328 { 2329 int error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_RCU); 2330 if (unlikely(error == -ECHILD)) 2331 error = path_mountpoint(dfd, s->name, path, flags); 2332 if (unlikely(error == -ESTALE)) 2333 error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_REVAL); 2334 if (likely(!error)) 2335 audit_inode(s, path->dentry, 0); 2336 return error; 2337 } 2338 2339 /** 2340 * user_path_mountpoint_at - lookup a path from userland in order to umount it 2341 * @dfd: directory file descriptor 2342 * @name: pathname from userland 2343 * @flags: lookup flags 2344 * @path: pointer to container to hold result 2345 * 2346 * A umount is a special case for path walking. We're not actually interested 2347 * in the inode in this situation, and ESTALE errors can be a problem. We 2348 * simply want track down the dentry and vfsmount attached at the mountpoint 2349 * and avoid revalidating the last component. 2350 * 2351 * Returns 0 and populates "path" on success. 2352 */ 2353 int 2354 user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags, 2355 struct path *path) 2356 { 2357 struct filename *s = getname(name); 2358 int error; 2359 if (IS_ERR(s)) 2360 return PTR_ERR(s); 2361 error = filename_mountpoint(dfd, s, path, flags); 2362 putname(s); 2363 return error; 2364 } 2365 2366 int 2367 kern_path_mountpoint(int dfd, const char *name, struct path *path, 2368 unsigned int flags) 2369 { 2370 struct filename s = {.name = name}; 2371 return filename_mountpoint(dfd, &s, path, flags); 2372 } 2373 EXPORT_SYMBOL(kern_path_mountpoint); 2374 2375 /* 2376 * It's inline, so penalty for filesystems that don't use sticky bit is 2377 * minimal. 2378 */ 2379 static inline int check_sticky(struct inode *dir, struct inode *inode) 2380 { 2381 kuid_t fsuid = current_fsuid(); 2382 2383 if (!(dir->i_mode & S_ISVTX)) 2384 return 0; 2385 if (uid_eq(inode->i_uid, fsuid)) 2386 return 0; 2387 if (uid_eq(dir->i_uid, fsuid)) 2388 return 0; 2389 return !capable_wrt_inode_uidgid(inode, CAP_FOWNER); 2390 } 2391 2392 /* 2393 * Check whether we can remove a link victim from directory dir, check 2394 * whether the type of victim is right. 2395 * 1. We can't do it if dir is read-only (done in permission()) 2396 * 2. We should have write and exec permissions on dir 2397 * 3. We can't remove anything from append-only dir 2398 * 4. We can't do anything with immutable dir (done in permission()) 2399 * 5. If the sticky bit on dir is set we should either 2400 * a. 
be owner of dir, or 2401 * b. be owner of victim, or 2402 * c. have CAP_FOWNER capability 2403 * 6. If the victim is append-only or immutable we can't do anything with 2404 * links pointing to it. 2405 * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR. 2406 * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR. 2407 * 9. We can't remove a root or mountpoint. 2408 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by 2409 * nfs_async_unlink(). 2410 */ 2411 static int may_delete(struct inode *dir, struct dentry *victim, bool isdir) 2412 { 2413 struct inode *inode = victim->d_inode; 2414 int error; 2415 2416 if (d_is_negative(victim)) 2417 return -ENOENT; 2418 BUG_ON(!inode); 2419 2420 BUG_ON(victim->d_parent->d_inode != dir); 2421 audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE); 2422 2423 error = inode_permission(dir, MAY_WRITE | MAY_EXEC); 2424 if (error) 2425 return error; 2426 if (IS_APPEND(dir)) 2427 return -EPERM; 2428 2429 if (check_sticky(dir, inode) || IS_APPEND(inode) || 2430 IS_IMMUTABLE(inode) || IS_SWAPFILE(inode)) 2431 return -EPERM; 2432 if (isdir) { 2433 if (!d_is_dir(victim)) 2434 return -ENOTDIR; 2435 if (IS_ROOT(victim)) 2436 return -EBUSY; 2437 } else if (d_is_dir(victim)) 2438 return -EISDIR; 2439 if (IS_DEADDIR(dir)) 2440 return -ENOENT; 2441 if (victim->d_flags & DCACHE_NFSFS_RENAMED) 2442 return -EBUSY; 2443 return 0; 2444 } 2445 2446 /* Check whether we can create an object with dentry child in directory 2447 * dir. 2448 * 1. We can't do it if child already exists (open has special treatment for 2449 * this case, but since we are inlined it's OK) 2450 * 2. We can't do it if dir is read-only (done in permission()) 2451 * 3. We should have write and exec permissions on dir 2452 * 4. We can't do it if dir is immutable (done in permission()) 2453 */ 2454 static inline int may_create(struct inode *dir, struct dentry *child) 2455 { 2456 audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE); 2457 if (child->d_inode) 2458 return -EEXIST; 2459 if (IS_DEADDIR(dir)) 2460 return -ENOENT; 2461 return inode_permission(dir, MAY_WRITE | MAY_EXEC); 2462 } 2463 2464 /* 2465 * p1 and p2 should be directories on the same fs.
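 *
 * Illustrative caller pattern (sys_renameat2() below does essentially this):
 *
 *	trap = lock_rename(new_dir, old_dir);
 *	old_dentry = lookup_hash(&oldnd);
 *	new_dentry = lookup_hash(&newnd);
 *	... fail with -EINVAL if either dentry equals trap ...
 *	unlock_rename(new_dir, old_dir);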
2466 */ 2467 struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) 2468 { 2469 struct dentry *p; 2470 2471 if (p1 == p2) { 2472 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT); 2473 return NULL; 2474 } 2475 2476 mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex); 2477 2478 p = d_ancestor(p2, p1); 2479 if (p) { 2480 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT); 2481 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD); 2482 return p; 2483 } 2484 2485 p = d_ancestor(p1, p2); 2486 if (p) { 2487 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT); 2488 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD); 2489 return p; 2490 } 2491 2492 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT); 2493 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD); 2494 return NULL; 2495 } 2496 EXPORT_SYMBOL(lock_rename); 2497 2498 void unlock_rename(struct dentry *p1, struct dentry *p2) 2499 { 2500 mutex_unlock(&p1->d_inode->i_mutex); 2501 if (p1 != p2) { 2502 mutex_unlock(&p2->d_inode->i_mutex); 2503 mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex); 2504 } 2505 } 2506 EXPORT_SYMBOL(unlock_rename); 2507 2508 int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, 2509 bool want_excl) 2510 { 2511 int error = may_create(dir, dentry); 2512 if (error) 2513 return error; 2514 2515 if (!dir->i_op->create) 2516 return -EACCES; /* shouldn't it be ENOSYS? */ 2517 mode &= S_IALLUGO; 2518 mode |= S_IFREG; 2519 error = security_inode_create(dir, dentry, mode); 2520 if (error) 2521 return error; 2522 error = dir->i_op->create(dir, dentry, mode, want_excl); 2523 if (!error) 2524 fsnotify_create(dir, dentry); 2525 return error; 2526 } 2527 EXPORT_SYMBOL(vfs_create); 2528 2529 static int may_open(struct path *path, int acc_mode, int flag) 2530 { 2531 struct dentry *dentry = path->dentry; 2532 struct inode *inode = dentry->d_inode; 2533 int error; 2534 2535 /* O_PATH? */ 2536 if (!acc_mode) 2537 return 0; 2538 2539 if (!inode) 2540 return -ENOENT; 2541 2542 switch (inode->i_mode & S_IFMT) { 2543 case S_IFLNK: 2544 return -ELOOP; 2545 case S_IFDIR: 2546 if (acc_mode & MAY_WRITE) 2547 return -EISDIR; 2548 break; 2549 case S_IFBLK: 2550 case S_IFCHR: 2551 if (path->mnt->mnt_flags & MNT_NODEV) 2552 return -EACCES; 2553 /*FALLTHRU*/ 2554 case S_IFIFO: 2555 case S_IFSOCK: 2556 flag &= ~O_TRUNC; 2557 break; 2558 } 2559 2560 error = inode_permission(inode, acc_mode); 2561 if (error) 2562 return error; 2563 2564 /* 2565 * An append-only file must be opened in append mode for writing. 2566 */ 2567 if (IS_APPEND(inode)) { 2568 if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND)) 2569 return -EPERM; 2570 if (flag & O_TRUNC) 2571 return -EPERM; 2572 } 2573 2574 /* O_NOATIME can only be set by the owner or superuser */ 2575 if (flag & O_NOATIME && !inode_owner_or_capable(inode)) 2576 return -EPERM; 2577 2578 return 0; 2579 } 2580 2581 static int handle_truncate(struct file *filp) 2582 { 2583 struct path *path = &filp->f_path; 2584 struct inode *inode = path->dentry->d_inode; 2585 int error = get_write_access(inode); 2586 if (error) 2587 return error; 2588 /* 2589 * Refuse to truncate files with mandatory locks held on them. 
2590 */ 2591 error = locks_verify_locked(filp); 2592 if (!error) 2593 error = security_path_truncate(path); 2594 if (!error) { 2595 error = do_truncate(path->dentry, 0, 2596 ATTR_MTIME|ATTR_CTIME|ATTR_OPEN, 2597 filp); 2598 } 2599 put_write_access(inode); 2600 return error; 2601 } 2602 2603 static inline int open_to_namei_flags(int flag) 2604 { 2605 if ((flag & O_ACCMODE) == 3) 2606 flag--; 2607 return flag; 2608 } 2609 2610 static int may_o_create(struct path *dir, struct dentry *dentry, umode_t mode) 2611 { 2612 int error = security_path_mknod(dir, dentry, mode, 0); 2613 if (error) 2614 return error; 2615 2616 error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC); 2617 if (error) 2618 return error; 2619 2620 return security_inode_create(dir->dentry->d_inode, dentry, mode); 2621 } 2622 2623 /* 2624 * Attempt to atomically look up, create and open a file from a negative 2625 * dentry. 2626 * 2627 * Returns 0 if successful. The file will have been created and attached to 2628 * @file by the filesystem calling finish_open(). 2629 * 2630 * Returns 1 if the file was looked up only or didn't need creating. The 2631 * caller will need to perform the open themselves. @path will have been 2632 * updated to point to the new dentry. This may be negative. 2633 * 2634 * Returns an error code otherwise. 2635 */ 2636 static int atomic_open(struct nameidata *nd, struct dentry *dentry, 2637 struct path *path, struct file *file, 2638 const struct open_flags *op, 2639 bool got_write, bool need_lookup, 2640 int *opened) 2641 { 2642 struct inode *dir = nd->path.dentry->d_inode; 2643 unsigned open_flag = open_to_namei_flags(op->open_flag); 2644 umode_t mode; 2645 int error; 2646 int acc_mode; 2647 int create_error = 0; 2648 struct dentry *const DENTRY_NOT_SET = (void *) -1UL; 2649 bool excl; 2650 2651 BUG_ON(dentry->d_inode); 2652 2653 /* Don't create child dentry for a dead directory. */ 2654 if (unlikely(IS_DEADDIR(dir))) { 2655 error = -ENOENT; 2656 goto out; 2657 } 2658 2659 mode = op->mode; 2660 if ((open_flag & O_CREAT) && !IS_POSIXACL(dir)) 2661 mode &= ~current_umask(); 2662 2663 excl = (open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT); 2664 if (excl) 2665 open_flag &= ~O_TRUNC; 2666 2667 /* 2668 * Checking write permission is tricky, because we don't know if we are 2669 * going to actually need it: O_CREAT opens should work as long as the 2670 * file exists. But checking existence breaks atomicity. The trick is 2671 * to check access and if not granted clear O_CREAT from the flags. 2672 * 2673 * Another problem is returning the "right" error value (e.g. for an 2674 * O_EXCL open we want to return EEXIST not EROFS).
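 *
 * For instance: a plain O_CREAT open of an existing file on a read-only
 * filesystem should succeed, so we clear O_CREAT and stash -EROFS in
 * create_error for the case where the file turns out not to exist; an
 * O_CREAT | O_EXCL open must instead fall back to the lookup path so an
 * existing file yields -EEXIST rather than -EROFS.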
2675 */ 2676 if (((open_flag & (O_CREAT | O_TRUNC)) || 2677 (open_flag & O_ACCMODE) != O_RDONLY) && unlikely(!got_write)) { 2678 if (!(open_flag & O_CREAT)) { 2679 /* 2680 * No O_CREATE -> atomicity not a requirement -> fall 2681 * back to lookup + open 2682 */ 2683 goto no_open; 2684 } else if (open_flag & (O_EXCL | O_TRUNC)) { 2685 /* Fall back and fail with the right error */ 2686 create_error = -EROFS; 2687 goto no_open; 2688 } else { 2689 /* No side effects, safe to clear O_CREAT */ 2690 create_error = -EROFS; 2691 open_flag &= ~O_CREAT; 2692 } 2693 } 2694 2695 if (open_flag & O_CREAT) { 2696 error = may_o_create(&nd->path, dentry, mode); 2697 if (error) { 2698 create_error = error; 2699 if (open_flag & O_EXCL) 2700 goto no_open; 2701 open_flag &= ~O_CREAT; 2702 } 2703 } 2704 2705 if (nd->flags & LOOKUP_DIRECTORY) 2706 open_flag |= O_DIRECTORY; 2707 2708 file->f_path.dentry = DENTRY_NOT_SET; 2709 file->f_path.mnt = nd->path.mnt; 2710 error = dir->i_op->atomic_open(dir, dentry, file, open_flag, mode, 2711 opened); 2712 if (error < 0) { 2713 if (create_error && error == -ENOENT) 2714 error = create_error; 2715 goto out; 2716 } 2717 2718 if (error) { /* returned 1, that is */ 2719 if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) { 2720 error = -EIO; 2721 goto out; 2722 } 2723 if (file->f_path.dentry) { 2724 dput(dentry); 2725 dentry = file->f_path.dentry; 2726 } 2727 if (*opened & FILE_CREATED) 2728 fsnotify_create(dir, dentry); 2729 if (!dentry->d_inode) { 2730 WARN_ON(*opened & FILE_CREATED); 2731 if (create_error) { 2732 error = create_error; 2733 goto out; 2734 } 2735 } else { 2736 if (excl && !(*opened & FILE_CREATED)) { 2737 error = -EEXIST; 2738 goto out; 2739 } 2740 } 2741 goto looked_up; 2742 } 2743 2744 /* 2745 * We didn't have the inode before the open, so check open permission 2746 * here. 2747 */ 2748 acc_mode = op->acc_mode; 2749 if (*opened & FILE_CREATED) { 2750 WARN_ON(!(open_flag & O_CREAT)); 2751 fsnotify_create(dir, dentry); 2752 acc_mode = MAY_OPEN; 2753 } 2754 error = may_open(&file->f_path, acc_mode, open_flag); 2755 if (error) 2756 fput(file); 2757 2758 out: 2759 dput(dentry); 2760 return error; 2761 2762 no_open: 2763 if (need_lookup) { 2764 dentry = lookup_real(dir, dentry, nd->flags); 2765 if (IS_ERR(dentry)) 2766 return PTR_ERR(dentry); 2767 2768 if (create_error) { 2769 int open_flag = op->open_flag; 2770 2771 error = create_error; 2772 if ((open_flag & O_EXCL)) { 2773 if (!dentry->d_inode) 2774 goto out; 2775 } else if (!dentry->d_inode) { 2776 goto out; 2777 } else if ((open_flag & O_TRUNC) && 2778 S_ISREG(dentry->d_inode->i_mode)) { 2779 goto out; 2780 } 2781 /* will fail later, go on to get the right error */ 2782 } 2783 } 2784 looked_up: 2785 path->dentry = dentry; 2786 path->mnt = nd->path.mnt; 2787 return 1; 2788 } 2789 2790 /* 2791 * Look up and maybe create and open the last component. 2792 * 2793 * Must be called with i_mutex held on parent. 2794 * 2795 * Returns 0 if the file was successfully atomically created (if necessary) and 2796 * opened. In this case the file will be returned attached to @file. 2797 * 2798 * Returns 1 if the file was not completely opened at this time, though lookups 2799 * and creations will have been performed and the dentry returned in @path will 2800 * be positive upon return if O_CREAT was specified. If O_CREAT wasn't 2801 * specified then a negative dentry may be returned. 2802 * 2803 * An error code is returned otherwise. 
2804 * 2805 * FILE_CREATE will be set in @*opened if the dentry was created and will be 2806 * cleared otherwise prior to returning. 2807 */ 2808 static int lookup_open(struct nameidata *nd, struct path *path, 2809 struct file *file, 2810 const struct open_flags *op, 2811 bool got_write, int *opened) 2812 { 2813 struct dentry *dir = nd->path.dentry; 2814 struct inode *dir_inode = dir->d_inode; 2815 struct dentry *dentry; 2816 int error; 2817 bool need_lookup; 2818 2819 *opened &= ~FILE_CREATED; 2820 dentry = lookup_dcache(&nd->last, dir, nd->flags, &need_lookup); 2821 if (IS_ERR(dentry)) 2822 return PTR_ERR(dentry); 2823 2824 /* Cached positive dentry: will open in f_op->open */ 2825 if (!need_lookup && dentry->d_inode) 2826 goto out_no_open; 2827 2828 if ((nd->flags & LOOKUP_OPEN) && dir_inode->i_op->atomic_open) { 2829 return atomic_open(nd, dentry, path, file, op, got_write, 2830 need_lookup, opened); 2831 } 2832 2833 if (need_lookup) { 2834 BUG_ON(dentry->d_inode); 2835 2836 dentry = lookup_real(dir_inode, dentry, nd->flags); 2837 if (IS_ERR(dentry)) 2838 return PTR_ERR(dentry); 2839 } 2840 2841 /* Negative dentry, just create the file */ 2842 if (!dentry->d_inode && (op->open_flag & O_CREAT)) { 2843 umode_t mode = op->mode; 2844 if (!IS_POSIXACL(dir->d_inode)) 2845 mode &= ~current_umask(); 2846 /* 2847 * This write is needed to ensure that a 2848 * rw->ro transition does not occur between 2849 * the time when the file is created and when 2850 * a permanent write count is taken through 2851 * the 'struct file' in finish_open(). 2852 */ 2853 if (!got_write) { 2854 error = -EROFS; 2855 goto out_dput; 2856 } 2857 *opened |= FILE_CREATED; 2858 error = security_path_mknod(&nd->path, dentry, mode, 0); 2859 if (error) 2860 goto out_dput; 2861 error = vfs_create(dir->d_inode, dentry, mode, 2862 nd->flags & LOOKUP_EXCL); 2863 if (error) 2864 goto out_dput; 2865 } 2866 out_no_open: 2867 path->dentry = dentry; 2868 path->mnt = nd->path.mnt; 2869 return 1; 2870 2871 out_dput: 2872 dput(dentry); 2873 return error; 2874 } 2875 2876 /* 2877 * Handle the last step of open() 2878 */ 2879 static int do_last(struct nameidata *nd, struct path *path, 2880 struct file *file, const struct open_flags *op, 2881 int *opened, struct filename *name) 2882 { 2883 struct dentry *dir = nd->path.dentry; 2884 int open_flag = op->open_flag; 2885 bool will_truncate = (open_flag & O_TRUNC) != 0; 2886 bool got_write = false; 2887 int acc_mode = op->acc_mode; 2888 struct inode *inode; 2889 bool symlink_ok = false; 2890 struct path save_parent = { .dentry = NULL, .mnt = NULL }; 2891 bool retried = false; 2892 int error; 2893 2894 nd->flags &= ~LOOKUP_PARENT; 2895 nd->flags |= op->intent; 2896 2897 if (nd->last_type != LAST_NORM) { 2898 error = handle_dots(nd, nd->last_type); 2899 if (error) 2900 return error; 2901 goto finish_open; 2902 } 2903 2904 if (!(open_flag & O_CREAT)) { 2905 if (nd->last.name[nd->last.len]) 2906 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY; 2907 if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW)) 2908 symlink_ok = true; 2909 /* we _can_ be in RCU mode here */ 2910 error = lookup_fast(nd, path, &inode); 2911 if (likely(!error)) 2912 goto finish_lookup; 2913 2914 if (error < 0) 2915 goto out; 2916 2917 BUG_ON(nd->inode != dir->d_inode); 2918 } else { 2919 /* create side of things */ 2920 /* 2921 * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED 2922 * has been cleared when we got to the last component we are 2923 * about to look up 2924 */ 2925 error = complete_walk(nd); 2926 if 
(error) 2927 return error; 2928 2929 audit_inode(name, dir, LOOKUP_PARENT); 2930 error = -EISDIR; 2931 /* trailing slashes? */ 2932 if (nd->last.name[nd->last.len]) 2933 goto out; 2934 } 2935 2936 retry_lookup: 2937 if (op->open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) { 2938 error = mnt_want_write(nd->path.mnt); 2939 if (!error) 2940 got_write = true; 2941 /* 2942 * do _not_ fail yet - we might not need that or fail with 2943 * a different error; let lookup_open() decide; we'll be 2944 * dropping this one anyway. 2945 */ 2946 } 2947 mutex_lock(&dir->d_inode->i_mutex); 2948 error = lookup_open(nd, path, file, op, got_write, opened); 2949 mutex_unlock(&dir->d_inode->i_mutex); 2950 2951 if (error <= 0) { 2952 if (error) 2953 goto out; 2954 2955 if ((*opened & FILE_CREATED) || 2956 !S_ISREG(file_inode(file)->i_mode)) 2957 will_truncate = false; 2958 2959 audit_inode(name, file->f_path.dentry, 0); 2960 goto opened; 2961 } 2962 2963 if (*opened & FILE_CREATED) { 2964 /* Don't check for write permission, don't truncate */ 2965 open_flag &= ~O_TRUNC; 2966 will_truncate = false; 2967 acc_mode = MAY_OPEN; 2968 path_to_nameidata(path, nd); 2969 goto finish_open_created; 2970 } 2971 2972 /* 2973 * create/update audit record if it already exists. 2974 */ 2975 if (d_is_positive(path->dentry)) 2976 audit_inode(name, path->dentry, 0); 2977 2978 /* 2979 * If atomic_open() acquired write access it is dropped now due to 2980 * possible mount and symlink following (this might be optimized away if 2981 * necessary...) 2982 */ 2983 if (got_write) { 2984 mnt_drop_write(nd->path.mnt); 2985 got_write = false; 2986 } 2987 2988 error = -EEXIST; 2989 if ((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT)) 2990 goto exit_dput; 2991 2992 error = follow_managed(path, nd->flags); 2993 if (error < 0) 2994 goto exit_dput; 2995 2996 if (error) 2997 nd->flags |= LOOKUP_JUMPED; 2998 2999 BUG_ON(nd->flags & LOOKUP_RCU); 3000 inode = path->dentry->d_inode; 3001 finish_lookup: 3002 /* we _can_ be in RCU mode here */ 3003 error = -ENOENT; 3004 if (!inode || d_is_negative(path->dentry)) { 3005 path_to_nameidata(path, nd); 3006 goto out; 3007 } 3008 3009 if (should_follow_link(path->dentry, !symlink_ok)) { 3010 if (nd->flags & LOOKUP_RCU) { 3011 if (unlikely(unlazy_walk(nd, path->dentry))) { 3012 error = -ECHILD; 3013 goto out; 3014 } 3015 } 3016 BUG_ON(inode != path->dentry->d_inode); 3017 return 1; 3018 } 3019 3020 if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path->mnt) { 3021 path_to_nameidata(path, nd); 3022 } else { 3023 save_parent.dentry = nd->path.dentry; 3024 save_parent.mnt = mntget(path->mnt); 3025 nd->path.dentry = path->dentry; 3026 3027 } 3028 nd->inode = inode; 3029 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... 
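	 * (follow_managed() above may have crossed a mountpoint and set it,
	 * which is precisely what complete_walk() below needs to know about.)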
*/ 3030 finish_open: 3031 error = complete_walk(nd); 3032 if (error) { 3033 path_put(&save_parent); 3034 return error; 3035 } 3036 audit_inode(name, nd->path.dentry, 0); 3037 error = -EISDIR; 3038 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry)) 3039 goto out; 3040 error = -ENOTDIR; 3041 if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry)) 3042 goto out; 3043 if (!S_ISREG(nd->inode->i_mode)) 3044 will_truncate = false; 3045 3046 if (will_truncate) { 3047 error = mnt_want_write(nd->path.mnt); 3048 if (error) 3049 goto out; 3050 got_write = true; 3051 } 3052 finish_open_created: 3053 error = may_open(&nd->path, acc_mode, open_flag); 3054 if (error) 3055 goto out; 3056 file->f_path.mnt = nd->path.mnt; 3057 error = finish_open(file, nd->path.dentry, NULL, opened); 3058 if (error) { 3059 if (error == -EOPENSTALE) 3060 goto stale_open; 3061 goto out; 3062 } 3063 opened: 3064 error = open_check_o_direct(file); 3065 if (error) 3066 goto exit_fput; 3067 error = ima_file_check(file, op->acc_mode); 3068 if (error) 3069 goto exit_fput; 3070 3071 if (will_truncate) { 3072 error = handle_truncate(file); 3073 if (error) 3074 goto exit_fput; 3075 } 3076 out: 3077 if (got_write) 3078 mnt_drop_write(nd->path.mnt); 3079 path_put(&save_parent); 3080 terminate_walk(nd); 3081 return error; 3082 3083 exit_dput: 3084 path_put_conditional(path, nd); 3085 goto out; 3086 exit_fput: 3087 fput(file); 3088 goto out; 3089 3090 stale_open: 3091 /* If no saved parent or already retried then can't retry */ 3092 if (!save_parent.dentry || retried) 3093 goto out; 3094 3095 BUG_ON(save_parent.dentry != dir); 3096 path_put(&nd->path); 3097 nd->path = save_parent; 3098 nd->inode = dir->d_inode; 3099 save_parent.mnt = NULL; 3100 save_parent.dentry = NULL; 3101 if (got_write) { 3102 mnt_drop_write(nd->path.mnt); 3103 got_write = false; 3104 } 3105 retried = true; 3106 goto retry_lookup; 3107 } 3108 3109 static int do_tmpfile(int dfd, struct filename *pathname, 3110 struct nameidata *nd, int flags, 3111 const struct open_flags *op, 3112 struct file *file, int *opened) 3113 { 3114 static const struct qstr name = QSTR_INIT("/", 1); 3115 struct dentry *dentry, *child; 3116 struct inode *dir; 3117 int error = path_lookupat(dfd, pathname->name, 3118 flags | LOOKUP_DIRECTORY, nd); 3119 if (unlikely(error)) 3120 return error; 3121 error = mnt_want_write(nd->path.mnt); 3122 if (unlikely(error)) 3123 goto out; 3124 /* we want directory to be writable */ 3125 error = inode_permission(nd->inode, MAY_WRITE | MAY_EXEC); 3126 if (error) 3127 goto out2; 3128 dentry = nd->path.dentry; 3129 dir = dentry->d_inode; 3130 if (!dir->i_op->tmpfile) { 3131 error = -EOPNOTSUPP; 3132 goto out2; 3133 } 3134 child = d_alloc(dentry, &name); 3135 if (unlikely(!child)) { 3136 error = -ENOMEM; 3137 goto out2; 3138 } 3139 nd->flags &= ~LOOKUP_DIRECTORY; 3140 nd->flags |= op->intent; 3141 dput(nd->path.dentry); 3142 nd->path.dentry = child; 3143 error = dir->i_op->tmpfile(dir, nd->path.dentry, op->mode); 3144 if (error) 3145 goto out2; 3146 audit_inode(pathname, nd->path.dentry, 0); 3147 error = may_open(&nd->path, op->acc_mode, op->open_flag); 3148 if (error) 3149 goto out2; 3150 file->f_path.mnt = nd->path.mnt; 3151 error = finish_open(file, nd->path.dentry, NULL, opened); 3152 if (error) 3153 goto out2; 3154 error = open_check_o_direct(file); 3155 if (error) { 3156 fput(file); 3157 } else if (!(op->open_flag & O_EXCL)) { 3158 struct inode *inode = file_inode(file); 3159 spin_lock(&inode->i_lock); 3160 inode->i_state |= I_LINKABLE; 3161 
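		/*
		 * Without O_EXCL the tmpfile stays I_LINKABLE, so it can
		 * later be given a name via linkat() even though i_nlink
		 * is zero (see the check in vfs_link()).
		 */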
spin_unlock(&inode->i_lock); 3162 } 3163 out2: 3164 mnt_drop_write(nd->path.mnt); 3165 out: 3166 path_put(&nd->path); 3167 return error; 3168 } 3169 3170 static struct file *path_openat(int dfd, struct filename *pathname, 3171 struct nameidata *nd, const struct open_flags *op, int flags) 3172 { 3173 struct file *base = NULL; 3174 struct file *file; 3175 struct path path; 3176 int opened = 0; 3177 int error; 3178 3179 file = get_empty_filp(); 3180 if (IS_ERR(file)) 3181 return file; 3182 3183 file->f_flags = op->open_flag; 3184 3185 if (unlikely(file->f_flags & __O_TMPFILE)) { 3186 error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened); 3187 goto out; 3188 } 3189 3190 error = path_init(dfd, pathname->name, flags | LOOKUP_PARENT, nd, &base); 3191 if (unlikely(error)) 3192 goto out; 3193 3194 current->total_link_count = 0; 3195 error = link_path_walk(pathname->name, nd); 3196 if (unlikely(error)) 3197 goto out; 3198 3199 error = do_last(nd, &path, file, op, &opened, pathname); 3200 while (unlikely(error > 0)) { /* trailing symlink */ 3201 struct path link = path; 3202 void *cookie; 3203 if (!(nd->flags & LOOKUP_FOLLOW)) { 3204 path_put_conditional(&path, nd); 3205 path_put(&nd->path); 3206 error = -ELOOP; 3207 break; 3208 } 3209 error = may_follow_link(&link, nd); 3210 if (unlikely(error)) 3211 break; 3212 nd->flags |= LOOKUP_PARENT; 3213 nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL); 3214 error = follow_link(&link, nd, &cookie); 3215 if (unlikely(error)) 3216 break; 3217 error = do_last(nd, &path, file, op, &opened, pathname); 3218 put_link(nd, &link, cookie); 3219 } 3220 out: 3221 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) 3222 path_put(&nd->root); 3223 if (base) 3224 fput(base); 3225 if (!(opened & FILE_OPENED)) { 3226 BUG_ON(!error); 3227 put_filp(file); 3228 } 3229 if (unlikely(error)) { 3230 if (error == -EOPENSTALE) { 3231 if (flags & LOOKUP_RCU) 3232 error = -ECHILD; 3233 else 3234 error = -ESTALE; 3235 } 3236 file = ERR_PTR(error); 3237 } 3238 return file; 3239 } 3240 3241 struct file *do_filp_open(int dfd, struct filename *pathname, 3242 const struct open_flags *op) 3243 { 3244 struct nameidata nd; 3245 int flags = op->lookup_flags; 3246 struct file *filp; 3247 3248 filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_RCU); 3249 if (unlikely(filp == ERR_PTR(-ECHILD))) 3250 filp = path_openat(dfd, pathname, &nd, op, flags); 3251 if (unlikely(filp == ERR_PTR(-ESTALE))) 3252 filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_REVAL); 3253 return filp; 3254 } 3255 3256 struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt, 3257 const char *name, const struct open_flags *op) 3258 { 3259 struct nameidata nd; 3260 struct file *file; 3261 struct filename filename = { .name = name }; 3262 int flags = op->lookup_flags | LOOKUP_ROOT; 3263 3264 nd.root.mnt = mnt; 3265 nd.root.dentry = dentry; 3266 3267 if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN) 3268 return ERR_PTR(-ELOOP); 3269 3270 file = path_openat(-1, &filename, &nd, op, flags | LOOKUP_RCU); 3271 if (unlikely(file == ERR_PTR(-ECHILD))) 3272 file = path_openat(-1, &filename, &nd, op, flags); 3273 if (unlikely(file == ERR_PTR(-ESTALE))) 3274 file = path_openat(-1, &filename, &nd, op, flags | LOOKUP_REVAL); 3275 return file; 3276 } 3277 3278 struct dentry *kern_path_create(int dfd, const char *pathname, 3279 struct path *path, unsigned int lookup_flags) 3280 { 3281 struct dentry *dentry = ERR_PTR(-EEXIST); 3282 struct nameidata nd; 3283 int err2; 3284 int error; 3285 bool is_dir 
= (lookup_flags & LOOKUP_DIRECTORY); 3286 3287 /* 3288 * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any 3289 * other flags passed in are ignored! 3290 */ 3291 lookup_flags &= LOOKUP_REVAL; 3292 3293 error = do_path_lookup(dfd, pathname, LOOKUP_PARENT|lookup_flags, &nd); 3294 if (error) 3295 return ERR_PTR(error); 3296 3297 /* 3298 * Yucky last component or no last component at all? 3299 * (foo/., foo/.., /////) 3300 */ 3301 if (nd.last_type != LAST_NORM) 3302 goto out; 3303 nd.flags &= ~LOOKUP_PARENT; 3304 nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL; 3305 3306 /* don't fail immediately if it's r/o, at least try to report other errors */ 3307 err2 = mnt_want_write(nd.path.mnt); 3308 /* 3309 * Do the final lookup. 3310 */ 3311 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); 3312 dentry = lookup_hash(&nd); 3313 if (IS_ERR(dentry)) 3314 goto unlock; 3315 3316 error = -EEXIST; 3317 if (d_is_positive(dentry)) 3318 goto fail; 3319 3320 /* 3321 * Special case - lookup gave negative, but... we had foo/bar/ 3322 * From the vfs_mknod() POV we just have a negative dentry - 3323 * all is fine. Let's be bastards - you had / on the end, you've 3324 * been asking for (non-existent) directory. -ENOENT for you. 3325 */ 3326 if (unlikely(!is_dir && nd.last.name[nd.last.len])) { 3327 error = -ENOENT; 3328 goto fail; 3329 } 3330 if (unlikely(err2)) { 3331 error = err2; 3332 goto fail; 3333 } 3334 *path = nd.path; 3335 return dentry; 3336 fail: 3337 dput(dentry); 3338 dentry = ERR_PTR(error); 3339 unlock: 3340 mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 3341 if (!err2) 3342 mnt_drop_write(nd.path.mnt); 3343 out: 3344 path_put(&nd.path); 3345 return dentry; 3346 } 3347 EXPORT_SYMBOL(kern_path_create); 3348 3349 void done_path_create(struct path *path, struct dentry *dentry) 3350 { 3351 dput(dentry); 3352 mutex_unlock(&path->dentry->d_inode->i_mutex); 3353 mnt_drop_write(path->mnt); 3354 path_put(path); 3355 } 3356 EXPORT_SYMBOL(done_path_create); 3357 3358 struct dentry *user_path_create(int dfd, const char __user *pathname, 3359 struct path *path, unsigned int lookup_flags) 3360 { 3361 struct filename *tmp = getname(pathname); 3362 struct dentry *res; 3363 if (IS_ERR(tmp)) 3364 return ERR_CAST(tmp); 3365 res = kern_path_create(dfd, tmp->name, path, lookup_flags); 3366 putname(tmp); 3367 return res; 3368 } 3369 EXPORT_SYMBOL(user_path_create); 3370 3371 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) 3372 { 3373 int error = may_create(dir, dentry); 3374 3375 if (error) 3376 return error; 3377 3378 if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD)) 3379 return -EPERM; 3380 3381 if (!dir->i_op->mknod) 3382 return -EPERM; 3383 3384 error = devcgroup_inode_mknod(mode, dev); 3385 if (error) 3386 return error; 3387 3388 error = security_inode_mknod(dir, dentry, mode, dev); 3389 if (error) 3390 return error; 3391 3392 error = dir->i_op->mknod(dir, dentry, mode, dev); 3393 if (!error) 3394 fsnotify_create(dir, dentry); 3395 return error; 3396 } 3397 EXPORT_SYMBOL(vfs_mknod); 3398 3399 static int may_mknod(umode_t mode) 3400 { 3401 switch (mode & S_IFMT) { 3402 case S_IFREG: 3403 case S_IFCHR: 3404 case S_IFBLK: 3405 case S_IFIFO: 3406 case S_IFSOCK: 3407 case 0: /* zero mode translates to S_IFREG */ 3408 return 0; 3409 case S_IFDIR: 3410 return -EPERM; 3411 default: 3412 return -EINVAL; 3413 } 3414 } 3415 3416 SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, 3417 unsigned, dev) 3418 { 3419 struct dentry 
*dentry; 3420 struct path path; 3421 int error; 3422 unsigned int lookup_flags = 0; 3423 3424 error = may_mknod(mode); 3425 if (error) 3426 return error; 3427 retry: 3428 dentry = user_path_create(dfd, filename, &path, lookup_flags); 3429 if (IS_ERR(dentry)) 3430 return PTR_ERR(dentry); 3431 3432 if (!IS_POSIXACL(path.dentry->d_inode)) 3433 mode &= ~current_umask(); 3434 error = security_path_mknod(&path, dentry, mode, dev); 3435 if (error) 3436 goto out; 3437 switch (mode & S_IFMT) { 3438 case 0: case S_IFREG: 3439 error = vfs_create(path.dentry->d_inode,dentry,mode,true); 3440 break; 3441 case S_IFCHR: case S_IFBLK: 3442 error = vfs_mknod(path.dentry->d_inode,dentry,mode, 3443 new_decode_dev(dev)); 3444 break; 3445 case S_IFIFO: case S_IFSOCK: 3446 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0); 3447 break; 3448 } 3449 out: 3450 done_path_create(&path, dentry); 3451 if (retry_estale(error, lookup_flags)) { 3452 lookup_flags |= LOOKUP_REVAL; 3453 goto retry; 3454 } 3455 return error; 3456 } 3457 3458 SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev) 3459 { 3460 return sys_mknodat(AT_FDCWD, filename, mode, dev); 3461 } 3462 3463 int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 3464 { 3465 int error = may_create(dir, dentry); 3466 unsigned max_links = dir->i_sb->s_max_links; 3467 3468 if (error) 3469 return error; 3470 3471 if (!dir->i_op->mkdir) 3472 return -EPERM; 3473 3474 mode &= (S_IRWXUGO|S_ISVTX); 3475 error = security_inode_mkdir(dir, dentry, mode); 3476 if (error) 3477 return error; 3478 3479 if (max_links && dir->i_nlink >= max_links) 3480 return -EMLINK; 3481 3482 error = dir->i_op->mkdir(dir, dentry, mode); 3483 if (!error) 3484 fsnotify_mkdir(dir, dentry); 3485 return error; 3486 } 3487 EXPORT_SYMBOL(vfs_mkdir); 3488 3489 SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode) 3490 { 3491 struct dentry *dentry; 3492 struct path path; 3493 int error; 3494 unsigned int lookup_flags = LOOKUP_DIRECTORY; 3495 3496 retry: 3497 dentry = user_path_create(dfd, pathname, &path, lookup_flags); 3498 if (IS_ERR(dentry)) 3499 return PTR_ERR(dentry); 3500 3501 if (!IS_POSIXACL(path.dentry->d_inode)) 3502 mode &= ~current_umask(); 3503 error = security_path_mkdir(&path, dentry, mode); 3504 if (!error) 3505 error = vfs_mkdir(path.dentry->d_inode, dentry, mode); 3506 done_path_create(&path, dentry); 3507 if (retry_estale(error, lookup_flags)) { 3508 lookup_flags |= LOOKUP_REVAL; 3509 goto retry; 3510 } 3511 return error; 3512 } 3513 3514 SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode) 3515 { 3516 return sys_mkdirat(AT_FDCWD, pathname, mode); 3517 } 3518 3519 /* 3520 * The dentry_unhash() helper will try to drop the dentry early: we 3521 * should have a usage count of 1 if we're the only user of this 3522 * dentry, and if that is true (possibly after pruning the dcache), 3523 * then we drop the dentry now. 3524 * 3525 * A low-level filesystem can, if it choses, legally 3526 * do a 3527 * 3528 * if (!d_unhashed(dentry)) 3529 * return -EBUSY; 3530 * 3531 * if it cannot handle the case of removing a directory 3532 * that is still in use by something else.. 
3533 */ 3534 void dentry_unhash(struct dentry *dentry) 3535 { 3536 shrink_dcache_parent(dentry); 3537 spin_lock(&dentry->d_lock); 3538 if (dentry->d_lockref.count == 1) 3539 __d_drop(dentry); 3540 spin_unlock(&dentry->d_lock); 3541 } 3542 EXPORT_SYMBOL(dentry_unhash); 3543 3544 int vfs_rmdir(struct inode *dir, struct dentry *dentry) 3545 { 3546 int error = may_delete(dir, dentry, 1); 3547 3548 if (error) 3549 return error; 3550 3551 if (!dir->i_op->rmdir) 3552 return -EPERM; 3553 3554 dget(dentry); 3555 mutex_lock(&dentry->d_inode->i_mutex); 3556 3557 error = -EBUSY; 3558 if (d_mountpoint(dentry)) 3559 goto out; 3560 3561 error = security_inode_rmdir(dir, dentry); 3562 if (error) 3563 goto out; 3564 3565 shrink_dcache_parent(dentry); 3566 error = dir->i_op->rmdir(dir, dentry); 3567 if (error) 3568 goto out; 3569 3570 dentry->d_inode->i_flags |= S_DEAD; 3571 dont_mount(dentry); 3572 3573 out: 3574 mutex_unlock(&dentry->d_inode->i_mutex); 3575 dput(dentry); 3576 if (!error) 3577 d_delete(dentry); 3578 return error; 3579 } 3580 EXPORT_SYMBOL(vfs_rmdir); 3581 3582 static long do_rmdir(int dfd, const char __user *pathname) 3583 { 3584 int error = 0; 3585 struct filename *name; 3586 struct dentry *dentry; 3587 struct nameidata nd; 3588 unsigned int lookup_flags = 0; 3589 retry: 3590 name = user_path_parent(dfd, pathname, &nd, lookup_flags); 3591 if (IS_ERR(name)) 3592 return PTR_ERR(name); 3593 3594 switch(nd.last_type) { 3595 case LAST_DOTDOT: 3596 error = -ENOTEMPTY; 3597 goto exit1; 3598 case LAST_DOT: 3599 error = -EINVAL; 3600 goto exit1; 3601 case LAST_ROOT: 3602 error = -EBUSY; 3603 goto exit1; 3604 } 3605 3606 nd.flags &= ~LOOKUP_PARENT; 3607 error = mnt_want_write(nd.path.mnt); 3608 if (error) 3609 goto exit1; 3610 3611 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); 3612 dentry = lookup_hash(&nd); 3613 error = PTR_ERR(dentry); 3614 if (IS_ERR(dentry)) 3615 goto exit2; 3616 if (!dentry->d_inode) { 3617 error = -ENOENT; 3618 goto exit3; 3619 } 3620 error = security_path_rmdir(&nd.path, dentry); 3621 if (error) 3622 goto exit3; 3623 error = vfs_rmdir(nd.path.dentry->d_inode, dentry); 3624 exit3: 3625 dput(dentry); 3626 exit2: 3627 mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 3628 mnt_drop_write(nd.path.mnt); 3629 exit1: 3630 path_put(&nd.path); 3631 putname(name); 3632 if (retry_estale(error, lookup_flags)) { 3633 lookup_flags |= LOOKUP_REVAL; 3634 goto retry; 3635 } 3636 return error; 3637 } 3638 3639 SYSCALL_DEFINE1(rmdir, const char __user *, pathname) 3640 { 3641 return do_rmdir(AT_FDCWD, pathname); 3642 } 3643 3644 /** 3645 * vfs_unlink - unlink a filesystem object 3646 * @dir: parent directory 3647 * @dentry: victim 3648 * @delegated_inode: returns victim inode, if the inode is delegated. 3649 * 3650 * The caller must hold dir->i_mutex. 3651 * 3652 * If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and 3653 * return a reference to the inode in delegated_inode. The caller 3654 * should then break the delegation on that inode and retry. Because 3655 * breaking a delegation may take a long time, the caller should drop 3656 * dir->i_mutex before doing so. 3657 * 3658 * Alternatively, a caller may pass NULL for delegated_inode. This may 3659 * be appropriate for callers that expect the underlying filesystem not 3660 * to be NFS exported. 
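 *
 * Illustrative caller pattern (do_unlinkat() below follows it):
 *
 *	error = vfs_unlink(dir, dentry, &delegated_inode);
 *	mutex_unlock(&dir->i_mutex);
 *	if (delegated_inode) {
 *		error = break_deleg_wait(&delegated_inode);
 *		if (!error)
 *			goto retry;
 *	}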
3661 */ 3662 int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode) 3663 { 3664 struct inode *target = dentry->d_inode; 3665 int error = may_delete(dir, dentry, 0); 3666 3667 if (error) 3668 return error; 3669 3670 if (!dir->i_op->unlink) 3671 return -EPERM; 3672 3673 mutex_lock(&target->i_mutex); 3674 if (d_mountpoint(dentry)) 3675 error = -EBUSY; 3676 else { 3677 error = security_inode_unlink(dir, dentry); 3678 if (!error) { 3679 error = try_break_deleg(target, delegated_inode); 3680 if (error) 3681 goto out; 3682 error = dir->i_op->unlink(dir, dentry); 3683 if (!error) 3684 dont_mount(dentry); 3685 } 3686 } 3687 out: 3688 mutex_unlock(&target->i_mutex); 3689 3690 /* We don't d_delete() NFS sillyrenamed files--they still exist. */ 3691 if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) { 3692 fsnotify_link_count(target); 3693 d_delete(dentry); 3694 } 3695 3696 return error; 3697 } 3698 EXPORT_SYMBOL(vfs_unlink); 3699 3700 /* 3701 * Make sure that the actual truncation of the file will occur outside its 3702 * directory's i_mutex. Truncate can take a long time if there is a lot of 3703 * writeout happening, and we don't want to prevent access to the directory 3704 * while waiting on the I/O. 3705 */ 3706 static long do_unlinkat(int dfd, const char __user *pathname) 3707 { 3708 int error; 3709 struct filename *name; 3710 struct dentry *dentry; 3711 struct nameidata nd; 3712 struct inode *inode = NULL; 3713 struct inode *delegated_inode = NULL; 3714 unsigned int lookup_flags = 0; 3715 retry: 3716 name = user_path_parent(dfd, pathname, &nd, lookup_flags); 3717 if (IS_ERR(name)) 3718 return PTR_ERR(name); 3719 3720 error = -EISDIR; 3721 if (nd.last_type != LAST_NORM) 3722 goto exit1; 3723 3724 nd.flags &= ~LOOKUP_PARENT; 3725 error = mnt_want_write(nd.path.mnt); 3726 if (error) 3727 goto exit1; 3728 retry_deleg: 3729 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); 3730 dentry = lookup_hash(&nd); 3731 error = PTR_ERR(dentry); 3732 if (!IS_ERR(dentry)) { 3733 /* Why not before? 
Because we want correct error value */ 3734 if (nd.last.name[nd.last.len]) 3735 goto slashes; 3736 inode = dentry->d_inode; 3737 if (d_is_negative(dentry)) 3738 goto slashes; 3739 ihold(inode); 3740 error = security_path_unlink(&nd.path, dentry); 3741 if (error) 3742 goto exit2; 3743 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode); 3744 exit2: 3745 dput(dentry); 3746 } 3747 mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 3748 if (inode) 3749 iput(inode); /* truncate the inode here */ 3750 inode = NULL; 3751 if (delegated_inode) { 3752 error = break_deleg_wait(&delegated_inode); 3753 if (!error) 3754 goto retry_deleg; 3755 } 3756 mnt_drop_write(nd.path.mnt); 3757 exit1: 3758 path_put(&nd.path); 3759 putname(name); 3760 if (retry_estale(error, lookup_flags)) { 3761 lookup_flags |= LOOKUP_REVAL; 3762 inode = NULL; 3763 goto retry; 3764 } 3765 return error; 3766 3767 slashes: 3768 if (d_is_negative(dentry)) 3769 error = -ENOENT; 3770 else if (d_is_dir(dentry)) 3771 error = -EISDIR; 3772 else 3773 error = -ENOTDIR; 3774 goto exit2; 3775 } 3776 3777 SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag) 3778 { 3779 if ((flag & ~AT_REMOVEDIR) != 0) 3780 return -EINVAL; 3781 3782 if (flag & AT_REMOVEDIR) 3783 return do_rmdir(dfd, pathname); 3784 3785 return do_unlinkat(dfd, pathname); 3786 } 3787 3788 SYSCALL_DEFINE1(unlink, const char __user *, pathname) 3789 { 3790 return do_unlinkat(AT_FDCWD, pathname); 3791 } 3792 3793 int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) 3794 { 3795 int error = may_create(dir, dentry); 3796 3797 if (error) 3798 return error; 3799 3800 if (!dir->i_op->symlink) 3801 return -EPERM; 3802 3803 error = security_inode_symlink(dir, dentry, oldname); 3804 if (error) 3805 return error; 3806 3807 error = dir->i_op->symlink(dir, dentry, oldname); 3808 if (!error) 3809 fsnotify_create(dir, dentry); 3810 return error; 3811 } 3812 EXPORT_SYMBOL(vfs_symlink); 3813 3814 SYSCALL_DEFINE3(symlinkat, const char __user *, oldname, 3815 int, newdfd, const char __user *, newname) 3816 { 3817 int error; 3818 struct filename *from; 3819 struct dentry *dentry; 3820 struct path path; 3821 unsigned int lookup_flags = 0; 3822 3823 from = getname(oldname); 3824 if (IS_ERR(from)) 3825 return PTR_ERR(from); 3826 retry: 3827 dentry = user_path_create(newdfd, newname, &path, lookup_flags); 3828 error = PTR_ERR(dentry); 3829 if (IS_ERR(dentry)) 3830 goto out_putname; 3831 3832 error = security_path_symlink(&path, dentry, from->name); 3833 if (!error) 3834 error = vfs_symlink(path.dentry->d_inode, dentry, from->name); 3835 done_path_create(&path, dentry); 3836 if (retry_estale(error, lookup_flags)) { 3837 lookup_flags |= LOOKUP_REVAL; 3838 goto retry; 3839 } 3840 out_putname: 3841 putname(from); 3842 return error; 3843 } 3844 3845 SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname) 3846 { 3847 return sys_symlinkat(oldname, AT_FDCWD, newname); 3848 } 3849 3850 /** 3851 * vfs_link - create a new link 3852 * @old_dentry: object to be linked 3853 * @dir: new parent 3854 * @new_dentry: where to create the new link 3855 * @delegated_inode: returns inode needing a delegation break 3856 * 3857 * The caller must hold dir->i_mutex 3858 * 3859 * If vfs_link discovers a delegation on the to-be-linked file in need 3860 * of breaking, it will return -EWOULDBLOCK and return a reference to the 3861 * inode in delegated_inode. The caller should then break the delegation 3862 * and retry. 
Because breaking a delegation may take a long time, the 3863 * caller should drop the i_mutex before doing so. 3864 * 3865 * Alternatively, a caller may pass NULL for delegated_inode. This may 3866 * be appropriate for callers that expect the underlying filesystem not 3867 * to be NFS exported. 3868 */ 3869 int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode) 3870 { 3871 struct inode *inode = old_dentry->d_inode; 3872 unsigned max_links = dir->i_sb->s_max_links; 3873 int error; 3874 3875 if (!inode) 3876 return -ENOENT; 3877 3878 error = may_create(dir, new_dentry); 3879 if (error) 3880 return error; 3881 3882 if (dir->i_sb != inode->i_sb) 3883 return -EXDEV; 3884 3885 /* 3886 * A link to an append-only or immutable file cannot be created. 3887 */ 3888 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 3889 return -EPERM; 3890 if (!dir->i_op->link) 3891 return -EPERM; 3892 if (S_ISDIR(inode->i_mode)) 3893 return -EPERM; 3894 3895 error = security_inode_link(old_dentry, dir, new_dentry); 3896 if (error) 3897 return error; 3898 3899 mutex_lock(&inode->i_mutex); 3900 /* Make sure we don't allow creating a hardlink to an unlinked file */ 3901 if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE)) 3902 error = -ENOENT; 3903 else if (max_links && inode->i_nlink >= max_links) 3904 error = -EMLINK; 3905 else { 3906 error = try_break_deleg(inode, delegated_inode); 3907 if (!error) 3908 error = dir->i_op->link(old_dentry, dir, new_dentry); 3909 } 3910 3911 if (!error && (inode->i_state & I_LINKABLE)) { 3912 spin_lock(&inode->i_lock); 3913 inode->i_state &= ~I_LINKABLE; 3914 spin_unlock(&inode->i_lock); 3915 } 3916 mutex_unlock(&inode->i_mutex); 3917 if (!error) 3918 fsnotify_link(dir, inode, new_dentry); 3919 return error; 3920 } 3921 EXPORT_SYMBOL(vfs_link); 3922 3923 /* 3924 * Hardlinks are often used in delicate situations. We avoid 3925 * security-related surprises by not following symlinks on the 3926 * newname. --KAB 3927 * 3928 * We don't follow them on the oldname either to be compatible 3929 * with linux 2.0, and to avoid hard-linking to directories 3930 * and other special files. --ADM 3931 */ 3932 SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, 3933 int, newdfd, const char __user *, newname, int, flags) 3934 { 3935 struct dentry *new_dentry; 3936 struct path old_path, new_path; 3937 struct inode *delegated_inode = NULL; 3938 int how = 0; 3939 int error; 3940 3941 if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) 3942 return -EINVAL; 3943 /* 3944 * To use null names we require CAP_DAC_READ_SEARCH. 3945 * This ensures that not everyone will be able to create 3946 * a hardlink using the passed file descriptor.
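 *
 * Illustrative userland usage of the null-name case:
 *
 *	fd = open("/tmp/target", O_RDONLY);
 *	linkat(fd, "", AT_FDCWD, "newname", AT_EMPTY_PATH);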
3947 */ 3948 if (flags & AT_EMPTY_PATH) { 3949 if (!capable(CAP_DAC_READ_SEARCH)) 3950 return -ENOENT; 3951 how = LOOKUP_EMPTY; 3952 } 3953 3954 if (flags & AT_SYMLINK_FOLLOW) 3955 how |= LOOKUP_FOLLOW; 3956 retry: 3957 error = user_path_at(olddfd, oldname, how, &old_path); 3958 if (error) 3959 return error; 3960 3961 new_dentry = user_path_create(newdfd, newname, &new_path, 3962 (how & LOOKUP_REVAL)); 3963 error = PTR_ERR(new_dentry); 3964 if (IS_ERR(new_dentry)) 3965 goto out; 3966 3967 error = -EXDEV; 3968 if (old_path.mnt != new_path.mnt) 3969 goto out_dput; 3970 error = may_linkat(&old_path); 3971 if (unlikely(error)) 3972 goto out_dput; 3973 error = security_path_link(old_path.dentry, &new_path, new_dentry); 3974 if (error) 3975 goto out_dput; 3976 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode); 3977 out_dput: 3978 done_path_create(&new_path, new_dentry); 3979 if (delegated_inode) { 3980 error = break_deleg_wait(&delegated_inode); 3981 if (!error) { 3982 path_put(&old_path); 3983 goto retry; 3984 } 3985 } 3986 if (retry_estale(error, how)) { 3987 path_put(&old_path); 3988 how |= LOOKUP_REVAL; 3989 goto retry; 3990 } 3991 out: 3992 path_put(&old_path); 3993 3994 return error; 3995 } 3996 3997 SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname) 3998 { 3999 return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0); 4000 } 4001 4002 /** 4003 * vfs_rename - rename a filesystem object 4004 * @old_dir: parent of source 4005 * @old_dentry: source 4006 * @new_dir: parent of destination 4007 * @new_dentry: destination 4008 * @delegated_inode: returns an inode needing a delegation break 4009 * @flags: rename flags 4010 * 4011 * The caller must hold multiple mutexes--see lock_rename()). 4012 * 4013 * If vfs_rename discovers a delegation in need of breaking at either 4014 * the source or destination, it will return -EWOULDBLOCK and return a 4015 * reference to the inode in delegated_inode. The caller should then 4016 * break the delegation and retry. Because breaking a delegation may 4017 * take a long time, the caller should drop all locks before doing 4018 * so. 4019 * 4020 * Alternatively, a caller may pass NULL for delegated_inode. This may 4021 * be appropriate for callers that expect the underlying filesystem not 4022 * to be NFS exported. 4023 * 4024 * The worst of all namespace operations - renaming directory. "Perverted" 4025 * doesn't even start to describe it. Somebody in UCB had a heck of a trip... 4026 * Problems: 4027 * a) we can get into loop creation. 4028 * b) race potential - two innocent renames can create a loop together. 4029 * That's where 4.4 screws up. Current fix: serialization on 4030 * sb->s_vfs_rename_mutex. We might be more accurate, but that's another 4031 * story. 4032 * c) we have to lock _four_ objects - parents and victim (if it exists), 4033 * and source (if it is not a directory). 4034 * And that - after we got ->i_mutex on parents (until then we don't know 4035 * whether the target exists). Solution: try to be smart with locking 4036 * order for inodes. We rely on the fact that tree topology may change 4037 * only under ->s_vfs_rename_mutex _and_ that parent of the object we 4038 * move will be locked. Thus we can rank directories by the tree 4039 * (ancestors first) and rank all non-directories after them. 4040 * That works since everybody except rename does "lock parent, lookup, 4041 * lock child" and rename is under ->s_vfs_rename_mutex. 
/**
 * vfs_rename - rename a filesystem object
 * @old_dir:	parent of source
 * @old_dentry:	source
 * @new_dir:	parent of destination
 * @new_dentry:	destination
 * @delegated_inode: returns an inode needing a delegation break
 * @flags:	rename flags
 *
 * The caller must hold multiple mutexes--see lock_rename().
 *
 * If vfs_rename discovers a delegation in need of breaking at either
 * the source or destination, it will return -EWOULDBLOCK and return a
 * reference to the inode in delegated_inode.  The caller should then
 * break the delegation and retry.  Because breaking a delegation may
 * take a long time, the caller should drop all locks before doing
 * so.
 *
 * Alternatively, a caller may pass NULL for delegated_inode.  This may
 * be appropriate for callers that expect the underlying filesystem not
 * to be NFS exported.
 *
 * The worst of all namespace operations - renaming a directory. "Perverted"
 * doesn't even start to describe it. Somebody in UCB had a heck of a trip...
 * Problems:
 *	a) we can get into loop creation.
 *	b) race potential - two innocent renames can create a loop together.
 *	   That's where 4.4 screws up. Current fix: serialization on
 *	   sb->s_vfs_rename_mutex. We might be more accurate, but that's another
 *	   story.
 *	c) we have to lock _four_ objects - parents and victim (if it exists),
 *	   and source (if it is not a directory).
 *	   And that - after we got ->i_mutex on parents (until then we don't know
 *	   whether the target exists).  Solution: try to be smart with locking
 *	   order for inodes.  We rely on the fact that tree topology may change
 *	   only under ->s_vfs_rename_mutex _and_ that parent of the object we
 *	   move will be locked.  Thus we can rank directories by the tree
 *	   (ancestors first) and rank all non-directories after them.
 *	   That works since everybody except rename does "lock parent, lookup,
 *	   lock child" and rename is under ->s_vfs_rename_mutex.
 *	   HOWEVER, it relies on the assumption that any object with ->lookup()
 *	   has no more than 1 dentry.  If "hybrid" objects ever appear,
 *	   we'd better make sure that there's no link(2) for them.
 *	d) conversion from fhandle to dentry may come at the wrong moment - when
 *	   we are removing the target. Solution: we will have to grab ->i_mutex
 *	   in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
 *	   ->i_mutex on parents, which works but leads to some truly excessive
 *	   locking].
 */
int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
	       struct inode *new_dir, struct dentry *new_dentry,
	       struct inode **delegated_inode, unsigned int flags)
{
	int error;
	bool is_dir = d_is_dir(old_dentry);
	const unsigned char *old_name;
	struct inode *source = old_dentry->d_inode;
	struct inode *target = new_dentry->d_inode;
	bool new_is_dir = false;
	unsigned max_links = new_dir->i_sb->s_max_links;

	if (source == target)
		return 0;

	error = may_delete(old_dir, old_dentry, is_dir);
	if (error)
		return error;

	if (!target) {
		error = may_create(new_dir, new_dentry);
	} else {
		new_is_dir = d_is_dir(new_dentry);

		if (!(flags & RENAME_EXCHANGE))
			error = may_delete(new_dir, new_dentry, is_dir);
		else
			error = may_delete(new_dir, new_dentry, new_is_dir);
	}
	if (error)
		return error;

	if (!old_dir->i_op->rename && !old_dir->i_op->rename2)
		return -EPERM;

	if (flags && !old_dir->i_op->rename2)
		return -EINVAL;

	/*
	 * If we are going to change the parent - check write permissions,
	 * we'll need to flip '..'.
	 */
	if (new_dir != old_dir) {
		if (is_dir) {
			error = inode_permission(source, MAY_WRITE);
			if (error)
				return error;
		}
		if ((flags & RENAME_EXCHANGE) && new_is_dir) {
			error = inode_permission(target, MAY_WRITE);
			if (error)
				return error;
		}
	}

	error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry,
				      flags);
	if (error)
		return error;

	old_name = fsnotify_oldname_init(old_dentry->d_name.name);
	dget(new_dentry);
	if (!is_dir || (flags & RENAME_EXCHANGE))
		lock_two_nondirectories(source, target);
	else if (target)
		mutex_lock(&target->i_mutex);

	error = -EBUSY;
	if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
		goto out;

	if (max_links && new_dir != old_dir) {
		error = -EMLINK;
		if (is_dir && !new_is_dir && new_dir->i_nlink >= max_links)
			goto out;
		if ((flags & RENAME_EXCHANGE) && !is_dir && new_is_dir &&
		    old_dir->i_nlink >= max_links)
			goto out;
	}
	if (is_dir && !(flags & RENAME_EXCHANGE) && target)
		shrink_dcache_parent(new_dentry);
	if (!is_dir) {
		error = try_break_deleg(source, delegated_inode);
		if (error)
			goto out;
	}
	if (target && !new_is_dir) {
		error = try_break_deleg(target, delegated_inode);
		if (error)
			goto out;
	}
	if (!old_dir->i_op->rename2) {
		error = old_dir->i_op->rename(old_dir, old_dentry,
					      new_dir, new_dentry);
	} else {
		WARN_ON(old_dir->i_op->rename != NULL);
		error = old_dir->i_op->rename2(old_dir, old_dentry,
					       new_dir, new_dentry, flags);
	}
	if (error)
		goto out;

	if (!(flags & RENAME_EXCHANGE) && target) {
		if (is_dir)
			target->i_flags |= S_DEAD;
		dont_mount(new_dentry);
	}
	if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) {
		if (!(flags & RENAME_EXCHANGE))
			d_move(old_dentry, new_dentry);
		else
			d_exchange(old_dentry, new_dentry);
	}
out:
	if (!is_dir || (flags & RENAME_EXCHANGE))
		unlock_two_nondirectories(source, target);
	else if (target)
		mutex_unlock(&target->i_mutex);
	dput(new_dentry);
	if (!error) {
		fsnotify_move(old_dir, new_dir, old_name, is_dir,
			      !(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry);
		if (flags & RENAME_EXCHANGE) {
			fsnotify_move(new_dir, old_dir, old_dentry->d_name.name,
				      new_is_dir, NULL, new_dentry);
		}
	}
	fsnotify_oldname_free(old_name);

	return error;
}
EXPORT_SYMBOL(vfs_rename);
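/*
 * Illustrative sketch of the locking contract described above (roughly what
 * sys_renameat2() below and in-kernel users such as nfsd do; the names here
 * are placeholders and error handling is elided):
 *
 *	trap = lock_rename(new_parent, old_parent);
 *	old_dentry = lookup_one_len(old_name, old_parent, old_len);
 *	new_dentry = lookup_one_len(new_name, new_parent, new_len);
 *	if (old_dentry == trap || new_dentry == trap)
 *		... one side is an ancestor of the other, bail out ...
 *	error = vfs_rename(old_parent->d_inode, old_dentry,
 *			   new_parent->d_inode, new_dentry, NULL, 0);
 *	unlock_rename(new_parent, old_parent);
 */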
SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
		int, newdfd, const char __user *, newname, unsigned int, flags)
{
	struct dentry *old_dir, *new_dir;
	struct dentry *old_dentry, *new_dentry;
	struct dentry *trap;
	struct nameidata oldnd, newnd;
	struct inode *delegated_inode = NULL;
	struct filename *from;
	struct filename *to;
	unsigned int lookup_flags = 0;
	bool should_retry = false;
	int error;

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
		return -EINVAL;

	if ((flags & RENAME_NOREPLACE) && (flags & RENAME_EXCHANGE))
		return -EINVAL;

retry:
	from = user_path_parent(olddfd, oldname, &oldnd, lookup_flags);
	if (IS_ERR(from)) {
		error = PTR_ERR(from);
		goto exit;
	}

	to = user_path_parent(newdfd, newname, &newnd, lookup_flags);
	if (IS_ERR(to)) {
		error = PTR_ERR(to);
		goto exit1;
	}

	error = -EXDEV;
	if (oldnd.path.mnt != newnd.path.mnt)
		goto exit2;

	old_dir = oldnd.path.dentry;
	error = -EBUSY;
	if (oldnd.last_type != LAST_NORM)
		goto exit2;

	new_dir = newnd.path.dentry;
	if (flags & RENAME_NOREPLACE)
		error = -EEXIST;
	if (newnd.last_type != LAST_NORM)
		goto exit2;

	error = mnt_want_write(oldnd.path.mnt);
	if (error)
		goto exit2;

	oldnd.flags &= ~LOOKUP_PARENT;
	newnd.flags &= ~LOOKUP_PARENT;
	if (!(flags & RENAME_EXCHANGE))
		newnd.flags |= LOOKUP_RENAME_TARGET;

retry_deleg:
	trap = lock_rename(new_dir, old_dir);

	old_dentry = lookup_hash(&oldnd);
	error = PTR_ERR(old_dentry);
	if (IS_ERR(old_dentry))
		goto exit3;
	/* source must exist */
	error = -ENOENT;
	if (d_is_negative(old_dentry))
		goto exit4;
	new_dentry = lookup_hash(&newnd);
	error = PTR_ERR(new_dentry);
	if (IS_ERR(new_dentry))
		goto exit4;
	error = -EEXIST;
	if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry))
		goto exit5;
	if (flags & RENAME_EXCHANGE) {
		error = -ENOENT;
		if (d_is_negative(new_dentry))
			goto exit5;

		if (!d_is_dir(new_dentry)) {
			error = -ENOTDIR;
			if (newnd.last.name[newnd.last.len])
				goto exit5;
		}
	}
	/* unless the source is a directory, trailing slashes give -ENOTDIR */
	if (!d_is_dir(old_dentry)) {
		error = -ENOTDIR;
		if (oldnd.last.name[oldnd.last.len])
			goto exit5;
		if (!(flags & RENAME_EXCHANGE) && newnd.last.name[newnd.last.len])
			goto exit5;
	}
	/* source should not be an ancestor of the target */
	error = -EINVAL;
	if (old_dentry == trap)
		goto exit5;
	/* target should not be an ancestor of the source */
	if (!(flags & RENAME_EXCHANGE))
		error = -ENOTEMPTY;
	if (new_dentry == trap)
		goto exit5;

	error = security_path_rename(&oldnd.path, old_dentry,
				     &newnd.path, new_dentry, flags);
	if (error)
		goto exit5;
	error = vfs_rename(old_dir->d_inode, old_dentry,
			   new_dir->d_inode, new_dentry,
			   &delegated_inode, flags);
exit5:
	dput(new_dentry);
exit4:
	dput(old_dentry);
exit3:
	unlock_rename(new_dir, old_dir);
	if (delegated_inode) {
		error = break_deleg_wait(&delegated_inode);
		if (!error)
			goto retry_deleg;
	}
	mnt_drop_write(oldnd.path.mnt);
exit2:
	if (retry_estale(error, lookup_flags))
		should_retry = true;
	path_put(&newnd.path);
	putname(to);
exit1:
	path_put(&oldnd.path);
	putname(from);
	if (should_retry) {
		should_retry = false;
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
exit:
	return error;
}
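/*
 * For illustration only: userspace reaches the two flags accepted above via
 * the raw system call (at the time of writing there is no libc wrapper):
 *
 *	syscall(SYS_renameat2, AT_FDCWD, "a", AT_FDCWD, "b", RENAME_NOREPLACE);
 *	syscall(SYS_renameat2, AT_FDCWD, "a", AT_FDCWD, "b", RENAME_EXCHANGE);
 *
 * RENAME_NOREPLACE fails with -EEXIST if "b" already exists, while
 * RENAME_EXCHANGE atomically swaps "a" and "b" and requires both to exist.
 */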
SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
		int, newdfd, const char __user *, newname)
{
	return sys_renameat2(olddfd, oldname, newdfd, newname, 0);
}

SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
{
	return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
}

int readlink_copy(char __user *buffer, int buflen, const char *link)
{
	int len = PTR_ERR(link);
	if (IS_ERR(link))
		goto out;

	len = strlen(link);
	if (len > (unsigned) buflen)
		len = buflen;
	if (copy_to_user(buffer, link, len))
		len = -EFAULT;
out:
	return len;
}
EXPORT_SYMBOL(readlink_copy);
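/*
 * Illustrative sketch (hypothetical filesystem, not part of this file):
 * a filesystem that keeps the symlink body in memory can implement
 * ->readlink() directly on top of readlink_copy():
 *
 *	static int foofs_readlink(struct dentry *dentry, char __user *buffer,
 *				  int buflen)
 *	{
 *		return readlink_copy(buffer, buflen,
 *				     (const char *)dentry->d_inode->i_private);
 *	}
 *
 * Note that, as with readlink(2), the copied name is not NUL-terminated and
 * is silently truncated to buflen bytes.
 */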
/*
 * A helper for ->readlink().  This should be used *ONLY* for symlinks that
 * have ->follow_link() touching nd only in nd_set_link().  Using (or not
 * using) it for any given inode is up to the filesystem.
 */
int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
	struct nameidata nd;
	void *cookie;
	int res;

	nd.depth = 0;
	cookie = dentry->d_inode->i_op->follow_link(dentry, &nd);
	if (IS_ERR(cookie))
		return PTR_ERR(cookie);

	res = readlink_copy(buffer, buflen, nd_get_link(&nd));
	if (dentry->d_inode->i_op->put_link)
		dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
	return res;
}
EXPORT_SYMBOL(generic_readlink);

/* get the link contents into pagecache */
static char *page_getlink(struct dentry *dentry, struct page **ppage)
{
	char *kaddr;
	struct page *page;
	struct address_space *mapping = dentry->d_inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		return (char *)page;
	*ppage = page;
	kaddr = kmap(page);
	nd_terminate_link(kaddr, dentry->d_inode->i_size, PAGE_SIZE - 1);
	return kaddr;
}

int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
	struct page *page = NULL;
	int res = readlink_copy(buffer, buflen, page_getlink(dentry, &page));
	if (page) {
		kunmap(page);
		page_cache_release(page);
	}
	return res;
}
EXPORT_SYMBOL(page_readlink);

void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	nd_set_link(nd, page_getlink(dentry, &page));
	return page;
}
EXPORT_SYMBOL(page_follow_link_light);

void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	struct page *page = cookie;

	if (page) {
		kunmap(page);
		page_cache_release(page);
	}
}
EXPORT_SYMBOL(page_put_link);

/*
 * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS.
 */
int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	void *fsdata;
	int err;
	char *kaddr;
	unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE;
	if (nofs)
		flags |= AOP_FLAG_NOFS;

retry:
	err = pagecache_write_begin(NULL, mapping, 0, len-1,
				flags, &page, &fsdata);
	if (err)
		goto fail;

	kaddr = kmap_atomic(page);
	memcpy(kaddr, symname, len-1);
	kunmap_atomic(kaddr);

	err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
							page, fsdata);
	if (err < 0)
		goto fail;
	if (err < len-1)
		goto retry;

	mark_inode_dirty(inode);
	return 0;
fail:
	return err;
}
EXPORT_SYMBOL(__page_symlink);

int page_symlink(struct inode *inode, const char *symname, int len)
{
	return __page_symlink(inode, symname, len,
			!(mapping_gfp_mask(inode->i_mapping) & __GFP_FS));
}
EXPORT_SYMBOL(page_symlink);

const struct inode_operations page_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
};
EXPORT_SYMBOL(page_symlink_inode_operations);
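/*
 * Illustrative sketch (hypothetical filesystem, not from this file): a
 * simple filesystem that stores symlink bodies in the page cache can wire
 * the helpers above together in its ->symlink() method, roughly like this
 * (foofs_aops is a placeholder for the filesystem's address_space ops):
 *
 *	inode->i_op = &page_symlink_inode_operations;
 *	inode->i_mapping->a_ops = &foofs_aops;
 *	err = page_symlink(inode, symname, strlen(symname) + 1);
 *
 * page_symlink() writes the body through ->write_begin()/->write_end(),
 * and generic_readlink()/page_follow_link_light()/page_put_link() then
 * read it back via page_getlink().
 */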