/*
 *  linux/fs/ext4/namei.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/namei.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  Directory entry file type support and forward compatibility hooks
 *	for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
 *  Hash Tree Directory indexing (c)
 *	Daniel Phillips, 2001
 *  Hash Tree Directory indexing porting
 *	Christopher Li, 2002
 *  Hash Tree Directory indexing cleanup
 *	Theodore Ts'o, 2002
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/jbd2.h>
#include <linux/time.h>
#include <linux/ext4_fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>

#include "namei.h"
#include "xattr.h"
#include "acl.h"

/*
 * define how far ahead to read directories while searching them.
 */
#define NAMEI_RA_CHUNKS  2
#define NAMEI_RA_BLOCKS  4
#define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
#define NAMEI_RA_INDEX(c,b)  (((c) * NAMEI_RA_BLOCKS) + (b))

static struct buffer_head *ext4_append(handle_t *handle,
					struct inode *inode,
					u32 *block, int *err)
{
	struct buffer_head *bh;

	*block = inode->i_size >> inode->i_sb->s_blocksize_bits;

	if ((bh = ext4_bread(handle, inode, *block, 1, err))) {
		inode->i_size += inode->i_sb->s_blocksize;
		EXT4_I(inode)->i_disksize = inode->i_size;
		ext4_journal_get_write_access(handle,bh);
	}
	return bh;
}

#ifndef assert
#define assert(test) J_ASSERT(test)
#endif

#ifndef swap
#define swap(x, y) do { typeof(x) z = x; x = y; y = z; } while (0)
#endif

#ifdef DX_DEBUG
#define dxtrace(command) command
#else
#define dxtrace(command)
#endif

struct fake_dirent
{
	__le32 inode;
	__le16 rec_len;
	u8 name_len;
	u8 file_type;
};

struct dx_countlimit
{
	__le16 limit;
	__le16 count;
};

struct dx_entry
{
	__le32 hash;
	__le32 block;
};

/*
 * dx_root_info is laid out so that if it should somehow get overlaid by a
 * dirent the two low bits of the hash version will be zero.  Therefore, the
 * hash version mod 4 should never be 0.  Sincerely, the paranoia department.
 */
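/*
 * Overview of the on-disk index layout, as implied by the structures and
 * the probe/split code in this file (a reader's summary, not normative):
 * block 0 of an indexed directory is the dx_root block, which begins with
 * ordinary "." and ".." dirents, followed by dx_root_info and an array of
 * dx_entry index entries.  Any additional index blocks are dx_nodes: a
 * fake dirent spanning the whole block, followed by another dx_entry
 * array.  The leaf blocks themselves are ordinary ext4 directory blocks.
 */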
struct dx_root
{
	struct fake_dirent dot;
	char dot_name[4];
	struct fake_dirent dotdot;
	char dotdot_name[4];
	struct dx_root_info
	{
		__le32 reserved_zero;
		u8 hash_version;
		u8 info_length; /* 8 */
		u8 indirect_levels;
		u8 unused_flags;
	}
	info;
	struct dx_entry	entries[0];
};

struct dx_node
{
	struct fake_dirent fake;
	struct dx_entry	entries[0];
};


struct dx_frame
{
	struct buffer_head *bh;
	struct dx_entry *entries;
	struct dx_entry *at;
};

struct dx_map_entry
{
	u32 hash;
	u16 offs;
	u16 size;
};

static inline unsigned dx_get_block (struct dx_entry *entry);
static void dx_set_block (struct dx_entry *entry, unsigned value);
static inline unsigned dx_get_hash (struct dx_entry *entry);
static void dx_set_hash (struct dx_entry *entry, unsigned value);
static unsigned dx_get_count (struct dx_entry *entries);
static unsigned dx_get_limit (struct dx_entry *entries);
static void dx_set_count (struct dx_entry *entries, unsigned value);
static void dx_set_limit (struct dx_entry *entries, unsigned value);
static unsigned dx_root_limit (struct inode *dir, unsigned infosize);
static unsigned dx_node_limit (struct inode *dir);
static struct dx_frame *dx_probe(struct dentry *dentry,
				 struct inode *dir,
				 struct dx_hash_info *hinfo,
				 struct dx_frame *frame,
				 int *err);
static void dx_release (struct dx_frame *frames);
static int dx_make_map (struct ext4_dir_entry_2 *de, int size,
			struct dx_hash_info *hinfo, struct dx_map_entry map[]);
static void dx_sort_map(struct dx_map_entry *map, unsigned count);
static struct ext4_dir_entry_2 *dx_move_dirents (char *from, char *to,
		struct dx_map_entry *offsets, int count);
static struct ext4_dir_entry_2* dx_pack_dirents (char *base, int size);
static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
static int ext4_htree_next_block(struct inode *dir, __u32 hash,
				 struct dx_frame *frame,
				 struct dx_frame *frames,
				 __u32 *start_hash);
static struct buffer_head * ext4_dx_find_entry(struct dentry *dentry,
		       struct ext4_dir_entry_2 **res_dir, int *err);
static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
			     struct inode *inode);

/*
 * Future: use high four bits of block for coalesce-on-delete flags
 * Mask them off for now.
 */
static inline unsigned dx_get_block (struct dx_entry *entry)
{
	return le32_to_cpu(entry->block) & 0x00ffffff;
}

static inline void dx_set_block (struct dx_entry *entry, unsigned value)
{
	entry->block = cpu_to_le32(value);
}

static inline unsigned dx_get_hash (struct dx_entry *entry)
{
	return le32_to_cpu(entry->hash);
}

static inline void dx_set_hash (struct dx_entry *entry, unsigned value)
{
	entry->hash = cpu_to_le32(value);
}

static inline unsigned dx_get_count (struct dx_entry *entries)
{
	return le16_to_cpu(((struct dx_countlimit *) entries)->count);
}

static inline unsigned dx_get_limit (struct dx_entry *entries)
{
	return le16_to_cpu(((struct dx_countlimit *) entries)->limit);
}

static inline void dx_set_count (struct dx_entry *entries, unsigned value)
{
	((struct dx_countlimit *) entries)->count = cpu_to_le16(value);
}

static inline void dx_set_limit (struct dx_entry *entries, unsigned value)
{
	((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
}

static inline unsigned dx_root_limit (struct inode *dir, unsigned infosize)
{
	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
		EXT4_DIR_REC_LEN(2) - infosize;
	return 0? 20: entry_space / sizeof(struct dx_entry);
}

static inline unsigned dx_node_limit (struct inode *dir)
{
	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
	return 0? 22: entry_space / sizeof(struct dx_entry);
}
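/*
 * For illustration only (not used by the code): assuming a 4096-byte block
 * and the 8-byte dx_entry above, dx_root_limit() comes to roughly
 * (4096 - 12 - 12 - 8) / 8 = 508 index entries in the root block (the 12s
 * being EXT4_DIR_REC_LEN(1) and EXT4_DIR_REC_LEN(2) for "." and "..", the
 * 8 being dx_root_info), and dx_node_limit() to (4096 - 8) / 8 = 511
 * entries per interior node.
 */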
/*
 * Debug
 */
#ifdef DX_DEBUG
static void dx_show_index (char * label, struct dx_entry *entries)
{
	int i, n = dx_get_count (entries);
	printk("%s index ", label);
	for (i = 0; i < n; i++) {
		printk("%x->%u ", i? dx_get_hash(entries + i) :
				0, dx_get_block(entries + i));
	}
	printk("\n");
}

struct stats
{
	unsigned names;
	unsigned space;
	unsigned bcount;
};

static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de,
				 int size, int show_names)
{
	unsigned names = 0, space = 0;
	char *base = (char *) de;
	struct dx_hash_info h = *hinfo;

	printk("names: ");
	while ((char *) de < base + size)
	{
		if (de->inode)
		{
			if (show_names)
			{
				int len = de->name_len;
				char *name = de->name;
				while (len--) printk("%c", *name++);
				ext4fs_dirhash(de->name, de->name_len, &h);
				printk(":%x.%u ", h.hash,
				       ((char *) de - base));
			}
			space += EXT4_DIR_REC_LEN(de->name_len);
			names++;
		}
		de = (struct ext4_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
	}
	printk("(%i)\n", names);
	return (struct stats) { names, space, 1 };
}

struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
			     struct dx_entry *entries, int levels)
{
	unsigned blocksize = dir->i_sb->s_blocksize;
	unsigned count = dx_get_count (entries), names = 0, space = 0, i;
	unsigned bcount = 0;
	struct buffer_head *bh;
	int err;
	printk("%i indexed blocks...\n", count);
	for (i = 0; i < count; i++, entries++)
	{
		u32 block = dx_get_block(entries), hash = i? dx_get_hash(entries): 0;
		u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
		struct stats stats;
		printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range);
		if (!(bh = ext4_bread (NULL,dir, block, 0,&err))) continue;
		stats = levels?
		   dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
		   dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0);
		names += stats.names;
		space += stats.space;
		bcount += stats.bcount;
		brelse (bh);
	}
	if (bcount)
		printk("%snames %u, fullness %u (%u%%)\n", levels?"":" ",
			names, space/bcount,(space/bcount)*100/blocksize);
	return (struct stats) { names, space, bcount};
}
#endif /* DX_DEBUG */

/*
 * Probe for a directory leaf block to search.
 *
 * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
 * error in the directory index, and the caller should fall back to
 * searching the directory normally.  The callers of dx_probe **MUST**
 * check for this error code, and make sure it never gets reflected
 * back to userspace.
 */
static struct dx_frame *
dx_probe(struct dentry *dentry, struct inode *dir,
	 struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
{
	unsigned count, indirect;
	struct dx_entry *at, *entries, *p, *q, *m;
	struct dx_root *root;
	struct buffer_head *bh;
	struct dx_frame *frame = frame_in;
	u32 hash;

	frame->bh = NULL;
	if (dentry)
		dir = dentry->d_parent->d_inode;
	if (!(bh = ext4_bread (NULL,dir, 0, 0, err)))
		goto fail;
	root = (struct dx_root *) bh->b_data;
	if (root->info.hash_version != DX_HASH_TEA &&
	    root->info.hash_version != DX_HASH_HALF_MD4 &&
	    root->info.hash_version != DX_HASH_LEGACY) {
		ext4_warning(dir->i_sb, __FUNCTION__,
			     "Unrecognised inode hash code %d",
			     root->info.hash_version);
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
		goto fail;
	}
	hinfo->hash_version = root->info.hash_version;
	hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
	if (dentry)
		ext4fs_dirhash(dentry->d_name.name, dentry->d_name.len, hinfo);
	hash = hinfo->hash;

	if (root->info.unused_flags & 1) {
		ext4_warning(dir->i_sb, __FUNCTION__,
			     "Unimplemented inode hash flags: %#06x",
			     root->info.unused_flags);
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
		goto fail;
	}

	if ((indirect = root->info.indirect_levels) > 1) {
		ext4_warning(dir->i_sb, __FUNCTION__,
			     "Unimplemented inode hash depth: %#06x",
			     root->info.indirect_levels);
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
		goto fail;
	}

	entries = (struct dx_entry *) (((char *)&root->info) +
				       root->info.info_length);

	if (dx_get_limit(entries) != dx_root_limit(dir,
						   root->info.info_length)) {
		ext4_warning(dir->i_sb, __FUNCTION__,
			     "dx entry: limit != root limit");
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
		goto fail;
	}

	dxtrace (printk("Look up %x", hash));
	while (1)
	{
		count = dx_get_count(entries);
		if (!count || count > dx_get_limit(entries)) {
			ext4_warning(dir->i_sb, __FUNCTION__,
				     "dx entry: no count or count > limit");
			brelse(bh);
			*err = ERR_BAD_DX_DIR;
			goto fail2;
		}

		p = entries + 1;
		q = entries + count - 1;
		while (p <= q)
		{
			m = p + (q - p)/2;
			dxtrace(printk("."));
			if (dx_get_hash(m) > hash)
				q = m - 1;
			else
				p = m + 1;
		}

		if (0) // linear search cross check
		{
			unsigned n = count - 1;
			at = entries;
			while (n--)
			{
				dxtrace(printk(","));
				if (dx_get_hash(++at) > hash)
				{
					at--;
					break;
				}
			}
			assert (at == p - 1);
		}

		at = p - 1;
		dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
		frame->bh = bh;
		frame->entries = entries;
		frame->at = at;
		if (!indirect--) return frame;
		if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
			goto fail2;
		at = entries = ((struct dx_node *) bh->b_data)->entries;
		if (dx_get_limit(entries) != dx_node_limit (dir)) {
			ext4_warning(dir->i_sb, __FUNCTION__,
				     "dx entry: limit != node limit");
			brelse(bh);
			*err = ERR_BAD_DX_DIR;
			goto fail2;
		}
		frame++;
		frame->bh = NULL;
	}
fail2:
	while (frame >= frame_in) {
		brelse(frame->bh);
		frame--;
	}
fail:
	if (*err == ERR_BAD_DX_DIR)
		ext4_warning(dir->i_sb, __FUNCTION__,
			     "Corrupt dir inode %ld, running e2fsck is "
			     "recommended.", dir->i_ino);
	return NULL;
}

static void dx_release (struct dx_frame *frames)
{
	if (frames[0].bh == NULL)
		return;

	if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
		brelse(frames[1].bh);
	brelse(frames[0].bh);
}

/*
 * This function increments the frame pointer to search the next leaf
 * block, and reads in the necessary intervening nodes if the search
 * should be necessary.  Whether or not the search is necessary is
 * controlled by the hash parameter.  If the hash value is even, then
 * the search is only continued if the next block starts with that
 * hash value.  This is used if we are searching for a specific file.
 *
 * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
 *
 * This function returns 1 if the caller should continue to search,
 * or 0 if it should not.  If there is an error reading one of the
 * index blocks, it will return a negative error code.
 *
 * If start_hash is non-null, it will be filled in with the starting
 * hash of the next page.
 */
static int ext4_htree_next_block(struct inode *dir, __u32 hash,
				 struct dx_frame *frame,
				 struct dx_frame *frames,
				 __u32 *start_hash)
{
	struct dx_frame *p;
	struct buffer_head *bh;
	int err, num_frames = 0;
	__u32 bhash;

	p = frame;
	/*
	 * Find the next leaf page by incrementing the frame pointer.
	 * If we run out of entries in the interior node, loop around and
	 * increment pointer in the parent node.  When we break out of
	 * this loop, num_frames indicates the number of interior
	 * nodes that need to be read.
	 */
	while (1) {
		if (++(p->at) < p->entries + dx_get_count(p->entries))
			break;
		if (p == frames)
			return 0;
		num_frames++;
		p--;
	}

	/*
	 * If the hash is 1, then continue only if the next page has a
	 * continuation hash of any value.  This is used for readdir
	 * handling.  Otherwise, check to see if the hash matches the
	 * desired continuation hash.  If it doesn't, return since
	 * there's no point to read in the successive index pages.
	 */
	bhash = dx_get_hash(p->at);
	if (start_hash)
		*start_hash = bhash;
	if ((hash & 1) == 0) {
		if ((bhash & ~1) != hash)
			return 0;
	}
	/*
	 * If the hash is HASH_NB_ALWAYS, we always go to the next
	 * block so no check is necessary
	 */
	while (num_frames--) {
		if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
				      0, &err)))
			return err; /* Failure */
		p++;
		brelse (p->bh);
		p->bh = bh;
		p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
	}
	return 1;
}


/*
 * p is at least 6 bytes before the end of page
 */
static inline struct ext4_dir_entry_2 *ext4_next_entry(struct ext4_dir_entry_2 *p)
{
	return (struct ext4_dir_entry_2 *)((char*)p + le16_to_cpu(p->rec_len));
}

/*
 * This function fills a red-black tree with information from a
 * directory block.  It returns the number of directory entries loaded
 * into the tree.  If there is an error it is returned in err.
 */
static int htree_dirblock_to_tree(struct file *dir_file,
				  struct inode *dir, int block,
				  struct dx_hash_info *hinfo,
				  __u32 start_hash, __u32 start_minor_hash)
{
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de, *top;
	int err, count = 0;

	dxtrace(printk("In htree dirblock_to_tree: block %d\n", block));
	if (!(bh = ext4_bread (NULL, dir, block, 0, &err)))
		return err;

	de = (struct ext4_dir_entry_2 *) bh->b_data;
	top = (struct ext4_dir_entry_2 *) ((char *) de +
					   dir->i_sb->s_blocksize -
					   EXT4_DIR_REC_LEN(0));
	for (; de < top; de = ext4_next_entry(de)) {
		if (!ext4_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
					(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
						+((char *)de - bh->b_data))) {
			/* On error, skip the f_pos to the next block. */
			dir_file->f_pos = (dir_file->f_pos |
					(dir->i_sb->s_blocksize - 1)) + 1;
			brelse (bh);
			return count;
		}
		ext4fs_dirhash(de->name, de->name_len, hinfo);
		if ((hinfo->hash < start_hash) ||
		    ((hinfo->hash == start_hash) &&
		     (hinfo->minor_hash < start_minor_hash)))
			continue;
		if (de->inode == 0)
			continue;
		if ((err = ext4_htree_store_dirent(dir_file,
				   hinfo->hash, hinfo->minor_hash, de)) != 0) {
			brelse(bh);
			return err;
		}
		count++;
	}
	brelse(bh);
	return count;
}


/*
 * This function fills a red-black tree with information from a
 * directory.  We start scanning the directory in hash order, starting
 * at start_hash and start_minor_hash.
 *
 * This function returns the number of entries inserted into the tree,
 * or a negative error code.
 */
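/*
 * Reader's note (derived from the code in this file, not normative): the
 * hashes produced by ext4fs_dirhash() have the low bit clear, which lets
 * the low bit of an index entry's hash serve as a "continuation" marker
 * when several leaf blocks start with the same hash; ext4_htree_next_block()
 * checks (bhash & ~1) against the wanted hash for exactly this reason.
 * ext4_htree_fill_tree() below feeds readdir in hash order and stores the
 * "." and ".." entries under the reserved hash values 0 and 2.
 */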
int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
			 __u32 start_minor_hash, __u32 *next_hash)
{
	struct dx_hash_info hinfo;
	struct ext4_dir_entry_2 *de;
	struct dx_frame frames[2], *frame;
	struct inode *dir;
	int block, err;
	int count = 0;
	int ret;
	__u32 hashval;

	dxtrace(printk("In htree_fill_tree, start hash: %x:%x\n", start_hash,
		       start_minor_hash));
	dir = dir_file->f_path.dentry->d_inode;
	if (!(EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) {
		hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
		hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
		count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
					       start_hash, start_minor_hash);
		*next_hash = ~0;
		return count;
	}
	hinfo.hash = start_hash;
	hinfo.minor_hash = 0;
	frame = dx_probe(NULL, dir_file->f_path.dentry->d_inode, &hinfo, frames, &err);
	if (!frame)
		return err;

	/* Add '.' and '..' from the htree header */
	if (!start_hash && !start_minor_hash) {
		de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
		if ((err = ext4_htree_store_dirent(dir_file, 0, 0, de)) != 0)
			goto errout;
		count++;
	}
	if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) {
		de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
		de = ext4_next_entry(de);
		if ((err = ext4_htree_store_dirent(dir_file, 2, 0, de)) != 0)
			goto errout;
		count++;
	}

	while (1) {
		block = dx_get_block(frame->at);
		ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
					     start_hash, start_minor_hash);
		if (ret < 0) {
			err = ret;
			goto errout;
		}
		count += ret;
		hashval = ~0;
		ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
					    frame, frames, &hashval);
		*next_hash = hashval;
		if (ret < 0) {
			err = ret;
			goto errout;
		}
		/*
		 * Stop if:  (a) there are no more entries, or
		 * (b) we have inserted at least one entry and the
		 * next hash value is not a continuation
		 */
		if ((ret == 0) ||
		    (count && ((hashval & 1) == 0)))
			break;
	}
	dx_release(frames);
	dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n",
		       count, *next_hash));
	return count;
errout:
	dx_release(frames);
	return (err);
}


/*
 * Directory block splitting, compacting
 */

/*
 * Create map of hash values, offsets, and sizes, stored at end of block.
 * Returns number of entries mapped.
 */
static int dx_make_map (struct ext4_dir_entry_2 *de, int size,
			struct dx_hash_info *hinfo, struct dx_map_entry *map_tail)
{
	int count = 0;
	char *base = (char *) de;
	struct dx_hash_info h = *hinfo;

	while ((char *) de < base + size)
	{
		if (de->name_len && de->inode) {
			ext4fs_dirhash(de->name, de->name_len, &h);
			map_tail--;
			map_tail->hash = h.hash;
			map_tail->offs = (u16) ((char *) de - base);
			map_tail->size = le16_to_cpu(de->rec_len);
			count++;
			cond_resched();
		}
		/* XXX: do we need to check rec_len == 0 case? -Chris */
		de = (struct ext4_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
	}
	return count;
}

/* Sort map by hash value */
static void dx_sort_map (struct dx_map_entry *map, unsigned count)
{
	struct dx_map_entry *p, *q, *top = map + count - 1;
	int more;
	/* Combsort until bubble sort doesn't suck */
	while (count > 2) {
		count = count*10/13;
		if (count - 9 < 2) /* 9, 10 -> 11 */
			count = 11;
		for (p = top, q = p - count; q >= map; p--, q--)
			if (p->hash < q->hash)
				swap(*p, *q);
	}
	/* Garden variety bubble sort */
	do {
		more = 0;
		q = top;
		while (q-- > map) {
			if (q[1].hash >= q[0].hash)
				continue;
			swap(*(q+1), *q);
			more = 1;
		}
	} while(more);
}

static void dx_insert_block(struct dx_frame *frame, u32 hash, u32 block)
{
	struct dx_entry *entries = frame->entries;
	struct dx_entry *old = frame->at, *new = old + 1;
	int count = dx_get_count(entries);

	assert(count < dx_get_limit(entries));
	assert(old < entries + count);
	memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
	dx_set_hash(new, hash);
	dx_set_block(new, block);
	dx_set_count(entries, count + 1);
}

static void ext4_update_dx_flag(struct inode *inode)
{
	if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
				     EXT4_FEATURE_COMPAT_DIR_INDEX))
		EXT4_I(inode)->i_flags &= ~EXT4_INDEX_FL;
}

/*
 * NOTE! unlike strncmp, ext4_match returns 1 for success, 0 for failure.
 *
 * `len <= EXT4_NAME_LEN' is guaranteed by caller.
 * `de != NULL' is guaranteed by caller.
 */
static inline int ext4_match (int len, const char * const name,
			      struct ext4_dir_entry_2 * de)
{
	if (len != de->name_len)
		return 0;
	if (!de->inode)
		return 0;
	return !memcmp(name, de->name, len);
}

/*
 * Returns 0 if not found, -1 on failure, and 1 on success
 */
static inline int search_dirblock(struct buffer_head * bh,
				  struct inode *dir,
				  struct dentry *dentry,
				  unsigned long offset,
				  struct ext4_dir_entry_2 ** res_dir)
{
	struct ext4_dir_entry_2 * de;
	char * dlimit;
	int de_len;
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;

	de = (struct ext4_dir_entry_2 *) bh->b_data;
	dlimit = bh->b_data + dir->i_sb->s_blocksize;
	while ((char *) de < dlimit) {
		/* this code is executed quadratically often */
		/* do minimal checking `by hand' */

		if ((char *) de + namelen <= dlimit &&
		    ext4_match (namelen, name, de)) {
			/* found a match - just to be sure, do a full check */
			if (!ext4_check_dir_entry("ext4_find_entry",
						  dir, de, bh, offset))
				return -1;
			*res_dir = de;
			return 1;
		}
		/* prevent looping on a bad block */
		de_len = le16_to_cpu(de->rec_len);
		if (de_len <= 0)
			return -1;
		offset += de_len;
		de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
	}
	return 0;
}


/*
 *	ext4_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the cache buffer in which the entry was found, and the entry
 * itself (as a parameter - res_dir). It does NOT read the inode of the
 * entry - you'll have to do that yourself if you want to.
 *
 * The returned buffer_head has ->b_count elevated.  The caller is expected
 * to brelse() it when appropriate.
 */
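/*
 * Reader's note (summarising the loop below, not normative): the linear
 * fallback search pulls directory blocks in with a small readahead window
 * of NAMEI_RA_SIZE (= 8) buffer heads, starting at the block recorded in
 * i_dir_start_lookup so that repeated lookups in the same directory tend
 * to resume where the previous one succeeded.
 */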
static struct buffer_head * ext4_find_entry (struct dentry *dentry,
					struct ext4_dir_entry_2 ** res_dir)
{
	struct super_block * sb;
	struct buffer_head * bh_use[NAMEI_RA_SIZE];
	struct buffer_head * bh, *ret = NULL;
	unsigned long start, block, b;
	int ra_max = 0;		/* Number of bh's in the readahead
				   buffer, bh_use[] */
	int ra_ptr = 0;		/* Current index into readahead
				   buffer */
	int num = 0;
	int nblocks, i, err;
	struct inode *dir = dentry->d_parent->d_inode;
	int namelen;
	const u8 *name;
	unsigned blocksize;

	*res_dir = NULL;
	sb = dir->i_sb;
	blocksize = sb->s_blocksize;
	namelen = dentry->d_name.len;
	name = dentry->d_name.name;
	if (namelen > EXT4_NAME_LEN)
		return NULL;
	if (is_dx(dir)) {
		bh = ext4_dx_find_entry(dentry, res_dir, &err);
		/*
		 * On success, or if the error was file not found,
		 * return.  Otherwise, fall back to doing a search the
		 * old fashioned way.
		 */
		if (bh || (err != ERR_BAD_DX_DIR))
			return bh;
		dxtrace(printk("ext4_find_entry: dx failed, falling back\n"));
	}
	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
	start = EXT4_I(dir)->i_dir_start_lookup;
	if (start >= nblocks)
		start = 0;
	block = start;
restart:
	do {
		/*
		 * We deal with the read-ahead logic here.
		 */
		if (ra_ptr >= ra_max) {
			/* Refill the readahead buffer */
			ra_ptr = 0;
			b = block;
			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
				/*
				 * Terminate if we reach the end of the
				 * directory and must wrap, or if our
				 * search has finished at this block.
				 */
				if (b >= nblocks || (num && block == start)) {
					bh_use[ra_max] = NULL;
					break;
				}
				num++;
				bh = ext4_getblk(NULL, dir, b++, 0, &err);
				bh_use[ra_max] = bh;
				if (bh)
					ll_rw_block(READ_META, 1, &bh);
			}
		}
		if ((bh = bh_use[ra_ptr++]) == NULL)
			goto next;
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			/* read error, skip block & hope for the best */
			ext4_error(sb, __FUNCTION__, "reading directory #%lu "
				   "offset %lu", dir->i_ino, block);
			brelse(bh);
			goto next;
		}
		i = search_dirblock(bh, dir, dentry,
			    block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
		if (i == 1) {
			EXT4_I(dir)->i_dir_start_lookup = block;
			ret = bh;
			goto cleanup_and_exit;
		} else {
			brelse(bh);
			if (i < 0)
				goto cleanup_and_exit;
		}
	next:
		if (++block >= nblocks)
			block = 0;
	} while (block != start);

	/*
	 * If the directory has grown while we were searching, then
	 * search the last part of the directory before giving up.
	 */
	block = nblocks;
	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
	if (block < nblocks) {
		start = 0;
		goto restart;
	}

cleanup_and_exit:
	/* Clean up the read-ahead blocks */
	for (; ra_ptr < ra_max; ra_ptr++)
		brelse (bh_use[ra_ptr]);
	return ret;
}

static struct buffer_head * ext4_dx_find_entry(struct dentry *dentry,
		       struct ext4_dir_entry_2 **res_dir, int *err)
{
	struct super_block * sb;
	struct dx_hash_info	hinfo;
	u32 hash;
	struct dx_frame frames[2], *frame;
	struct ext4_dir_entry_2 *de, *top;
	struct buffer_head *bh;
	unsigned long block;
	int retval;
	int namelen = dentry->d_name.len;
	const u8 *name = dentry->d_name.name;
	struct inode *dir = dentry->d_parent->d_inode;

	sb = dir->i_sb;
	/* NFS may look up ".." - look at dx_root directory block */
	if (namelen > 2 || name[0] != '.'||(name[1] != '.' && name[1] != '\0')){
		if (!(frame = dx_probe(dentry, NULL, &hinfo, frames, err)))
			return NULL;
	} else {
		frame = frames;
		frame->bh = NULL;			/* for dx_release() */
		frame->at = (struct dx_entry *)frames;	/* hack for zero entry*/
		dx_set_block(frame->at, 0);		/* dx_root block is 0 */
	}
	hash = hinfo.hash;
	do {
		block = dx_get_block(frame->at);
		if (!(bh = ext4_bread (NULL,dir, block, 0, err)))
			goto errout;
		de = (struct ext4_dir_entry_2 *) bh->b_data;
		top = (struct ext4_dir_entry_2 *) ((char *) de + sb->s_blocksize -
				       EXT4_DIR_REC_LEN(0));
		for (; de < top; de = ext4_next_entry(de))
			if (ext4_match (namelen, name, de)) {
				if (!ext4_check_dir_entry("ext4_find_entry",
							  dir, de, bh,
				  (block<<EXT4_BLOCK_SIZE_BITS(sb))
					  +((char *)de - bh->b_data))) {
					brelse (bh);
					*err = ERR_BAD_DX_DIR;
					goto errout;
				}
				*res_dir = de;
				dx_release (frames);
				return bh;
			}
		brelse (bh);
		/* Check to see if we should continue to search */
		retval = ext4_htree_next_block(dir, hash, frame,
					       frames, NULL);
		if (retval < 0) {
			ext4_warning(sb, __FUNCTION__,
			     "error reading index page in directory #%lu",
			     dir->i_ino);
			*err = retval;
			goto errout;
		}
	} while (retval == 1);

	*err = -ENOENT;
errout:
	dxtrace(printk("%s not found\n", name));
	dx_release (frames);
	return NULL;
}

static struct dentry *ext4_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
{
	struct inode * inode;
	struct ext4_dir_entry_2 * de;
	struct buffer_head * bh;

	if (dentry->d_name.len > EXT4_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	bh = ext4_find_entry(dentry, &de);
	inode = NULL;
	if (bh) {
		unsigned long ino = le32_to_cpu(de->inode);
		brelse (bh);
		if (!ext4_valid_inum(dir->i_sb, ino)) {
			ext4_error(dir->i_sb, "ext4_lookup",
				   "bad inode number: %lu", ino);
			inode = NULL;
		} else
			inode = iget(dir->i_sb, ino);

		if (!inode)
			return ERR_PTR(-EACCES);

		if (is_bad_inode(inode)) {
			iput(inode);
			return ERR_PTR(-ENOENT);
		}
	}
	return d_splice_alias(inode, dentry);
}


struct dentry *ext4_get_parent(struct dentry *child)
{
	unsigned long ino;
	struct dentry *parent;
	struct inode *inode;
	struct dentry dotdot;
	struct ext4_dir_entry_2 * de;
	struct buffer_head *bh;

	dotdot.d_name.name = "..";
	dotdot.d_name.len = 2;
	dotdot.d_parent = child; /* confusing, isn't it! */
	bh = ext4_find_entry(&dotdot, &de);
	inode = NULL;
	if (!bh)
		return ERR_PTR(-ENOENT);
	ino = le32_to_cpu(de->inode);
	brelse(bh);

	if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
		ext4_error(child->d_inode->i_sb, "ext4_get_parent",
			   "bad inode number: %lu", ino);
		inode = NULL;
	} else
		inode = iget(child->d_inode->i_sb, ino);

	if (!inode)
		return ERR_PTR(-EACCES);

	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	parent = d_alloc_anon(inode);
	if (!parent) {
		iput(inode);
		parent = ERR_PTR(-ENOMEM);
	}
	return parent;
}

#define S_SHIFT 12
static unsigned char ext4_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= EXT4_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= EXT4_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= EXT4_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= EXT4_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= EXT4_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= EXT4_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= EXT4_FT_SYMLINK,
};

static inline void ext4_set_de_type(struct super_block *sb,
				struct ext4_dir_entry_2 *de,
				umode_t mode) {
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE))
		de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
}

/*
 * Move count entries from end of map between two memory locations.
 * Returns pointer to last entry moved.
 */
static struct ext4_dir_entry_2 *
dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
{
	unsigned rec_len = 0;

	while (count--) {
		struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *) (from + map->offs);
		rec_len = EXT4_DIR_REC_LEN(de->name_len);
		memcpy (to, de, rec_len);
		((struct ext4_dir_entry_2 *) to)->rec_len =
				cpu_to_le16(rec_len);
		de->inode = 0;
		map++;
		to += rec_len;
	}
	return (struct ext4_dir_entry_2 *) (to - rec_len);
}

/*
 * Compact each dir entry in the range to the minimal rec_len.
 * Returns pointer to last entry in range.
 */
static struct ext4_dir_entry_2* dx_pack_dirents(char *base, int size)
{
	struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base;
	unsigned rec_len = 0;

	prev = to = de;
	while ((char*)de < base + size) {
		next = (struct ext4_dir_entry_2 *) ((char *) de +
						    le16_to_cpu(de->rec_len));
		if (de->inode && de->name_len) {
			rec_len = EXT4_DIR_REC_LEN(de->name_len);
			if (de > to)
				memmove(to, de, rec_len);
			to->rec_len = cpu_to_le16(rec_len);
			prev = to;
			to = (struct ext4_dir_entry_2 *) (((char *) to) + rec_len);
		}
		de = next;
	}
	return prev;
}

/*
 * Split a full leaf block to make room for a new dir entry.
 * Allocate a new block, and move entries so that they are approx. equally full.
 * Returns pointer to de in block into which the new entry will be inserted.
 */
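/*
 * Rough sketch of what do_split() does (a reader's summary of the code
 * below): build a hash/offset/size map of the full block, sort it by hash,
 * pick a split point that leaves the two halves roughly the same size in
 * bytes, move the upper half into a freshly appended block, compact the
 * lower half in place, and then add an index entry for the new block.  If
 * entries on both sides of the split share the same hash, the new index
 * entry is inserted with the hash's low "continuation" bit set so lookups
 * still visit both blocks.
 */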
static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
			struct buffer_head **bh,struct dx_frame *frame,
			struct dx_hash_info *hinfo, int *error)
{
	unsigned blocksize = dir->i_sb->s_blocksize;
	unsigned count, continued;
	struct buffer_head *bh2;
	u32 newblock;
	u32 hash2;
	struct dx_map_entry *map;
	char *data1 = (*bh)->b_data, *data2;
	unsigned split, move, size, i;
	struct ext4_dir_entry_2 *de = NULL, *de2;
	int err = 0;

	bh2 = ext4_append (handle, dir, &newblock, &err);
	if (!(bh2)) {
		brelse(*bh);
		*bh = NULL;
		goto errout;
	}

	BUFFER_TRACE(*bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, *bh);
	if (err)
		goto journal_error;

	BUFFER_TRACE(frame->bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, frame->bh);
	if (err)
		goto journal_error;

	data2 = bh2->b_data;

	/* create map in the end of data2 block */
	map = (struct dx_map_entry *) (data2 + blocksize);
	count = dx_make_map ((struct ext4_dir_entry_2 *) data1,
			     blocksize, hinfo, map);
	map -= count;
	dx_sort_map (map, count);
	/* Split the existing block in the middle, size-wise */
	size = 0;
	move = 0;
	for (i = count-1; i >= 0; i--) {
		/* is more than half of this entry in 2nd half of the block? */
		if (size + map[i].size/2 > blocksize/2)
			break;
		size += map[i].size;
		move++;
	}
	/* map index at which we will split */
	split = count - move;
	hash2 = map[split].hash;
	continued = hash2 == map[split - 1].hash;
	dxtrace(printk("Split block %i at %x, %i/%i\n",
		dx_get_block(frame->at), hash2, split, count-split));

	/* Fancy dance to stay within two buffers */
	de2 = dx_move_dirents(data1, data2, map + split, count - split);
	de = dx_pack_dirents(data1,blocksize);
	de->rec_len = cpu_to_le16(data1 + blocksize - (char *) de);
	de2->rec_len = cpu_to_le16(data2 + blocksize - (char *) de2);
	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));

	/* Which block gets the new entry? */
	if (hinfo->hash >= hash2)
	{
		swap(*bh, bh2);
		de = de2;
	}
	dx_insert_block (frame, hash2 + continued, newblock);
	err = ext4_journal_dirty_metadata (handle, bh2);
	if (err)
		goto journal_error;
	err = ext4_journal_dirty_metadata (handle, frame->bh);
	if (err)
		goto journal_error;
	brelse (bh2);
	dxtrace(dx_show_index ("frame", frame->entries));
	return de;

journal_error:
	brelse(*bh);
	brelse(bh2);
	*bh = NULL;
	ext4_std_error(dir->i_sb, err);
errout:
	*error = err;
	return NULL;
}

/*
 * Add a new entry into a directory (leaf) block.  If de is non-NULL,
 * it points to a directory entry which is guaranteed to be large
 * enough for the new directory entry.  If de is NULL, then
 * add_dirent_to_buf will attempt to search the directory block for
 * space.  It will return -ENOSPC if no space is available, -EIO if
 * the directory block is corrupted, and -EEXIST if the directory
 * entry already exists.
 *
 * NOTE!  bh is NOT released in the case where ENOSPC is returned.  In
 * all other cases bh is released.
 */
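/*
 * Reader's note on the space accounting used below: each live entry of
 * name length n needs only EXT4_DIR_REC_LEN(n) bytes ("nlen"), but its
 * on-disk rec_len ("rlen") may be larger; the difference is slack that a
 * new entry can be carved out of.  An entry with inode == 0 is free, so
 * its whole rec_len is available.
 */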
static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
			     struct inode *inode, struct ext4_dir_entry_2 *de,
			     struct buffer_head * bh)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	unsigned long offset = 0;
	unsigned short reclen;
	int nlen, rlen, err;
	char *top;

	reclen = EXT4_DIR_REC_LEN(namelen);
	if (!de) {
		de = (struct ext4_dir_entry_2 *)bh->b_data;
		top = bh->b_data + dir->i_sb->s_blocksize - reclen;
		while ((char *) de <= top) {
			if (!ext4_check_dir_entry("ext4_add_entry", dir, de,
						  bh, offset)) {
				brelse (bh);
				return -EIO;
			}
			if (ext4_match (namelen, name, de)) {
				brelse (bh);
				return -EEXIST;
			}
			nlen = EXT4_DIR_REC_LEN(de->name_len);
			rlen = le16_to_cpu(de->rec_len);
			if ((de->inode? rlen - nlen: rlen) >= reclen)
				break;
			de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
			offset += rlen;
		}
		if ((char *) de > top)
			return -ENOSPC;
	}
	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		ext4_std_error(dir->i_sb, err);
		brelse(bh);
		return err;
	}

	/* By now the buffer is marked for journaling */
	nlen = EXT4_DIR_REC_LEN(de->name_len);
	rlen = le16_to_cpu(de->rec_len);
	if (de->inode) {
		struct ext4_dir_entry_2 *de1 = (struct ext4_dir_entry_2 *)((char *)de + nlen);
		de1->rec_len = cpu_to_le16(rlen - nlen);
		de->rec_len = cpu_to_le16(nlen);
		de = de1;
	}
	de->file_type = EXT4_FT_UNKNOWN;
	if (inode) {
		de->inode = cpu_to_le32(inode->i_ino);
		ext4_set_de_type(dir->i_sb, de, inode->i_mode);
	} else
		de->inode = 0;
	de->name_len = namelen;
	memcpy (de->name, name, namelen);
	/*
	 * XXX shouldn't update any times until successful
	 * completion of syscall, but too many callers depend
	 * on this.
	 *
	 * XXX similarly, too many callers depend on
	 * ext4_new_inode() setting the times, but error
	 * recovery deletes the inode, so the worst that can
	 * happen is that the times are slightly out of date
	 * and/or different from the directory change time.
	 */
	dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
	ext4_update_dx_flag(dir);
	dir->i_version++;
	ext4_mark_inode_dirty(handle, dir);
	BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
	err = ext4_journal_dirty_metadata(handle, bh);
	if (err)
		ext4_std_error(dir->i_sb, err);
	brelse(bh);
	return 0;
}

/*
 * This converts a one block unindexed directory to a 3 block indexed
 * directory, and adds the dentry to the indexed directory.
 */
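/*
 * Reader's note: the "3 blocks" are block 0, which becomes the dx_root
 * holding only "." and ".." plus the index, and two leaf blocks; the
 * original entries are moved to block 1 and then split with do_split()
 * so the new name can be added.
 */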
static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
			    struct inode *inode, struct buffer_head *bh)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct buffer_head *bh2;
	struct dx_root *root;
	struct dx_frame frames[2], *frame;
	struct dx_entry *entries;
	struct ext4_dir_entry_2 *de, *de2;
	char *data1, *top;
	unsigned len;
	int retval;
	unsigned blocksize;
	struct dx_hash_info hinfo;
	u32 block;
	struct fake_dirent *fde;

	blocksize = dir->i_sb->s_blocksize;
	dxtrace(printk("Creating index\n"));
	retval = ext4_journal_get_write_access(handle, bh);
	if (retval) {
		ext4_std_error(dir->i_sb, retval);
		brelse(bh);
		return retval;
	}
	root = (struct dx_root *) bh->b_data;

	bh2 = ext4_append (handle, dir, &block, &retval);
	if (!(bh2)) {
		brelse(bh);
		return retval;
	}
	EXT4_I(dir)->i_flags |= EXT4_INDEX_FL;
	data1 = bh2->b_data;

	/* The 0th block becomes the root, move the dirents out */
	fde = &root->dotdot;
	de = (struct ext4_dir_entry_2 *)((char *)fde + le16_to_cpu(fde->rec_len));
	len = ((char *) root) + blocksize - (char *) de;
	memcpy (data1, de, len);
	de = (struct ext4_dir_entry_2 *) data1;
	top = data1 + len;
	while ((char *)(de2=(void*)de+le16_to_cpu(de->rec_len)) < top)
		de = de2;
	de->rec_len = cpu_to_le16(data1 + blocksize - (char *) de);
	/* Initialize the root; the dot dirents already exist */
	de = (struct ext4_dir_entry_2 *) (&root->dotdot);
	de->rec_len = cpu_to_le16(blocksize - EXT4_DIR_REC_LEN(2));
	memset (&root->info, 0, sizeof(root->info));
	root->info.info_length = sizeof(root->info);
	root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
	entries = root->entries;
	dx_set_block (entries, 1);
	dx_set_count (entries, 1);
	dx_set_limit (entries, dx_root_limit(dir, sizeof(root->info)));

	/* Initialize as for dx_probe */
	hinfo.hash_version = root->info.hash_version;
	hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
	ext4fs_dirhash(name, namelen, &hinfo);
	frame = frames;
	frame->entries = entries;
	frame->at = entries;
	frame->bh = bh;
	bh = bh2;
	de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
	dx_release (frames);
	if (!(de))
		return retval;

	return add_dirent_to_buf(handle, dentry, inode, de, bh);
}

/*
 *	ext4_add_entry()
 *
 * adds a file entry to the specified directory, using the same
 * semantics as ext4_find_entry(). It returns 0 on success or a
 * negative error code on failure.
 *
 * NOTE!! The inode part of 'de' is left at 0 - which means you
 * may not sleep between calling this and putting something into
 * the entry, as someone else might have used it while you slept.
1445 */ 1446 static int ext4_add_entry (handle_t *handle, struct dentry *dentry, 1447 struct inode *inode) 1448 { 1449 struct inode *dir = dentry->d_parent->d_inode; 1450 unsigned long offset; 1451 struct buffer_head * bh; 1452 struct ext4_dir_entry_2 *de; 1453 struct super_block * sb; 1454 int retval; 1455 int dx_fallback=0; 1456 unsigned blocksize; 1457 u32 block, blocks; 1458 1459 sb = dir->i_sb; 1460 blocksize = sb->s_blocksize; 1461 if (!dentry->d_name.len) 1462 return -EINVAL; 1463 if (is_dx(dir)) { 1464 retval = ext4_dx_add_entry(handle, dentry, inode); 1465 if (!retval || (retval != ERR_BAD_DX_DIR)) 1466 return retval; 1467 EXT4_I(dir)->i_flags &= ~EXT4_INDEX_FL; 1468 dx_fallback++; 1469 ext4_mark_inode_dirty(handle, dir); 1470 } 1471 blocks = dir->i_size >> sb->s_blocksize_bits; 1472 for (block = 0, offset = 0; block < blocks; block++) { 1473 bh = ext4_bread(handle, dir, block, 0, &retval); 1474 if(!bh) 1475 return retval; 1476 retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh); 1477 if (retval != -ENOSPC) 1478 return retval; 1479 1480 if (blocks == 1 && !dx_fallback && 1481 EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) 1482 return make_indexed_dir(handle, dentry, inode, bh); 1483 brelse(bh); 1484 } 1485 bh = ext4_append(handle, dir, &block, &retval); 1486 if (!bh) 1487 return retval; 1488 de = (struct ext4_dir_entry_2 *) bh->b_data; 1489 de->inode = 0; 1490 de->rec_len = cpu_to_le16(blocksize); 1491 return add_dirent_to_buf(handle, dentry, inode, de, bh); 1492 } 1493 1494 /* 1495 * Returns 0 for success, or a negative error value 1496 */ 1497 static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, 1498 struct inode *inode) 1499 { 1500 struct dx_frame frames[2], *frame; 1501 struct dx_entry *entries, *at; 1502 struct dx_hash_info hinfo; 1503 struct buffer_head * bh; 1504 struct inode *dir = dentry->d_parent->d_inode; 1505 struct super_block * sb = dir->i_sb; 1506 struct ext4_dir_entry_2 *de; 1507 int err; 1508 1509 frame = dx_probe(dentry, NULL, &hinfo, frames, &err); 1510 if (!frame) 1511 return err; 1512 entries = frame->entries; 1513 at = frame->at; 1514 1515 if (!(bh = ext4_bread(handle,dir, dx_get_block(frame->at), 0, &err))) 1516 goto cleanup; 1517 1518 BUFFER_TRACE(bh, "get_write_access"); 1519 err = ext4_journal_get_write_access(handle, bh); 1520 if (err) 1521 goto journal_error; 1522 1523 err = add_dirent_to_buf(handle, dentry, inode, NULL, bh); 1524 if (err != -ENOSPC) { 1525 bh = NULL; 1526 goto cleanup; 1527 } 1528 1529 /* Block full, should compress but for now just split */ 1530 dxtrace(printk("using %u of %u node entries\n", 1531 dx_get_count(entries), dx_get_limit(entries))); 1532 /* Need to split index? 
*/ 1533 if (dx_get_count(entries) == dx_get_limit(entries)) { 1534 u32 newblock; 1535 unsigned icount = dx_get_count(entries); 1536 int levels = frame - frames; 1537 struct dx_entry *entries2; 1538 struct dx_node *node2; 1539 struct buffer_head *bh2; 1540 1541 if (levels && (dx_get_count(frames->entries) == 1542 dx_get_limit(frames->entries))) { 1543 ext4_warning(sb, __FUNCTION__, 1544 "Directory index full!"); 1545 err = -ENOSPC; 1546 goto cleanup; 1547 } 1548 bh2 = ext4_append (handle, dir, &newblock, &err); 1549 if (!(bh2)) 1550 goto cleanup; 1551 node2 = (struct dx_node *)(bh2->b_data); 1552 entries2 = node2->entries; 1553 node2->fake.rec_len = cpu_to_le16(sb->s_blocksize); 1554 node2->fake.inode = 0; 1555 BUFFER_TRACE(frame->bh, "get_write_access"); 1556 err = ext4_journal_get_write_access(handle, frame->bh); 1557 if (err) 1558 goto journal_error; 1559 if (levels) { 1560 unsigned icount1 = icount/2, icount2 = icount - icount1; 1561 unsigned hash2 = dx_get_hash(entries + icount1); 1562 dxtrace(printk("Split index %i/%i\n", icount1, icount2)); 1563 1564 BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */ 1565 err = ext4_journal_get_write_access(handle, 1566 frames[0].bh); 1567 if (err) 1568 goto journal_error; 1569 1570 memcpy ((char *) entries2, (char *) (entries + icount1), 1571 icount2 * sizeof(struct dx_entry)); 1572 dx_set_count (entries, icount1); 1573 dx_set_count (entries2, icount2); 1574 dx_set_limit (entries2, dx_node_limit(dir)); 1575 1576 /* Which index block gets the new entry? */ 1577 if (at - entries >= icount1) { 1578 frame->at = at = at - entries - icount1 + entries2; 1579 frame->entries = entries = entries2; 1580 swap(frame->bh, bh2); 1581 } 1582 dx_insert_block (frames + 0, hash2, newblock); 1583 dxtrace(dx_show_index ("node", frames[1].entries)); 1584 dxtrace(dx_show_index ("node", 1585 ((struct dx_node *) bh2->b_data)->entries)); 1586 err = ext4_journal_dirty_metadata(handle, bh2); 1587 if (err) 1588 goto journal_error; 1589 brelse (bh2); 1590 } else { 1591 dxtrace(printk("Creating second level index...\n")); 1592 memcpy((char *) entries2, (char *) entries, 1593 icount * sizeof(struct dx_entry)); 1594 dx_set_limit(entries2, dx_node_limit(dir)); 1595 1596 /* Set up root */ 1597 dx_set_count(entries, 1); 1598 dx_set_block(entries + 0, newblock); 1599 ((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1; 1600 1601 /* Add new access path frame */ 1602 frame = frames + 1; 1603 frame->at = at = at - entries + entries2; 1604 frame->entries = entries = entries2; 1605 frame->bh = bh2; 1606 err = ext4_journal_get_write_access(handle, 1607 frame->bh); 1608 if (err) 1609 goto journal_error; 1610 } 1611 ext4_journal_dirty_metadata(handle, frames[0].bh); 1612 } 1613 de = do_split(handle, dir, &bh, frame, &hinfo, &err); 1614 if (!de) 1615 goto cleanup; 1616 err = add_dirent_to_buf(handle, dentry, inode, de, bh); 1617 bh = NULL; 1618 goto cleanup; 1619 1620 journal_error: 1621 ext4_std_error(dir->i_sb, err); 1622 cleanup: 1623 if (bh) 1624 brelse(bh); 1625 dx_release(frames); 1626 return err; 1627 } 1628 1629 /* 1630 * ext4_delete_entry deletes a directory entry by merging it with the 1631 * previous entry 1632 */ 1633 static int ext4_delete_entry (handle_t *handle, 1634 struct inode * dir, 1635 struct ext4_dir_entry_2 * de_del, 1636 struct buffer_head * bh) 1637 { 1638 struct ext4_dir_entry_2 * de, * pde; 1639 int i; 1640 1641 i = 0; 1642 pde = NULL; 1643 de = (struct ext4_dir_entry_2 *) bh->b_data; 1644 while (i < bh->b_size) { 1645 if 
(!ext4_check_dir_entry("ext4_delete_entry", dir, de, bh, i)) 1646 return -EIO; 1647 if (de == de_del) { 1648 BUFFER_TRACE(bh, "get_write_access"); 1649 ext4_journal_get_write_access(handle, bh); 1650 if (pde) 1651 pde->rec_len = 1652 cpu_to_le16(le16_to_cpu(pde->rec_len) + 1653 le16_to_cpu(de->rec_len)); 1654 else 1655 de->inode = 0; 1656 dir->i_version++; 1657 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); 1658 ext4_journal_dirty_metadata(handle, bh); 1659 return 0; 1660 } 1661 i += le16_to_cpu(de->rec_len); 1662 pde = de; 1663 de = (struct ext4_dir_entry_2 *) 1664 ((char *) de + le16_to_cpu(de->rec_len)); 1665 } 1666 return -ENOENT; 1667 } 1668 1669 /* 1670 * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2, 1671 * since this indicates that nlinks count was previously 1. 1672 */ 1673 static void ext4_inc_count(handle_t *handle, struct inode *inode) 1674 { 1675 inc_nlink(inode); 1676 if (is_dx(inode) && inode->i_nlink > 1) { 1677 /* limit is 16-bit i_links_count */ 1678 if (inode->i_nlink >= EXT4_LINK_MAX || inode->i_nlink == 2) { 1679 inode->i_nlink = 1; 1680 EXT4_SET_RO_COMPAT_FEATURE(inode->i_sb, 1681 EXT4_FEATURE_RO_COMPAT_DIR_NLINK); 1682 } 1683 } 1684 } 1685 1686 /* 1687 * If a directory had nlink == 1, then we should let it be 1. This indicates 1688 * directory has >EXT4_LINK_MAX subdirs. 1689 */ 1690 static void ext4_dec_count(handle_t *handle, struct inode *inode) 1691 { 1692 drop_nlink(inode); 1693 if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0) 1694 inc_nlink(inode); 1695 } 1696 1697 1698 static int ext4_add_nondir(handle_t *handle, 1699 struct dentry *dentry, struct inode *inode) 1700 { 1701 int err = ext4_add_entry(handle, dentry, inode); 1702 if (!err) { 1703 ext4_mark_inode_dirty(handle, inode); 1704 d_instantiate(dentry, inode); 1705 return 0; 1706 } 1707 drop_nlink(inode); 1708 iput(inode); 1709 return err; 1710 } 1711 1712 /* 1713 * By the time this is called, we already have created 1714 * the directory cache entry for the new file, but it 1715 * is so far negative - it has no inode. 1716 * 1717 * If the create succeeds, we fill in the inode information 1718 * with d_instantiate(). 
1719 */ 1720 static int ext4_create (struct inode * dir, struct dentry * dentry, int mode, 1721 struct nameidata *nd) 1722 { 1723 handle_t *handle; 1724 struct inode * inode; 1725 int err, retries = 0; 1726 1727 retry: 1728 handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + 1729 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + 1730 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb)); 1731 if (IS_ERR(handle)) 1732 return PTR_ERR(handle); 1733 1734 if (IS_DIRSYNC(dir)) 1735 handle->h_sync = 1; 1736 1737 inode = ext4_new_inode (handle, dir, mode); 1738 err = PTR_ERR(inode); 1739 if (!IS_ERR(inode)) { 1740 inode->i_op = &ext4_file_inode_operations; 1741 inode->i_fop = &ext4_file_operations; 1742 ext4_set_aops(inode); 1743 err = ext4_add_nondir(handle, dentry, inode); 1744 } 1745 ext4_journal_stop(handle); 1746 if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) 1747 goto retry; 1748 return err; 1749 } 1750 1751 static int ext4_mknod (struct inode * dir, struct dentry *dentry, 1752 int mode, dev_t rdev) 1753 { 1754 handle_t *handle; 1755 struct inode *inode; 1756 int err, retries = 0; 1757 1758 if (!new_valid_dev(rdev)) 1759 return -EINVAL; 1760 1761 retry: 1762 handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + 1763 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + 1764 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb)); 1765 if (IS_ERR(handle)) 1766 return PTR_ERR(handle); 1767 1768 if (IS_DIRSYNC(dir)) 1769 handle->h_sync = 1; 1770 1771 inode = ext4_new_inode (handle, dir, mode); 1772 err = PTR_ERR(inode); 1773 if (!IS_ERR(inode)) { 1774 init_special_inode(inode, inode->i_mode, rdev); 1775 #ifdef CONFIG_EXT4DEV_FS_XATTR 1776 inode->i_op = &ext4_special_inode_operations; 1777 #endif 1778 err = ext4_add_nondir(handle, dentry, inode); 1779 } 1780 ext4_journal_stop(handle); 1781 if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) 1782 goto retry; 1783 return err; 1784 } 1785 1786 static int ext4_mkdir(struct inode * dir, struct dentry * dentry, int mode) 1787 { 1788 handle_t *handle; 1789 struct inode * inode; 1790 struct buffer_head * dir_block; 1791 struct ext4_dir_entry_2 * de; 1792 int err, retries = 0; 1793 1794 if (EXT4_DIR_LINK_MAX(dir)) 1795 return -EMLINK; 1796 1797 retry: 1798 handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + 1799 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + 1800 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb)); 1801 if (IS_ERR(handle)) 1802 return PTR_ERR(handle); 1803 1804 if (IS_DIRSYNC(dir)) 1805 handle->h_sync = 1; 1806 1807 inode = ext4_new_inode (handle, dir, S_IFDIR | mode); 1808 err = PTR_ERR(inode); 1809 if (IS_ERR(inode)) 1810 goto out_stop; 1811 1812 inode->i_op = &ext4_dir_inode_operations; 1813 inode->i_fop = &ext4_dir_operations; 1814 inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize; 1815 dir_block = ext4_bread (handle, inode, 0, 1, &err); 1816 if (!dir_block) { 1817 ext4_dec_count(handle, inode); /* is this nlink == 0? 
*/ 1818 ext4_mark_inode_dirty(handle, inode); 1819 iput (inode); 1820 goto out_stop; 1821 } 1822 BUFFER_TRACE(dir_block, "get_write_access"); 1823 ext4_journal_get_write_access(handle, dir_block); 1824 de = (struct ext4_dir_entry_2 *) dir_block->b_data; 1825 de->inode = cpu_to_le32(inode->i_ino); 1826 de->name_len = 1; 1827 de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de->name_len)); 1828 strcpy (de->name, "."); 1829 ext4_set_de_type(dir->i_sb, de, S_IFDIR); 1830 de = (struct ext4_dir_entry_2 *) 1831 ((char *) de + le16_to_cpu(de->rec_len)); 1832 de->inode = cpu_to_le32(dir->i_ino); 1833 de->rec_len = cpu_to_le16(inode->i_sb->s_blocksize-EXT4_DIR_REC_LEN(1)); 1834 de->name_len = 2; 1835 strcpy (de->name, ".."); 1836 ext4_set_de_type(dir->i_sb, de, S_IFDIR); 1837 inode->i_nlink = 2; 1838 BUFFER_TRACE(dir_block, "call ext4_journal_dirty_metadata"); 1839 ext4_journal_dirty_metadata(handle, dir_block); 1840 brelse (dir_block); 1841 ext4_mark_inode_dirty(handle, inode); 1842 err = ext4_add_entry (handle, dentry, inode); 1843 if (err) { 1844 inode->i_nlink = 0; 1845 ext4_mark_inode_dirty(handle, inode); 1846 iput (inode); 1847 goto out_stop; 1848 } 1849 ext4_inc_count(handle, dir); 1850 ext4_update_dx_flag(dir); 1851 ext4_mark_inode_dirty(handle, dir); 1852 d_instantiate(dentry, inode); 1853 out_stop: 1854 ext4_journal_stop(handle); 1855 if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) 1856 goto retry; 1857 return err; 1858 } 1859 1860 /* 1861 * routine to check that the specified directory is empty (for rmdir) 1862 */ 1863 static int empty_dir (struct inode * inode) 1864 { 1865 unsigned long offset; 1866 struct buffer_head * bh; 1867 struct ext4_dir_entry_2 * de, * de1; 1868 struct super_block * sb; 1869 int err = 0; 1870 1871 sb = inode->i_sb; 1872 if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) || 1873 !(bh = ext4_bread (NULL, inode, 0, 0, &err))) { 1874 if (err) 1875 ext4_error(inode->i_sb, __FUNCTION__, 1876 "error %d reading directory #%lu offset 0", 1877 err, inode->i_ino); 1878 else 1879 ext4_warning(inode->i_sb, __FUNCTION__, 1880 "bad directory (dir #%lu) - no data block", 1881 inode->i_ino); 1882 return 1; 1883 } 1884 de = (struct ext4_dir_entry_2 *) bh->b_data; 1885 de1 = (struct ext4_dir_entry_2 *) 1886 ((char *) de + le16_to_cpu(de->rec_len)); 1887 if (le32_to_cpu(de->inode) != inode->i_ino || 1888 !le32_to_cpu(de1->inode) || 1889 strcmp (".", de->name) || 1890 strcmp ("..", de1->name)) { 1891 ext4_warning (inode->i_sb, "empty_dir", 1892 "bad directory (dir #%lu) - no `.' 
        if (le32_to_cpu(de->inode) != inode->i_ino ||
                        !le32_to_cpu(de1->inode) ||
                        strcmp (".", de->name) ||
                        strcmp ("..", de1->name)) {
                ext4_warning (inode->i_sb, "empty_dir",
                              "bad directory (dir #%lu) - no `.' or `..'",
                              inode->i_ino);
                brelse (bh);
                return 1;
        }
        offset = le16_to_cpu(de->rec_len) + le16_to_cpu(de1->rec_len);
        de = (struct ext4_dir_entry_2 *)
                        ((char *) de1 + le16_to_cpu(de1->rec_len));
        while (offset < inode->i_size) {
                if (!bh ||
                    (void *) de >= (void *) (bh->b_data + sb->s_blocksize)) {
                        err = 0;
                        brelse (bh);
                        bh = ext4_bread (NULL, inode,
                                offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err);
                        if (!bh) {
                                if (err)
                                        ext4_error(sb, __FUNCTION__,
                                                   "error %d reading directory"
                                                   " #%lu offset %lu",
                                                   err, inode->i_ino, offset);
                                offset += sb->s_blocksize;
                                continue;
                        }
                        de = (struct ext4_dir_entry_2 *) bh->b_data;
                }
                if (!ext4_check_dir_entry("empty_dir", inode, de, bh, offset)) {
                        de = (struct ext4_dir_entry_2 *)(bh->b_data +
                                                         sb->s_blocksize);
                        offset = (offset | (sb->s_blocksize - 1)) + 1;
                        continue;
                }
                if (le32_to_cpu(de->inode)) {
                        brelse (bh);
                        return 0;
                }
                offset += le16_to_cpu(de->rec_len);
                de = (struct ext4_dir_entry_2 *)
                                ((char *) de + le16_to_cpu(de->rec_len));
        }
        brelse (bh);
        return 1;
}

/* ext4_orphan_add() links an unlinked or truncated inode into a list of
 * such inodes, starting at the superblock, in case we crash before the
 * file is closed/deleted, or in case the inode truncate spans multiple
 * transactions and the last transaction is not recovered after a crash.
 *
 * At filesystem recovery time, we walk this list deleting unlinked
 * inodes and truncating linked inodes in ext4_orphan_cleanup().
 */
int ext4_orphan_add(handle_t *handle, struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        struct ext4_iloc iloc;
        int err = 0, rc;

        lock_super(sb);
        if (!list_empty(&EXT4_I(inode)->i_orphan))
                goto out_unlock;

        /* Orphan handling is only valid for files with data blocks
         * being truncated, or files being unlinked. */

        /* @@@ FIXME: Observation from aviro:
         * I think I can trigger J_ASSERT in ext4_orphan_add().  We block
         * here (on lock_super()), so we can race with ext4_link(), which
         * might bump ->i_nlink.  Take, say, a character device: not a
         * regular file, not a directory, not a symlink, and ->i_nlink > 0.
         */
        J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
                S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);

        BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
        err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
        if (err)
                goto out_unlock;

        err = ext4_reserve_inode_write(handle, inode, &iloc);
        if (err)
                goto out_unlock;

        /* Insert this inode at the head of the on-disk orphan list... */
        NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
        EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
        err = ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
        rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
        if (!err)
                err = rc;

        /* Only add to the head of the in-memory list if all the
         * previous operations succeeded.  If the orphan_add is going to
         * fail (possibly taking the journal offline), we can't risk
         * leaving the inode on the orphan list: stray orphan-list
         * entries can cause panics at unmount time.
         *
         * This is safe: on error we're going to ignore the orphan list
         * anyway on the next recovery. */
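        /*
         * Note: s_last_orphan in the superblock and NEXT_ORPHAN() in each
         * orphaned inode form the on-disk singly-linked list updated above;
         * sbi->s_orphan is its in-memory mirror.
         */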
        if (!err)
                list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);

        jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
        jbd_debug(4, "orphan inode %lu will point to %d\n",
                        inode->i_ino, NEXT_ORPHAN(inode));
out_unlock:
        unlock_super(sb);
        ext4_std_error(inode->i_sb, err);
        return err;
}

/*
 * ext4_orphan_del() removes an unlinked or truncated inode from the list
 * of such inodes stored on disk, because it is finally being cleaned up.
 */
int ext4_orphan_del(handle_t *handle, struct inode *inode)
{
        struct list_head *prev;
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_sb_info *sbi;
        unsigned long ino_next;
        struct ext4_iloc iloc;
        int err = 0;

        lock_super(inode->i_sb);
        if (list_empty(&ei->i_orphan)) {
                unlock_super(inode->i_sb);
                return 0;
        }

        ino_next = NEXT_ORPHAN(inode);
        prev = ei->i_orphan.prev;
        sbi = EXT4_SB(inode->i_sb);

        jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);

        list_del_init(&ei->i_orphan);

        /* If we're on an error path, we may not have a valid
         * transaction handle with which to update the orphan list on
         * disk, but we still need to remove the inode from the linked
         * list in memory. */
        if (!handle)
                goto out;

        err = ext4_reserve_inode_write(handle, inode, &iloc);
        if (err)
                goto out_err;

        if (prev == &sbi->s_orphan) {
                jbd_debug(4, "superblock will point to %lu\n", ino_next);
                BUFFER_TRACE(sbi->s_sbh, "get_write_access");
                err = ext4_journal_get_write_access(handle, sbi->s_sbh);
                if (err)
                        goto out_brelse;
                sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
                err = ext4_journal_dirty_metadata(handle, sbi->s_sbh);
        } else {
                struct ext4_iloc iloc2;
                struct inode *i_prev =
                        &list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode;

                jbd_debug(4, "orphan inode %lu will point to %lu\n",
                          i_prev->i_ino, ino_next);
                err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
                if (err)
                        goto out_brelse;
                NEXT_ORPHAN(i_prev) = ino_next;
                err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2);
        }
        if (err)
                goto out_brelse;
        NEXT_ORPHAN(inode) = 0;
        err = ext4_mark_iloc_dirty(handle, inode, &iloc);

out_err:
        ext4_std_error(inode->i_sb, err);
out:
        unlock_super(inode->i_sb);
        return err;

out_brelse:
        brelse(iloc.bh);
        goto out_err;
}

static int ext4_rmdir (struct inode * dir, struct dentry *dentry)
{
        int retval;
        struct inode * inode;
        struct buffer_head * bh;
        struct ext4_dir_entry_2 * de;
        handle_t *handle;

        /* Initialize quotas beforehand so that any eventual quota writes
         * go in a separate transaction */
        DQUOT_INIT(dentry->d_inode);
        handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        retval = -ENOENT;
        bh = ext4_find_entry (dentry, &de);
        if (!bh)
                goto end_rmdir;

        if (IS_DIRSYNC(dir))
                handle->h_sync = 1;

        inode = dentry->d_inode;

        retval = -EIO;
        if (le32_to_cpu(de->inode) != inode->i_ino)
                goto end_rmdir;

        retval = -ENOTEMPTY;
        if (!empty_dir (inode))
                goto end_rmdir;

        retval = ext4_delete_entry(handle, dir, de, bh);
        if (retval)
                goto end_rmdir;
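        /* empty_dir() said the directory is down to "." and "..", so its
         * link count should only reflect "." and the entry in its parent;
         * warn if the count looks inconsistent before we drop it. */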
        if (!EXT4_DIR_LINK_EMPTY(inode))
                ext4_warning (inode->i_sb, "ext4_rmdir",
                              "empty directory has too many links (%d)",
                              inode->i_nlink);
        inode->i_version++;
        clear_nlink(inode);
        /* There's no need to set i_disksize: the fact that i_nlink is
         * zero will ensure that the right thing happens during any
         * recovery. */
        inode->i_size = 0;
        ext4_orphan_add(handle, inode);
        inode->i_ctime = dir->i_ctime = dir->i_mtime = ext4_current_time(inode);
        ext4_mark_inode_dirty(handle, inode);
        ext4_dec_count(handle, dir);
        ext4_update_dx_flag(dir);
        ext4_mark_inode_dirty(handle, dir);

end_rmdir:
        ext4_journal_stop(handle);
        brelse (bh);
        return retval;
}

static int ext4_unlink(struct inode * dir, struct dentry *dentry)
{
        int retval;
        struct inode * inode;
        struct buffer_head * bh;
        struct ext4_dir_entry_2 * de;
        handle_t *handle;

        /* Initialize quotas beforehand so that any eventual quota writes
         * go in a separate transaction */
        DQUOT_INIT(dentry->d_inode);
        handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        if (IS_DIRSYNC(dir))
                handle->h_sync = 1;

        retval = -ENOENT;
        bh = ext4_find_entry (dentry, &de);
        if (!bh)
                goto end_unlink;

        inode = dentry->d_inode;

        retval = -EIO;
        if (le32_to_cpu(de->inode) != inode->i_ino)
                goto end_unlink;

        if (!inode->i_nlink) {
                ext4_warning (inode->i_sb, "ext4_unlink",
                              "Deleting nonexistent file (%lu), %d",
                              inode->i_ino, inode->i_nlink);
                inode->i_nlink = 1;
        }
        retval = ext4_delete_entry(handle, dir, de, bh);
        if (retval)
                goto end_unlink;
        dir->i_ctime = dir->i_mtime = ext4_current_time(dir);
        ext4_update_dx_flag(dir);
        ext4_mark_inode_dirty(handle, dir);
        ext4_dec_count(handle, inode);
        if (!inode->i_nlink)
                ext4_orphan_add(handle, inode);
        inode->i_ctime = ext4_current_time(inode);
        ext4_mark_inode_dirty(handle, inode);
        retval = 0;

end_unlink:
        ext4_journal_stop(handle);
        brelse (bh);
        return retval;
}

static int ext4_symlink (struct inode * dir,
                         struct dentry *dentry, const char * symname)
{
        handle_t *handle;
        struct inode * inode;
        int l, err, retries = 0;

        l = strlen(symname)+1;
        if (l > dir->i_sb->s_blocksize)
                return -ENAMETOOLONG;

retry:
        handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
                                        EXT4_INDEX_EXTRA_TRANS_BLOCKS + 5 +
                                        2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        if (IS_DIRSYNC(dir))
                handle->h_sync = 1;

        inode = ext4_new_inode (handle, dir, S_IFLNK|S_IRWXUGO);
        err = PTR_ERR(inode);
        if (IS_ERR(inode))
                goto out_stop;

        if (l > sizeof (EXT4_I(inode)->i_data)) {
                inode->i_op = &ext4_symlink_inode_operations;
                ext4_set_aops(inode);
                /*
                 * page_symlink() calls into ext4_prepare/commit_write.
                 * We have a transaction open.  All is sweetness.  It also
                 * sets i_size in generic_commit_write().
                 */
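                /* Clearing __GFP_FS in the mapping's gfp mask keeps the page
                 * allocation below from recursing into filesystem reclaim
                 * while we are holding the journal handle. */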
                err = __page_symlink(inode, symname, l,
                                mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
                if (err) {
                        ext4_dec_count(handle, inode);
                        ext4_mark_inode_dirty(handle, inode);
                        iput (inode);
                        goto out_stop;
                }
        } else {
                inode->i_op = &ext4_fast_symlink_inode_operations;
                memcpy((char*)&EXT4_I(inode)->i_data,symname,l);
                inode->i_size = l-1;
        }
        EXT4_I(inode)->i_disksize = inode->i_size;
        err = ext4_add_nondir(handle, dentry, inode);
out_stop:
        ext4_journal_stop(handle);
        if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
                goto retry;
        return err;
}

static int ext4_link (struct dentry * old_dentry,
                      struct inode * dir, struct dentry *dentry)
{
        handle_t *handle;
        struct inode *inode = old_dentry->d_inode;
        int err, retries = 0;

        if (EXT4_DIR_LINK_MAX(inode))
                return -EMLINK;

        /*
         * Return -ENOENT if we've raced with unlink and i_nlink is 0.  Doing
         * otherwise has the potential to corrupt the orphan inode list.
         */
        if (inode->i_nlink == 0)
                return -ENOENT;

retry:
        handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
                                        EXT4_INDEX_EXTRA_TRANS_BLOCKS);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        if (IS_DIRSYNC(dir))
                handle->h_sync = 1;

        inode->i_ctime = ext4_current_time(inode);
        ext4_inc_count(handle, inode);
        atomic_inc(&inode->i_count);

        err = ext4_add_nondir(handle, dentry, inode);
        ext4_journal_stop(handle);
        if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
                goto retry;
        return err;
}

#define PARENT_INO(buffer) \
        ((struct ext4_dir_entry_2 *) ((char *) buffer + \
        le16_to_cpu(((struct ext4_dir_entry_2 *) buffer)->rec_len)))->inode

/*
 * Anybody can rename anything with this: the permission checks are left to the
 * higher-level routines.
 */
static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry,
                        struct inode * new_dir, struct dentry *new_dentry)
{
        handle_t *handle;
        struct inode * old_inode, * new_inode;
        struct buffer_head * old_bh, * new_bh, * dir_bh;
        struct ext4_dir_entry_2 * old_de, * new_de;
        int retval;

        old_bh = new_bh = dir_bh = NULL;

        /* Initialize quotas beforehand so that any eventual quota writes
         * go in a separate transaction */
        if (new_dentry->d_inode)
                DQUOT_INIT(new_dentry->d_inode);
        handle = ext4_journal_start(old_dir, 2 *
                                        EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
                                        EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
                handle->h_sync = 1;

        old_bh = ext4_find_entry (old_dentry, &old_de);
        /*
         * Checking the inode number here is _not_ about possible IO errors:
         * somebody might rmdir the source, keep it as the pwd of some
         * process, and merrily kill the link to whatever was created under
         * the same name.  Goodbye sticky bit ;-<
         */
        old_inode = old_dentry->d_inode;
        retval = -ENOENT;
        if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino)
                goto end_rename;

        new_inode = new_dentry->d_inode;
        new_bh = ext4_find_entry (new_dentry, &new_de);
        if (new_bh) {
                if (!new_inode) {
                        brelse (new_bh);
                        new_bh = NULL;
                }
        }
        if (S_ISDIR(old_inode->i_mode)) {
                if (new_inode) {
                        retval = -ENOTEMPTY;
                        if (!empty_dir (new_inode))
                                goto end_rename;
                }
                retval = -EIO;
                dir_bh = ext4_bread (handle, old_inode, 0, 0, &retval);
                if (!dir_bh)
                        goto end_rename;
                if (le32_to_cpu(PARENT_INO(dir_bh->b_data)) != old_dir->i_ino)
                        goto end_rename;
                retval = -EMLINK;
                if (!new_inode && new_dir != old_dir &&
                                new_dir->i_nlink >= EXT4_LINK_MAX)
                        goto end_rename;
        }
        if (!new_bh) {
                retval = ext4_add_entry (handle, new_dentry, old_inode);
                if (retval)
                        goto end_rename;
        } else {
                BUFFER_TRACE(new_bh, "get write access");
                ext4_journal_get_write_access(handle, new_bh);
                new_de->inode = cpu_to_le32(old_inode->i_ino);
                if (EXT4_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
                                              EXT4_FEATURE_INCOMPAT_FILETYPE))
                        new_de->file_type = old_de->file_type;
                new_dir->i_version++;
                BUFFER_TRACE(new_bh, "call ext4_journal_dirty_metadata");
                ext4_journal_dirty_metadata(handle, new_bh);
                brelse(new_bh);
                new_bh = NULL;
        }

        /*
         * Like most other Unix systems, set the ctime for inodes on a
         * rename.
         */
        old_inode->i_ctime = ext4_current_time(old_inode);
        ext4_mark_inode_dirty(handle, old_inode);

        /*
         * ok, that's it
         */
        if (le32_to_cpu(old_de->inode) != old_inode->i_ino ||
            old_de->name_len != old_dentry->d_name.len ||
            strncmp(old_de->name, old_dentry->d_name.name, old_de->name_len) ||
            (retval = ext4_delete_entry(handle, old_dir,
                                        old_de, old_bh)) == -ENOENT) {
                /* old_de could have moved from under us during htree split, so
                 * make sure that we are deleting the right entry.  We might
                 * also be pointing to a stale entry in the unused part of
                 * old_bh so just checking inum and the name isn't enough. */
                struct buffer_head *old_bh2;
                struct ext4_dir_entry_2 *old_de2;

                old_bh2 = ext4_find_entry(old_dentry, &old_de2);
                if (old_bh2) {
                        retval = ext4_delete_entry(handle, old_dir,
                                                   old_de2, old_bh2);
                        brelse(old_bh2);
                }
        }
        if (retval) {
                ext4_warning(old_dir->i_sb, "ext4_rename",
                                "Deleting old file (%lu), %d, error=%d",
                                old_dir->i_ino, old_dir->i_nlink, retval);
        }

        if (new_inode) {
                ext4_dec_count(handle, new_inode);
                new_inode->i_ctime = ext4_current_time(new_inode);
        }
        old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir);
        ext4_update_dx_flag(old_dir);
        if (dir_bh) {
                BUFFER_TRACE(dir_bh, "get_write_access");
                ext4_journal_get_write_access(handle, dir_bh);
                PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
                BUFFER_TRACE(dir_bh, "call ext4_journal_dirty_metadata");
                ext4_journal_dirty_metadata(handle, dir_bh);
                ext4_dec_count(handle, old_dir);
                if (new_inode) {
                        /* checked empty_dir above, can't have another parent,
                         * ext4_dec_count() won't work for many-linked dirs */
                        new_inode->i_nlink = 0;
                } else {
                        ext4_inc_count(handle, new_dir);
                        ext4_update_dx_flag(new_dir);
                        ext4_mark_inode_dirty(handle, new_dir);
                }
        }
        ext4_mark_inode_dirty(handle, old_dir);
        if (new_inode) {
                ext4_mark_inode_dirty(handle, new_inode);
                if (!new_inode->i_nlink)
                        ext4_orphan_add(handle, new_inode);
        }
        retval = 0;

end_rename:
        brelse (dir_bh);
        brelse (old_bh);
        brelse (new_bh);
        ext4_journal_stop(handle);
        return retval;
}

/*
 * directories can handle most operations...
 */
const struct inode_operations ext4_dir_inode_operations = {
        .create         = ext4_create,
        .lookup         = ext4_lookup,
        .link           = ext4_link,
        .unlink         = ext4_unlink,
        .symlink        = ext4_symlink,
        .mkdir          = ext4_mkdir,
        .rmdir          = ext4_rmdir,
        .mknod          = ext4_mknod,
        .rename         = ext4_rename,
        .setattr        = ext4_setattr,
#ifdef CONFIG_EXT4DEV_FS_XATTR
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext4_listxattr,
        .removexattr    = generic_removexattr,
#endif
        .permission     = ext4_permission,
};

const struct inode_operations ext4_special_inode_operations = {
        .setattr        = ext4_setattr,
#ifdef CONFIG_EXT4DEV_FS_XATTR
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext4_listxattr,
        .removexattr    = generic_removexattr,
#endif
        .permission     = ext4_permission,
};