xref: /openbmc/linux/fs/ext4/namei.c (revision 97da55fc)
1 /*
2  *  linux/fs/ext4/namei.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  from
10  *
11  *  linux/fs/minix/namei.c
12  *
13  *  Copyright (C) 1991, 1992  Linus Torvalds
14  *
15  *  Big-endian to little-endian byte-swapping/bitmaps by
16  *        David S. Miller (davem@caip.rutgers.edu), 1995
17  *  Directory entry file type support and forward compatibility hooks
18  *	for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
19  *  Hash Tree Directory indexing (c)
20  *	Daniel Phillips, 2001
21  *  Hash Tree Directory indexing porting
22  *	Christopher Li, 2002
23  *  Hash Tree Directory indexing cleanup
24  *	Theodore Ts'o, 2002
25  */
26 
27 #include <linux/fs.h>
28 #include <linux/pagemap.h>
29 #include <linux/jbd2.h>
30 #include <linux/time.h>
31 #include <linux/fcntl.h>
32 #include <linux/stat.h>
33 #include <linux/string.h>
34 #include <linux/quotaops.h>
35 #include <linux/buffer_head.h>
36 #include <linux/bio.h>
37 #include "ext4.h"
38 #include "ext4_jbd2.h"
39 
40 #include "xattr.h"
41 #include "acl.h"
42 
43 #include <trace/events/ext4.h>
44 /*
45  * define how far ahead to read directories while searching them.
46  */
47 #define NAMEI_RA_CHUNKS  2
48 #define NAMEI_RA_BLOCKS  4
49 #define NAMEI_RA_SIZE	     (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
50 
51 static struct buffer_head *ext4_append(handle_t *handle,
52 					struct inode *inode,
53 					ext4_lblk_t *block)
54 {
55 	struct buffer_head *bh;
56 	int err = 0;
57 
58 	if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
59 		     ((inode->i_size >> 10) >=
60 		      EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
61 		return ERR_PTR(-ENOSPC);
62 
63 	*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
64 
65 	bh = ext4_bread(handle, inode, *block, 1, &err);
66 	if (!bh)
67 		return ERR_PTR(err);
68 	inode->i_size += inode->i_sb->s_blocksize;
69 	EXT4_I(inode)->i_disksize = inode->i_size;
70 	err = ext4_journal_get_write_access(handle, bh);
71 	if (err) {
72 		brelse(bh);
73 		ext4_std_error(inode->i_sb, err);
74 		return ERR_PTR(err);
75 	}
76 	return bh;
77 }
78 
79 static int ext4_dx_csum_verify(struct inode *inode,
80 			       struct ext4_dir_entry *dirent);
81 
/* What the caller of ext4_read_dirblock() expects the block to contain. */
typedef enum {
	EITHER, INDEX, DIRENT
} dirblock_type_t;
85 
86 #define ext4_read_dirblock(inode, block, type) \
87 	__ext4_read_dirblock((inode), (block), (type), __LINE__)
88 
89 static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
90 					      ext4_lblk_t block,
91 					      dirblock_type_t type,
92 					      unsigned int line)
93 {
94 	struct buffer_head *bh;
95 	struct ext4_dir_entry *dirent;
96 	int err = 0, is_dx_block = 0;
97 
98 	bh = ext4_bread(NULL, inode, block, 0, &err);
99 	if (!bh) {
100 		if (err == 0) {
101 			ext4_error_inode(inode, __func__, line, block,
102 					       "Directory hole found");
103 			return ERR_PTR(-EIO);
104 		}
105 		__ext4_warning(inode->i_sb, __func__, line,
106 			       "error reading directory block "
107 			       "(ino %lu, block %lu)", inode->i_ino,
108 			       (unsigned long) block);
109 		return ERR_PTR(err);
110 	}
111 	dirent = (struct ext4_dir_entry *) bh->b_data;
112 	/* Determine whether or not we have an index block */
113 	if (is_dx(inode)) {
114 		if (block == 0)
115 			is_dx_block = 1;
116 		else if (ext4_rec_len_from_disk(dirent->rec_len,
117 						inode->i_sb->s_blocksize) ==
118 			 inode->i_sb->s_blocksize)
119 			is_dx_block = 1;
120 	}
121 	if (!is_dx_block && type == INDEX) {
122 		ext4_error_inode(inode, __func__, line, block,
123 		       "directory leaf block found instead of index block");
124 		return ERR_PTR(-EIO);
125 	}
126 	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
127 					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) ||
128 	    buffer_verified(bh))
129 		return bh;
130 
131 	/*
132 	 * An empty leaf block can get mistaken for a index block; for
133 	 * this reason, we can only check the index checksum when the
134 	 * caller is sure it should be an index block.
135 	 */
136 	if (is_dx_block && type == INDEX) {
137 		if (ext4_dx_csum_verify(inode, dirent))
138 			set_buffer_verified(bh);
139 		else {
140 			ext4_error_inode(inode, __func__, line, block,
141 				"Directory index failed checksum");
142 			brelse(bh);
143 			return ERR_PTR(-EIO);
144 		}
145 	}
146 	if (!is_dx_block) {
147 		if (ext4_dirent_csum_verify(inode, dirent))
148 			set_buffer_verified(bh);
149 		else {
150 			ext4_error_inode(inode, __func__, line, block,
151 				"Directory block failed checksum");
152 			brelse(bh);
153 			return ERR_PTR(-EIO);
154 		}
155 	}
156 	return bh;
157 }
158 
159 #ifndef assert
160 #define assert(test) J_ASSERT(test)
161 #endif
162 
163 #ifdef DX_DEBUG
164 #define dxtrace(command) command
165 #else
166 #define dxtrace(command)
167 #endif
168 
/*
 * Mirrors the fixed header of an on-disk directory entry; dx root and
 * node blocks use it so their headers parse as ordinary dirents.
 */
struct fake_dirent
{
	__le32 inode;
	__le16 rec_len;
	u8 name_len;
	u8 file_type;
};
176 
/*
 * Overlays the first dx_entry of an index block: how many entries are
 * in use (count) and how many would fit (limit).
 */
struct dx_countlimit
{
	__le16 limit;
	__le16 count;
};
182 
/* One htree index entry: hash value -> logical block of the subtree/leaf. */
struct dx_entry
{
	__le32 hash;
	__le32 block;
};
188 
189 /*
190  * dx_root_info is laid out so that if it should somehow get overlaid by a
191  * dirent the two low bits of the hash version will be zero.  Therefore, the
192  * hash version mod 4 should never be 0.  Sincerely, the paranoia department.
193  */
194 
/*
 * On-disk layout of an htree root (logical block 0 of the directory).
 * The "." and ".." entries are laid out as real dirents (via
 * fake_dirent), followed by the index header and the entry array.
 */
struct dx_root
{
	struct fake_dirent dot;
	char dot_name[4];
	struct fake_dirent dotdot;
	char dotdot_name[4];
	struct dx_root_info
	{
		__le32 reserved_zero;
		u8 hash_version;	/* DX_HASH_* algorithm id */
		u8 info_length; /* 8 */
		u8 indirect_levels;	/* tree depth below the root */
		u8 unused_flags;
	}
	info;
	struct dx_entry	entries[0];
};
212 
/*
 * On-disk layout of an interior index block.  The fake dirent's rec_len
 * spans the whole block, which is how these blocks are recognized.
 */
struct dx_node
{
	struct fake_dirent fake;
	struct dx_entry	entries[0];
};
218 
219 
/* In-memory cursor for one level of an htree walk. */
struct dx_frame
{
	struct buffer_head *bh;		/* the index block at this level */
	struct dx_entry *entries;	/* first entry in that block */
	struct dx_entry *at;		/* current position within entries */
};
226 
/*
 * Temporary per-dirent record built by dx_make_map() for block splits.
 * offs is stored in 4-byte units so it fits in a u16 (see dx_make_map()).
 */
struct dx_map_entry
{
	u32 hash;
	u16 offs;
	u16 size;
};
233 
234 /*
235  * This goes at the end of each htree block.
236  */
struct dx_tail {
	u32 dt_reserved;	/* unused; pads the tail to 8 bytes */
	__le32 dt_checksum;	/* crc32c(uuid+inum+dirblock) */
};
241 
242 static inline ext4_lblk_t dx_get_block(struct dx_entry *entry);
243 static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value);
244 static inline unsigned dx_get_hash(struct dx_entry *entry);
245 static void dx_set_hash(struct dx_entry *entry, unsigned value);
246 static unsigned dx_get_count(struct dx_entry *entries);
247 static unsigned dx_get_limit(struct dx_entry *entries);
248 static void dx_set_count(struct dx_entry *entries, unsigned value);
249 static void dx_set_limit(struct dx_entry *entries, unsigned value);
250 static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
251 static unsigned dx_node_limit(struct inode *dir);
252 static struct dx_frame *dx_probe(const struct qstr *d_name,
253 				 struct inode *dir,
254 				 struct dx_hash_info *hinfo,
255 				 struct dx_frame *frame,
256 				 int *err);
257 static void dx_release(struct dx_frame *frames);
258 static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
259 		       struct dx_hash_info *hinfo, struct dx_map_entry map[]);
260 static void dx_sort_map(struct dx_map_entry *map, unsigned count);
261 static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to,
262 		struct dx_map_entry *offsets, int count, unsigned blocksize);
263 static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize);
264 static void dx_insert_block(struct dx_frame *frame,
265 					u32 hash, ext4_lblk_t block);
266 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
267 				 struct dx_frame *frame,
268 				 struct dx_frame *frames,
269 				 __u32 *start_hash);
270 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
271 		const struct qstr *d_name,
272 		struct ext4_dir_entry_2 **res_dir,
273 		int *err);
274 static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
275 			     struct inode *inode);
276 
277 /* checksumming functions */
278 void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
279 			    unsigned int blocksize)
280 {
281 	memset(t, 0, sizeof(struct ext4_dir_entry_tail));
282 	t->det_rec_len = ext4_rec_len_to_disk(
283 			sizeof(struct ext4_dir_entry_tail), blocksize);
284 	t->det_reserved_ft = EXT4_FT_DIR_CSUM;
285 }
286 
287 /* Walk through a dirent block to find a checksum "dirent" at the tail */
static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
						   struct ext4_dir_entry *de)
{
	struct ext4_dir_entry_tail *t;

#ifdef PARANOID
	struct ext4_dir_entry *d, *top;

	/* Walk the whole entry chain and insist it ends exactly at the
	 * tail; bail out if the chain is broken or stops early. */
	d = de;
	top = (struct ext4_dir_entry *)(((void *)de) +
		(EXT4_BLOCK_SIZE(inode->i_sb) -
		sizeof(struct ext4_dir_entry_tail)));
	while (d < top && d->rec_len)
		d = (struct ext4_dir_entry *)(((void *)d) +
		    le16_to_cpu(d->rec_len));

	if (d != top)
		return NULL;

	t = (struct ext4_dir_entry_tail *)d;
#else
	t = EXT4_DIRENT_TAIL(de, EXT4_BLOCK_SIZE(inode->i_sb));
#endif

	/* The tail must look like an unused dirent of the reserved
	 * EXT4_FT_DIR_CSUM type whose rec_len covers only itself. */
	if (t->det_reserved_zero1 ||
	    le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
	    t->det_reserved_zero2 ||
	    t->det_reserved_ft != EXT4_FT_DIR_CSUM)
		return NULL;

	return t;
}
320 
321 static __le32 ext4_dirent_csum(struct inode *inode,
322 			       struct ext4_dir_entry *dirent, int size)
323 {
324 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
325 	struct ext4_inode_info *ei = EXT4_I(inode);
326 	__u32 csum;
327 
328 	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
329 	return cpu_to_le32(csum);
330 }
331 
332 static void warn_no_space_for_csum(struct inode *inode)
333 {
334 	ext4_warning(inode->i_sb, "no space in directory inode %lu leaf for "
335 		     "checksum.  Please run e2fsck -D.", inode->i_ino);
336 }
337 
338 int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
339 {
340 	struct ext4_dir_entry_tail *t;
341 
342 	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
343 					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
344 		return 1;
345 
346 	t = get_dirent_tail(inode, dirent);
347 	if (!t) {
348 		warn_no_space_for_csum(inode);
349 		return 0;
350 	}
351 
352 	if (t->det_checksum != ext4_dirent_csum(inode, dirent,
353 						(void *)t - (void *)dirent))
354 		return 0;
355 
356 	return 1;
357 }
358 
359 static void ext4_dirent_csum_set(struct inode *inode,
360 				 struct ext4_dir_entry *dirent)
361 {
362 	struct ext4_dir_entry_tail *t;
363 
364 	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
365 					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
366 		return;
367 
368 	t = get_dirent_tail(inode, dirent);
369 	if (!t) {
370 		warn_no_space_for_csum(inode);
371 		return;
372 	}
373 
374 	t->det_checksum = ext4_dirent_csum(inode, dirent,
375 					   (void *)t - (void *)dirent);
376 }
377 
378 int ext4_handle_dirty_dirent_node(handle_t *handle,
379 				  struct inode *inode,
380 				  struct buffer_head *bh)
381 {
382 	ext4_dirent_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
383 	return ext4_handle_dirty_metadata(handle, inode, bh);
384 }
385 
/*
 * Locate the count/limit header of an htree index block, detecting
 * whether @dirent starts a dx_node or a dx_root:
 *  - a node's fake dirent has rec_len == blocksize, so the header sits
 *    right after the 8-byte fake dirent;
 *  - a root starts with a 12-byte "." entry, then a ".." entry covering
 *    the rest of the block, then dx_root_info; its entries begin at
 *    byte offset 32.
 * Returns NULL if the block matches neither layout.  On success,
 * *offset (if non-NULL) receives the byte offset of the header.
 */
static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
					       struct ext4_dir_entry *dirent,
					       int *offset)
{
	struct ext4_dir_entry *dp;
	struct dx_root_info *root;
	int count_offset;

	if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
		count_offset = 8;
	else if (le16_to_cpu(dirent->rec_len) == 12) {
		dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
		if (le16_to_cpu(dp->rec_len) !=
		    EXT4_BLOCK_SIZE(inode->i_sb) - 12)
			return NULL;
		root = (struct dx_root_info *)(((void *)dp + 12));
		if (root->reserved_zero ||
		    root->info_length != sizeof(struct dx_root_info))
			return NULL;
		count_offset = 32;
	} else
		return NULL;

	if (offset)
		*offset = count_offset;
	return (struct dx_countlimit *)(((void *)dirent) + count_offset);
}
413 
/*
 * Compute the checksum of an index block: the header plus the @count
 * in-use dx_entries, followed by the dx_tail with its dt_checksum field
 * temporarily zeroed (it is restored before returning).
 */
static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
			   int count_offset, int count, struct dx_tail *t)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	__u32 csum, old_csum;
	int size;

	size = count_offset + (count * sizeof(struct dx_entry));
	old_csum = t->dt_checksum;
	t->dt_checksum = 0;
	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
	csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail));
	t->dt_checksum = old_csum;

	return cpu_to_le32(csum);
}
431 
/*
 * Verify the checksum of an htree index block.  Returns 1 when valid --
 * deliberately also when checksums are disabled, the layout is not
 * recognized, or there is no room for a tail (after warning) -- and 0
 * only when a stored checksum is present and does not match.
 */
static int ext4_dx_csum_verify(struct inode *inode,
			       struct ext4_dir_entry *dirent)
{
	struct dx_countlimit *c;
	struct dx_tail *t;
	int count_offset, limit, count;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	c = get_dx_countlimit(inode, dirent, &count_offset);
	if (!c) {
		EXT4_ERROR_INODE(inode, "dir seems corrupt?  Run e2fsck -D.");
		return 1;
	}
	limit = le16_to_cpu(c->limit);
	count = le16_to_cpu(c->count);
	/* The tail lives after limit entries; make sure it fits the block */
	if (count_offset + (limit * sizeof(struct dx_entry)) >
	    EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
		warn_no_space_for_csum(inode);
		return 1;
	}
	t = (struct dx_tail *)(((struct dx_entry *)c) + limit);

	if (t->dt_checksum != ext4_dx_csum(inode, dirent, count_offset,
					    count, t))
		return 0;
	return 1;
}
462 
/*
 * Recompute and store the checksum of an htree index block.  Silently
 * does nothing if checksums are disabled; warns and skips if the block
 * layout is unrecognized or leaves no room for a dx_tail.
 */
static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
{
	struct dx_countlimit *c;
	struct dx_tail *t;
	int count_offset, limit, count;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	c = get_dx_countlimit(inode, dirent, &count_offset);
	if (!c) {
		EXT4_ERROR_INODE(inode, "dir seems corrupt?  Run e2fsck -D.");
		return;
	}
	limit = le16_to_cpu(c->limit);
	count = le16_to_cpu(c->count);
	/* The tail lives after limit entries; make sure it fits the block */
	if (count_offset + (limit * sizeof(struct dx_entry)) >
	    EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
		warn_no_space_for_csum(inode);
		return;
	}
	t = (struct dx_tail *)(((struct dx_entry *)c) + limit);

	t->dt_checksum = ext4_dx_csum(inode, dirent, count_offset, count, t);
}
489 
490 static inline int ext4_handle_dirty_dx_node(handle_t *handle,
491 					    struct inode *inode,
492 					    struct buffer_head *bh)
493 {
494 	ext4_dx_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
495 	return ext4_handle_dirty_metadata(handle, inode, bh);
496 }
497 
498 /*
499  * p is at least 6 bytes before the end of page
500  */
501 static inline struct ext4_dir_entry_2 *
502 ext4_next_entry(struct ext4_dir_entry_2 *p, unsigned long blocksize)
503 {
504 	return (struct ext4_dir_entry_2 *)((char *)p +
505 		ext4_rec_len_from_disk(p->rec_len, blocksize));
506 }
507 
508 /*
509  * Future: use high four bits of block for coalesce-on-delete flags
510  * Mask them off for now.
511  */
512 
/* Low 24 bits hold the block number; high bits are reserved flag space. */
static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
{
	return le32_to_cpu(entry->block) & 0x00ffffff;
}
517 
/* Store a block number; the reserved flag bits are written as zero. */
static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value)
{
	entry->block = cpu_to_le32(value);
}
522 
/* Read an entry's hash value, converting from little-endian. */
static inline unsigned dx_get_hash(struct dx_entry *entry)
{
	return le32_to_cpu(entry->hash);
}
527 
/* Store an entry's hash value in little-endian form. */
static inline void dx_set_hash(struct dx_entry *entry, unsigned value)
{
	entry->hash = cpu_to_le32(value);
}
532 
/* count lives in entries[0] via the dx_countlimit overlay. */
static inline unsigned dx_get_count(struct dx_entry *entries)
{
	return le16_to_cpu(((struct dx_countlimit *) entries)->count);
}
537 
/* limit lives in entries[0] via the dx_countlimit overlay. */
static inline unsigned dx_get_limit(struct dx_entry *entries)
{
	return le16_to_cpu(((struct dx_countlimit *) entries)->limit);
}
542 
/* Store the in-use entry count in the entries[0] overlay. */
static inline void dx_set_count(struct dx_entry *entries, unsigned value)
{
	((struct dx_countlimit *) entries)->count = cpu_to_le16(value);
}
547 
/* Store the capacity (max entries) in the entries[0] overlay. */
static inline void dx_set_limit(struct dx_entry *entries, unsigned value)
{
	((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
}
552 
/*
 * Number of dx_entries that fit in the root block after the "." and
 * ".." dirents and the dx_root_info header, minus the checksum tail
 * when metadata checksums are enabled.
 */
static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
{
	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
		EXT4_DIR_REC_LEN(2) - infosize;

	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		entry_space -= sizeof(struct dx_tail);
	return entry_space / sizeof(struct dx_entry);
}
563 
/*
 * Number of dx_entries that fit in an interior node block after its
 * fake dirent header, minus the checksum tail when metadata checksums
 * are enabled.
 */
static inline unsigned dx_node_limit(struct inode *dir)
{
	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);

	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		entry_space -= sizeof(struct dx_tail);
	return entry_space / sizeof(struct dx_entry);
}
573 
574 /*
575  * Debug
576  */
577 #ifdef DX_DEBUG
/* Dump the hash->block pairs of one index block (DX_DEBUG builds only). */
static void dx_show_index(char * label, struct dx_entry *entries)
{
	int i, n = dx_get_count (entries);
	printk(KERN_DEBUG "%s index ", label);
	for (i = 0; i < n; i++) {
		printk("%x->%lu ", i ? dx_get_hash(entries + i) :
				0, (unsigned long)dx_get_block(entries + i));
	}
	printk("\n");
}
588 
/* Aggregate totals returned by the DX_DEBUG dump helpers. */
struct stats
{
	unsigned names;		/* live entries seen */
	unsigned space;		/* minimal bytes those entries require */
	unsigned bcount;	/* leaf blocks visited */
};
595 
/* Dump (and tally) the live entries of one leaf block (DX_DEBUG only). */
static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de,
				 int size, int show_names)
{
	unsigned names = 0, space = 0;
	char *base = (char *) de;
	struct dx_hash_info h = *hinfo;	/* local copy: dirhash mutates it */

	printk("names: ");
	while ((char *) de < base + size)
	{
		if (de->inode)
		{
			if (show_names)
			{
				int len = de->name_len;
				char *name = de->name;
				while (len--) printk("%c", *name++);
				ext4fs_dirhash(de->name, de->name_len, &h);
				printk(":%x.%u ", h.hash,
				       (unsigned) ((char *) de - base));
			}
			space += EXT4_DIR_REC_LEN(de->name_len);
			names++;
		}
		de = ext4_next_entry(de, size);
	}
	printk("(%i)\n", names);
	return (struct stats) { names, space, 1 };
}
625 
/*
 * Recursively dump an index subtree and report fullness statistics
 * (DX_DEBUG builds only).  levels is the remaining interior depth.
 */
struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
			     struct dx_entry *entries, int levels)
{
	unsigned blocksize = dir->i_sb->s_blocksize;
	unsigned count = dx_get_count(entries), names = 0, space = 0, i;
	unsigned bcount = 0;
	struct buffer_head *bh;
	int err;
	printk("%i indexed blocks...\n", count);
	for (i = 0; i < count; i++, entries++)
	{
		ext4_lblk_t block = dx_get_block(entries);
		ext4_lblk_t hash  = i ? dx_get_hash(entries): 0;
		u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
		struct stats stats;
		printk("%s%3u:%03u hash %8x/%8x ",levels?"":"   ", i, block, hash, range);
		if (!(bh = ext4_bread (NULL,dir, block, 0,&err))) continue;
		stats = levels?
		   dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
		   dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0);
		names += stats.names;
		space += stats.space;
		bcount += stats.bcount;
		brelse(bh);
	}
	if (bcount)
		printk(KERN_DEBUG "%snames %u, fullness %u (%u%%)\n",
		       levels ? "" : "   ", names, space/bcount,
		       (space/bcount)*100/blocksize);
	return (struct stats) { names, space, bcount};
}
657 #endif /* DX_DEBUG */
658 
659 /*
660  * Probe for a directory leaf block to search.
661  *
662  * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
663  * error in the directory index, and the caller should fall back to
664  * searching the directory normally.  The callers of dx_probe **MUST**
665  * check for this error code, and make sure it never gets reflected
666  * back to userspace.
667  */
static struct dx_frame *
dx_probe(const struct qstr *d_name, struct inode *dir,
	 struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
{
	unsigned count, indirect;
	struct dx_entry *at, *entries, *p, *q, *m;
	struct dx_root *root;
	struct buffer_head *bh;
	struct dx_frame *frame = frame_in;
	u32 hash;

	frame->bh = NULL;
	bh = ext4_read_dirblock(dir, 0, INDEX);
	if (IS_ERR(bh)) {
		*err = PTR_ERR(bh);
		goto fail;
	}
	root = (struct dx_root *) bh->b_data;
	if (root->info.hash_version != DX_HASH_TEA &&
	    root->info.hash_version != DX_HASH_HALF_MD4 &&
	    root->info.hash_version != DX_HASH_LEGACY) {
		ext4_warning(dir->i_sb, "Unrecognised inode hash code %d",
			     root->info.hash_version);
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
		goto fail;
	}
	hinfo->hash_version = root->info.hash_version;
	/* TEA/legacy hashes have signed and unsigned variants; pick the
	 * one the superblock says this filesystem uses. */
	if (hinfo->hash_version <= DX_HASH_TEA)
		hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
	hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
	/* d_name == NULL means the caller pre-loaded hinfo->hash */
	if (d_name)
		ext4fs_dirhash(d_name->name, d_name->len, hinfo);
	hash = hinfo->hash;

	if (root->info.unused_flags & 1) {
		ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x",
			     root->info.unused_flags);
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
		goto fail;
	}

	/* Only root + one interior level is implemented */
	if ((indirect = root->info.indirect_levels) > 1) {
		ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
			     root->info.indirect_levels);
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
		goto fail;
	}

	entries = (struct dx_entry *) (((char *)&root->info) +
				       root->info.info_length);

	if (dx_get_limit(entries) != dx_root_limit(dir,
						   root->info.info_length)) {
		ext4_warning(dir->i_sb, "dx entry: limit != root limit");
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
		goto fail;
	}

	dxtrace(printk("Look up %x", hash));
	while (1)
	{
		count = dx_get_count(entries);
		if (!count || count > dx_get_limit(entries)) {
			ext4_warning(dir->i_sb,
				     "dx entry: no count or count > limit");
			brelse(bh);
			*err = ERR_BAD_DX_DIR;
			goto fail2;
		}

		/*
		 * Binary search for the last entry whose hash is <= the
		 * target.  entries[0] carries count/limit in place of a
		 * hash, so probing starts at entries + 1.
		 */
		p = entries + 1;
		q = entries + count - 1;
		while (p <= q)
		{
			m = p + (q - p)/2;
			dxtrace(printk("."));
			if (dx_get_hash(m) > hash)
				q = m - 1;
			else
				p = m + 1;
		}

		if (0) // linear search cross check
		{
			unsigned n = count - 1;
			at = entries;
			while (n--)
			{
				dxtrace(printk(","));
				if (dx_get_hash(++at) > hash)
				{
					at--;
					break;
				}
			}
			assert (at == p - 1);
		}

		at = p - 1;
		dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
		frame->bh = bh;
		frame->entries = entries;
		frame->at = at;
		if (!indirect--) return frame;
		bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
		if (IS_ERR(bh)) {
			*err = PTR_ERR(bh);
			goto fail2;
		}
		entries = ((struct dx_node *) bh->b_data)->entries;

		if (dx_get_limit(entries) != dx_node_limit (dir)) {
			ext4_warning(dir->i_sb,
				     "dx entry: limit != node limit");
			brelse(bh);
			*err = ERR_BAD_DX_DIR;
			goto fail2;
		}
		frame++;
		frame->bh = NULL;
	}
fail2:
	/* Release every frame filled in before the failure */
	while (frame >= frame_in) {
		brelse(frame->bh);
		frame--;
	}
fail:
	if (*err == ERR_BAD_DX_DIR)
		ext4_warning(dir->i_sb,
			     "Corrupt dir inode %lu, running e2fsck is "
			     "recommended.", dir->i_ino);
	return NULL;
}
805 
806 static void dx_release (struct dx_frame *frames)
807 {
808 	if (frames[0].bh == NULL)
809 		return;
810 
811 	if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
812 		brelse(frames[1].bh);
813 	brelse(frames[0].bh);
814 }
815 
816 /*
817  * This function increments the frame pointer to search the next leaf
818  * block, and reads in the necessary intervening nodes if the search
819  * should be necessary.  Whether or not the search is necessary is
820  * controlled by the hash parameter.  If the hash value is even, then
821  * the search is only continued if the next block starts with that
822  * hash value.  This is used if we are searching for a specific file.
823  *
824  * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
825  *
826  * This function returns 1 if the caller should continue to search,
827  * or 0 if it should not.  If there is an error reading one of the
 * index blocks, it will return a negative error code.
829  *
830  * If start_hash is non-null, it will be filled in with the starting
831  * hash of the next page.
832  */
static int ext4_htree_next_block(struct inode *dir, __u32 hash,
				 struct dx_frame *frame,
				 struct dx_frame *frames,
				 __u32 *start_hash)
{
	struct dx_frame *p;
	struct buffer_head *bh;
	int num_frames = 0;
	__u32 bhash;

	p = frame;
	/*
	 * Find the next leaf page by incrementing the frame pointer.
	 * If we run out of entries in the interior node, loop around and
	 * increment pointer in the parent node.  When we break out of
	 * this loop, num_frames indicates the number of interior
	 * nodes need to be read.
	 */
	while (1) {
		if (++(p->at) < p->entries + dx_get_count(p->entries))
			break;
		if (p == frames)
			return 0;	/* root exhausted: no more leaves */
		num_frames++;
		p--;
	}

	/*
	 * If the hash is 1, then continue only if the next page has a
	 * continuation hash of any value.  This is used for readdir
	 * handling.  Otherwise, check to see if the hash matches the
	 * desired continuation hash.  If it doesn't, return since
	 * there's no point to read in the successive index pages.
	 */
	bhash = dx_get_hash(p->at);
	if (start_hash)
		*start_hash = bhash;
	if ((hash & 1) == 0) {
		if ((bhash & ~1) != hash)
			return 0;
	}
	/*
	 * If the hash is HASH_NB_ALWAYS, we always go to the next
	 * block so no check is necessary
	 */
	while (num_frames--) {
		/* Re-descend, replacing each stale interior frame */
		bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		p++;
		brelse(p->bh);
		p->bh = bh;
		p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
	}
	return 1;
}
889 
890 
891 /*
892  * This function fills a red-black tree with information from a
 *  directory block.  It returns the number of directory entries loaded
894  * into the tree.  If there is an error it is returned in err.
895  */
static int htree_dirblock_to_tree(struct file *dir_file,
				  struct inode *dir, ext4_lblk_t block,
				  struct dx_hash_info *hinfo,
				  __u32 start_hash, __u32 start_minor_hash)
{
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de, *top;
	int err = 0, count = 0;

	dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
							(unsigned long)block));
	bh = ext4_read_dirblock(dir, block, DIRENT);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	de = (struct ext4_dir_entry_2 *) bh->b_data;
	/* Last offset at which a valid entry could still begin */
	top = (struct ext4_dir_entry_2 *) ((char *) de +
					   dir->i_sb->s_blocksize -
					   EXT4_DIR_REC_LEN(0));
	for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
		if (ext4_check_dir_entry(dir, NULL, de, bh,
				bh->b_data, bh->b_size,
				(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
					 + ((char *)de - bh->b_data))) {
			/* On error, skip the f_pos to the next block. */
			dir_file->f_pos = (dir_file->f_pos |
					(dir->i_sb->s_blocksize - 1)) + 1;
			brelse(bh);
			return count;
		}
		ext4fs_dirhash(de->name, de->name_len, hinfo);
		/* Skip entries hashing before the requested start point */
		if ((hinfo->hash < start_hash) ||
		    ((hinfo->hash == start_hash) &&
		     (hinfo->minor_hash < start_minor_hash)))
			continue;
		/* Skip deleted entries */
		if (de->inode == 0)
			continue;
		if ((err = ext4_htree_store_dirent(dir_file,
				   hinfo->hash, hinfo->minor_hash, de)) != 0) {
			brelse(bh);
			return err;
		}
		count++;
	}
	brelse(bh);
	return count;
}
943 
944 
945 /*
946  * This function fills a red-black tree with information from a
947  * directory.  We start scanning the directory in hash order, starting
948  * at start_hash and start_minor_hash.
949  *
950  * This function returns the number of entries inserted into the tree,
951  * or a negative error code.
952  */
int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
			 __u32 start_minor_hash, __u32 *next_hash)
{
	struct dx_hash_info hinfo;
	struct ext4_dir_entry_2 *de;
	struct dx_frame frames[2], *frame;
	struct inode *dir;
	ext4_lblk_t block;
	int count = 0;
	int ret, err;
	__u32 hashval;

	dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n",
		       start_hash, start_minor_hash));
	dir = file_inode(dir_file);
	/* Non-indexed directory: hash and load the single block 0 */
	if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) {
		hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
		if (hinfo.hash_version <= DX_HASH_TEA)
			hinfo.hash_version +=
				EXT4_SB(dir->i_sb)->s_hash_unsigned;
		hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
		count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
					       start_hash, start_minor_hash);
		*next_hash = ~0;
		return count;
	}
	hinfo.hash = start_hash;
	hinfo.minor_hash = 0;
	frame = dx_probe(NULL, dir, &hinfo, frames, &err);
	if (!frame)
		return err;

	/* Add '.' and '..' from the htree header */
	if (!start_hash && !start_minor_hash) {
		/* '.' is stored with the reserved hash value 0 */
		de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
		if ((err = ext4_htree_store_dirent(dir_file, 0, 0, de)) != 0)
			goto errout;
		count++;
	}
	if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) {
		/* '..' is the second entry, stored with reserved hash 2 */
		de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
		de = ext4_next_entry(de, dir->i_sb->s_blocksize);
		if ((err = ext4_htree_store_dirent(dir_file, 2, 0, de)) != 0)
			goto errout;
		count++;
	}

	while (1) {
		block = dx_get_block(frame->at);
		ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
					     start_hash, start_minor_hash);
		if (ret < 0) {
			err = ret;
			goto errout;
		}
		count += ret;
		hashval = ~0;
		ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
					    frame, frames, &hashval);
		*next_hash = hashval;
		if (ret < 0) {
			err = ret;
			goto errout;
		}
		/*
		 * Stop if:  (a) there are no more entries, or
		 * (b) we have inserted at least one entry and the
		 * next hash value is not a continuation
		 */
		if ((ret == 0) ||
		    (count && ((hashval & 1) == 0)))
			break;
	}
	dx_release(frames);
	dxtrace(printk(KERN_DEBUG "Fill tree: returned %d entries, "
		       "next hash: %x\n", count, *next_hash));
	return count;
errout:
	dx_release(frames);
	return (err);
}
1034 
/*
 * Search one directory block for d_name: thin wrapper around
 * search_dir() covering the buffer's whole data area.
 */
static inline int search_dirblock(struct buffer_head *bh,
				  struct inode *dir,
				  const struct qstr *d_name,
				  unsigned int offset,
				  struct ext4_dir_entry_2 **res_dir)
{
	return search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir,
			  d_name, offset, res_dir);
}
1044 
1045 /*
1046  * Directory block splitting, compacting
1047  */
1048 
1049 /*
1050  * Create map of hash values, offsets, and sizes, stored at end of block.
1051  * Returns number of entries mapped.
1052  */
1053 static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
1054 		       struct dx_hash_info *hinfo,
1055 		       struct dx_map_entry *map_tail)
1056 {
1057 	int count = 0;
1058 	char *base = (char *) de;
1059 	struct dx_hash_info h = *hinfo;
1060 
1061 	while ((char *) de < base + blocksize) {
1062 		if (de->name_len && de->inode) {
1063 			ext4fs_dirhash(de->name, de->name_len, &h);
1064 			map_tail--;
1065 			map_tail->hash = h.hash;
1066 			map_tail->offs = ((char *) de - base)>>2;
1067 			map_tail->size = le16_to_cpu(de->rec_len);
1068 			count++;
1069 			cond_resched();
1070 		}
1071 		/* XXX: do we need to check rec_len == 0 case? -Chris */
1072 		de = ext4_next_entry(de, blocksize);
1073 	}
1074 	return count;
1075 }
1076 
1077 /* Sort map by hash value */
1078 static void dx_sort_map (struct dx_map_entry *map, unsigned count)
1079 {
1080 	struct dx_map_entry *p, *q, *top = map + count - 1;
1081 	int more;
1082 	/* Combsort until bubble sort doesn't suck */
1083 	while (count > 2) {
1084 		count = count*10/13;
1085 		if (count - 9 < 2) /* 9, 10 -> 11 */
1086 			count = 11;
1087 		for (p = top, q = p - count; q >= map; p--, q--)
1088 			if (p->hash < q->hash)
1089 				swap(*p, *q);
1090 	}
1091 	/* Garden variety bubble sort */
1092 	do {
1093 		more = 0;
1094 		q = top;
1095 		while (q-- > map) {
1096 			if (q[1].hash >= q[0].hash)
1097 				continue;
1098 			swap(*(q+1), *q);
1099 			more = 1;
1100 		}
1101 	} while(more);
1102 }
1103 
1104 static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
1105 {
1106 	struct dx_entry *entries = frame->entries;
1107 	struct dx_entry *old = frame->at, *new = old + 1;
1108 	int count = dx_get_count(entries);
1109 
1110 	assert(count < dx_get_limit(entries));
1111 	assert(old < entries + count);
1112 	memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
1113 	dx_set_hash(new, hash);
1114 	dx_set_block(new, block);
1115 	dx_set_count(entries, count + 1);
1116 }
1117 
1118 /*
1119  * NOTE! unlike strncmp, ext4_match returns 1 for success, 0 for failure.
1120  *
1121  * `len <= EXT4_NAME_LEN' is guaranteed by caller.
1122  * `de != NULL' is guaranteed by caller.
1123  */
1124 static inline int ext4_match (int len, const char * const name,
1125 			      struct ext4_dir_entry_2 * de)
1126 {
1127 	if (len != de->name_len)
1128 		return 0;
1129 	if (!de->inode)
1130 		return 0;
1131 	return !memcmp(name, de->name, len);
1132 }
1133 
/*
 * Scan a buffer of directory entries for the name in @d_name.
 *
 * Returns 0 if not found, -1 on failure (corrupt entry), and 1 on
 * success; on success *res_dir points at the matching entry inside
 * @search_buf.  @offset is the byte position of @search_buf within the
 * directory, used for error reporting by ext4_check_dir_entry().
 */
int search_dir(struct buffer_head *bh,
	       char *search_buf,
	       int buf_size,
	       struct inode *dir,
	       const struct qstr *d_name,
	       unsigned int offset,
	       struct ext4_dir_entry_2 **res_dir)
{
	struct ext4_dir_entry_2 * de;
	char * dlimit;
	int de_len;
	const char *name = d_name->name;
	int namelen = d_name->len;

	de = (struct ext4_dir_entry_2 *)search_buf;
	dlimit = search_buf + buf_size;
	while ((char *) de < dlimit) {
		/* this code is executed quadratically often */
		/* do minimal checking `by hand' */

		/* bounds-check before ext4_match reads de->name */
		if ((char *) de + namelen <= dlimit &&
		    ext4_match (namelen, name, de)) {
			/* found a match - just to be sure, do a full check */
			if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
						 bh->b_size, offset))
				return -1;
			*res_dir = de;
			return 1;
		}
		/* prevent looping on a bad block */
		de_len = ext4_rec_len_from_disk(de->rec_len,
						dir->i_sb->s_blocksize);
		if (de_len <= 0)
			return -1;
		offset += de_len;
		de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
	}
	return 0;
}
1176 
1177 static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
1178 			       struct ext4_dir_entry *de)
1179 {
1180 	struct super_block *sb = dir->i_sb;
1181 
1182 	if (!is_dx(dir))
1183 		return 0;
1184 	if (block == 0)
1185 		return 1;
1186 	if (de->inode == 0 &&
1187 	    ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) ==
1188 			sb->s_blocksize)
1189 		return 1;
1190 	return 0;
1191 }
1192 
1193 /*
1194  *	ext4_find_entry()
1195  *
1196  * finds an entry in the specified directory with the wanted name. It
1197  * returns the cache buffer in which the entry was found, and the entry
1198  * itself (as a parameter - res_dir). It does NOT read the inode of the
1199  * entry - you'll have to do that yourself if you want to.
1200  *
1201  * The returned buffer_head has ->b_count elevated.  The caller is expected
1202  * to brelse() it when appropriate.
1203  */
static struct buffer_head * ext4_find_entry (struct inode *dir,
					const struct qstr *d_name,
					struct ext4_dir_entry_2 **res_dir,
					int *inlined)
{
	struct super_block *sb;
	struct buffer_head *bh_use[NAMEI_RA_SIZE];
	struct buffer_head *bh, *ret = NULL;
	ext4_lblk_t start, block, b;
	const u8 *name = d_name->name;
	int ra_max = 0;		/* Number of bh's in the readahead
				   buffer, bh_use[] */
	int ra_ptr = 0;		/* Current index into readahead
				   buffer */
	int num = 0;
	ext4_lblk_t  nblocks;
	int i, err;
	int namelen;

	*res_dir = NULL;
	sb = dir->i_sb;
	namelen = d_name->len;
	if (namelen > EXT4_NAME_LEN)
		return NULL;

	/* Try the inline-data path first (dirents stored in the inode) */
	if (ext4_has_inline_data(dir)) {
		int has_inline_data = 1;
		ret = ext4_find_inline_entry(dir, d_name, res_dir,
					     &has_inline_data);
		if (has_inline_data) {
			if (inlined)
				*inlined = 1;
			return ret;
		}
		/*
		 * has_inline_data was cleared: the dir no longer holds
		 * inline data, so fall through to the block search.
		 */
	}

	if ((namelen <= 2) && (name[0] == '.') &&
	    (name[1] == '.' || name[1] == '\0')) {
		/*
		 * "." or ".." will only be in the first block
		 * NFS may look up ".."; "." should be handled by the VFS
		 */
		block = start = 0;
		nblocks = 1;
		goto restart;
	}
	if (is_dx(dir)) {
		bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
		/*
		 * On success, or if the error was file not found,
		 * return.  Otherwise, fall back to doing a search the
		 * old fashioned way.
		 */
		if (bh || (err != ERR_BAD_DX_DIR))
			return bh;
		dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
			       "falling back\n"));
	}
	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
	/* resume where the last successful lookup in this dir left off */
	start = EXT4_I(dir)->i_dir_start_lookup;
	if (start >= nblocks)
		start = 0;
	block = start;
restart:
	do {
		/*
		 * We deal with the read-ahead logic here.
		 */
		if (ra_ptr >= ra_max) {
			/* Refill the readahead buffer */
			ra_ptr = 0;
			b = block;
			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
				/*
				 * Terminate if we reach the end of the
				 * directory and must wrap, or if our
				 * search has finished at this block.
				 */
				if (b >= nblocks || (num && block == start)) {
					bh_use[ra_max] = NULL;
					break;
				}
				num++;
				bh = ext4_getblk(NULL, dir, b++, 0, &err);
				bh_use[ra_max] = bh;
				if (bh)
					ll_rw_block(READ | REQ_META | REQ_PRIO,
						    1, &bh);
			}
		}
		if ((bh = bh_use[ra_ptr++]) == NULL)
			goto next;
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			/* read error, skip block & hope for the best */
			EXT4_ERROR_INODE(dir, "reading directory lblock %lu",
					 (unsigned long) block);
			brelse(bh);
			goto next;
		}
		/* dirent checksums don't apply to dx root/interior nodes */
		if (!buffer_verified(bh) &&
		    !is_dx_internal_node(dir, block,
					 (struct ext4_dir_entry *)bh->b_data) &&
		    !ext4_dirent_csum_verify(dir,
				(struct ext4_dir_entry *)bh->b_data)) {
			EXT4_ERROR_INODE(dir, "checksumming directory "
					 "block %lu", (unsigned long)block);
			brelse(bh);
			goto next;
		}
		set_buffer_verified(bh);
		i = search_dirblock(bh, dir, d_name,
			    block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
		if (i == 1) {
			/* remember the hit so the next lookup starts here */
			EXT4_I(dir)->i_dir_start_lookup = block;
			ret = bh;
			goto cleanup_and_exit;
		} else {
			brelse(bh);
			if (i < 0)
				goto cleanup_and_exit;
		}
	next:
		if (++block >= nblocks)
			block = 0;
	} while (block != start);

	/*
	 * If the directory has grown while we were searching, then
	 * search the last part of the directory before giving up.
	 */
	block = nblocks;
	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
	if (block < nblocks) {
		start = 0;
		goto restart;
	}

cleanup_and_exit:
	/* Clean up the read-ahead blocks */
	for (; ra_ptr < ra_max; ra_ptr++)
		brelse(bh_use[ra_ptr]);
	return ret;
}
1348 
/*
 * Hash-tree lookup of @d_name: dx_probe() maps the name's hash to a
 * leaf block, each candidate leaf is searched with search_dirblock(),
 * and ext4_htree_next_block() decides whether a colliding hash range
 * continues in a following leaf.  Returns the leaf's buffer_head (with
 * *res_dir set) on success; otherwise NULL with *err set: -ENOENT if
 * the name is simply absent, ERR_BAD_DX_DIR on corruption (which makes
 * ext4_find_entry() fall back to a linear search).
 */
static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
		       struct ext4_dir_entry_2 **res_dir, int *err)
{
	struct super_block * sb = dir->i_sb;
	struct dx_hash_info	hinfo;
	struct dx_frame frames[2], *frame;
	struct buffer_head *bh;
	ext4_lblk_t block;
	int retval;

	if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
		return NULL;
	do {
		block = dx_get_block(frame->at);
		bh = ext4_read_dirblock(dir, block, DIRENT);
		if (IS_ERR(bh)) {
			*err = PTR_ERR(bh);
			goto errout;
		}
		retval = search_dirblock(bh, dir, d_name,
					 block << EXT4_BLOCK_SIZE_BITS(sb),
					 res_dir);
		if (retval == 1) { 	/* Success! */
			dx_release(frames);
			return bh;
		}
		brelse(bh);
		if (retval == -1) {	/* corrupt dirent in leaf block */
			*err = ERR_BAD_DX_DIR;
			goto errout;
		}

		/* Check to see if we should continue to search */
		retval = ext4_htree_next_block(dir, hinfo.hash, frame,
					       frames, NULL);
		if (retval < 0) {
			ext4_warning(sb,
			     "error reading index page in directory #%lu",
			     dir->i_ino);
			*err = retval;
			goto errout;
		}
	} while (retval == 1);

	*err = -ENOENT;
errout:
	dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name));
	dx_release (frames);
	return NULL;
}
1399 
/*
 * Look up one name for the VFS.  A missing entry is not an error: inode
 * stays NULL and d_splice_alias() instantiates a negative dentry.
 * Corrupt directory contents -- an out-of-range inode number, an entry
 * pointing back at the directory itself, or a reference to a deleted
 * (stale) inode -- are reported as -EIO.
 */
static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	struct inode *inode;
	struct ext4_dir_entry_2 *de;
	struct buffer_head *bh;

	if (dentry->d_name.len > EXT4_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
	inode = NULL;
	if (bh) {
		/* de points into bh's data: copy the ino out, then release */
		__u32 ino = le32_to_cpu(de->inode);
		brelse(bh);
		if (!ext4_valid_inum(dir->i_sb, ino)) {
			EXT4_ERROR_INODE(dir, "bad inode number: %u", ino);
			return ERR_PTR(-EIO);
		}
		if (unlikely(ino == dir->i_ino)) {
			EXT4_ERROR_INODE(dir, "'%.*s' linked to parent dir",
					 dentry->d_name.len,
					 dentry->d_name.name);
			return ERR_PTR(-EIO);
		}
		inode = ext4_iget(dir->i_sb, ino);
		if (inode == ERR_PTR(-ESTALE)) {
			EXT4_ERROR_INODE(dir,
					 "deleted inode referenced: %u",
					 ino);
			return ERR_PTR(-EIO);
		}
	}
	return d_splice_alias(inode, dentry);
}
1434 
1435 
1436 struct dentry *ext4_get_parent(struct dentry *child)
1437 {
1438 	__u32 ino;
1439 	static const struct qstr dotdot = QSTR_INIT("..", 2);
1440 	struct ext4_dir_entry_2 * de;
1441 	struct buffer_head *bh;
1442 
1443 	bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
1444 	if (!bh)
1445 		return ERR_PTR(-ENOENT);
1446 	ino = le32_to_cpu(de->inode);
1447 	brelse(bh);
1448 
1449 	if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
1450 		EXT4_ERROR_INODE(child->d_inode,
1451 				 "bad parent inode number: %u", ino);
1452 		return ERR_PTR(-EIO);
1453 	}
1454 
1455 	return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino));
1456 }
1457 
#define S_SHIFT 12
/*
 * Map the S_IFMT bits of an inode's i_mode (shifted down by S_SHIFT)
 * to the EXT4_FT_* value stored in a dirent's file_type byte.
 */
static unsigned char ext4_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= EXT4_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= EXT4_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= EXT4_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= EXT4_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= EXT4_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= EXT4_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= EXT4_FT_SYMLINK,
};
1468 
1469 static inline void ext4_set_de_type(struct super_block *sb,
1470 				struct ext4_dir_entry_2 *de,
1471 				umode_t mode) {
1472 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE))
1473 		de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
1474 }
1475 
1476 /*
1477  * Move count entries from end of map between two memory locations.
1478  * Returns pointer to last entry moved.
1479  */
1480 static struct ext4_dir_entry_2 *
1481 dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count,
1482 		unsigned blocksize)
1483 {
1484 	unsigned rec_len = 0;
1485 
1486 	while (count--) {
1487 		struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)
1488 						(from + (map->offs<<2));
1489 		rec_len = EXT4_DIR_REC_LEN(de->name_len);
1490 		memcpy (to, de, rec_len);
1491 		((struct ext4_dir_entry_2 *) to)->rec_len =
1492 				ext4_rec_len_to_disk(rec_len, blocksize);
1493 		de->inode = 0;
1494 		map++;
1495 		to += rec_len;
1496 	}
1497 	return (struct ext4_dir_entry_2 *) (to - rec_len);
1498 }
1499 
1500 /*
1501  * Compact each dir entry in the range to the minimal rec_len.
1502  * Returns pointer to last entry in range.
1503  */
1504 static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
1505 {
1506 	struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base;
1507 	unsigned rec_len = 0;
1508 
1509 	prev = to = de;
1510 	while ((char*)de < base + blocksize) {
1511 		next = ext4_next_entry(de, blocksize);
1512 		if (de->inode && de->name_len) {
1513 			rec_len = EXT4_DIR_REC_LEN(de->name_len);
1514 			if (de > to)
1515 				memmove(to, de, rec_len);
1516 			to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
1517 			prev = to;
1518 			to = (struct ext4_dir_entry_2 *) (((char *) to) + rec_len);
1519 		}
1520 		de = next;
1521 	}
1522 	return prev;
1523 }
1524 
/*
 * Split a full leaf block to make room for a new dir entry.
 * Allocate a new block, and move entries so that they are approx. equally full.
 * Returns pointer to de in block into which the new entry will be inserted;
 * on failure returns NULL with *error set, and *bh has been released and
 * set to NULL.
 */
static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
			struct buffer_head **bh,struct dx_frame *frame,
			struct dx_hash_info *hinfo, int *error)
{
	unsigned blocksize = dir->i_sb->s_blocksize;
	unsigned count, continued;
	struct buffer_head *bh2;
	ext4_lblk_t newblock;
	u32 hash2;
	struct dx_map_entry *map;
	char *data1 = (*bh)->b_data, *data2;
	unsigned split, move, size;
	struct ext4_dir_entry_2 *de = NULL, *de2;
	struct ext4_dir_entry_tail *t;
	int	csum_size = 0;
	int	err = 0, i;

	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	bh2 = ext4_append(handle, dir, &newblock);
	if (IS_ERR(bh2)) {
		brelse(*bh);
		*bh = NULL;
		*error = PTR_ERR(bh2);
		return NULL;
	}

	BUFFER_TRACE(*bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, *bh);
	if (err)
		goto journal_error;

	BUFFER_TRACE(frame->bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, frame->bh);
	if (err)
		goto journal_error;

	data2 = bh2->b_data;

	/* create map in the end of data2 block */
	map = (struct dx_map_entry *) (data2 + blocksize);
	count = dx_make_map((struct ext4_dir_entry_2 *) data1,
			     blocksize, hinfo, map);
	map -= count;
	dx_sort_map(map, count);
	/* Split the existing block in the middle, size-wise */
	size = 0;
	move = 0;
	for (i = count-1; i >= 0; i--) {
		/* is more than half of this entry in 2nd half of the block? */
		if (size + map[i].size/2 > blocksize/2)
			break;
		size += map[i].size;
		move++;
	}
	/* map index at which we will split */
	split = count - move;
	hash2 = map[split].hash;
	/* same hash on both sides of the split: mark it as a continuation */
	continued = hash2 == map[split - 1].hash;
	dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
			(unsigned long)dx_get_block(frame->at),
					hash2, split, count-split));

	/* Fancy dance to stay within two buffers */
	de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
	de = dx_pack_dirents(data1, blocksize);
	/* stretch the last entry of each block to its (csum-adjusted) end */
	de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
					   (char *) de,
					   blocksize);
	de2->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) -
					    (char *) de2,
					    blocksize);
	if (csum_size) {
		t = EXT4_DIRENT_TAIL(data2, blocksize);
		initialize_dirent_tail(t, blocksize);

		t = EXT4_DIRENT_TAIL(data1, blocksize);
		initialize_dirent_tail(t, blocksize);
	}

	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));

	/* Which block gets the new entry? */
	if (hinfo->hash >= hash2)
	{
		swap(*bh, bh2);
		de = de2;
	}
	dx_insert_block(frame, hash2 + continued, newblock);
	err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
	if (err)
		goto journal_error;
	err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
	if (err)
		goto journal_error;
	brelse(bh2);
	dxtrace(dx_show_index("frame", frame->entries));
	return de;

journal_error:
	brelse(*bh);
	brelse(bh2);
	*bh = NULL;
	ext4_std_error(dir->i_sb, err);
	*error = err;
	return NULL;
}
1640 
/*
 * Find a slot in @buf (@buf_size bytes of dirent data) that can hold a
 * new entry of @namelen characters.  A candidate is either an unused
 * entry (inode == 0) whose rec_len alone is large enough, or a live
 * entry whose rec_len leaves enough slack beyond its own minimal
 * length.  Returns 0 with *dest_de set on success; -EEXIST if @name is
 * already present, -EIO on a corrupt entry, -ENOSPC if no slot fits.
 */
int ext4_find_dest_de(struct inode *dir, struct inode *inode,
		      struct buffer_head *bh,
		      void *buf, int buf_size,
		      const char *name, int namelen,
		      struct ext4_dir_entry_2 **dest_de)
{
	struct ext4_dir_entry_2 *de;
	unsigned short reclen = EXT4_DIR_REC_LEN(namelen);
	int nlen, rlen;
	unsigned int offset = 0;
	char *top;

	de = (struct ext4_dir_entry_2 *)buf;
	top = buf + buf_size - reclen;
	while ((char *) de <= top) {
		if (ext4_check_dir_entry(dir, NULL, de, bh,
					 buf, buf_size, offset))
			return -EIO;
		if (ext4_match(namelen, name, de))
			return -EEXIST;
		nlen = EXT4_DIR_REC_LEN(de->name_len);
		rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
		/* unused entry: whole rlen usable; live entry: only the slack */
		if ((de->inode ? rlen - nlen : rlen) >= reclen)
			break;
		de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
		offset += rlen;
	}
	if ((char *) de > top)
		return -ENOSPC;

	*dest_de = de;
	return 0;
}
1674 
1675 void ext4_insert_dentry(struct inode *inode,
1676 			struct ext4_dir_entry_2 *de,
1677 			int buf_size,
1678 			const char *name, int namelen)
1679 {
1680 
1681 	int nlen, rlen;
1682 
1683 	nlen = EXT4_DIR_REC_LEN(de->name_len);
1684 	rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
1685 	if (de->inode) {
1686 		struct ext4_dir_entry_2 *de1 =
1687 				(struct ext4_dir_entry_2 *)((char *)de + nlen);
1688 		de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, buf_size);
1689 		de->rec_len = ext4_rec_len_to_disk(nlen, buf_size);
1690 		de = de1;
1691 	}
1692 	de->file_type = EXT4_FT_UNKNOWN;
1693 	de->inode = cpu_to_le32(inode->i_ino);
1694 	ext4_set_de_type(inode->i_sb, de, inode->i_mode);
1695 	de->name_len = namelen;
1696 	memcpy(de->name, name, namelen);
1697 }
/*
 * Add a new entry into a directory (leaf) block.  If de is non-NULL,
 * it points to a directory entry which is guaranteed to be large
 * enough for new directory entry.  If de is NULL, then
 * add_dirent_to_buf will attempt to search the directory block for
 * space.  It will return -ENOSPC if no space is available, -EIO if a
 * corrupt entry is found, and -EEXIST if the directory entry already
 * exists.
 */
static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
			     struct inode *inode, struct ext4_dir_entry_2 *de,
			     struct buffer_head *bh)
{
	struct inode	*dir = dentry->d_parent->d_inode;
	const char	*name = dentry->d_name.name;
	int		namelen = dentry->d_name.len;
	unsigned int	blocksize = dir->i_sb->s_blocksize;
	int		csum_size = 0;
	int		err;

	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	if (!de) {
		/* caller supplied no slot: find one (reserve csum tail space) */
		err = ext4_find_dest_de(dir, inode,
					bh, bh->b_data, blocksize - csum_size,
					name, namelen, &de);
		if (err)
			return err;
	}
	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		ext4_std_error(dir->i_sb, err);
		return err;
	}

	/* By now the buffer is marked for journaling */
	ext4_insert_dentry(inode, de, blocksize, name, namelen);

	/*
	 * XXX shouldn't update any times until successful
	 * completion of syscall, but too many callers depend
	 * on this.
	 *
	 * XXX similarly, too many callers depend on
	 * ext4_new_inode() setting the times, but error
	 * recovery deletes the inode, so the worst that can
	 * happen is that the times are slightly out of date
	 * and/or different from the directory change time.
	 */
	dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
	ext4_update_dx_flag(dir);
	dir->i_version++;
	ext4_mark_inode_dirty(handle, dir);
	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_dirent_node(handle, dir, bh);
	if (err)
		ext4_std_error(dir->i_sb, err);
	/*
	 * NOTE(review): a failure from ext4_handle_dirty_dirent_node() is
	 * only reported via ext4_std_error(); 0 is still returned here.
	 */
	return 0;
}
1759 
1760 /*
1761  * This converts a one block unindexed directory to a 3 block indexed
1762  * directory, and adds the dentry to the indexed directory.
1763  */
1764 static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1765 			    struct inode *inode, struct buffer_head *bh)
1766 {
1767 	struct inode	*dir = dentry->d_parent->d_inode;
1768 	const char	*name = dentry->d_name.name;
1769 	int		namelen = dentry->d_name.len;
1770 	struct buffer_head *bh2;
1771 	struct dx_root	*root;
1772 	struct dx_frame	frames[2], *frame;
1773 	struct dx_entry *entries;
1774 	struct ext4_dir_entry_2	*de, *de2;
1775 	struct ext4_dir_entry_tail *t;
1776 	char		*data1, *top;
1777 	unsigned	len;
1778 	int		retval;
1779 	unsigned	blocksize;
1780 	struct dx_hash_info hinfo;
1781 	ext4_lblk_t  block;
1782 	struct fake_dirent *fde;
1783 	int		csum_size = 0;
1784 
1785 	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
1786 				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
1787 		csum_size = sizeof(struct ext4_dir_entry_tail);
1788 
1789 	blocksize =  dir->i_sb->s_blocksize;
1790 	dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
1791 	retval = ext4_journal_get_write_access(handle, bh);
1792 	if (retval) {
1793 		ext4_std_error(dir->i_sb, retval);
1794 		brelse(bh);
1795 		return retval;
1796 	}
1797 	root = (struct dx_root *) bh->b_data;
1798 
1799 	/* The 0th block becomes the root, move the dirents out */
1800 	fde = &root->dotdot;
1801 	de = (struct ext4_dir_entry_2 *)((char *)fde +
1802 		ext4_rec_len_from_disk(fde->rec_len, blocksize));
1803 	if ((char *) de >= (((char *) root) + blocksize)) {
1804 		EXT4_ERROR_INODE(dir, "invalid rec_len for '..'");
1805 		brelse(bh);
1806 		return -EIO;
1807 	}
1808 	len = ((char *) root) + (blocksize - csum_size) - (char *) de;
1809 
1810 	/* Allocate new block for the 0th block's dirents */
1811 	bh2 = ext4_append(handle, dir, &block);
1812 	if (IS_ERR(bh2)) {
1813 		brelse(bh);
1814 		return PTR_ERR(bh2);
1815 	}
1816 	ext4_set_inode_flag(dir, EXT4_INODE_INDEX);
1817 	data1 = bh2->b_data;
1818 
1819 	memcpy (data1, de, len);
1820 	de = (struct ext4_dir_entry_2 *) data1;
1821 	top = data1 + len;
1822 	while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top)
1823 		de = de2;
1824 	de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
1825 					   (char *) de,
1826 					   blocksize);
1827 
1828 	if (csum_size) {
1829 		t = EXT4_DIRENT_TAIL(data1, blocksize);
1830 		initialize_dirent_tail(t, blocksize);
1831 	}
1832 
1833 	/* Initialize the root; the dot dirents already exist */
1834 	de = (struct ext4_dir_entry_2 *) (&root->dotdot);
1835 	de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
1836 					   blocksize);
1837 	memset (&root->info, 0, sizeof(root->info));
1838 	root->info.info_length = sizeof(root->info);
1839 	root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
1840 	entries = root->entries;
1841 	dx_set_block(entries, 1);
1842 	dx_set_count(entries, 1);
1843 	dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info)));
1844 
1845 	/* Initialize as for dx_probe */
1846 	hinfo.hash_version = root->info.hash_version;
1847 	if (hinfo.hash_version <= DX_HASH_TEA)
1848 		hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
1849 	hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
1850 	ext4fs_dirhash(name, namelen, &hinfo);
1851 	frame = frames;
1852 	frame->entries = entries;
1853 	frame->at = entries;
1854 	frame->bh = bh;
1855 	bh = bh2;
1856 
1857 	ext4_handle_dirty_dx_node(handle, dir, frame->bh);
1858 	ext4_handle_dirty_dirent_node(handle, dir, bh);
1859 
1860 	de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
1861 	if (!de) {
1862 		/*
1863 		 * Even if the block split failed, we have to properly write
1864 		 * out all the changes we did so far. Otherwise we can end up
1865 		 * with corrupted filesystem.
1866 		 */
1867 		ext4_mark_inode_dirty(handle, dir);
1868 		dx_release(frames);
1869 		return retval;
1870 	}
1871 	dx_release(frames);
1872 
1873 	retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
1874 	brelse(bh);
1875 	return retval;
1876 }
1877 
/*
 *	ext4_add_entry()
 *
 * adds a file entry to the specified directory, using the same
 * semantics as ext4_find_entry(). It returns NULL if it failed.
 *
 * NOTE!! The inode part of 'de' is left at 0 - which means you
 * may not sleep between calling this and putting something into
 * the entry, as someone else might have used it while you slept.
 */
static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
			  struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de;
	struct ext4_dir_entry_tail *t;
	struct super_block *sb;
	int	retval;
	int	dx_fallback=0;
	unsigned blocksize;
	ext4_lblk_t block, blocks;
	int	csum_size = 0;

	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	sb = dir->i_sb;
	blocksize = sb->s_blocksize;
	if (!dentry->d_name.len)
		return -EINVAL;

	/* inline-data path: retval 1 is mapped to success, < 0 is an error,
	 * anything else falls through to the block-based paths below */
	if (ext4_has_inline_data(dir)) {
		retval = ext4_try_add_inline_entry(handle, dentry, inode);
		if (retval < 0)
			return retval;
		if (retval == 1) {
			retval = 0;
			return retval;
		}
	}

	if (is_dx(dir)) {
		retval = ext4_dx_add_entry(handle, dentry, inode);
		if (!retval || (retval != ERR_BAD_DX_DIR))
			return retval;
		/* index unusable: clear the flag and fall back to linear */
		ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
		dx_fallback++;
		ext4_mark_inode_dirty(handle, dir);
	}
	blocks = dir->i_size >> sb->s_blocksize_bits;
	for (block = 0; block < blocks; block++) {
		bh = ext4_read_dirblock(dir, block, DIRENT);
		if (IS_ERR(bh))
			return PTR_ERR(bh);

		retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
		if (retval != -ENOSPC) {
			brelse(bh);
			return retval;
		}

		/* single full block: convert to an htree-indexed directory
		 * (make_indexed_dir consumes bh) */
		if (blocks == 1 && !dx_fallback &&
		    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
			return make_indexed_dir(handle, dentry, inode, bh);
		brelse(bh);
	}
	/* every existing block is full: append a fresh, empty one */
	bh = ext4_append(handle, dir, &block);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	de = (struct ext4_dir_entry_2 *) bh->b_data;
	de->inode = 0;
	de->rec_len = ext4_rec_len_to_disk(blocksize - csum_size, blocksize);

	if (csum_size) {
		t = EXT4_DIRENT_TAIL(bh->b_data, blocksize);
		initialize_dirent_tail(t, blocksize);
	}

	retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
	brelse(bh);
	if (retval == 0)
		ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
	return retval;
}
1964 
1965 /*
1966  * Returns 0 for success, or a negative error value
1967  */
1968 static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1969 			     struct inode *inode)
1970 {
1971 	struct dx_frame frames[2], *frame;
1972 	struct dx_entry *entries, *at;
1973 	struct dx_hash_info hinfo;
1974 	struct buffer_head *bh;
1975 	struct inode *dir = dentry->d_parent->d_inode;
1976 	struct super_block *sb = dir->i_sb;
1977 	struct ext4_dir_entry_2 *de;
1978 	int err;
1979 
1980 	frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
1981 	if (!frame)
1982 		return err;
1983 	entries = frame->entries;
1984 	at = frame->at;
1985 	bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT);
1986 	if (IS_ERR(bh)) {
1987 		err = PTR_ERR(bh);
1988 		bh = NULL;
1989 		goto cleanup;
1990 	}
1991 
1992 	BUFFER_TRACE(bh, "get_write_access");
1993 	err = ext4_journal_get_write_access(handle, bh);
1994 	if (err)
1995 		goto journal_error;
1996 
1997 	err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
1998 	if (err != -ENOSPC)
1999 		goto cleanup;
2000 
2001 	/* Block full, should compress but for now just split */
2002 	dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
2003 		       dx_get_count(entries), dx_get_limit(entries)));
2004 	/* Need to split index? */
2005 	if (dx_get_count(entries) == dx_get_limit(entries)) {
2006 		ext4_lblk_t newblock;
2007 		unsigned icount = dx_get_count(entries);
2008 		int levels = frame - frames;
2009 		struct dx_entry *entries2;
2010 		struct dx_node *node2;
2011 		struct buffer_head *bh2;
2012 
2013 		if (levels && (dx_get_count(frames->entries) ==
2014 			       dx_get_limit(frames->entries))) {
2015 			ext4_warning(sb, "Directory index full!");
2016 			err = -ENOSPC;
2017 			goto cleanup;
2018 		}
2019 		bh2 = ext4_append(handle, dir, &newblock);
2020 		if (IS_ERR(bh2)) {
2021 			err = PTR_ERR(bh2);
2022 			goto cleanup;
2023 		}
2024 		node2 = (struct dx_node *)(bh2->b_data);
2025 		entries2 = node2->entries;
2026 		memset(&node2->fake, 0, sizeof(struct fake_dirent));
2027 		node2->fake.rec_len = ext4_rec_len_to_disk(sb->s_blocksize,
2028 							   sb->s_blocksize);
2029 		BUFFER_TRACE(frame->bh, "get_write_access");
2030 		err = ext4_journal_get_write_access(handle, frame->bh);
2031 		if (err)
2032 			goto journal_error;
2033 		if (levels) {
2034 			unsigned icount1 = icount/2, icount2 = icount - icount1;
2035 			unsigned hash2 = dx_get_hash(entries + icount1);
2036 			dxtrace(printk(KERN_DEBUG "Split index %i/%i\n",
2037 				       icount1, icount2));
2038 
2039 			BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
2040 			err = ext4_journal_get_write_access(handle,
2041 							     frames[0].bh);
2042 			if (err)
2043 				goto journal_error;
2044 
2045 			memcpy((char *) entries2, (char *) (entries + icount1),
2046 			       icount2 * sizeof(struct dx_entry));
2047 			dx_set_count(entries, icount1);
2048 			dx_set_count(entries2, icount2);
2049 			dx_set_limit(entries2, dx_node_limit(dir));
2050 
2051 			/* Which index block gets the new entry? */
2052 			if (at - entries >= icount1) {
2053 				frame->at = at = at - entries - icount1 + entries2;
2054 				frame->entries = entries = entries2;
2055 				swap(frame->bh, bh2);
2056 			}
2057 			dx_insert_block(frames + 0, hash2, newblock);
2058 			dxtrace(dx_show_index("node", frames[1].entries));
2059 			dxtrace(dx_show_index("node",
2060 			       ((struct dx_node *) bh2->b_data)->entries));
2061 			err = ext4_handle_dirty_dx_node(handle, dir, bh2);
2062 			if (err)
2063 				goto journal_error;
2064 			brelse (bh2);
2065 		} else {
2066 			dxtrace(printk(KERN_DEBUG
2067 				       "Creating second level index...\n"));
2068 			memcpy((char *) entries2, (char *) entries,
2069 			       icount * sizeof(struct dx_entry));
2070 			dx_set_limit(entries2, dx_node_limit(dir));
2071 
2072 			/* Set up root */
2073 			dx_set_count(entries, 1);
2074 			dx_set_block(entries + 0, newblock);
2075 			((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
2076 
2077 			/* Add new access path frame */
2078 			frame = frames + 1;
2079 			frame->at = at = at - entries + entries2;
2080 			frame->entries = entries = entries2;
2081 			frame->bh = bh2;
2082 			err = ext4_journal_get_write_access(handle,
2083 							     frame->bh);
2084 			if (err)
2085 				goto journal_error;
2086 		}
2087 		err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh);
2088 		if (err) {
2089 			ext4_std_error(inode->i_sb, err);
2090 			goto cleanup;
2091 		}
2092 	}
2093 	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
2094 	if (!de)
2095 		goto cleanup;
2096 	err = add_dirent_to_buf(handle, dentry, inode, de, bh);
2097 	goto cleanup;
2098 
2099 journal_error:
2100 	ext4_std_error(dir->i_sb, err);
2101 cleanup:
2102 	brelse(bh);
2103 	dx_release(frames);
2104 	return err;
2105 }
2106 
2107 /*
2108  * ext4_generic_delete_entry deletes a directory entry by merging it
2109  * with the previous entry
2110  */
2111 int ext4_generic_delete_entry(handle_t *handle,
2112 			      struct inode *dir,
2113 			      struct ext4_dir_entry_2 *de_del,
2114 			      struct buffer_head *bh,
2115 			      void *entry_buf,
2116 			      int buf_size,
2117 			      int csum_size)
2118 {
2119 	struct ext4_dir_entry_2 *de, *pde;
2120 	unsigned int blocksize = dir->i_sb->s_blocksize;
2121 	int i;
2122 
2123 	i = 0;
2124 	pde = NULL;
2125 	de = (struct ext4_dir_entry_2 *)entry_buf;
2126 	while (i < buf_size - csum_size) {
2127 		if (ext4_check_dir_entry(dir, NULL, de, bh,
2128 					 bh->b_data, bh->b_size, i))
2129 			return -EIO;
2130 		if (de == de_del)  {
2131 			if (pde)
2132 				pde->rec_len = ext4_rec_len_to_disk(
2133 					ext4_rec_len_from_disk(pde->rec_len,
2134 							       blocksize) +
2135 					ext4_rec_len_from_disk(de->rec_len,
2136 							       blocksize),
2137 					blocksize);
2138 			else
2139 				de->inode = 0;
2140 			dir->i_version++;
2141 			return 0;
2142 		}
2143 		i += ext4_rec_len_from_disk(de->rec_len, blocksize);
2144 		pde = de;
2145 		de = ext4_next_entry(de, blocksize);
2146 	}
2147 	return -ENOENT;
2148 }
2149 
/*
 * Remove @de_del from directory @dir: try the inline path first,
 * otherwise take journal write access on @bh, unlink the entry via
 * ext4_generic_delete_entry() and mark the block dirty.
 */
static int ext4_delete_entry(handle_t *handle,
			     struct inode *dir,
			     struct ext4_dir_entry_2 *de_del,
			     struct buffer_head *bh)
{
	int err, csum_size = 0;

	if (ext4_has_inline_data(dir)) {
		int has_inline_data = 1;
		err = ext4_delete_inline_entry(handle, dir, de_del, bh,
					       &has_inline_data);
		/* Done unless the helper cleared the inline flag */
		if (has_inline_data)
			return err;
	}

	/* Account for the checksum tail when metadata_csum is enabled */
	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	BUFFER_TRACE(bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh);
	if (unlikely(err))
		goto out;

	err = ext4_generic_delete_entry(handle, dir, de_del,
					bh, bh->b_data,
					dir->i_sb->s_blocksize, csum_size);
	if (err)
		goto out;

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_dirent_node(handle, dir, bh);
	if (unlikely(err))
		goto out;

	return 0;
out:
	/* -ENOENT is a normal "not found" result, not an fs error */
	if (err != -ENOENT)
		ext4_std_error(dir->i_sb, err);
	return err;
}
2191 
2192 /*
2193  * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
2194  * since this indicates that nlinks count was previously 1.
2195  */
2196 static void ext4_inc_count(handle_t *handle, struct inode *inode)
2197 {
2198 	inc_nlink(inode);
2199 	if (is_dx(inode) && inode->i_nlink > 1) {
2200 		/* limit is 16-bit i_links_count */
2201 		if (inode->i_nlink >= EXT4_LINK_MAX || inode->i_nlink == 2) {
2202 			set_nlink(inode, 1);
2203 			EXT4_SET_RO_COMPAT_FEATURE(inode->i_sb,
2204 					      EXT4_FEATURE_RO_COMPAT_DIR_NLINK);
2205 		}
2206 	}
2207 }
2208 
2209 /*
2210  * If a directory had nlink == 1, then we should let it be 1. This indicates
2211  * directory has >EXT4_LINK_MAX subdirs.
2212  */
2213 static void ext4_dec_count(handle_t *handle, struct inode *inode)
2214 {
2215 	if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
2216 		drop_nlink(inode);
2217 }
2218 
2219 
2220 static int ext4_add_nondir(handle_t *handle,
2221 		struct dentry *dentry, struct inode *inode)
2222 {
2223 	int err = ext4_add_entry(handle, dentry, inode);
2224 	if (!err) {
2225 		ext4_mark_inode_dirty(handle, inode);
2226 		unlock_new_inode(inode);
2227 		d_instantiate(dentry, inode);
2228 		return 0;
2229 	}
2230 	drop_nlink(inode);
2231 	unlock_new_inode(inode);
2232 	iput(inode);
2233 	return err;
2234 }
2235 
2236 /*
2237  * By the time this is called, we already have created
2238  * the directory cache entry for the new file, but it
2239  * is so far negative - it has no inode.
2240  *
2241  * If the create succeeds, we fill in the inode information
2242  * with d_instantiate().
2243  */
static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	handle_t *handle;
	struct inode *inode;
	int err, credits, retries = 0;

	dquot_initialize(dir);

	/* Credits for the dirent insertion, index blocks and quota init */
	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
		   EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
retry:
	/* Allocates the inode and starts the journal handle in one call */
	inode = ext4_new_inode_start_handle(dir, mode, &dentry->d_name, 0,
					    NULL, EXT4_HT_DIR, credits);
	handle = ext4_journal_current_handle();
	err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
		err = ext4_add_nondir(handle, dentry, inode);
		if (!err && IS_DIRSYNC(dir))
			ext4_handle_sync(handle);
	}
	/* The handle may be NULL if inode allocation itself failed */
	if (handle)
		ext4_journal_stop(handle);
	/* Retry: blocks may become free once the journal commits */
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}
2275 
/*
 * Create a special file (device node, fifo, socket) in @dir.
 */
static int ext4_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	handle_t *handle;
	struct inode *inode;
	int err, credits, retries = 0;

	/* Reject device numbers the on-disk format cannot represent */
	if (!new_valid_dev(rdev))
		return -EINVAL;

	dquot_initialize(dir);

	/* Same credit estimate as ext4_create(): dirent + index + quota */
	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
		   EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
retry:
	inode = ext4_new_inode_start_handle(dir, mode, &dentry->d_name, 0,
					    NULL, EXT4_HT_DIR, credits);
	handle = ext4_journal_current_handle();
	err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		init_special_inode(inode, inode->i_mode, rdev);
		inode->i_op = &ext4_special_inode_operations;
		err = ext4_add_nondir(handle, dentry, inode);
		if (!err && IS_DIRSYNC(dir))
			ext4_handle_sync(handle);
	}
	if (handle)
		ext4_journal_stop(handle);
	/* Retry: blocks may become free once the journal commits */
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}
2309 
/*
 * Write the "." and ".." entries into a fresh directory buffer.
 *
 * When @dotdot_real_len is zero, ".." is stretched to cover the rest
 * of the block (minus @csum_size for the checksum tail); otherwise it
 * gets only its minimal rec_len so further entries can follow.
 *
 * Returns a pointer to the space just past the ".." entry.
 */
struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
			  struct ext4_dir_entry_2 *de,
			  int blocksize, int csum_size,
			  unsigned int parent_ino, int dotdot_real_len)
{
	de->inode = cpu_to_le32(inode->i_ino);
	de->name_len = 1;
	de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
					   blocksize);
	strcpy(de->name, ".");
	ext4_set_de_type(inode->i_sb, de, S_IFDIR);

	de = ext4_next_entry(de, blocksize);
	de->inode = cpu_to_le32(parent_ino);
	de->name_len = 2;
	if (!dotdot_real_len)
		de->rec_len = ext4_rec_len_to_disk(blocksize -
					(csum_size + EXT4_DIR_REC_LEN(1)),
					blocksize);
	else
		de->rec_len = ext4_rec_len_to_disk(
				EXT4_DIR_REC_LEN(de->name_len), blocksize);
	strcpy(de->name, "..");
	ext4_set_de_type(inode->i_sb, de, S_IFDIR);

	return ext4_next_entry(de, blocksize);
}
2337 
/*
 * Initialize the body of a newly created directory @inode (child of
 * @dir): try inline storage first, otherwise allocate block 0 and
 * write "." and ".." into it.  Sets nlink to 2 for "." .
 */
static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
			     struct inode *inode)
{
	struct buffer_head *dir_block = NULL;
	struct ext4_dir_entry_2 *de;
	struct ext4_dir_entry_tail *t;
	ext4_lblk_t block = 0;
	unsigned int blocksize = dir->i_sb->s_blocksize;
	int csum_size = 0;
	int err;

	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		csum_size = sizeof(struct ext4_dir_entry_tail);

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		err = ext4_try_create_inline_dir(handle, dir, inode);
		/* -ENOSPC means "didn't fit inline": fall back to a block */
		if (err < 0 && err != -ENOSPC)
			goto out;
		if (!err)
			goto out;
	}

	inode->i_size = 0;
	dir_block = ext4_append(handle, inode, &block);
	if (IS_ERR(dir_block))
		return PTR_ERR(dir_block);
	BUFFER_TRACE(dir_block, "get_write_access");
	err = ext4_journal_get_write_access(handle, dir_block);
	if (err)
		goto out;
	de = (struct ext4_dir_entry_2 *)dir_block->b_data;
	ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0);
	/* "." plus the parent's reference */
	set_nlink(inode, 2);
	if (csum_size) {
		t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
		initialize_dirent_tail(t, blocksize);
	}

	BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
	if (err)
		goto out;
	set_buffer_verified(dir_block);
out:
	brelse(dir_block);
	return err;
}
2386 
static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	handle_t *handle;
	struct inode *inode;
	int err, credits, retries = 0;

	if (EXT4_DIR_LINK_MAX(dir))
		return -EMLINK;

	dquot_initialize(dir);

	/* Credits for the dirent insertion, index blocks and quota init */
	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
		   EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
retry:
	inode = ext4_new_inode_start_handle(dir, S_IFDIR | mode,
					    &dentry->d_name,
					    0, NULL, EXT4_HT_DIR, credits);
	handle = ext4_journal_current_handle();
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_stop;

	inode->i_op = &ext4_dir_inode_operations;
	inode->i_fop = &ext4_dir_operations;
	err = ext4_init_new_dir(handle, dir, inode);
	if (err)
		goto out_clear_inode;
	err = ext4_mark_inode_dirty(handle, inode);
	if (!err)
		err = ext4_add_entry(handle, dentry, inode);
	if (err) {
/* Also reached from the two gotos above and below this block */
out_clear_inode:
		clear_nlink(inode);
		unlock_new_inode(inode);
		ext4_mark_inode_dirty(handle, inode);
		iput(inode);
		goto out_stop;
	}
	/* New subdir's ".." adds a link to the parent */
	ext4_inc_count(handle, dir);
	ext4_update_dx_flag(dir);
	err = ext4_mark_inode_dirty(handle, dir);
	if (err)
		goto out_clear_inode;
	unlock_new_inode(inode);
	d_instantiate(dentry, inode);
	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

out_stop:
	if (handle)
		ext4_journal_stop(handle);
	/* Retry: blocks may become free once the journal commits */
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}
2443 
2444 /*
2445  * routine to check that the specified directory is empty (for rmdir)
2446  */
static int empty_dir(struct inode *inode)
{
	unsigned int offset;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de, *de1;
	struct super_block *sb;
	int err = 0;

	if (ext4_has_inline_data(inode)) {
		int has_inline_data = 1;

		err = empty_inline_dir(inode, &has_inline_data);
		if (has_inline_data)
			return err;
	}

	sb = inode->i_sb;
	/* A directory must at least hold "." and ".." */
	if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) {
		EXT4_ERROR_INODE(inode, "invalid size");
		return 1;
	}
	/*
	 * NOTE(review): a read error is reported as "empty" (return 1),
	 * so an unreadable directory can pass the rmdir check — confirm
	 * this is the intended policy.
	 */
	bh = ext4_read_dirblock(inode, 0, EITHER);
	if (IS_ERR(bh))
		return 1;

	/* The first two entries must be "." (self) and ".." */
	de = (struct ext4_dir_entry_2 *) bh->b_data;
	de1 = ext4_next_entry(de, sb->s_blocksize);
	if (le32_to_cpu(de->inode) != inode->i_ino ||
			!le32_to_cpu(de1->inode) ||
			strcmp(".", de->name) ||
			strcmp("..", de1->name)) {
		ext4_warning(inode->i_sb,
			     "bad directory (dir #%lu) - no `.' or `..'",
			     inode->i_ino);
		brelse(bh);
		return 1;
	}
	offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) +
		 ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize);
	de = ext4_next_entry(de1, sb->s_blocksize);
	/* Scan the rest of the directory for any live entry */
	while (offset < inode->i_size) {
		if (!bh ||
		    (void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
			unsigned int lblock;
			err = 0;
			brelse(bh);
			lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
			bh = ext4_read_dirblock(inode, lblock, EITHER);
			if (IS_ERR(bh))
				return 1;
			de = (struct ext4_dir_entry_2 *) bh->b_data;
		}
		if (ext4_check_dir_entry(inode, NULL, de, bh,
					 bh->b_data, bh->b_size, offset)) {
			/* Corrupt entry: skip to the next block boundary */
			de = (struct ext4_dir_entry_2 *)(bh->b_data +
							 sb->s_blocksize);
			offset = (offset | (sb->s_blocksize - 1)) + 1;
			continue;
		}
		/* Non-zero inode number means a live entry: not empty */
		if (le32_to_cpu(de->inode)) {
			brelse(bh);
			return 0;
		}
		offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
		de = ext4_next_entry(de, sb->s_blocksize);
	}
	brelse(bh);
	return 1;
}
2516 
2517 /* ext4_orphan_add() links an unlinked or truncated inode into a list of
2518  * such inodes, starting at the superblock, in case we crash before the
2519  * file is closed/deleted, or in case the inode truncate spans multiple
2520  * transactions and the last transaction is not recovered after a crash.
2521  *
2522  * At filesystem recovery time, we walk this list deleting unlinked
2523  * inodes and truncating linked inodes in ext4_orphan_cleanup().
2524  */
int ext4_orphan_add(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_iloc iloc;
	int err = 0, rc;

	/* Orphan tracking only matters when a journal is present */
	if (!EXT4_SB(sb)->s_journal)
		return 0;

	mutex_lock(&EXT4_SB(sb)->s_orphan_lock);
	/* Already on the in-memory orphan list: nothing to do */
	if (!list_empty(&EXT4_I(inode)->i_orphan))
		goto out_unlock;

	/*
	 * Orphan handling is only valid for files with data blocks
	 * being truncated, or files being unlinked. Note that we either
	 * hold i_mutex, or the inode can not be referenced from outside,
	 * so i_nlink should not be bumped due to race
	 */
	J_ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		  S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
	if (err)
		goto out_unlock;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto out_unlock;
	/*
	 * Due to previous errors inode may be already a part of on-disk
	 * orphan list. If so skip on-disk list modification.
	 */
	if (NEXT_ORPHAN(inode) && NEXT_ORPHAN(inode) <=
		(le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)))
			goto mem_insert;

	/* Insert this inode at the head of the on-disk orphan list... */
	NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
	EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
	err = ext4_handle_dirty_super(handle, sb);
	rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
	/* Report the first failure of the two dirtying calls */
	if (!err)
		err = rc;

	/* Only add to the head of the in-memory list if all the
	 * previous operations succeeded.  If the orphan_add is going to
	 * fail (possibly taking the journal offline), we can't risk
	 * leaving the inode on the orphan list: stray orphan-list
	 * entries can cause panics at unmount time.
	 *
	 * This is safe: on error we're going to ignore the orphan list
	 * anyway on the next recovery. */
mem_insert:
	if (!err)
		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);

	jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
	jbd_debug(4, "orphan inode %lu will point to %d\n",
			inode->i_ino, NEXT_ORPHAN(inode));
out_unlock:
	mutex_unlock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
	ext4_std_error(inode->i_sb, err);
	return err;
}
2591 
2592 /*
2593  * ext4_orphan_del() removes an unlinked or truncated inode from the list
2594  * of such inodes stored on disk, because it is finally being cleaned up.
2595  */
int ext4_orphan_del(handle_t *handle, struct inode *inode)
{
	struct list_head *prev;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi;
	__u32 ino_next;
	struct ext4_iloc iloc;
	int err = 0;

	/* Nothing to do without a journal, except during orphan recovery */
	if ((!EXT4_SB(inode->i_sb)->s_journal) &&
	    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS))
		return 0;

	mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
	if (list_empty(&ei->i_orphan))
		goto out;

	/* Remember our successor and predecessor before unlinking */
	ino_next = NEXT_ORPHAN(inode);
	prev = ei->i_orphan.prev;
	sbi = EXT4_SB(inode->i_sb);

	jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);

	list_del_init(&ei->i_orphan);

	/* If we're on an error path, we may not have a valid
	 * transaction handle with which to update the orphan list on
	 * disk, but we still need to remove the inode from the linked
	 * list in memory. */
	if (!handle)
		goto out;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto out_err;

	if (prev == &sbi->s_orphan) {
		/* We were the list head: superblock points at our successor */
		jbd_debug(4, "superblock will point to %u\n", ino_next);
		BUFFER_TRACE(sbi->s_sbh, "get_write_access");
		err = ext4_journal_get_write_access(handle, sbi->s_sbh);
		if (err)
			goto out_brelse;
		sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
		err = ext4_handle_dirty_super(handle, inode->i_sb);
	} else {
		/* Splice our predecessor's on-disk link past us */
		struct ext4_iloc iloc2;
		struct inode *i_prev =
			&list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode;

		jbd_debug(4, "orphan inode %lu will point to %u\n",
			  i_prev->i_ino, ino_next);
		err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
		if (err)
			goto out_brelse;
		NEXT_ORPHAN(i_prev) = ino_next;
		err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2);
	}
	if (err)
		goto out_brelse;
	NEXT_ORPHAN(inode) = 0;
	err = ext4_mark_iloc_dirty(handle, inode, &iloc);

out_err:
	ext4_std_error(inode->i_sb, err);
out:
	mutex_unlock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
	return err;

out_brelse:
	/* Release the buffer pinned by ext4_reserve_inode_write() */
	brelse(iloc.bh);
	goto out_err;
}
2668 
static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
{
	int retval;
	struct inode *inode;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de;
	handle_t *handle = NULL;

	/* Initialize quotas before so that eventual writes go in
	 * separate transaction */
	dquot_initialize(dir);
	dquot_initialize(dentry->d_inode);

	retval = -ENOENT;
	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
	if (!bh)
		goto end_rmdir;

	inode = dentry->d_inode;

	/* Dirent must point at the inode the dentry names (see rename) */
	retval = -EIO;
	if (le32_to_cpu(de->inode) != inode->i_ino)
		goto end_rmdir;

	retval = -ENOTEMPTY;
	if (!empty_dir(inode))
		goto end_rmdir;

	/* Start the transaction only after the cheap checks passed */
	handle = ext4_journal_start(dir, EXT4_HT_DIR,
				    EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		handle = NULL;
		goto end_rmdir;
	}

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	retval = ext4_delete_entry(handle, dir, de, bh);
	if (retval)
		goto end_rmdir;
	if (!EXT4_DIR_LINK_EMPTY(inode))
		ext4_warning(inode->i_sb,
			     "empty directory has too many links (%d)",
			     inode->i_nlink);
	inode->i_version++;
	clear_nlink(inode);
	/* There's no need to set i_disksize: the fact that i_nlink is
	 * zero will ensure that the right thing happens during any
	 * recovery. */
	inode->i_size = 0;
	/* Orphan-list the inode so recovery can finish the delete */
	ext4_orphan_add(handle, inode);
	inode->i_ctime = dir->i_ctime = dir->i_mtime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	/* The removed subdir's ".." no longer references the parent */
	ext4_dec_count(handle, dir);
	ext4_update_dx_flag(dir);
	ext4_mark_inode_dirty(handle, dir);

end_rmdir:
	brelse(bh);
	if (handle)
		ext4_journal_stop(handle);
	return retval;
}
2734 
static int ext4_unlink(struct inode *dir, struct dentry *dentry)
{
	int retval;
	struct inode *inode;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de;
	handle_t *handle = NULL;

	trace_ext4_unlink_enter(dir, dentry);
	/* Initialize quotas before so that eventual writes go
	 * in separate transaction */
	dquot_initialize(dir);
	dquot_initialize(dentry->d_inode);

	retval = -ENOENT;
	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
	if (!bh)
		goto end_unlink;

	inode = dentry->d_inode;

	/* Dirent must point at the inode the dentry names */
	retval = -EIO;
	if (le32_to_cpu(de->inode) != inode->i_ino)
		goto end_unlink;

	handle = ext4_journal_start(dir, EXT4_HT_DIR,
				    EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		handle = NULL;
		goto end_unlink;
	}

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	/* Repair a corrupted zero link count so the drop below works */
	if (!inode->i_nlink) {
		ext4_warning(inode->i_sb,
			     "Deleting nonexistent file (%lu), %d",
			     inode->i_ino, inode->i_nlink);
		set_nlink(inode, 1);
	}
	retval = ext4_delete_entry(handle, dir, de, bh);
	if (retval)
		goto end_unlink;
	dir->i_ctime = dir->i_mtime = ext4_current_time(dir);
	ext4_update_dx_flag(dir);
	ext4_mark_inode_dirty(handle, dir);
	drop_nlink(inode);
	/* Last link gone: orphan-list so recovery can reclaim blocks */
	if (!inode->i_nlink)
		ext4_orphan_add(handle, inode);
	inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	retval = 0;

end_unlink:
	brelse(bh);
	if (handle)
		ext4_journal_stop(handle);
	trace_ext4_unlink_exit(dentry, retval);
	return retval;
}
2797 
/*
 * Create a symlink.  Short targets (<= EXT4_N_BLOCKS * 4 bytes,
 * including the NUL) are stored directly in i_data ("fast symlink");
 * longer ones get a data block written via __page_symlink().
 */
static int ext4_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	handle_t *handle;
	struct inode *inode;
	int l, err, retries = 0;
	int credits;

	/* l includes the terminating NUL */
	l = strlen(symname)+1;
	if (l > dir->i_sb->s_blocksize)
		return -ENAMETOOLONG;

	dquot_initialize(dir);

	if (l > EXT4_N_BLOCKS * 4) {
		/*
		 * For non-fast symlinks, we just allocate inode and put it on
		 * orphan list in the first transaction => we need bitmap,
		 * group descriptor, sb, inode block, quota blocks, and
		 * possibly selinux xattr blocks.
		 */
		credits = 4 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
			  EXT4_XATTR_TRANS_BLOCKS;
	} else {
		/*
		 * Fast symlink. We have to add entry to directory
		 * (EXT4_DATA_TRANS_BLOCKS + EXT4_INDEX_EXTRA_TRANS_BLOCKS),
		 * allocate new inode (bitmap, group descriptor, inode block,
		 * quota blocks, sb is already counted in previous macros).
		 */
		credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
			  EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
			  EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb);
	}
retry:
	inode = ext4_new_inode_start_handle(dir, S_IFLNK|S_IRWXUGO,
					    &dentry->d_name, 0, NULL,
					    EXT4_HT_DIR, credits);
	handle = ext4_journal_current_handle();
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_stop;

	if (l > EXT4_N_BLOCKS * 4) {
		inode->i_op = &ext4_symlink_inode_operations;
		ext4_set_aops(inode);
		/*
		 * We cannot call page_symlink() with transaction started
		 * because it calls into ext4_write_begin() which can wait
		 * for transaction commit if we are running out of space
		 * and thus we deadlock. So we have to stop transaction now
		 * and restart it when symlink contents is written.
		 *
		 * To keep fs consistent in case of crash, we have to put inode
		 * to orphan list in the mean time.
		 */
		drop_nlink(inode);
		err = ext4_orphan_add(handle, inode);
		ext4_journal_stop(handle);
		if (err)
			goto err_drop_inode;
		err = __page_symlink(inode, symname, l, 1);
		if (err)
			goto err_drop_inode;
		/*
		 * Now inode is being linked into dir (EXT4_DATA_TRANS_BLOCKS
		 * + EXT4_INDEX_EXTRA_TRANS_BLOCKS), inode is also modified
		 */
		handle = ext4_journal_start(dir, EXT4_HT_DIR,
				EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
				EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto err_drop_inode;
		}
		/* Restore the link we dropped before the orphan add */
		set_nlink(inode, 1);
		err = ext4_orphan_del(handle, inode);
		if (err) {
			ext4_journal_stop(handle);
			clear_nlink(inode);
			goto err_drop_inode;
		}
	} else {
		/* clear the extent format for fast symlink */
		ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
		inode->i_op = &ext4_fast_symlink_inode_operations;
		memcpy((char *)&EXT4_I(inode)->i_data, symname, l);
		/* i_size excludes the NUL terminator */
		inode->i_size = l-1;
	}
	EXT4_I(inode)->i_disksize = inode->i_size;
	err = ext4_add_nondir(handle, dentry, inode);
	if (!err && IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

out_stop:
	if (handle)
		ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
err_drop_inode:
	unlock_new_inode(inode);
	iput(inode);
	return err;
}
2903 
/*
 * Create a hard link named @dentry in @dir to old_dentry's inode.
 */
static int ext4_link(struct dentry *old_dentry,
		     struct inode *dir, struct dentry *dentry)
{
	handle_t *handle;
	struct inode *inode = old_dentry->d_inode;
	int err, retries = 0;

	if (inode->i_nlink >= EXT4_LINK_MAX)
		return -EMLINK;

	dquot_initialize(dir);

retry:
	handle = ext4_journal_start(dir, EXT4_HT_DIR,
		(EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
		 EXT4_INDEX_EXTRA_TRANS_BLOCKS));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	inode->i_ctime = ext4_current_time(inode);
	/* Bump link count and take a reference for the new dentry */
	ext4_inc_count(handle, inode);
	ihold(inode);

	err = ext4_add_entry(handle, dentry, inode);
	if (!err) {
		ext4_mark_inode_dirty(handle, inode);
		d_instantiate(dentry, inode);
	} else {
		/* Undo the count bump and the reference taken above */
		drop_nlink(inode);
		iput(inode);
	}
	ext4_journal_stop(handle);
	/* Retry: blocks may become free once the journal commits */
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
}
2943 
2944 
2945 /*
2946  * Try to find buffer head where contains the parent block.
2947  * It should be the inode block if it is inlined or the 1st block
2948  * if it is a normal dir.
2949  */
2950 static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
2951 					struct inode *inode,
2952 					int *retval,
2953 					struct ext4_dir_entry_2 **parent_de,
2954 					int *inlined)
2955 {
2956 	struct buffer_head *bh;
2957 
2958 	if (!ext4_has_inline_data(inode)) {
2959 		bh = ext4_read_dirblock(inode, 0, EITHER);
2960 		if (IS_ERR(bh)) {
2961 			*retval = PTR_ERR(bh);
2962 			return NULL;
2963 		}
2964 		*parent_de = ext4_next_entry(
2965 					(struct ext4_dir_entry_2 *)bh->b_data,
2966 					inode->i_sb->s_blocksize);
2967 		return bh;
2968 	}
2969 
2970 	*inlined = 1;
2971 	return ext4_get_first_inline_block(inode, parent_de, retval);
2972 }
2973 
2974 /*
2975  * Anybody can rename anything with this: the permission checks are left to the
2976  * higher-level routines.
2977  */
2978 static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2979 		       struct inode *new_dir, struct dentry *new_dentry)
2980 {
2981 	handle_t *handle;
2982 	struct inode *old_inode, *new_inode;
2983 	struct buffer_head *old_bh, *new_bh, *dir_bh;
2984 	struct ext4_dir_entry_2 *old_de, *new_de;
2985 	int retval, force_da_alloc = 0;
2986 	int inlined = 0, new_inlined = 0;
2987 	struct ext4_dir_entry_2 *parent_de;
2988 
2989 	dquot_initialize(old_dir);
2990 	dquot_initialize(new_dir);
2991 
2992 	old_bh = new_bh = dir_bh = NULL;
2993 
2994 	/* Initialize quotas before so that eventual writes go
2995 	 * in separate transaction */
2996 	if (new_dentry->d_inode)
2997 		dquot_initialize(new_dentry->d_inode);
2998 	handle = ext4_journal_start(old_dir, EXT4_HT_DIR,
2999 		(2 * EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
3000 		 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2));
3001 	if (IS_ERR(handle))
3002 		return PTR_ERR(handle);
3003 
3004 	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
3005 		ext4_handle_sync(handle);
3006 
3007 	old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de, NULL);
3008 	/*
3009 	 *  Check for inode number is _not_ due to possible IO errors.
3010 	 *  We might rmdir the source, keep it as pwd of some process
3011 	 *  and merrily kill the link to whatever was created under the
3012 	 *  same name. Goodbye sticky bit ;-<
3013 	 */
3014 	old_inode = old_dentry->d_inode;
3015 	retval = -ENOENT;
3016 	if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino)
3017 		goto end_rename;
3018 
3019 	new_inode = new_dentry->d_inode;
3020 	new_bh = ext4_find_entry(new_dir, &new_dentry->d_name,
3021 				 &new_de, &new_inlined);
3022 	if (new_bh) {
3023 		if (!new_inode) {
3024 			brelse(new_bh);
3025 			new_bh = NULL;
3026 		}
3027 	}
3028 	if (S_ISDIR(old_inode->i_mode)) {
3029 		if (new_inode) {
3030 			retval = -ENOTEMPTY;
3031 			if (!empty_dir(new_inode))
3032 				goto end_rename;
3033 		}
3034 		retval = -EIO;
3035 		dir_bh = ext4_get_first_dir_block(handle, old_inode,
3036 						  &retval, &parent_de,
3037 						  &inlined);
3038 		if (!dir_bh)
3039 			goto end_rename;
3040 		if (le32_to_cpu(parent_de->inode) != old_dir->i_ino)
3041 			goto end_rename;
3042 		retval = -EMLINK;
3043 		if (!new_inode && new_dir != old_dir &&
3044 		    EXT4_DIR_LINK_MAX(new_dir))
3045 			goto end_rename;
3046 		BUFFER_TRACE(dir_bh, "get_write_access");
3047 		retval = ext4_journal_get_write_access(handle, dir_bh);
3048 		if (retval)
3049 			goto end_rename;
3050 	}
3051 	if (!new_bh) {
3052 		retval = ext4_add_entry(handle, new_dentry, old_inode);
3053 		if (retval)
3054 			goto end_rename;
3055 	} else {
3056 		BUFFER_TRACE(new_bh, "get write access");
3057 		retval = ext4_journal_get_write_access(handle, new_bh);
3058 		if (retval)
3059 			goto end_rename;
3060 		new_de->inode = cpu_to_le32(old_inode->i_ino);
3061 		if (EXT4_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
3062 					      EXT4_FEATURE_INCOMPAT_FILETYPE))
3063 			new_de->file_type = old_de->file_type;
3064 		new_dir->i_version++;
3065 		new_dir->i_ctime = new_dir->i_mtime =
3066 					ext4_current_time(new_dir);
3067 		ext4_mark_inode_dirty(handle, new_dir);
3068 		BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
3069 		if (!new_inlined) {
3070 			retval = ext4_handle_dirty_dirent_node(handle,
3071 							       new_dir, new_bh);
3072 			if (unlikely(retval)) {
3073 				ext4_std_error(new_dir->i_sb, retval);
3074 				goto end_rename;
3075 			}
3076 		}
3077 		brelse(new_bh);
3078 		new_bh = NULL;
3079 	}
3080 
3081 	/*
3082 	 * Like most other Unix systems, set the ctime for inodes on a
3083 	 * rename.
3084 	 */
3085 	old_inode->i_ctime = ext4_current_time(old_inode);
3086 	ext4_mark_inode_dirty(handle, old_inode);
3087 
3088 	/*
3089 	 * ok, that's it
3090 	 */
3091 	if (le32_to_cpu(old_de->inode) != old_inode->i_ino ||
3092 	    old_de->name_len != old_dentry->d_name.len ||
3093 	    strncmp(old_de->name, old_dentry->d_name.name, old_de->name_len) ||
3094 	    (retval = ext4_delete_entry(handle, old_dir,
3095 					old_de, old_bh)) == -ENOENT) {
3096 		/* old_de could have moved from under us during htree split, so
3097 		 * make sure that we are deleting the right entry.  We might
3098 		 * also be pointing to a stale entry in the unused part of
3099 		 * old_bh so just checking inum and the name isn't enough. */
3100 		struct buffer_head *old_bh2;
3101 		struct ext4_dir_entry_2 *old_de2;
3102 
3103 		old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name,
3104 					  &old_de2, NULL);
3105 		if (old_bh2) {
3106 			retval = ext4_delete_entry(handle, old_dir,
3107 						   old_de2, old_bh2);
3108 			brelse(old_bh2);
3109 		}
3110 	}
3111 	if (retval) {
3112 		ext4_warning(old_dir->i_sb,
3113 				"Deleting old file (%lu), %d, error=%d",
3114 				old_dir->i_ino, old_dir->i_nlink, retval);
3115 	}
3116 
3117 	if (new_inode) {
3118 		ext4_dec_count(handle, new_inode);
3119 		new_inode->i_ctime = ext4_current_time(new_inode);
3120 	}
3121 	old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir);
3122 	ext4_update_dx_flag(old_dir);
3123 	if (dir_bh) {
3124 		parent_de->inode = cpu_to_le32(new_dir->i_ino);
3125 		BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
3126 		if (!inlined) {
3127 			if (is_dx(old_inode)) {
3128 				retval = ext4_handle_dirty_dx_node(handle,
3129 								   old_inode,
3130 								   dir_bh);
3131 			} else {
3132 				retval = ext4_handle_dirty_dirent_node(handle,
3133 							old_inode, dir_bh);
3134 			}
3135 		} else {
3136 			retval = ext4_mark_inode_dirty(handle, old_inode);
3137 		}
3138 		if (retval) {
3139 			ext4_std_error(old_dir->i_sb, retval);
3140 			goto end_rename;
3141 		}
3142 		ext4_dec_count(handle, old_dir);
3143 		if (new_inode) {
3144 			/* checked empty_dir above, can't have another parent,
3145 			 * ext4_dec_count() won't work for many-linked dirs */
3146 			clear_nlink(new_inode);
3147 		} else {
3148 			ext4_inc_count(handle, new_dir);
3149 			ext4_update_dx_flag(new_dir);
3150 			ext4_mark_inode_dirty(handle, new_dir);
3151 		}
3152 	}
3153 	ext4_mark_inode_dirty(handle, old_dir);
3154 	if (new_inode) {
3155 		ext4_mark_inode_dirty(handle, new_inode);
3156 		if (!new_inode->i_nlink)
3157 			ext4_orphan_add(handle, new_inode);
3158 		if (!test_opt(new_dir->i_sb, NO_AUTO_DA_ALLOC))
3159 			force_da_alloc = 1;
3160 	}
3161 	retval = 0;
3162 
3163 end_rename:
3164 	brelse(dir_bh);
3165 	brelse(old_bh);
3166 	brelse(new_bh);
3167 	ext4_journal_stop(handle);
3168 	if (retval == 0 && force_da_alloc)
3169 		ext4_alloc_da_blocks(old_inode);
3170 	return retval;
3171 }
3172 
3173 /*
3174  * directories can handle most operations...
3175  */
3176 const struct inode_operations ext4_dir_inode_operations = {
3177 	.create		= ext4_create,
3178 	.lookup		= ext4_lookup,
3179 	.link		= ext4_link,
3180 	.unlink		= ext4_unlink,
3181 	.symlink	= ext4_symlink,
3182 	.mkdir		= ext4_mkdir,
3183 	.rmdir		= ext4_rmdir,
3184 	.mknod		= ext4_mknod,
3185 	.rename		= ext4_rename,
3186 	.setattr	= ext4_setattr,
3187 	.setxattr	= generic_setxattr,
3188 	.getxattr	= generic_getxattr,
3189 	.listxattr	= ext4_listxattr,
3190 	.removexattr	= generic_removexattr,
3191 	.get_acl	= ext4_get_acl,
3192 	.fiemap         = ext4_fiemap,
3193 };
3194 
3195 const struct inode_operations ext4_special_inode_operations = {
3196 	.setattr	= ext4_setattr,
3197 	.setxattr	= generic_setxattr,
3198 	.getxattr	= generic_getxattr,
3199 	.listxattr	= ext4_listxattr,
3200 	.removexattr	= generic_removexattr,
3201 	.get_acl	= ext4_get_acl,
3202 };
3203