xref: /openbmc/linux/fs/gfs2/inode.c (revision 383f01fb)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/posix_acl.h>
16 #include <linux/sort.h>
17 #include <linux/gfs2_ondisk.h>
18 #include <linux/crc32.h>
19 #include <linux/lm_interface.h>
20 #include <linux/security.h>
21 #include <linux/time.h>
22 
23 #include "gfs2.h"
24 #include "incore.h"
25 #include "acl.h"
26 #include "bmap.h"
27 #include "dir.h"
28 #include "eattr.h"
29 #include "glock.h"
30 #include "glops.h"
31 #include "inode.h"
32 #include "log.h"
33 #include "meta_io.h"
34 #include "ops_address.h"
35 #include "quota.h"
36 #include "rgrp.h"
37 #include "trans.h"
38 #include "util.h"
39 
40 struct gfs2_inum_range_host {
41 	u64 ir_start;
42 	u64 ir_length;
43 };
44 
45 static int iget_test(struct inode *inode, void *opaque)
46 {
47 	struct gfs2_inode *ip = GFS2_I(inode);
48 	u64 *no_addr = opaque;
49 
50 	if (ip->i_no_addr == *no_addr && test_bit(GIF_USER, &ip->i_flags))
51 		return 1;
52 
53 	return 0;
54 }
55 
56 static int iget_set(struct inode *inode, void *opaque)
57 {
58 	struct gfs2_inode *ip = GFS2_I(inode);
59 	u64 *no_addr = opaque;
60 
61 	inode->i_ino = (unsigned long)*no_addr;
62 	ip->i_no_addr = *no_addr;
63 	set_bit(GIF_USER, &ip->i_flags);
64 	return 0;
65 }
66 
67 struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
68 {
69 	unsigned long hash = (unsigned long)no_addr;
70 	return ilookup5(sb, hash, iget_test, &no_addr);
71 }
72 
73 static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
74 {
75 	unsigned long hash = (unsigned long)no_addr;
76 	return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
77 }
78 
79 struct gfs2_skip_data {
80 	u64	no_addr;
81 	int	skipped;
82 };
83 
84 static int iget_skip_test(struct inode *inode, void *opaque)
85 {
86 	struct gfs2_inode *ip = GFS2_I(inode);
87 	struct gfs2_skip_data *data = opaque;
88 
89 	if (ip->i_no_addr == data->no_addr && test_bit(GIF_USER, &ip->i_flags)) {
90 		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
91 			data->skipped = 1;
92 			return 0;
93 		}
94 		return 1;
95 	}
96 	return 0;
97 }
98 
99 static int iget_skip_set(struct inode *inode, void *opaque)
100 {
101 	struct gfs2_inode *ip = GFS2_I(inode);
102 	struct gfs2_skip_data *data = opaque;
103 
104 	if (data->skipped)
105 		return 1;
106 	inode->i_ino = (unsigned long)(data->no_addr);
107 	ip->i_no_addr = data->no_addr;
108 	set_bit(GIF_USER, &ip->i_flags);
109 	return 0;
110 }
111 
112 static struct inode *gfs2_iget_skip(struct super_block *sb,
113 				    u64 no_addr)
114 {
115 	struct gfs2_skip_data data;
116 	unsigned long hash = (unsigned long)no_addr;
117 
118 	data.no_addr = no_addr;
119 	data.skipped = 0;
120 	return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data);
121 }
122 
123 /**
124  * The GFS2 lookup code fills in the VFS inode contents based on information
125  * obtained from the directory entry inside gfs2_inode_lookup(). This has
126  * caused issues with the NFS code path, since its get_dentry routine doesn't
127  * have the relevant directory entry when gfs2_inode_lookup() is invoked.
128  * Part of the code inside gfs2_inode_lookup() needs to be moved around.
129  *
130  * Clean up I_LOCK and I_NEW as well.
131  **/
132 
133 void gfs2_set_iop(struct inode *inode)
134 {
135 	struct gfs2_sbd *sdp = GFS2_SB(inode);
136 	umode_t mode = inode->i_mode;
137 
138 	if (S_ISREG(mode)) {
139 		inode->i_op = &gfs2_file_iops;
140 		if (sdp->sd_args.ar_localflocks)
141 			inode->i_fop = &gfs2_file_fops_nolock;
142 		else
143 			inode->i_fop = &gfs2_file_fops;
144 	} else if (S_ISDIR(mode)) {
145 		inode->i_op = &gfs2_dir_iops;
146 		if (sdp->sd_args.ar_localflocks)
147 			inode->i_fop = &gfs2_dir_fops_nolock;
148 		else
149 			inode->i_fop = &gfs2_dir_fops;
150 	} else if (S_ISLNK(mode)) {
151 		inode->i_op = &gfs2_symlink_iops;
152 	} else {
153 		inode->i_op = &gfs2_file_iops;
154 		init_special_inode(inode, inode->i_mode, inode->i_rdev);
155 	}
156 
157 	unlock_new_inode(inode);
158 }
159 
160 /**
161  * gfs2_inode_lookup - Lookup an inode
162  * @sb: The super block
163  * @type: The type of the inode (DT_UNKNOWN if not known)
164  * @no_addr: The inode number (the disk address of the dinode)
165  * @skip_freeing: set this to avoid returning an inode which is currently being freed
166  *
167  * Returns: A VFS inode, or an error
168  */
169 
170 struct inode *gfs2_inode_lookup(struct super_block *sb,
171 				unsigned int type,
172 				u64 no_addr,
173 				u64 no_formal_ino, int skip_freeing)
174 {
175 	struct inode *inode;
176 	struct gfs2_inode *ip;
177 	struct gfs2_glock *io_gl;
178 	int error;
179 
180 	if (skip_freeing)
181 		inode = gfs2_iget_skip(sb, no_addr);
182 	else
183 		inode = gfs2_iget(sb, no_addr);
184 	ip = GFS2_I(inode);
185 
186 	if (!inode)
187 		return ERR_PTR(-ENOBUFS);
188 
189 	if (inode->i_state & I_NEW) {
190 		struct gfs2_sbd *sdp = GFS2_SB(inode);
191 		ip->i_no_formal_ino = no_formal_ino;
192 
193 		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
194 		if (unlikely(error))
195 			goto fail;
196 		ip->i_gl->gl_object = ip;
197 
198 		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
199 		if (unlikely(error))
200 			goto fail_put;
201 
202 		set_bit(GIF_INVALID, &ip->i_flags);
203 		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
204 		if (unlikely(error))
205 			goto fail_iopen;
206 		ip->i_iopen_gh.gh_gl->gl_object = ip;
207 
208 		gfs2_glock_put(io_gl);
209 
210 		if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
211 			goto gfs2_nfsbypass;
212 
213 		inode->i_mode = DT2IF(type);
214 
215 		/*
216 		 * We must read the inode in order to work out its type in
217 		 * this case. Note that this doesn't happen often as we normally
218 		 * know the type beforehand. This code path only occurs during
219 		 * unlinked inode recovery (where it is safe to do this glock,
220 		 * which is not true in the general case).
221 		 */
222 		if (type == DT_UNKNOWN) {
223 			struct gfs2_holder gh;
224 			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
225 			if (unlikely(error))
226 				goto fail_glock;
227 			/* Inode is now uptodate */
228 			gfs2_glock_dq_uninit(&gh);
229 		}
230 
231 		gfs2_set_iop(inode);
232 	}
233 
234 gfs2_nfsbypass:
235 	return inode;
236 fail_glock:
237 	gfs2_glock_dq(&ip->i_iopen_gh);
238 fail_iopen:
239 	gfs2_glock_put(io_gl);
240 fail_put:
241 	ip->i_gl->gl_object = NULL;
242 	gfs2_glock_put(ip->i_gl);
243 fail:
244 	iget_failed(inode);
245 	return ERR_PTR(error);
246 }
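
/*
 * A minimal usage sketch (illustrative only): obtaining the VFS inode for
 * a known block address, in the way the NFS export code has to.  The
 * helper name example_get_inode_by_addr() is hypothetical; no_formal_ino
 * is assumed to be supplied by the caller.
 */
static struct inode *example_get_inode_by_addr(struct super_block *sb,
					       u64 no_addr, u64 no_formal_ino)
{
	struct inode *inode;

	/* gfs2_ilookup() only finds inodes already in memory, no glocks */
	inode = gfs2_ilookup(sb, no_addr);
	if (inode)
		return inode;

	/* DT_UNKNOWN with a non-zero no_formal_ino makes the lookup above
	   read the dinode itself to work out the file type */
	return gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, no_formal_ino, 0);
}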
247 
248 static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
249 {
250 	const struct gfs2_dinode *str = buf;
251 	struct timespec atime;
252 	u16 height, depth;
253 
254 	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
255 		goto corrupt;
256 	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
257 	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
258 	ip->i_inode.i_rdev = 0;
259 	switch (ip->i_inode.i_mode & S_IFMT) {
260 	case S_IFBLK:
261 	case S_IFCHR:
262 		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
263 					   be32_to_cpu(str->di_minor));
264 		break;
265 	}
266 
267 	ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
268 	ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
269 	/*
270 	 * We will need to review setting the nlink count here in the
271 	 * light of the forthcoming ro bind mount work. This is a reminder
272 	 * to do that.
273 	 */
274 	ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
275 	ip->i_disksize = be64_to_cpu(str->di_size);
276 	i_size_write(&ip->i_inode, ip->i_disksize);
277 	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
278 	atime.tv_sec = be64_to_cpu(str->di_atime);
279 	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
280 	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
281 		ip->i_inode.i_atime = atime;
282 	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
283 	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
284 	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
285 	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);
286 
287 	ip->i_goal = be64_to_cpu(str->di_goal_meta);
288 	ip->i_generation = be64_to_cpu(str->di_generation);
289 
290 	ip->i_diskflags = be32_to_cpu(str->di_flags);
291 	gfs2_set_inode_flags(&ip->i_inode);
292 	height = be16_to_cpu(str->di_height);
293 	if (unlikely(height > GFS2_MAX_META_HEIGHT))
294 		goto corrupt;
295 	ip->i_height = (u8)height;
296 
297 	depth = be16_to_cpu(str->di_depth);
298 	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
299 		goto corrupt;
300 	ip->i_depth = (u8)depth;
301 	ip->i_entries = be32_to_cpu(str->di_entries);
302 
303 	ip->i_eattr = be64_to_cpu(str->di_eattr);
304 	if (S_ISREG(ip->i_inode.i_mode))
305 		gfs2_set_aops(&ip->i_inode);
306 
307 	return 0;
308 corrupt:
309 	if (gfs2_consist_inode(ip))
310 		gfs2_dinode_print(ip);
311 	return -EIO;
312 }
313 
314 /**
315  * gfs2_inode_refresh - Refresh the incore copy of the dinode
316  * @ip: The GFS2 inode
317  *
318  * Returns: errno
319  */
320 
321 int gfs2_inode_refresh(struct gfs2_inode *ip)
322 {
323 	struct buffer_head *dibh;
324 	int error;
325 
326 	error = gfs2_meta_inode_buffer(ip, &dibh);
327 	if (error)
328 		return error;
329 
330 	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
331 		brelse(dibh);
332 		return -EIO;
333 	}
334 
335 	error = gfs2_dinode_in(ip, dibh->b_data);
336 	brelse(dibh);
337 	clear_bit(GIF_INVALID, &ip->i_flags);
338 
339 	return error;
340 }
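
/*
 * A minimal sketch (illustrative only) of when a refresh is needed: an
 * in-core inode whose contents have been invalidated is re-read from its
 * dinode.  The helper name example_revalidate() is hypothetical, and the
 * caller is assumed to hold the inode glock, as gfs2_createi() does when
 * it calls gfs2_inode_refresh() below.
 */
static int example_revalidate(struct gfs2_inode *ip)
{
	if (!test_bit(GIF_INVALID, &ip->i_flags))
		return 0;
	return gfs2_inode_refresh(ip);
}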
341 
342 int gfs2_dinode_dealloc(struct gfs2_inode *ip)
343 {
344 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
345 	struct gfs2_alloc *al;
346 	struct gfs2_rgrpd *rgd;
347 	int error;
348 
349 	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
350 		if (gfs2_consist_inode(ip))
351 			gfs2_dinode_print(ip);
352 		return -EIO;
353 	}
354 
355 	al = gfs2_alloc_get(ip);
356 	if (!al)
357 		return -ENOMEM;
358 
359 	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
360 	if (error)
361 		goto out;
362 
363 	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
364 	if (error)
365 		goto out_qs;
366 
367 	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
368 	if (!rgd) {
369 		gfs2_consist_inode(ip);
370 		error = -EIO;
371 		goto out_rindex_relse;
372 	}
373 
374 	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
375 				   &al->al_rgd_gh);
376 	if (error)
377 		goto out_rindex_relse;
378 
379 	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
380 	if (error)
381 		goto out_rg_gunlock;
382 
383 	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
384 	set_bit(GLF_LFLUSH, &ip->i_gl->gl_flags);
385 
386 	gfs2_free_di(rgd, ip);
387 
388 	gfs2_trans_end(sdp);
389 	clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);
390 
391 out_rg_gunlock:
392 	gfs2_glock_dq_uninit(&al->al_rgd_gh);
393 out_rindex_relse:
394 	gfs2_glock_dq_uninit(&al->al_ri_gh);
395 out_qs:
396 	gfs2_quota_unhold(ip);
397 out:
398 	gfs2_alloc_put(ip);
399 	return error;
400 }
401 
402 /**
403  * gfs2_change_nlink - Change nlink count on inode
404  * @ip: The GFS2 inode
405  * @diff: The change in the nlink count required
406  *
407  * Returns: errno
408  */
409 int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
410 {
411 	struct buffer_head *dibh;
412 	u32 nlink;
413 	int error;
414 
415 	BUG_ON(diff != 1 && diff != -1);
416 	nlink = ip->i_inode.i_nlink + diff;
417 
418 	/* If we are reducing the nlink count, but the new value ends up being
419 	   bigger than the old one, we must have underflowed. */
420 	if (diff < 0 && nlink > ip->i_inode.i_nlink) {
421 		if (gfs2_consist_inode(ip))
422 			gfs2_dinode_print(ip);
423 		return -EIO;
424 	}
425 
426 	error = gfs2_meta_inode_buffer(ip, &dibh);
427 	if (error)
428 		return error;
429 
430 	if (diff > 0)
431 		inc_nlink(&ip->i_inode);
432 	else
433 		drop_nlink(&ip->i_inode);
434 
435 	ip->i_inode.i_ctime = CURRENT_TIME;
436 
437 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
438 	gfs2_dinode_out(ip, dibh->b_data);
439 	brelse(dibh);
440 	mark_inode_dirty(&ip->i_inode);
441 
442 	if (ip->i_inode.i_nlink == 0)
443 		gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */
444 
445 	return error;
446 }
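
/*
 * A minimal sketch (illustrative only): bumping the link count after a new
 * directory entry has been added for an existing inode, as a link(2)-style
 * path would.  The helper name example_add_link() is hypothetical; exclusive
 * glocks, quota/reservation state and an open transaction are assumed to
 * have been set up by the caller, as in link_dinode() below.
 */
static int example_add_link(struct gfs2_inode *dip, const struct qstr *name,
			    struct gfs2_inode *ip)
{
	int error = gfs2_dir_add(&dip->i_inode, name, ip,
				 IF2DT(ip->i_inode.i_mode));

	if (error)
		return error;
	return gfs2_change_nlink(ip, +1);	/* diff may only be +1 or -1 */
}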
447 
448 struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
449 {
450 	struct qstr qstr;
451 	struct inode *inode;
452 	gfs2_str2qstr(&qstr, name);
453 	inode = gfs2_lookupi(dip, &qstr, 1);
454 	/* gfs2_lookupi has inconsistent callers: VFS-related
455 	 * routines expect NULL for no entry found,
456 	 * gfs2_lookup_simple callers expect ENOENT
457 	 * and do not check for NULL.
458 	 */
459 	if (inode == NULL)
460 		return ERR_PTR(-ENOENT);
461 	else
462 		return inode;
463 }
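
/*
 * A minimal usage sketch (illustrative only): looking up a system file by
 * name.  Unlike gfs2_lookupi(), a missing entry comes back here as
 * ERR_PTR(-ENOENT) rather than NULL, so IS_ERR() is the only check needed.
 * The helper name example_check_sys_file() is hypothetical.
 */
static int example_check_sys_file(struct inode *master_dir, const char *name)
{
	struct inode *inode = gfs2_lookup_simple(master_dir, name);

	if (IS_ERR(inode))
		return PTR_ERR(inode);
	iput(inode);
	return 0;
}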
464 
465 
466 /**
467  * gfs2_lookupi - Look up a filename in a directory and return its inode
468  * @dir: The inode of the directory in which to look
469  * @name: The name of the inode to look for
470  * @is_root: If 1, ignore the caller's permissions
471  *
472  * The directory glock is taken here unless the caller already holds it.
473  * This can be called via the VFS filldir function when NFS is doing
474  * a readdirplus and the inode which it is intending to stat isn't
475  * already in cache. In this case we must not take the directory glock
476  * again, since the readdir call will have already taken that lock.
477  *
478  * Returns: errno
479  */
480 
481 struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
482 			   int is_root)
483 {
484 	struct super_block *sb = dir->i_sb;
485 	struct gfs2_inode *dip = GFS2_I(dir);
486 	struct gfs2_holder d_gh;
487 	int error = 0;
488 	struct inode *inode = NULL;
489 	int unlock = 0;
490 
491 	if (!name->len || name->len > GFS2_FNAMESIZE)
492 		return ERR_PTR(-ENAMETOOLONG);
493 
494 	if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
495 	    (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
496 	     dir == sb->s_root->d_inode)) {
497 		igrab(dir);
498 		return dir;
499 	}
500 
501 	if (gfs2_glock_is_locked_by_me(dip->i_gl) == NULL) {
502 		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
503 		if (error)
504 			return ERR_PTR(error);
505 		unlock = 1;
506 	}
507 
508 	if (!is_root) {
509 		error = gfs2_permission(dir, MAY_EXEC);
510 		if (error)
511 			goto out;
512 	}
513 
514 	inode = gfs2_dir_search(dir, name);
515 	if (IS_ERR(inode))
516 		error = PTR_ERR(inode);
517 out:
518 	if (unlock)
519 		gfs2_glock_dq_uninit(&d_gh);
520 	if (error == -ENOENT)
521 		return NULL;
522 	return inode ? inode : ERR_PTR(error);
523 }
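
/*
 * A minimal usage sketch (illustrative only): a caller of gfs2_lookupi()
 * has to cope with three outcomes: an inode, NULL for "no such entry", or
 * an error pointer.  The helper name example_lookup_name() is hypothetical.
 */
static struct inode *example_lookup_name(struct inode *dir, const char *name)
{
	struct qstr qstr;
	struct inode *inode;

	gfs2_str2qstr(&qstr, name);
	inode = gfs2_lookupi(dir, &qstr, 0);
	if (inode == NULL)
		return ERR_PTR(-ENOENT);	/* entry does not exist */
	return inode;				/* inode, or ERR_PTR(error) */
}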
524 
525 static void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
526 {
527 	const struct gfs2_inum_range *str = buf;
528 
529 	ir->ir_start = be64_to_cpu(str->ir_start);
530 	ir->ir_length = be64_to_cpu(str->ir_length);
531 }
532 
533 static void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
534 {
535 	struct gfs2_inum_range *str = buf;
536 
537 	str->ir_start = cpu_to_be64(ir->ir_start);
538 	str->ir_length = cpu_to_be64(ir->ir_length);
539 }
540 
541 static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
542 {
543 	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
544 	struct buffer_head *bh;
545 	struct gfs2_inum_range_host ir;
546 	int error;
547 
548 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
549 	if (error)
550 		return error;
551 	mutex_lock(&sdp->sd_inum_mutex);
552 
553 	error = gfs2_meta_inode_buffer(ip, &bh);
554 	if (error) {
555 		mutex_unlock(&sdp->sd_inum_mutex);
556 		gfs2_trans_end(sdp);
557 		return error;
558 	}
559 
560 	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));
561 
562 	if (ir.ir_length) {
563 		*formal_ino = ir.ir_start++;
564 		ir.ir_length--;
565 		gfs2_trans_add_bh(ip->i_gl, bh, 1);
566 		gfs2_inum_range_out(&ir,
567 				    bh->b_data + sizeof(struct gfs2_dinode));
568 		brelse(bh);
569 		mutex_unlock(&sdp->sd_inum_mutex);
570 		gfs2_trans_end(sdp);
571 		return 0;
572 	}
573 
574 	brelse(bh);
575 
576 	mutex_unlock(&sdp->sd_inum_mutex);
577 	gfs2_trans_end(sdp);
578 
579 	return 1;
580 }
581 
582 static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
583 {
584 	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
585 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
586 	struct gfs2_holder gh;
587 	struct buffer_head *bh;
588 	struct gfs2_inum_range_host ir;
589 	int error;
590 
591 	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
592 	if (error)
593 		return error;
594 
595 	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
596 	if (error)
597 		goto out;
598 	mutex_lock(&sdp->sd_inum_mutex);
599 
600 	error = gfs2_meta_inode_buffer(ip, &bh);
601 	if (error)
602 		goto out_end_trans;
603 
604 	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));
605 
606 	if (!ir.ir_length) {
607 		struct buffer_head *m_bh;
608 		u64 x, y;
609 		__be64 z;
610 
611 		error = gfs2_meta_inode_buffer(m_ip, &m_bh);
612 		if (error)
613 			goto out_brelse;
614 
615 		z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
616 		x = y = be64_to_cpu(z);
617 		ir.ir_start = x;
618 		ir.ir_length = GFS2_INUM_QUANTUM;
619 		x += GFS2_INUM_QUANTUM;
620 		if (x < y)
621 			gfs2_consist_inode(m_ip);
622 		z = cpu_to_be64(x);
623 		gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
624 		*(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;
625 
626 		brelse(m_bh);
627 	}
628 
629 	*formal_ino = ir.ir_start++;
630 	ir.ir_length--;
631 
632 	gfs2_trans_add_bh(ip->i_gl, bh, 1);
633 	gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));
634 
635 out_brelse:
636 	brelse(bh);
637 out_end_trans:
638 	mutex_unlock(&sdp->sd_inum_mutex);
639 	gfs2_trans_end(sdp);
640 out:
641 	gfs2_glock_dq_uninit(&gh);
642 	return error;
643 }
644 
645 static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
646 {
647 	int error;
648 
649 	error = pick_formal_ino_1(sdp, inum);
650 	if (error <= 0)
651 		return error;
652 
653 	error = pick_formal_ino_2(sdp, inum);
654 
655 	return error;
656 }
657 
658 /**
659  * create_ok - OK to create a new on-disk inode here?
660  * @dip:  Directory in which dinode is to be created
661  * @name:  Name of new dinode
662  * @mode:
663  *
664  * Returns: errno
665  */
666 
667 static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
668 		     unsigned int mode)
669 {
670 	int error;
671 
672 	error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
673 	if (error)
674 		return error;
675 
676 	/*  Don't create entries in an unlinked directory  */
677 	if (!dip->i_inode.i_nlink)
678 		return -EPERM;
679 
680 	error = gfs2_dir_check(&dip->i_inode, name, NULL);
681 	switch (error) {
682 	case -ENOENT:
683 		error = 0;
684 		break;
685 	case 0:
686 		return -EEXIST;
687 	default:
688 		return error;
689 	}
690 
691 	if (dip->i_entries == (u32)-1)
692 		return -EFBIG;
693 	if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
694 		return -EMLINK;
695 
696 	return 0;
697 }
698 
699 static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
700 			       unsigned int *uid, unsigned int *gid)
701 {
702 	if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
703 	    (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
704 		if (S_ISDIR(*mode))
705 			*mode |= S_ISUID;
706 		else if (dip->i_inode.i_uid != current_fsuid())
707 			*mode &= ~07111;
708 		*uid = dip->i_inode.i_uid;
709 	} else
710 		*uid = current_fsuid();
711 
712 	if (dip->i_inode.i_mode & S_ISGID) {
713 		if (S_ISDIR(*mode))
714 			*mode |= S_ISGID;
715 		*gid = dip->i_inode.i_gid;
716 	} else
717 		*gid = current_fsgid();
718 }
719 
720 static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
721 {
722 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
723 	int error;
724 
725 	if (gfs2_alloc_get(dip) == NULL)
726 		return -ENOMEM;
727 
728 	dip->i_alloc->al_requested = RES_DINODE;
729 	error = gfs2_inplace_reserve(dip);
730 	if (error)
731 		goto out;
732 
733 	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
734 	if (error)
735 		goto out_ipreserv;
736 
737 	*no_addr = gfs2_alloc_di(dip, generation);
738 
739 	gfs2_trans_end(sdp);
740 
741 out_ipreserv:
742 	gfs2_inplace_release(dip);
743 out:
744 	gfs2_alloc_put(dip);
745 	return error;
746 }
747 
748 /**
749  * init_dinode - Fill in a new dinode structure
750  * @dip: the directory this inode is being created in
751  * @gl: The glock covering the new inode
752  * @inum: the inode number
753  * @mode: the file permissions
754  * @uid: The uid of the new dinode
755  * @gid: The gid of the new dinode
756  *
757  */
758 
759 static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
760 			const struct gfs2_inum_host *inum, unsigned int mode,
761 			unsigned int uid, unsigned int gid,
762 			const u64 *generation, dev_t dev, struct buffer_head **bhp)
763 {
764 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
765 	struct gfs2_dinode *di;
766 	struct buffer_head *dibh;
767 	struct timespec tv = CURRENT_TIME;
768 
769 	dibh = gfs2_meta_new(gl, inum->no_addr);
770 	gfs2_trans_add_bh(gl, dibh, 1);
771 	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
772 	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
773 	di = (struct gfs2_dinode *)dibh->b_data;
774 
775 	di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
776 	di->di_num.no_addr = cpu_to_be64(inum->no_addr);
777 	di->di_mode = cpu_to_be32(mode);
778 	di->di_uid = cpu_to_be32(uid);
779 	di->di_gid = cpu_to_be32(gid);
780 	di->di_nlink = 0;
781 	di->di_size = 0;
782 	di->di_blocks = cpu_to_be64(1);
783 	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec);
784 	di->di_major = cpu_to_be32(MAJOR(dev));
785 	di->di_minor = cpu_to_be32(MINOR(dev));
786 	di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
787 	di->di_generation = cpu_to_be64(*generation);
788 	di->di_flags = 0;
789 
790 	if (S_ISREG(mode)) {
791 		if ((dip->i_diskflags & GFS2_DIF_INHERIT_JDATA) ||
792 		    gfs2_tune_get(sdp, gt_new_files_jdata))
793 			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
794 	} else if (S_ISDIR(mode)) {
795 		di->di_flags |= cpu_to_be32(dip->i_diskflags &
796 					    GFS2_DIF_INHERIT_JDATA);
797 	}
798 
799 	di->__pad1 = 0;
800 	di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
801 	di->di_height = 0;
802 	di->__pad2 = 0;
803 	di->__pad3 = 0;
804 	di->di_depth = 0;
805 	di->di_entries = 0;
806 	memset(&di->__pad4, 0, sizeof(di->__pad4));
807 	di->di_eattr = 0;
808 	di->di_atime_nsec = cpu_to_be32(tv.tv_nsec);
809 	di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
810 	di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
811 	memset(&di->di_reserved, 0, sizeof(di->di_reserved));
812 
813 	set_buffer_uptodate(dibh);
814 
815 	*bhp = dibh;
816 }
817 
818 static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
819 		       unsigned int mode, const struct gfs2_inum_host *inum,
820 		       const u64 *generation, dev_t dev, struct buffer_head **bhp)
821 {
822 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
823 	unsigned int uid, gid;
824 	int error;
825 
826 	munge_mode_uid_gid(dip, &mode, &uid, &gid);
827 	if (!gfs2_alloc_get(dip))
828 		return -ENOMEM;
829 
830 	error = gfs2_quota_lock(dip, uid, gid);
831 	if (error)
832 		goto out;
833 
834 	error = gfs2_quota_check(dip, uid, gid);
835 	if (error)
836 		goto out_quota;
837 
838 	error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
839 	if (error)
840 		goto out_quota;
841 
842 	init_dinode(dip, gl, inum, mode, uid, gid, generation, dev, bhp);
843 	gfs2_quota_change(dip, +1, uid, gid);
844 	gfs2_trans_end(sdp);
845 
846 out_quota:
847 	gfs2_quota_unlock(dip);
848 out:
849 	gfs2_alloc_put(dip);
850 	return error;
851 }
852 
853 static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
854 		       struct gfs2_inode *ip)
855 {
856 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
857 	struct gfs2_alloc *al;
858 	int alloc_required;
859 	struct buffer_head *dibh;
860 	int error;
861 
862 	al = gfs2_alloc_get(dip);
863 	if (!al)
864 		return -ENOMEM;
865 
866 	error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
867 	if (error)
868 		goto fail;
869 
870 	error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
871 	if (alloc_required < 0)
872 		goto fail_quota_locks;
873 	if (alloc_required) {
874 		error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
875 		if (error)
876 			goto fail_quota_locks;
877 
878 		al->al_requested = sdp->sd_max_dirres;
879 
880 		error = gfs2_inplace_reserve(dip);
881 		if (error)
882 			goto fail_quota_locks;
883 
884 		error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
885 					 al->al_rgd->rd_length +
886 					 2 * RES_DINODE +
887 					 RES_STATFS + RES_QUOTA, 0);
888 		if (error)
889 			goto fail_ipreserv;
890 	} else {
891 		error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
892 		if (error)
893 			goto fail_quota_locks;
894 	}
895 
896 	error = gfs2_dir_add(&dip->i_inode, name, ip, IF2DT(ip->i_inode.i_mode));
897 	if (error)
898 		goto fail_end_trans;
899 
900 	error = gfs2_meta_inode_buffer(ip, &dibh);
901 	if (error)
902 		goto fail_end_trans;
903 	ip->i_inode.i_nlink = 1;
904 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
905 	gfs2_dinode_out(ip, dibh->b_data);
906 	brelse(dibh);
907 	return 0;
908 
909 fail_end_trans:
910 	gfs2_trans_end(sdp);
911 
912 fail_ipreserv:
913 	if (dip->i_alloc->al_rgd)
914 		gfs2_inplace_release(dip);
915 
916 fail_quota_locks:
917 	gfs2_quota_unlock(dip);
918 
919 fail:
920 	gfs2_alloc_put(dip);
921 	return error;
922 }
923 
924 static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
925 {
926 	int err;
927 	size_t len;
928 	void *value;
929 	char *name;
930 	struct gfs2_ea_request er;
931 
932 	err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
933 					   &name, &value, &len);
934 
935 	if (err) {
936 		if (err == -EOPNOTSUPP)
937 			return 0;
938 		return err;
939 	}
940 
941 	memset(&er, 0, sizeof(struct gfs2_ea_request));
942 
943 	er.er_type = GFS2_EATYPE_SECURITY;
944 	er.er_name = name;
945 	er.er_data = value;
946 	er.er_name_len = strlen(name);
947 	er.er_data_len = len;
948 
949 	err = gfs2_ea_set_i(ip, &er);
950 
951 	kfree(value);
952 	kfree(name);
953 
954 	return err;
955 }
956 
957 /**
958  * gfs2_createi - Create a new inode
959  * @ghs: An array of two holders
960  * @name: The name of the new file
961  * @mode: the permissions on the new inode
962  *
963  * @ghs[0] is an initialized holder for the directory
964  * @ghs[1] is the holder for the inode lock
965  *
966  * On success, the glocks on both the directory and the new
967  * file are held.  A transaction has been started and an inplace reservation
968  * is held, as well.
969  *
970  * Returns: An inode
971  */
972 
973 struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
974 			   unsigned int mode, dev_t dev)
975 {
976 	struct inode *inode = NULL;
977 	struct gfs2_inode *dip = ghs->gh_gl->gl_object;
978 	struct inode *dir = &dip->i_inode;
979 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
980 	struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
981 	int error;
982 	u64 generation;
983 	struct buffer_head *bh = NULL;
984 
985 	if (!name->len || name->len > GFS2_FNAMESIZE)
986 		return ERR_PTR(-ENAMETOOLONG);
987 
988 	gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
989 	error = gfs2_glock_nq(ghs);
990 	if (error)
991 		goto fail;
992 
993 	error = create_ok(dip, name, mode);
994 	if (error)
995 		goto fail_gunlock;
996 
997 	error = pick_formal_ino(sdp, &inum.no_formal_ino);
998 	if (error)
999 		goto fail_gunlock;
1000 
1001 	error = alloc_dinode(dip, &inum.no_addr, &generation);
1002 	if (error)
1003 		goto fail_gunlock;
1004 
1005 	error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
1006 				  LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
1007 	if (error)
1008 		goto fail_gunlock;
1009 
1010 	error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev, &bh);
1011 	if (error)
1012 		goto fail_gunlock2;
1013 
1014 	inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode),
1015 					inum.no_addr,
1016 					inum.no_formal_ino, 0);
1017 	if (IS_ERR(inode))
1018 		goto fail_gunlock2;
1019 
1020 	error = gfs2_inode_refresh(GFS2_I(inode));
1021 	if (error)
1022 		goto fail_gunlock2;
1023 
1024 	error = gfs2_acl_create(dip, GFS2_I(inode));
1025 	if (error)
1026 		goto fail_gunlock2;
1027 
1028 	error = gfs2_security_init(dip, GFS2_I(inode));
1029 	if (error)
1030 		goto fail_gunlock2;
1031 
1032 	error = link_dinode(dip, name, GFS2_I(inode));
1033 	if (error)
1034 		goto fail_gunlock2;
1035 
1036 	if (bh)
1037 		brelse(bh);
1038 	return inode;
1039 
1040 fail_gunlock2:
1041 	gfs2_glock_dq_uninit(ghs + 1);
1042 	if (inode && !IS_ERR(inode))
1043 		iput(inode);
1044 fail_gunlock:
1045 	gfs2_glock_dq(ghs);
1046 fail:
1047 	if (bh)
1048 		brelse(bh);
1049 	return ERR_PTR(error);
1050 }
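
/*
 * A minimal sketch (illustrative only) of the shape of a create-type
 * caller.  On success gfs2_createi() leaves the transaction, the inplace
 * reservation and the quota/alloc state for the caller to release, as the
 * comment above says.  The helper name example_create() is hypothetical
 * and the retry handling for an -EEXIST race is omitted.
 */
static struct inode *example_create(struct inode *dir, const struct qstr *name,
				    unsigned int mode)
{
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_sbd *sdp = GFS2_SB(dir);
	struct gfs2_holder ghs[2];
	struct inode *inode;

	/* gfs2_createi() re-initializes ghs[0] to LM_ST_EXCLUSIVE itself */
	gfs2_holder_init(dip->i_gl, 0, 0, ghs);

	inode = gfs2_createi(ghs, name, mode, 0);
	if (IS_ERR(inode)) {
		gfs2_holder_uninit(ghs);
		return inode;
	}

	gfs2_trans_end(sdp);
	if (dip->i_alloc->al_rgd)
		gfs2_inplace_release(dip);
	gfs2_quota_unlock(dip);
	gfs2_alloc_put(dip);
	gfs2_glock_dq_uninit(ghs + 1);
	gfs2_glock_dq_uninit(ghs);
	mark_inode_dirty(inode);
	return inode;
}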
1051 
1052 /**
1053  * gfs2_rmdiri - Remove a directory
1054  * @dip: The parent directory of the directory to be removed
1055  * @name: The name of the directory to be removed
1056  * @ip: The GFS2 inode of the directory to be removed
1057  *
1058  * Assumes Glocks on dip and ip are held
1059  *
1060  * Returns: errno
1061  */
1062 
1063 int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
1064 		struct gfs2_inode *ip)
1065 {
1066 	struct qstr dotname;
1067 	int error;
1068 
1069 	if (ip->i_entries != 2) {
1070 		if (gfs2_consist_inode(ip))
1071 			gfs2_dinode_print(ip);
1072 		return -EIO;
1073 	}
1074 
1075 	error = gfs2_dir_del(dip, name);
1076 	if (error)
1077 		return error;
1078 
1079 	error = gfs2_change_nlink(dip, -1);
1080 	if (error)
1081 		return error;
1082 
1083 	gfs2_str2qstr(&dotname, ".");
1084 	error = gfs2_dir_del(ip, &dotname);
1085 	if (error)
1086 		return error;
1087 
1088 	gfs2_str2qstr(&dotname, "..");
1089 	error = gfs2_dir_del(ip, &dotname);
1090 	if (error)
1091 		return error;
1092 
1093 	/* It looks odd, but it really should be done twice */
1094 	error = gfs2_change_nlink(ip, -1);
1095 	if (error)
1096 		return error;
1097 
1098 	error = gfs2_change_nlink(ip, -1);
1099 	if (error)
1100 		return error;
1101 
1102 	return error;
1103 }
1104 
1105 /*
1106  * gfs2_unlink_ok - check to see that an inode is still in a directory
1107  * @dip: the directory
1108  * @name: the name of the file
1109  * @ip: the inode
1110  *
1111  * Assumes that the lock on (at least) @dip is held.
1112  *
1113  * Returns: 0 if the parent/child relationship is correct, errno if it isn't
1114  */
1115 
1116 int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
1117 		   const struct gfs2_inode *ip)
1118 {
1119 	int error;
1120 
1121 	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
1122 		return -EPERM;
1123 
1124 	if ((dip->i_inode.i_mode & S_ISVTX) &&
1125 	    dip->i_inode.i_uid != current_fsuid() &&
1126 	    ip->i_inode.i_uid != current_fsuid() && !capable(CAP_FOWNER))
1127 		return -EPERM;
1128 
1129 	if (IS_APPEND(&dip->i_inode))
1130 		return -EPERM;
1131 
1132 	error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
1133 	if (error)
1134 		return error;
1135 
1136 	error = gfs2_dir_check(&dip->i_inode, name, ip);
1137 	if (error)
1138 		return error;
1139 
1140 	return 0;
1141 }
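
/*
 * A minimal sketch (illustrative only): the check above is meant to run
 * before the directory entry is removed, with glocks on both inodes held
 * and a transaction open, as the real unlink path arranges.  The helper
 * name example_unlink_entry() is hypothetical.
 */
static int example_unlink_entry(struct gfs2_inode *dip, const struct qstr *name,
				struct gfs2_inode *ip)
{
	int error = gfs2_unlink_ok(dip, name, ip);

	if (error)
		return error;
	/* Only now is it safe to remove the entry; the link count drop
	   then follows, as in gfs2_rmdiri() above. */
	return gfs2_dir_del(dip, name);
}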
1142 
1143 /**
1144  * gfs2_readlinki - return the contents of a symlink
1145  * @ip: the symlink's inode
1146  * @buf: a pointer to the buffer to be filled
1147  * @len: a pointer to the length of @buf
1148  *
1149  * If @buf is too small, a piece of memory is kmalloc()ed and needs
1150  * to be freed by the caller.
1151  *
1152  * Returns: errno
1153  */
1154 
1155 int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
1156 {
1157 	struct gfs2_holder i_gh;
1158 	struct buffer_head *dibh;
1159 	unsigned int x;
1160 	int error;
1161 
1162 	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
1163 	error = gfs2_glock_nq(&i_gh);
1164 	if (error) {
1165 		gfs2_holder_uninit(&i_gh);
1166 		return error;
1167 	}
1168 
1169 	if (!ip->i_disksize) {
1170 		gfs2_consist_inode(ip);
1171 		error = -EIO;
1172 		goto out;
1173 	}
1174 
1175 	error = gfs2_meta_inode_buffer(ip, &dibh);
1176 	if (error)
1177 		goto out;
1178 
1179 	x = ip->i_disksize + 1;
1180 	if (x > *len) {
1181 		*buf = kmalloc(x, GFP_NOFS);
1182 		if (!*buf) {
1183 			error = -ENOMEM;
1184 			goto out_brelse;
1185 		}
1186 	}
1187 
1188 	memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
1189 	*len = x;
1190 
1191 out_brelse:
1192 	brelse(dibh);
1193 out:
1194 	gfs2_glock_dq_uninit(&i_gh);
1195 	return error;
1196 }
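
/*
 * A minimal usage sketch (illustrative only) of a readlink-style caller.
 * The buffer starts out on the stack; if the symlink target is longer,
 * gfs2_readlinki() replaces it with a kmalloc()ed buffer which the caller
 * must free, as noted above.  The helper name example_print_link() and the
 * buffer size are hypothetical.
 */
static int example_print_link(struct gfs2_inode *ip)
{
	char array[64];
	char *buf = array;
	unsigned int len = sizeof(array);
	int error;

	error = gfs2_readlinki(ip, &buf, &len);
	if (error)
		return error;

	printk(KERN_INFO "link target = %s\n", buf);

	if (buf != array)
		kfree(buf);	/* only the grown buffer was allocated */
	return 0;
}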
1197 
1198 static int
1199 __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1200 {
1201 	struct buffer_head *dibh;
1202 	int error;
1203 
1204 	error = gfs2_meta_inode_buffer(ip, &dibh);
1205 	if (!error) {
1206 		error = inode_setattr(&ip->i_inode, attr);
1207 		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1208 		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1209 		gfs2_dinode_out(ip, dibh->b_data);
1210 		brelse(dibh);
1211 	}
1212 	return error;
1213 }
1214 
1215 /**
1216  * gfs2_setattr_simple - Change attributes of an inode
1217  * @ip: The GFS2 inode
1218  * @attr: The attributes to change
1219  *
1220  * Called with a reference on the vnode.
1221  *
1222  * Returns: errno
1223  */
1224 
1225 int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1226 {
1227 	int error;
1228 
1229 	if (current->journal_info)
1230 		return __gfs2_setattr_simple(ip, attr);
1231 
1232 	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
1233 	if (error)
1234 		return error;
1235 
1236 	error = __gfs2_setattr_simple(ip, attr);
1237 	gfs2_trans_end(GFS2_SB(&ip->i_inode));
1238 	return error;
1239 }
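
/*
 * A minimal usage sketch (illustrative only): updating the permission bits
 * through an iattr.  The helper name example_chmod() is hypothetical; an
 * exclusive glock on @ip is assumed to be held by the caller.
 */
static int example_chmod(struct gfs2_inode *ip, umode_t mode)
{
	struct iattr attr;

	attr.ia_valid = ATTR_MODE;
	attr.ia_mode = mode;
	/* A RES_DINODE transaction is started here unless one is active */
	return gfs2_setattr_simple(ip, &attr);
}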
1240 
1241 void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
1242 {
1243 	struct gfs2_dinode *str = buf;
1244 
1245 	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
1246 	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
1247 	str->di_header.__pad0 = 0;
1248 	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
1249 	str->di_header.__pad1 = 0;
1250 	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
1251 	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
1252 	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
1253 	str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
1254 	str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
1255 	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
1256 	str->di_size = cpu_to_be64(ip->i_disksize);
1257 	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
1258 	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
1259 	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
1260 	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);
1261 
1262 	str->di_goal_meta = cpu_to_be64(ip->i_goal);
1263 	str->di_goal_data = cpu_to_be64(ip->i_goal);
1264 	str->di_generation = cpu_to_be64(ip->i_generation);
1265 
1266 	str->di_flags = cpu_to_be32(ip->i_diskflags);
1267 	str->di_height = cpu_to_be16(ip->i_height);
1268 	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
1269 					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
1270 					     GFS2_FORMAT_DE : 0);
1271 	str->di_depth = cpu_to_be16(ip->i_depth);
1272 	str->di_entries = cpu_to_be32(ip->i_entries);
1273 
1274 	str->di_eattr = cpu_to_be64(ip->i_eattr);
1275 	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
1276 	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
1277 	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
1278 }
1279 
1280 void gfs2_dinode_print(const struct gfs2_inode *ip)
1281 {
1282 	printk(KERN_INFO "  no_formal_ino = %llu\n",
1283 	       (unsigned long long)ip->i_no_formal_ino);
1284 	printk(KERN_INFO "  no_addr = %llu\n",
1285 	       (unsigned long long)ip->i_no_addr);
1286 	printk(KERN_INFO "  i_disksize = %llu\n",
1287 	       (unsigned long long)ip->i_disksize);
1288 	printk(KERN_INFO "  blocks = %llu\n",
1289 	       (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode));
1290 	printk(KERN_INFO "  i_goal = %llu\n",
1291 	       (unsigned long long)ip->i_goal);
1292 	printk(KERN_INFO "  i_diskflags = 0x%.8X\n", ip->i_diskflags);
1293 	printk(KERN_INFO "  i_height = %u\n", ip->i_height);
1294 	printk(KERN_INFO "  i_depth = %u\n", ip->i_depth);
1295 	printk(KERN_INFO "  i_entries = %u\n", ip->i_entries);
1296 	printk(KERN_INFO "  i_eattr = %llu\n",
1297 	       (unsigned long long)ip->i_eattr);
1298 }
1299 
1300