xref: /openbmc/linux/fs/gfs2/inode.c (revision 35dcc52e)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/posix_acl.h>
16 #include <linux/sort.h>
17 #include <linux/gfs2_ondisk.h>
18 #include <linux/crc32.h>
19 #include <linux/lm_interface.h>
20 #include <linux/security.h>
21 
22 #include "gfs2.h"
23 #include "incore.h"
24 #include "acl.h"
25 #include "bmap.h"
26 #include "dir.h"
27 #include "eattr.h"
28 #include "glock.h"
29 #include "glops.h"
30 #include "inode.h"
31 #include "log.h"
32 #include "meta_io.h"
33 #include "ops_address.h"
34 #include "ops_file.h"
35 #include "ops_inode.h"
36 #include "quota.h"
37 #include "rgrp.h"
38 #include "trans.h"
39 #include "util.h"
40 
41 struct gfs2_inum_range_host {
42 	u64 ir_start;
43 	u64 ir_length;
44 };
45 
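/*
 * The inode hash value is the (possibly truncated) 64-bit disk address,
 * so iget_test() compares the full i_no_addr to tell apart dinodes whose
 * addresses collide in an unsigned long.  It also insists on i_private
 * being set, so inodes that are still being constructed are not returned
 * by gfs2_ilookup().
 */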
46 static int iget_test(struct inode *inode, void *opaque)
47 {
48 	struct gfs2_inode *ip = GFS2_I(inode);
49 	u64 *no_addr = opaque;
50 
51 	if (ip->i_no_addr == *no_addr &&
52 	    inode->i_private != NULL)
53 		return 1;
54 
55 	return 0;
56 }
57 
58 static int iget_set(struct inode *inode, void *opaque)
59 {
60 	struct gfs2_inode *ip = GFS2_I(inode);
61 	u64 *no_addr = opaque;
62 
63 	inode->i_ino = (unsigned long)*no_addr;
64 	ip->i_no_addr = *no_addr;
65 	return 0;
66 }
67 
68 struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
69 {
70 	unsigned long hash = (unsigned long)no_addr;
71 	return ilookup5(sb, hash, iget_test, &no_addr);
72 }
73 
74 static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
75 {
76 	unsigned long hash = (unsigned long)no_addr;
77 	return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
78 }
79 
80 /**
81  * The GFS2 lookup code fills in VFS inode contents based on information
82  * obtained from the directory entry inside gfs2_inode_lookup(). This has
83  * caused issues with the NFS code path, since its get_dentry routine does
84  * not have the relevant directory entry when gfs2_inode_lookup() is invoked.
85  * Part of the code inside gfs2_inode_lookup() needs to be reorganized.
86  *
87  * Clean up I_LOCK and I_NEW as well.
88  **/
89 
90 void gfs2_set_iop(struct inode *inode)
91 {
92 	umode_t mode = inode->i_mode;
93 
94 	if (S_ISREG(mode)) {
95 		inode->i_op = &gfs2_file_iops;
96 		inode->i_fop = &gfs2_file_fops;
97 		inode->i_mapping->a_ops = &gfs2_file_aops;
98 	} else if (S_ISDIR(mode)) {
99 		inode->i_op = &gfs2_dir_iops;
100 		inode->i_fop = &gfs2_dir_fops;
101 	} else if (S_ISLNK(mode)) {
102 		inode->i_op = &gfs2_symlink_iops;
103 	} else {
104 		inode->i_op = &gfs2_dev_iops;
105 	}
106 
107 	unlock_new_inode(inode);
108 }
109 
110 /**
111  * gfs2_inode_lookup - Lookup an inode
112  * @sb: The super block
113  * @type: The DT_ type of the inode (DT_UNKNOWN if not yet known)
114  * @no_addr: The disk address of the inode
115  * @no_formal_ino: The formal inode number
116  * Returns: A VFS inode, or an error
117  */
118 
119 struct inode *gfs2_inode_lookup(struct super_block *sb,
120 				unsigned int type,
121 				u64 no_addr,
122 				u64 no_formal_ino)
123 {
124 	struct inode *inode = gfs2_iget(sb, no_addr);
125 	struct gfs2_inode *ip = GFS2_I(inode);
126 	struct gfs2_glock *io_gl;
127 	int error;
128 
129 	if (!inode)
130 		return ERR_PTR(-ENOBUFS);
131 
132 	if (inode->i_state & I_NEW) {
133 		struct gfs2_sbd *sdp = GFS2_SB(inode);
134 		inode->i_private = ip;
135 		ip->i_no_formal_ino = no_formal_ino;
136 
137 		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
138 		if (unlikely(error))
139 			goto fail;
140 		ip->i_gl->gl_object = ip;
141 
142 		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
143 		if (unlikely(error))
144 			goto fail_put;
145 
146 		set_bit(GIF_INVALID, &ip->i_flags);
147 		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
148 		if (unlikely(error))
149 			goto fail_iopen;
150 		ip->i_iopen_gh.gh_gl->gl_object = ip;
151 
152 		gfs2_glock_put(io_gl);
153 
154 		if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
155 			goto gfs2_nfsbypass;
156 
157 		inode->i_mode = DT2IF(type);
158 
159 		/*
160 		 * We must read the inode in order to work out its type in
161 		 * this case. Note that this doesn't happen often as we normally
162 		 * know the type beforehand. This code path only occurs during
163 		 * unlinked inode recovery (where it is safe to take this glock,
164 		 * which is not true in the general case).
165 		 */
166 		if (type == DT_UNKNOWN) {
167 			struct gfs2_holder gh;
168 			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
169 			if (unlikely(error))
170 				goto fail_glock;
171 			/* Inode is now uptodate */
172 			gfs2_glock_dq_uninit(&gh);
173 		}
174 
175 		gfs2_set_iop(inode);
176 	}
177 
178 gfs2_nfsbypass:
179 	return inode;
180 fail_glock:
181 	gfs2_glock_dq(&ip->i_iopen_gh);
182 fail_iopen:
183 	gfs2_glock_put(io_gl);
184 fail_put:
185 	ip->i_gl->gl_object = NULL;
186 	gfs2_glock_put(ip->i_gl);
187 fail:
188 	iput(inode);
189 	return ERR_PTR(error);
190 }
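/*
 * A hedged sketch of the call pattern (variable names are illustrative;
 * the real callers live in the directory lookup and NFS export paths):
 *
 *	inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *
 * With type == DT_UNKNOWN and no_formal_ino == 0 the dinode is not read
 * here (the gfs2_nfsbypass path above), so such a caller must refresh the
 * inode and set its operations before handing it to the VFS.
 */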
191 
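/*
 * gfs2_dinode_in() copies an on-disk (big-endian) dinode into the
 * in-core gfs2_dinode_host and the embedded VFS inode; gfs2_dinode_out()
 * near the end of this file performs the reverse translation.
 */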
192 static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
193 {
194 	struct gfs2_dinode_host *di = &ip->i_di;
195 	const struct gfs2_dinode *str = buf;
196 
197 	if (ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)) {
198 		if (gfs2_consist_inode(ip))
199 			gfs2_dinode_print(ip);
200 		return -EIO;
201 	}
202 	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
203 	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
204 	ip->i_inode.i_rdev = 0;
205 	switch (ip->i_inode.i_mode & S_IFMT) {
206 	case S_IFBLK:
207 	case S_IFCHR:
208 		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
209 					   be32_to_cpu(str->di_minor));
210 		break;
211 	}
212 
213 	ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
214 	ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
215 	/*
216 	 * We will need to review setting the nlink count here in the
217 	 * light of the forthcoming ro bind mount work. This is a reminder
218 	 * to do that.
219 	 */
220 	ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
221 	di->di_size = be64_to_cpu(str->di_size);
222 	i_size_write(&ip->i_inode, di->di_size);
223 	di->di_blocks = be64_to_cpu(str->di_blocks);
224 	gfs2_set_inode_blocks(&ip->i_inode);
225 	ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
226 	ip->i_inode.i_atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
227 	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
228 	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
229 	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
230 	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);
231 
232 	di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
233 	di->di_goal_data = be64_to_cpu(str->di_goal_data);
234 	di->di_generation = be64_to_cpu(str->di_generation);
235 
236 	di->di_flags = be32_to_cpu(str->di_flags);
237 	gfs2_set_inode_flags(&ip->i_inode);
238 	di->di_height = be16_to_cpu(str->di_height);
239 
240 	di->di_depth = be16_to_cpu(str->di_depth);
241 	di->di_entries = be32_to_cpu(str->di_entries);
242 
243 	di->di_eattr = be64_to_cpu(str->di_eattr);
244 	return 0;
245 }
246 
247 /**
248  * gfs2_inode_refresh - Refresh the incore copy of the dinode
249  * @ip: The GFS2 inode
250  *
251  * Returns: errno
252  */
253 
254 int gfs2_inode_refresh(struct gfs2_inode *ip)
255 {
256 	struct buffer_head *dibh;
257 	int error;
258 
259 	error = gfs2_meta_inode_buffer(ip, &dibh);
260 	if (error)
261 		return error;
262 
263 	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
264 		brelse(dibh);
265 		return -EIO;
266 	}
267 
268 	error = gfs2_dinode_in(ip, dibh->b_data);
269 	brelse(dibh);
270 	clear_bit(GIF_INVALID, &ip->i_flags);
271 
272 	return error;
273 }
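/*
 * A minimal sketch of refreshing a not-yet-valid inode, assuming the
 * caller already holds the inode glock:
 *
 *	if (test_bit(GIF_INVALID, &ip->i_flags)) {
 *		error = gfs2_inode_refresh(ip);
 *		if (error)
 *			return error;
 *	}
 */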
274 
275 int gfs2_dinode_dealloc(struct gfs2_inode *ip)
276 {
277 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
278 	struct gfs2_alloc *al;
279 	struct gfs2_rgrpd *rgd;
280 	int error;
281 
282 	if (ip->i_di.di_blocks != 1) {
283 		if (gfs2_consist_inode(ip))
284 			gfs2_dinode_print(ip);
285 		return -EIO;
286 	}
287 
288 	al = gfs2_alloc_get(ip);
289 
290 	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
291 	if (error)
292 		goto out;
293 
294 	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
295 	if (error)
296 		goto out_qs;
297 
298 	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
299 	if (!rgd) {
300 		gfs2_consist_inode(ip);
301 		error = -EIO;
302 		goto out_rindex_relse;
303 	}
304 
305 	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
306 				   &al->al_rgd_gh);
307 	if (error)
308 		goto out_rindex_relse;
309 
310 	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
311 	if (error)
312 		goto out_rg_gunlock;
313 
314 	gfs2_trans_add_gl(ip->i_gl);
315 
316 	gfs2_free_di(rgd, ip);
317 
318 	gfs2_trans_end(sdp);
319 	clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);
320 
321 out_rg_gunlock:
322 	gfs2_glock_dq_uninit(&al->al_rgd_gh);
323 out_rindex_relse:
324 	gfs2_glock_dq_uninit(&al->al_ri_gh);
325 out_qs:
326 	gfs2_quota_unhold(ip);
327 out:
328 	gfs2_alloc_put(ip);
329 	return error;
330 }
331 
332 /**
333  * gfs2_change_nlink - Change nlink count on inode
334  * @ip: The GFS2 inode
335  * @diff: The change in the nlink count required
336  *
337  * Returns: errno
338  */
339 int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
340 {
341 	struct buffer_head *dibh;
342 	u32 nlink;
343 	int error;
344 
345 	BUG_ON(diff != 1 && diff != -1);
346 	nlink = ip->i_inode.i_nlink + diff;
347 
348 	/* If we are reducing the nlink count, but the new value ends up being
349 	   bigger than the old one, we must have underflowed. */
350 	if (diff < 0 && nlink > ip->i_inode.i_nlink) {
351 		if (gfs2_consist_inode(ip))
352 			gfs2_dinode_print(ip);
353 		return -EIO;
354 	}
355 
356 	error = gfs2_meta_inode_buffer(ip, &dibh);
357 	if (error)
358 		return error;
359 
360 	if (diff > 0)
361 		inc_nlink(&ip->i_inode);
362 	else
363 		drop_nlink(&ip->i_inode);
364 
365 	ip->i_inode.i_ctime = CURRENT_TIME;
366 
367 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
368 	gfs2_dinode_out(ip, dibh->b_data);
369 	brelse(dibh);
370 	mark_inode_dirty(&ip->i_inode);
371 
372 	if (ip->i_inode.i_nlink == 0)
373 		gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */
374 
375 	return error;
376 }
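/*
 * gfs2_change_nlink() expects to run inside a transaction with the inode
 * glock held exclusively; a sketch of a caller dropping a link (the label
 * is illustrative; compare gfs2_rmdiri() below):
 *
 *	error = gfs2_change_nlink(ip, -1);
 *	if (error)
 *		goto out_end_trans;
 */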
377 
378 struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
379 {
380 	struct qstr qstr;
381 	struct inode *inode;
382 	gfs2_str2qstr(&qstr, name);
383 	inode = gfs2_lookupi(dip, &qstr, 1, NULL);
384 	/* gfs2_lookupi has inconsistent callers: vfs
385 	 * related routines expect NULL for no entry found,
386 	 * gfs2_lookup_simple callers expect ENOENT
387 	 * and do not check for NULL.
388 	 */
389 	if (inode == NULL)
390 		return ERR_PTR(-ENOENT);
391 	else
392 		return inode;
393 }
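/*
 * Example of the ENOENT convention (a sketch; the directory and name are
 * illustrative of the internal system-file lookups that use this wrapper):
 *
 *	struct inode *inode = gfs2_lookup_simple(master_dir, "quota");
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 */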
394 
395 
396 /**
397  * gfs2_lookupi - Look up a filename in a directory and return its inode
398  * @dir: The directory to search in
399  * @name: The name of the inode to look for
400  * @is_root: If 1, ignore the caller's permissions
401  * @nd: The nameidata of the lookup (may be NULL)
402  *
403  * This can be called via the VFS filldir function when NFS is doing
404  * a readdirplus and the inode which it intends to stat isn't
405  * already in cache. In this case we must not take the directory glock
406  * again, since the readdir call will have already taken that lock.
407  *
408  * Returns: the inode if found, NULL if no entry exists, or an error pointer
409  */
410 
411 struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
412 			   int is_root, struct nameidata *nd)
413 {
414 	struct super_block *sb = dir->i_sb;
415 	struct gfs2_inode *dip = GFS2_I(dir);
416 	struct gfs2_holder d_gh;
417 	int error = 0;
418 	struct inode *inode = NULL;
419 	int unlock = 0;
420 
421 	if (!name->len || name->len > GFS2_FNAMESIZE)
422 		return ERR_PTR(-ENAMETOOLONG);
423 
424 	if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
425 	    (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
426 	     dir == sb->s_root->d_inode)) {
427 		igrab(dir);
428 		return dir;
429 	}
430 
431 	if (gfs2_glock_is_locked_by_me(dip->i_gl) == 0) {
432 		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
433 		if (error)
434 			return ERR_PTR(error);
435 		unlock = 1;
436 	}
437 
438 	if (!is_root) {
439 		error = permission(dir, MAY_EXEC, NULL);
440 		if (error)
441 			goto out;
442 	}
443 
444 	inode = gfs2_dir_search(dir, name);
445 	if (IS_ERR(inode))
446 		error = PTR_ERR(inode);
447 out:
448 	if (unlock)
449 		gfs2_glock_dq_uninit(&d_gh);
450 	if (error == -ENOENT)
451 		return NULL;
452 	return inode ? inode : ERR_PTR(error);
453 }
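/*
 * Sketch of the calling convention for VFS callers (gfs2_lookup_simple()
 * above wraps the NULL case into -ENOENT):
 *
 *	inode = gfs2_lookupi(dir, &dentry->d_name, 0, nd);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	if (inode == NULL)
 *		...  no such entry; not an error for the VFS ...
 */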
454 
455 static void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
456 {
457 	const struct gfs2_inum_range *str = buf;
458 
459 	ir->ir_start = be64_to_cpu(str->ir_start);
460 	ir->ir_length = be64_to_cpu(str->ir_length);
461 }
462 
463 static void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
464 {
465 	struct gfs2_inum_range *str = buf;
466 
467 	str->ir_start = cpu_to_be64(ir->ir_start);
468 	str->ir_length = cpu_to_be64(ir->ir_length);
469 }
470 
471 static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
472 {
473 	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
474 	struct buffer_head *bh;
475 	struct gfs2_inum_range_host ir;
476 	int error;
477 
478 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
479 	if (error)
480 		return error;
481 	mutex_lock(&sdp->sd_inum_mutex);
482 
483 	error = gfs2_meta_inode_buffer(ip, &bh);
484 	if (error) {
485 		mutex_unlock(&sdp->sd_inum_mutex);
486 		gfs2_trans_end(sdp);
487 		return error;
488 	}
489 
490 	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));
491 
492 	if (ir.ir_length) {
493 		*formal_ino = ir.ir_start++;
494 		ir.ir_length--;
495 		gfs2_trans_add_bh(ip->i_gl, bh, 1);
496 		gfs2_inum_range_out(&ir,
497 				    bh->b_data + sizeof(struct gfs2_dinode));
498 		brelse(bh);
499 		mutex_unlock(&sdp->sd_inum_mutex);
500 		gfs2_trans_end(sdp);
501 		return 0;
502 	}
503 
504 	brelse(bh);
505 
506 	mutex_unlock(&sdp->sd_inum_mutex);
507 	gfs2_trans_end(sdp);
508 
509 	return 1;
510 }
511 
512 static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
513 {
514 	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
515 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
516 	struct gfs2_holder gh;
517 	struct buffer_head *bh;
518 	struct gfs2_inum_range_host ir;
519 	int error;
520 
521 	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
522 	if (error)
523 		return error;
524 
525 	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
526 	if (error)
527 		goto out;
528 	mutex_lock(&sdp->sd_inum_mutex);
529 
530 	error = gfs2_meta_inode_buffer(ip, &bh);
531 	if (error)
532 		goto out_end_trans;
533 
534 	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));
535 
536 	if (!ir.ir_length) {
537 		struct buffer_head *m_bh;
538 		u64 x, y;
539 		__be64 z;
540 
541 		error = gfs2_meta_inode_buffer(m_ip, &m_bh);
542 		if (error)
543 			goto out_brelse;
544 
545 		z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
546 		x = y = be64_to_cpu(z);
547 		ir.ir_start = x;
548 		ir.ir_length = GFS2_INUM_QUANTUM;
549 		x += GFS2_INUM_QUANTUM;
550 		if (x < y)
551 			gfs2_consist_inode(m_ip);
552 		z = cpu_to_be64(x);
553 		gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
554 		*(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;
555 
556 		brelse(m_bh);
557 	}
558 
559 	*formal_ino = ir.ir_start++;
560 	ir.ir_length--;
561 
562 	gfs2_trans_add_bh(ip->i_gl, bh, 1);
563 	gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));
564 
565 out_brelse:
566 	brelse(bh);
567 out_end_trans:
568 	mutex_unlock(&sdp->sd_inum_mutex);
569 	gfs2_trans_end(sdp);
570 out:
571 	gfs2_glock_dq_uninit(&gh);
572 	return error;
573 }
574 
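/*
 * Formal inode numbers are allocated in two stages: pick_formal_ino_1()
 * hands one out from this node's local range (the "ir" inode, protected
 * only by sd_inum_mutex and a local transaction), and when that range is
 * exhausted pick_formal_ino_2() takes the cluster-wide inum glock and
 * carves a new GFS2_INUM_QUANTUM-sized range off the global counter.
 */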
575 static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
576 {
577 	int error;
578 
579 	error = pick_formal_ino_1(sdp, inum);
580 	if (error <= 0)
581 		return error;
582 
583 	error = pick_formal_ino_2(sdp, inum);
584 
585 	return error;
586 }
587 
588 /**
589  * create_ok - OK to create a new on-disk inode here?
590  * @dip:  Directory in which dinode is to be created
591  * @name:  Name of new dinode
592  * @mode: The file mode of the new dinode
593  *
594  * Returns: errno
595  */
596 
597 static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
598 		     unsigned int mode)
599 {
600 	int error;
601 
602 	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
603 	if (error)
604 		return error;
605 
606 	/*  Don't create entries in an unlinked directory  */
607 	if (!dip->i_inode.i_nlink)
608 		return -EPERM;
609 
610 	error = gfs2_dir_check(&dip->i_inode, name, NULL);
611 	switch (error) {
612 	case -ENOENT:
613 		error = 0;
614 		break;
615 	case 0:
616 		return -EEXIST;
617 	default:
618 		return error;
619 	}
620 
621 	if (dip->i_di.di_entries == (u32)-1)
622 		return -EFBIG;
623 	if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
624 		return -EMLINK;
625 
626 	return 0;
627 }
628 
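/*
 * Apply the parent directory's setuid/setgid inheritance rules (including
 * the "suiddir" mount option) to the mode, uid and gid of a new inode.
 */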
629 static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
630 			       unsigned int *uid, unsigned int *gid)
631 {
632 	if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
633 	    (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
634 		if (S_ISDIR(*mode))
635 			*mode |= S_ISUID;
636 		else if (dip->i_inode.i_uid != current->fsuid)
637 			*mode &= ~07111;
638 		*uid = dip->i_inode.i_uid;
639 	} else
640 		*uid = current->fsuid;
641 
642 	if (dip->i_inode.i_mode & S_ISGID) {
643 		if (S_ISDIR(*mode))
644 			*mode |= S_ISGID;
645 		*gid = dip->i_inode.i_gid;
646 	} else
647 		*gid = current->fsgid;
648 }
649 
650 static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
651 {
652 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
653 	int error;
654 
655 	gfs2_alloc_get(dip);
656 
657 	dip->i_alloc.al_requested = RES_DINODE;
658 	error = gfs2_inplace_reserve(dip);
659 	if (error)
660 		goto out;
661 
662 	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
663 	if (error)
664 		goto out_ipreserv;
665 
666 	*no_addr = gfs2_alloc_di(dip, generation);
667 
668 	gfs2_trans_end(sdp);
669 
670 out_ipreserv:
671 	gfs2_inplace_release(dip);
672 out:
673 	gfs2_alloc_put(dip);
674 	return error;
675 }
676 
677 /**
678  * init_dinode - Fill in a new dinode structure
679  * @dip: the directory this inode is being created in
680  * @gl: The glock covering the new inode
681  * @inum: the inode number
682  * @mode: the file permissions
683  * @uid: the uid of the new dinode's owner
684  * @gid: the gid of the new dinode's group
685  * @generation: the dinode generation number; @dev: the device number
686  */
687 
688 static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
689 			const struct gfs2_inum_host *inum, unsigned int mode,
690 			unsigned int uid, unsigned int gid,
691 			const u64 *generation, dev_t dev)
692 {
693 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
694 	struct gfs2_dinode *di;
695 	struct buffer_head *dibh;
696 	struct timespec tv = CURRENT_TIME;
697 
698 	dibh = gfs2_meta_new(gl, inum->no_addr);
699 	gfs2_trans_add_bh(gl, dibh, 1);
700 	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
701 	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
702 	di = (struct gfs2_dinode *)dibh->b_data;
703 
704 	di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
705 	di->di_num.no_addr = cpu_to_be64(inum->no_addr);
706 	di->di_mode = cpu_to_be32(mode);
707 	di->di_uid = cpu_to_be32(uid);
708 	di->di_gid = cpu_to_be32(gid);
709 	di->di_nlink = 0;
710 	di->di_size = 0;
711 	di->di_blocks = cpu_to_be64(1);
712 	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec);
713 	di->di_major = cpu_to_be32(MAJOR(dev));
714 	di->di_minor = cpu_to_be32(MINOR(dev));
715 	di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
716 	di->di_generation = cpu_to_be64(*generation);
717 	di->di_flags = 0;
718 
719 	if (S_ISREG(mode)) {
720 		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
721 		    gfs2_tune_get(sdp, gt_new_files_jdata))
722 			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
723 		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
724 		    gfs2_tune_get(sdp, gt_new_files_directio))
725 			di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
726 	} else if (S_ISDIR(mode)) {
727 		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
728 					    GFS2_DIF_INHERIT_DIRECTIO);
729 		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
730 					    GFS2_DIF_INHERIT_JDATA);
731 	}
732 
733 	di->__pad1 = 0;
734 	di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
735 	di->di_height = 0;
736 	di->__pad2 = 0;
737 	di->__pad3 = 0;
738 	di->di_depth = 0;
739 	di->di_entries = 0;
740 	memset(&di->__pad4, 0, sizeof(di->__pad4));
741 	di->di_eattr = 0;
742 	di->di_atime_nsec = cpu_to_be32(tv.tv_nsec);
743 	di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
744 	di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
745 	memset(&di->di_reserved, 0, sizeof(di->di_reserved));
746 
747 	brelse(dibh);
748 }
749 
750 static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
751 		       unsigned int mode, const struct gfs2_inum_host *inum,
752 		       const u64 *generation, dev_t dev)
753 {
754 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
755 	unsigned int uid, gid;
756 	int error;
757 
758 	munge_mode_uid_gid(dip, &mode, &uid, &gid);
759 	gfs2_alloc_get(dip);
760 
761 	error = gfs2_quota_lock(dip, uid, gid);
762 	if (error)
763 		goto out;
764 
765 	error = gfs2_quota_check(dip, uid, gid);
766 	if (error)
767 		goto out_quota;
768 
769 	error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
770 	if (error)
771 		goto out_quota;
772 
773 	init_dinode(dip, gl, inum, mode, uid, gid, generation, dev);
774 	gfs2_quota_change(dip, +1, uid, gid);
775 	gfs2_trans_end(sdp);
776 
777 out_quota:
778 	gfs2_quota_unlock(dip);
779 out:
780 	gfs2_alloc_put(dip);
781 	return error;
782 }
783 
784 static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
785 		       struct gfs2_inode *ip)
786 {
787 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
788 	struct gfs2_alloc *al;
789 	int alloc_required;
790 	struct buffer_head *dibh;
791 	int error;
792 
793 	al = gfs2_alloc_get(dip);
794 
795 	error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
796 	if (error)
797 		goto fail;
798 
799 	error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
800 	if (alloc_required < 0)
801 		goto fail;
802 	if (alloc_required) {
803 		error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
804 		if (error)
805 			goto fail_quota_locks;
806 
807 		al->al_requested = sdp->sd_max_dirres;
808 
809 		error = gfs2_inplace_reserve(dip);
810 		if (error)
811 			goto fail_quota_locks;
812 
813 		error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
814 					 al->al_rgd->rd_length +
815 					 2 * RES_DINODE +
816 					 RES_STATFS + RES_QUOTA, 0);
817 		if (error)
818 			goto fail_ipreserv;
819 	} else {
820 		error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
821 		if (error)
822 			goto fail_quota_locks;
823 	}
824 
825 	error = gfs2_dir_add(&dip->i_inode, name, ip, IF2DT(ip->i_inode.i_mode));
826 	if (error)
827 		goto fail_end_trans;
828 
829 	error = gfs2_meta_inode_buffer(ip, &dibh);
830 	if (error)
831 		goto fail_end_trans;
832 	ip->i_inode.i_nlink = 1;
833 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
834 	gfs2_dinode_out(ip, dibh->b_data);
835 	brelse(dibh);
836 	return 0;
837 
838 fail_end_trans:
839 	gfs2_trans_end(sdp);
840 
841 fail_ipreserv:
842 	if (dip->i_alloc.al_rgd)
843 		gfs2_inplace_release(dip);
844 
845 fail_quota_locks:
846 	gfs2_quota_unlock(dip);
847 
848 fail:
849 	gfs2_alloc_put(dip);
850 	return error;
851 }
852 
853 static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
854 {
855 	int err;
856 	size_t len;
857 	void *value;
858 	char *name;
859 	struct gfs2_ea_request er;
860 
861 	err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
862 					   &name, &value, &len);
863 
864 	if (err) {
865 		if (err == -EOPNOTSUPP)
866 			return 0;
867 		return err;
868 	}
869 
870 	memset(&er, 0, sizeof(struct gfs2_ea_request));
871 
872 	er.er_type = GFS2_EATYPE_SECURITY;
873 	er.er_name = name;
874 	er.er_data = value;
875 	er.er_name_len = strlen(name);
876 	er.er_data_len = len;
877 
878 	err = gfs2_ea_set_i(ip, &er);
879 
880 	kfree(value);
881 	kfree(name);
882 
883 	return err;
884 }
885 
886 /**
887  * gfs2_createi - Create a new inode
888  * @ghs: An array of two holders
889  * @name: The name of the new file
890  * @mode: the permissions on the new inode
891  * @dev: the device number (for device special files)
892  *
893  * @ghs[0] is an initialized holder for the directory
894  * @ghs[1] is the holder for the inode lock
895  *
896  * On success, the glocks on both the directory and the new inode are held.
897  * A transaction has been started and an in-place reservation is held, as well.
898  *
899  * Returns: An inode, or an error pointer on failure
900  */
901 
902 struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
903 			   unsigned int mode, dev_t dev)
904 {
905 	struct inode *inode = NULL;
906 	struct gfs2_inode *dip = ghs->gh_gl->gl_object;
907 	struct inode *dir = &dip->i_inode;
908 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
909 	struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
910 	int error;
911 	u64 generation;
912 
913 	if (!name->len || name->len > GFS2_FNAMESIZE)
914 		return ERR_PTR(-ENAMETOOLONG);
915 
916 	gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
917 	error = gfs2_glock_nq(ghs);
918 	if (error)
919 		goto fail;
920 
921 	error = create_ok(dip, name, mode);
922 	if (error)
923 		goto fail_gunlock;
924 
925 	error = pick_formal_ino(sdp, &inum.no_formal_ino);
926 	if (error)
927 		goto fail_gunlock;
928 
929 	error = alloc_dinode(dip, &inum.no_addr, &generation);
930 	if (error)
931 		goto fail_gunlock;
932 
933 	error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
934 				  LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
935 	if (error)
936 		goto fail_gunlock;
937 
938 	error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev);
939 	if (error)
940 		goto fail_gunlock2;
941 
942 	inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode),
943 					inum.no_addr,
944 					inum.no_formal_ino);
945 	if (IS_ERR(inode))
946 		goto fail_gunlock2;
947 
948 	error = gfs2_inode_refresh(GFS2_I(inode));
949 	if (error)
950 		goto fail_gunlock2;
951 
952 	error = gfs2_acl_create(dip, GFS2_I(inode));
953 	if (error)
954 		goto fail_gunlock2;
955 
956 	error = gfs2_security_init(dip, GFS2_I(inode));
957 	if (error)
958 		goto fail_gunlock2;
959 
960 	error = link_dinode(dip, name, GFS2_I(inode));
961 	if (error)
962 		goto fail_gunlock2;
963 
964 	if (!inode)
965 		return ERR_PTR(-ENOMEM);
966 	return inode;
967 
968 fail_gunlock2:
969 	gfs2_glock_dq_uninit(ghs + 1);
970 	if (inode)
971 		iput(inode);
972 fail_gunlock:
973 	gfs2_glock_dq(ghs);
974 fail:
975 	return ERR_PTR(error);
976 }
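/*
 * A hedged sketch of driving gfs2_createi() (on success the callers are
 * also responsible for ending the transaction, releasing the in-place
 * reservation and quota locks, and dropping both glocks):
 *
 *	struct gfs2_holder ghs[2];
 *
 *	gfs2_holder_init(dip->i_gl, 0, 0, ghs);
 *	inode = gfs2_createi(ghs, &dentry->d_name, S_IFREG | mode, 0);
 *	if (IS_ERR(inode)) {
 *		gfs2_holder_uninit(ghs);
 *		return PTR_ERR(inode);
 *	}
 */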
977 
978 /**
979  * gfs2_rmdiri - Remove a directory
980  * @dip: The parent directory of the directory to be removed
981  * @name: The name of the directory to be removed
982  * @ip: The GFS2 inode of the directory to be removed
983  *
984  * Assumes Glocks on dip and ip are held
985  *
986  * Returns: errno
987  */
988 
989 int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
990 		struct gfs2_inode *ip)
991 {
992 	struct qstr dotname;
993 	int error;
994 
995 	if (ip->i_di.di_entries != 2) {
996 		if (gfs2_consist_inode(ip))
997 			gfs2_dinode_print(ip);
998 		return -EIO;
999 	}
1000 
1001 	error = gfs2_dir_del(dip, name);
1002 	if (error)
1003 		return error;
1004 
1005 	error = gfs2_change_nlink(dip, -1);
1006 	if (error)
1007 		return error;
1008 
1009 	gfs2_str2qstr(&dotname, ".");
1010 	error = gfs2_dir_del(ip, &dotname);
1011 	if (error)
1012 		return error;
1013 
1014 	gfs2_str2qstr(&dotname, "..");
1015 	error = gfs2_dir_del(ip, &dotname);
1016 	if (error)
1017 		return error;
1018 
1019 	/* The nlink drops twice: once for the parent's entry and once for "." */
1020 	error = gfs2_change_nlink(ip, -1);
1021 	if (error)
1022 		return error;
1023 
1024 	error = gfs2_change_nlink(ip, -1);
1025 	if (error)
1026 		return error;
1027 
1028 	return error;
1029 }
1030 
1031 /*
1032  * gfs2_unlink_ok - check to see that an inode is still in a directory
1033  * @dip: the directory
1034  * @name: the name of the file
1035  * @ip: the inode
1036  *
1037  * Assumes that the lock on (at least) @dip is held.
1038  *
1039  * Returns: 0 if the parent/child relationship is correct, errno if it isn't
1040  */
1041 
1042 int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
1043 		   const struct gfs2_inode *ip)
1044 {
1045 	int error;
1046 
1047 	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
1048 		return -EPERM;
1049 
1050 	if ((dip->i_inode.i_mode & S_ISVTX) &&
1051 	    dip->i_inode.i_uid != current->fsuid &&
1052 	    ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER))
1053 		return -EPERM;
1054 
1055 	if (IS_APPEND(&dip->i_inode))
1056 		return -EPERM;
1057 
1058 	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
1059 	if (error)
1060 		return error;
1061 
1062 	error = gfs2_dir_check(&dip->i_inode, name, ip);
1063 	if (error)
1064 		return error;
1065 
1066 	return 0;
1067 }
1068 
1069 /*
1070  * gfs2_ok_to_move - check if it's ok to move a directory to another directory
1071  * @this: move this
1072  * @to: to here
1073  *
1074  * Follow @to back to the root and make sure we don't encounter @this
1075  * Assumes we already hold the rename lock.
1076  *
1077  * Returns: errno
1078  */
1079 
1080 int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
1081 {
1082 	struct inode *dir = &to->i_inode;
1083 	struct super_block *sb = dir->i_sb;
1084 	struct inode *tmp;
1085 	struct qstr dotdot;
1086 	int error = 0;
1087 
1088 	gfs2_str2qstr(&dotdot, "..");
1089 
1090 	igrab(dir);
1091 
1092 	for (;;) {
1093 		if (dir == &this->i_inode) {
1094 			error = -EINVAL;
1095 			break;
1096 		}
1097 		if (dir == sb->s_root->d_inode) {
1098 			error = 0;
1099 			break;
1100 		}
1101 
1102 		tmp = gfs2_lookupi(dir, &dotdot, 1, NULL);
1103 		if (IS_ERR(tmp)) {
1104 			error = PTR_ERR(tmp);
1105 			break;
1106 		}
1107 
1108 		iput(dir);
1109 		dir = tmp;
1110 	}
1111 
1112 	iput(dir);
1113 
1114 	return error;
1115 }
1116 
1117 /**
1118  * gfs2_readlinki - return the contents of a symlink
1119  * @ip: the symlink's inode
1120  * @buf: a pointer to the buffer to be filled
1121  * @len: a pointer to the length of @buf
1122  *
1123  * If @buf is too small, a piece of memory is kmalloc()ed and needs
1124  * to be freed by the caller.
1125  *
1126  * Returns: errno
1127  */
1128 
1129 int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
1130 {
1131 	struct gfs2_holder i_gh;
1132 	struct buffer_head *dibh;
1133 	unsigned int x;
1134 	int error;
1135 
1136 	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
1137 	error = gfs2_glock_nq_atime(&i_gh);
1138 	if (error) {
1139 		gfs2_holder_uninit(&i_gh);
1140 		return error;
1141 	}
1142 
1143 	if (!ip->i_di.di_size) {
1144 		gfs2_consist_inode(ip);
1145 		error = -EIO;
1146 		goto out;
1147 	}
1148 
1149 	error = gfs2_meta_inode_buffer(ip, &dibh);
1150 	if (error)
1151 		goto out;
1152 
1153 	x = ip->i_di.di_size + 1;
1154 	if (x > *len) {
1155 		*buf = kmalloc(x, GFP_KERNEL);
1156 		if (!*buf) {
1157 			error = -ENOMEM;
1158 			goto out_brelse;
1159 		}
1160 	}
1161 
1162 	memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
1163 	*len = x;
1164 
1165 out_brelse:
1166 	brelse(dibh);
1167 out:
1168 	gfs2_glock_dq_uninit(&i_gh);
1169 	return error;
1170 }
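/*
 * A usage sketch (names are illustrative): the caller passes a buffer and
 * its length, and must kfree() the result if gfs2_readlinki() had to
 * allocate a larger one:
 *
 *	char array[64], *buf = array;
 *	unsigned int len = sizeof(array);
 *
 *	error = gfs2_readlinki(ip, &buf, &len);
 *	...
 *	if (buf != array)
 *		kfree(buf);
 */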
1171 
1172 /**
1173  * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
1174  *       conditionally update the inode's atime
1175  * @gh: the holder to acquire
1176  *
1177  * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap.
1178  * The atime is updated if the difference between the current time and the
1179  * inode's current atime is greater than an interval specified at mount time.
1180  *
1181  * Returns: errno
1182  */
1183 
1184 int gfs2_glock_nq_atime(struct gfs2_holder *gh)
1185 {
1186 	struct gfs2_glock *gl = gh->gh_gl;
1187 	struct gfs2_sbd *sdp = gl->gl_sbd;
1188 	struct gfs2_inode *ip = gl->gl_object;
1189 	s64 quantum = gfs2_tune_get(sdp, gt_atime_quantum);
1190 	unsigned int state;
1191 	int flags;
1192 	int error;
1193 	struct timespec tv = CURRENT_TIME;
1194 
1195 	if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
1196 	    gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
1197 	    gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
1198 		return -EINVAL;
1199 
1200 	state = gh->gh_state;
1201 	flags = gh->gh_flags;
1202 
1203 	error = gfs2_glock_nq(gh);
1204 	if (error)
1205 		return error;
1206 
1207 	if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
1208 	    (sdp->sd_vfs->s_flags & MS_RDONLY))
1209 		return 0;
1210 
1211 	if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
1212 		gfs2_glock_dq(gh);
1213 		gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
1214 				   gh);
1215 		error = gfs2_glock_nq(gh);
1216 		if (error)
1217 			return error;
1218 
1219 		/* Verify that atime hasn't been updated while we were
1220 		   trying to get exclusive lock. */
1221 
1222 		tv = CURRENT_TIME;
1223 		if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
1224 			struct buffer_head *dibh;
1225 			struct gfs2_dinode *di;
1226 
1227 			error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1228 			if (error == -EROFS)
1229 				return 0;
1230 			if (error)
1231 				goto fail;
1232 
1233 			error = gfs2_meta_inode_buffer(ip, &dibh);
1234 			if (error)
1235 				goto fail_end_trans;
1236 
1237 			ip->i_inode.i_atime = tv;
1238 
1239 			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1240 			di = (struct gfs2_dinode *)dibh->b_data;
1241 			di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
1242 			di->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
1243 			brelse(dibh);
1244 
1245 			gfs2_trans_end(sdp);
1246 		}
1247 
1248 		/* If someone else has asked for the glock,
1249 		   unlock and let them have it. Then reacquire
1250 		   in the original state. */
1251 		if (gfs2_glock_is_blocking(gl)) {
1252 			gfs2_glock_dq(gh);
1253 			gfs2_holder_reinit(state, flags, gh);
1254 			return gfs2_glock_nq(gh);
1255 		}
1256 	}
1257 
1258 	return 0;
1259 
1260 fail_end_trans:
1261 	gfs2_trans_end(sdp);
1262 fail:
1263 	gfs2_glock_dq(gh);
1264 	return error;
1265 }
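/*
 * Callers pass GL_ATIME on a shared holder and acquire it through this
 * helper instead of gfs2_glock_nq(); compare gfs2_readlinki() above:
 *
 *	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
 *	error = gfs2_glock_nq_atime(&gh);
 */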
1266 
1267 static int
1268 __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1269 {
1270 	struct buffer_head *dibh;
1271 	int error;
1272 
1273 	error = gfs2_meta_inode_buffer(ip, &dibh);
1274 	if (!error) {
1275 		error = inode_setattr(&ip->i_inode, attr);
1276 		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1277 		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1278 		gfs2_dinode_out(ip, dibh->b_data);
1279 		brelse(dibh);
1280 	}
1281 	return error;
1282 }
1283 
1284 /**
1285  * gfs2_setattr_simple - change simple attributes of an inode
1286  * @ip: the inode whose attributes are to be changed
1287  * @attr: the attributes to set
1288  *
1289  * Called with a reference on the vnode.
1290  *
1291  * Returns: errno
1292  */
1293 
1294 int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1295 {
1296 	int error;
1297 
1298 	if (current->journal_info)
1299 		return __gfs2_setattr_simple(ip, attr);
1300 
1301 	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
1302 	if (error)
1303 		return error;
1304 
1305 	error = __gfs2_setattr_simple(ip, attr);
1306 	gfs2_trans_end(GFS2_SB(&ip->i_inode));
1307 	return error;
1308 }
1309 
1310 void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
1311 {
1312 	const struct gfs2_dinode_host *di = &ip->i_di;
1313 	struct gfs2_dinode *str = buf;
1314 
1315 	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
1316 	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
1317 	str->di_header.__pad0 = 0;
1318 	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
1319 	str->di_header.__pad1 = 0;
1320 	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
1321 	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
1322 	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
1323 	str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
1324 	str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
1325 	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
1326 	str->di_size = cpu_to_be64(di->di_size);
1327 	str->di_blocks = cpu_to_be64(di->di_blocks);
1328 	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
1329 	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
1330 	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);
1331 
1332 	str->di_goal_meta = cpu_to_be64(di->di_goal_meta);
1333 	str->di_goal_data = cpu_to_be64(di->di_goal_data);
1334 	str->di_generation = cpu_to_be64(di->di_generation);
1335 
1336 	str->di_flags = cpu_to_be32(di->di_flags);
1337 	str->di_height = cpu_to_be16(di->di_height);
1338 	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
1339 					     !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
1340 					     GFS2_FORMAT_DE : 0);
1341 	str->di_depth = cpu_to_be16(di->di_depth);
1342 	str->di_entries = cpu_to_be32(di->di_entries);
1343 
1344 	str->di_eattr = cpu_to_be64(di->di_eattr);
1345 	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
1346 	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
1347 	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
1348 }
1349 
1350 void gfs2_dinode_print(const struct gfs2_inode *ip)
1351 {
1352 	const struct gfs2_dinode_host *di = &ip->i_di;
1353 
1354 	printk(KERN_INFO "  no_formal_ino = %llu\n",
1355 	       (unsigned long long)ip->i_no_formal_ino);
1356 	printk(KERN_INFO "  no_addr = %llu\n",
1357 	       (unsigned long long)ip->i_no_addr);
1358 	printk(KERN_INFO "  di_size = %llu\n", (unsigned long long)di->di_size);
1359 	printk(KERN_INFO "  di_blocks = %llu\n",
1360 	       (unsigned long long)di->di_blocks);
1361 	printk(KERN_INFO "  di_goal_meta = %llu\n",
1362 	       (unsigned long long)di->di_goal_meta);
1363 	printk(KERN_INFO "  di_goal_data = %llu\n",
1364 	       (unsigned long long)di->di_goal_data);
1365 	printk(KERN_INFO "  di_flags = 0x%.8X\n", di->di_flags);
1366 	printk(KERN_INFO "  di_height = %u\n", di->di_height);
1367 	printk(KERN_INFO "  di_depth = %u\n", di->di_depth);
1368 	printk(KERN_INFO "  di_entries = %u\n", di->di_entries);
1369 	printk(KERN_INFO "  di_eattr = %llu\n",
1370 	       (unsigned long long)di->di_eattr);
1371 }
1372 
1373