xref: /openbmc/linux/fs/gfs2/inode.c (revision 96de0e252cedffad61b3cb5e05662c591898e69a)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/posix_acl.h>
16 #include <linux/sort.h>
17 #include <linux/gfs2_ondisk.h>
18 #include <linux/crc32.h>
19 #include <linux/lm_interface.h>
20 #include <linux/security.h>
21 
22 #include "gfs2.h"
23 #include "incore.h"
24 #include "acl.h"
25 #include "bmap.h"
26 #include "dir.h"
27 #include "eattr.h"
28 #include "glock.h"
29 #include "glops.h"
30 #include "inode.h"
31 #include "log.h"
32 #include "meta_io.h"
33 #include "ops_address.h"
34 #include "ops_file.h"
35 #include "ops_inode.h"
36 #include "quota.h"
37 #include "rgrp.h"
38 #include "trans.h"
39 #include "util.h"
40 
41 struct gfs2_inum_range_host {
42 	u64 ir_start;
43 	u64 ir_length;
44 };
45 
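/*
 * The hash passed to iget5_locked()/ilookup5() below is the disk block
 * address truncated to an unsigned long; iget_test() compares the full
 * 64-bit no_addr so that any truncation collisions are resolved correctly.
 */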
46 static int iget_test(struct inode *inode, void *opaque)
47 {
48 	struct gfs2_inode *ip = GFS2_I(inode);
49 	u64 *no_addr = opaque;
50 
51 	if (ip->i_no_addr == *no_addr &&
52 	    inode->i_private != NULL)
53 		return 1;
54 
55 	return 0;
56 }
57 
58 static int iget_set(struct inode *inode, void *opaque)
59 {
60 	struct gfs2_inode *ip = GFS2_I(inode);
61 	u64 *no_addr = opaque;
62 
63 	inode->i_ino = (unsigned long)*no_addr;
64 	ip->i_no_addr = *no_addr;
65 	return 0;
66 }
67 
68 struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
69 {
70 	unsigned long hash = (unsigned long)no_addr;
71 	return ilookup5(sb, hash, iget_test, &no_addr);
72 }
73 
74 static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
75 {
76 	unsigned long hash = (unsigned long)no_addr;
77 	return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
78 }
79 
80 struct gfs2_skip_data {
81 	u64	no_addr;
82 	int	skipped;
83 };
84 
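/*
 * Variants of the iget test/set callbacks used by gfs2_iget_skip(): if the
 * matching inode is already being torn down (I_FREEING, I_CLEAR or
 * I_WILL_FREE), that fact is recorded in the gfs2_skip_data so that
 * iget_skip_set() can abort the creation of a new in-core inode for the
 * same block.
 */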
85 static int iget_skip_test(struct inode *inode, void *opaque)
86 {
87 	struct gfs2_inode *ip = GFS2_I(inode);
88 	struct gfs2_skip_data *data = opaque;
89 
90 	if (ip->i_no_addr == data->no_addr && inode->i_private != NULL){
91 		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)){
92 			data->skipped = 1;
93 			return 0;
94 		}
95 		return 1;
96 	}
97 	return 0;
98 }
99 
100 static int iget_skip_set(struct inode *inode, void *opaque)
101 {
102 	struct gfs2_inode *ip = GFS2_I(inode);
103 	struct gfs2_skip_data *data = opaque;
104 
105 	if (data->skipped)
106 		return 1;
107 	inode->i_ino = (unsigned long)(data->no_addr);
108 	ip->i_no_addr = data->no_addr;
109 	return 0;
110 }
111 
112 static struct inode *gfs2_iget_skip(struct super_block *sb,
113 				    u64 no_addr)
114 {
115 	struct gfs2_skip_data data;
116 	unsigned long hash = (unsigned long)no_addr;
117 
118 	data.no_addr = no_addr;
119 	data.skipped = 0;
120 	return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data);
121 }
122 
123 /**
124  * The GFS2 lookup code fills in the VFS inode contents based on information
125  * obtained from the directory entry inside gfs2_inode_lookup(). This has
126  * caused problems for the NFS code path, since its get_dentry routine does
127  * not have the relevant directory entry available when gfs2_inode_lookup()
128  * is invoked. Part of gfs2_inode_lookup() therefore needs to be reorganised.
129  *
130  * Clean up I_LOCK and I_NEW as well.
131  **/
132 
133 void gfs2_set_iop(struct inode *inode)
134 {
135 	umode_t mode = inode->i_mode;
136 
137 	if (S_ISREG(mode)) {
138 		inode->i_op = &gfs2_file_iops;
139 		inode->i_fop = &gfs2_file_fops;
140 		inode->i_mapping->a_ops = &gfs2_file_aops;
141 	} else if (S_ISDIR(mode)) {
142 		inode->i_op = &gfs2_dir_iops;
143 		inode->i_fop = &gfs2_dir_fops;
144 	} else if (S_ISLNK(mode)) {
145 		inode->i_op = &gfs2_symlink_iops;
146 	} else {
147 		inode->i_op = &gfs2_dev_iops;
148 	}
149 
150 	unlock_new_inode(inode);
151 }
152 
153 /**
154  * gfs2_inode_lookup - Lookup an inode
155  * @sb: The super block
156  * @type: The type of the inode
157  * @no_addr: The inode number
158  * @skip_freeing: set this to avoid returning an inode that is currently being freed.
159  *
160  * Returns: A VFS inode, or an error
161  */
162 
163 struct inode *gfs2_inode_lookup(struct super_block *sb,
164 				unsigned int type,
165 				u64 no_addr,
166 				u64 no_formal_ino, int skip_freeing)
167 {
168 	struct inode *inode;
169 	struct gfs2_inode *ip;
170 	struct gfs2_glock *io_gl;
171 	int error;
172 
173 	if (skip_freeing)
174 		inode = gfs2_iget_skip(sb, no_addr);
175 	else
176 		inode = gfs2_iget(sb, no_addr);
177 	if (!inode)
178 		return ERR_PTR(-ENOBUFS);
179 
180 	ip = GFS2_I(inode);
181 
182 	if (inode->i_state & I_NEW) {
183 		struct gfs2_sbd *sdp = GFS2_SB(inode);
184 		inode->i_private = ip;
185 		ip->i_no_formal_ino = no_formal_ino;
186 
187 		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
188 		if (unlikely(error))
189 			goto fail;
190 		ip->i_gl->gl_object = ip;
191 
192 		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
193 		if (unlikely(error))
194 			goto fail_put;
195 
196 		set_bit(GIF_INVALID, &ip->i_flags);
197 		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
198 		if (unlikely(error))
199 			goto fail_iopen;
200 		ip->i_iopen_gh.gh_gl->gl_object = ip;
201 
202 		gfs2_glock_put(io_gl);
203 
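		/*
		 * When neither the inode type nor the formal inode number is
		 * known (the NFS get_dentry case), skip setting up the inode
		 * operations and return the partially initialised inode
		 * (see the comment above gfs2_set_iop()).
		 */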
204 		if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
205 			goto gfs2_nfsbypass;
206 
207 		inode->i_mode = DT2IF(type);
208 
209 		/*
210 		 * We must read the inode in order to work out its type in
211 		 * this case. Note that this doesn't happen often as we normally
212 		 * know the type beforehand. This code path only occurs during
213 		 * unlinked inode recovery (where it is safe to do this glock,
214 		 * which is not true in the general case).
215 		 */
216 		if (type == DT_UNKNOWN) {
217 			struct gfs2_holder gh;
218 			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
219 			if (unlikely(error))
220 				goto fail_glock;
221 			/* Inode is now uptodate */
222 			gfs2_glock_dq_uninit(&gh);
223 		}
224 
225 		gfs2_set_iop(inode);
226 	}
227 
228 gfs2_nfsbypass:
229 	return inode;
230 fail_glock:
231 	gfs2_glock_dq(&ip->i_iopen_gh);
232 fail_iopen:
233 	gfs2_glock_put(io_gl);
234 fail_put:
235 	ip->i_gl->gl_object = NULL;
236 	gfs2_glock_put(ip->i_gl);
237 fail:
238 	iput(inode);
239 	return ERR_PTR(error);
240 }
241 
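/*
 * gfs2_dinode_in - copy an on-disk dinode into the incore inode fields
 *
 * Returns -EIO if the block address recorded in the dinode does not match
 * the address this inode was looked up by.
 */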
242 static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
243 {
244 	struct gfs2_dinode_host *di = &ip->i_di;
245 	const struct gfs2_dinode *str = buf;
246 
247 	if (ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)) {
248 		if (gfs2_consist_inode(ip))
249 			gfs2_dinode_print(ip);
250 		return -EIO;
251 	}
252 	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
253 	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
254 	ip->i_inode.i_rdev = 0;
255 	switch (ip->i_inode.i_mode & S_IFMT) {
256 	case S_IFBLK:
257 	case S_IFCHR:
258 		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
259 					   be32_to_cpu(str->di_minor));
260 		break;
261 	}
262 
263 	ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
264 	ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
265 	/*
266 	 * We will need to review setting the nlink count here in the
267 	 * light of the forthcoming ro bind mount work. This is a reminder
268 	 * to do that.
269 	 */
270 	ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
271 	di->di_size = be64_to_cpu(str->di_size);
272 	i_size_write(&ip->i_inode, di->di_size);
273 	di->di_blocks = be64_to_cpu(str->di_blocks);
274 	gfs2_set_inode_blocks(&ip->i_inode);
275 	ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
276 	ip->i_inode.i_atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
277 	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
278 	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
279 	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
280 	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);
281 
282 	di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
283 	di->di_goal_data = be64_to_cpu(str->di_goal_data);
284 	di->di_generation = be64_to_cpu(str->di_generation);
285 
286 	di->di_flags = be32_to_cpu(str->di_flags);
287 	gfs2_set_inode_flags(&ip->i_inode);
288 	di->di_height = be16_to_cpu(str->di_height);
289 
290 	di->di_depth = be16_to_cpu(str->di_depth);
291 	di->di_entries = be32_to_cpu(str->di_entries);
292 
293 	di->di_eattr = be64_to_cpu(str->di_eattr);
294 	return 0;
295 }
296 
297 static void gfs2_inode_bh(struct gfs2_inode *ip, struct buffer_head *bh)
298 {
299 	ip->i_cache[0] = bh;
300 }
301 
302 /**
303  * gfs2_inode_refresh - Refresh the incore copy of the dinode
304  * @ip: The GFS2 inode
305  *
306  * Returns: errno
307  */
308 
309 int gfs2_inode_refresh(struct gfs2_inode *ip)
310 {
311 	struct buffer_head *dibh;
312 	int error;
313 
314 	error = gfs2_meta_inode_buffer(ip, &dibh);
315 	if (error)
316 		return error;
317 
318 	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
319 		brelse(dibh);
320 		return -EIO;
321 	}
322 
323 	error = gfs2_dinode_in(ip, dibh->b_data);
324 	brelse(dibh);
325 	clear_bit(GIF_INVALID, &ip->i_flags);
326 
327 	return error;
328 }
329 
330 int gfs2_dinode_dealloc(struct gfs2_inode *ip)
331 {
332 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
333 	struct gfs2_alloc *al;
334 	struct gfs2_rgrpd *rgd;
335 	int error;
336 
337 	if (ip->i_di.di_blocks != 1) {
338 		if (gfs2_consist_inode(ip))
339 			gfs2_dinode_print(ip);
340 		return -EIO;
341 	}
342 
343 	al = gfs2_alloc_get(ip);
344 
345 	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
346 	if (error)
347 		goto out;
348 
349 	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
350 	if (error)
351 		goto out_qs;
352 
353 	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
354 	if (!rgd) {
355 		gfs2_consist_inode(ip);
356 		error = -EIO;
357 		goto out_rindex_relse;
358 	}
359 
360 	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
361 				   &al->al_rgd_gh);
362 	if (error)
363 		goto out_rindex_relse;
364 
365 	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
366 	if (error)
367 		goto out_rg_gunlock;
368 
369 	gfs2_trans_add_gl(ip->i_gl);
370 
371 	gfs2_free_di(rgd, ip);
372 
373 	gfs2_trans_end(sdp);
374 	clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);
375 
376 out_rg_gunlock:
377 	gfs2_glock_dq_uninit(&al->al_rgd_gh);
378 out_rindex_relse:
379 	gfs2_glock_dq_uninit(&al->al_ri_gh);
380 out_qs:
381 	gfs2_quota_unhold(ip);
382 out:
383 	gfs2_alloc_put(ip);
384 	return error;
385 }
386 
387 /**
388  * gfs2_change_nlink - Change nlink count on inode
389  * @ip: The GFS2 inode
390  * @diff: The change in the nlink count required
391  *
392  * Returns: errno
393  */
394 int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
395 {
396 	struct buffer_head *dibh;
397 	u32 nlink;
398 	int error;
399 
400 	BUG_ON(diff != 1 && diff != -1);
401 	nlink = ip->i_inode.i_nlink + diff;
402 
403 	/* If we are reducing the nlink count, but the new value ends up being
404 	   bigger than the old one, we must have underflowed. */
405 	if (diff < 0 && nlink > ip->i_inode.i_nlink) {
406 		if (gfs2_consist_inode(ip))
407 			gfs2_dinode_print(ip);
408 		return -EIO;
409 	}
410 
411 	error = gfs2_meta_inode_buffer(ip, &dibh);
412 	if (error)
413 		return error;
414 
415 	if (diff > 0)
416 		inc_nlink(&ip->i_inode);
417 	else
418 		drop_nlink(&ip->i_inode);
419 
420 	ip->i_inode.i_ctime = CURRENT_TIME;
421 
422 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
423 	gfs2_dinode_out(ip, dibh->b_data);
424 	brelse(dibh);
425 	mark_inode_dirty(&ip->i_inode);
426 
427 	if (ip->i_inode.i_nlink == 0)
428 		gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */
429 
430 	return error;
431 }
432 
433 struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
434 {
435 	struct qstr qstr;
436 	struct inode *inode;
437 	gfs2_str2qstr(&qstr, name);
438 	inode = gfs2_lookupi(dip, &qstr, 1, NULL);
439 	/* gfs2_lookupi has inconsistent callers: VFS-related
440 	 * routines expect NULL when no entry is found, whereas
441 	 * gfs2_lookup_simple callers expect -ENOENT
442 	 * and do not check for NULL.
443 	 */
444 	if (inode == NULL)
445 		return ERR_PTR(-ENOENT);
446 	else
447 		return inode;
448 }
449 
450 
451 /**
452  * gfs2_lookupi - Look up a filename in a directory and return its inode
453  * @dir: The directory inode to search in
454  * @name: The name of the inode to look for
455  * @is_root: If 1, ignore the caller's permissions
456  * @nd: The nameidata from the VFS (may be NULL)
457  *
458  * This can be called via the VFS filldir function when NFS is doing
459  * a readdirplus and the inode which it's intending to stat isn't
460  * already in cache. In this case we must not take the directory glock
461  * again, since the readdir call will have already taken that lock.
462  *
463  * Returns: The inode, or NULL if no entry was found, or an error pointer
464  */
465 
466 struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
467 			   int is_root, struct nameidata *nd)
468 {
469 	struct super_block *sb = dir->i_sb;
470 	struct gfs2_inode *dip = GFS2_I(dir);
471 	struct gfs2_holder d_gh;
472 	int error = 0;
473 	struct inode *inode = NULL;
474 	int unlock = 0;
475 
476 	if (!name->len || name->len > GFS2_FNAMESIZE)
477 		return ERR_PTR(-ENAMETOOLONG);
478 
479 	if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
480 	    (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
481 	     dir == sb->s_root->d_inode)) {
482 		igrab(dir);
483 		return dir;
484 	}
485 
486 	if (gfs2_glock_is_locked_by_me(dip->i_gl) == 0) {
487 		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
488 		if (error)
489 			return ERR_PTR(error);
490 		unlock = 1;
491 	}
492 
493 	if (!is_root) {
494 		error = permission(dir, MAY_EXEC, NULL);
495 		if (error)
496 			goto out;
497 	}
498 
499 	inode = gfs2_dir_search(dir, name);
500 	if (IS_ERR(inode))
501 		error = PTR_ERR(inode);
502 out:
503 	if (unlock)
504 		gfs2_glock_dq_uninit(&d_gh);
505 	if (error == -ENOENT)
506 		return NULL;
507 	return inode ? inode : ERR_PTR(error);
508 }
509 
510 static void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
511 {
512 	const struct gfs2_inum_range *str = buf;
513 
514 	ir->ir_start = be64_to_cpu(str->ir_start);
515 	ir->ir_length = be64_to_cpu(str->ir_length);
516 }
517 
518 static void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
519 {
520 	struct gfs2_inum_range *str = buf;
521 
522 	str->ir_start = cpu_to_be64(ir->ir_start);
523 	str->ir_length = cpu_to_be64(ir->ir_length);
524 }
525 
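/*
 * pick_formal_ino_1 - hand out the next formal inode number from the range
 * already reserved in this node's inum range file (sd_ir_inode).
 *
 * Returns 0 on success, 1 if the local range is exhausted (the caller then
 * falls back to pick_formal_ino_2()), or a negative errno.
 */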
526 static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
527 {
528 	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
529 	struct buffer_head *bh;
530 	struct gfs2_inum_range_host ir;
531 	int error;
532 
533 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
534 	if (error)
535 		return error;
536 	mutex_lock(&sdp->sd_inum_mutex);
537 
538 	error = gfs2_meta_inode_buffer(ip, &bh);
539 	if (error) {
540 		mutex_unlock(&sdp->sd_inum_mutex);
541 		gfs2_trans_end(sdp);
542 		return error;
543 	}
544 
545 	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));
546 
547 	if (ir.ir_length) {
548 		*formal_ino = ir.ir_start++;
549 		ir.ir_length--;
550 		gfs2_trans_add_bh(ip->i_gl, bh, 1);
551 		gfs2_inum_range_out(&ir,
552 				    bh->b_data + sizeof(struct gfs2_dinode));
553 		brelse(bh);
554 		mutex_unlock(&sdp->sd_inum_mutex);
555 		gfs2_trans_end(sdp);
556 		return 0;
557 	}
558 
559 	brelse(bh);
560 
561 	mutex_unlock(&sdp->sd_inum_mutex);
562 	gfs2_trans_end(sdp);
563 
564 	return 1;
565 }
566 
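/*
 * pick_formal_ino_2 - refill the local range from the cluster-wide inum
 * counter (sd_inum_inode), taking GFS2_INUM_QUANTUM numbers at a time under
 * an exclusive glock, and then hand out the first number of the new range.
 */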
567 static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
568 {
569 	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
570 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
571 	struct gfs2_holder gh;
572 	struct buffer_head *bh;
573 	struct gfs2_inum_range_host ir;
574 	int error;
575 
576 	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
577 	if (error)
578 		return error;
579 
580 	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
581 	if (error)
582 		goto out;
583 	mutex_lock(&sdp->sd_inum_mutex);
584 
585 	error = gfs2_meta_inode_buffer(ip, &bh);
586 	if (error)
587 		goto out_end_trans;
588 
589 	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));
590 
591 	if (!ir.ir_length) {
592 		struct buffer_head *m_bh;
593 		u64 x, y;
594 		__be64 z;
595 
596 		error = gfs2_meta_inode_buffer(m_ip, &m_bh);
597 		if (error)
598 			goto out_brelse;
599 
600 		z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
601 		x = y = be64_to_cpu(z);
602 		ir.ir_start = x;
603 		ir.ir_length = GFS2_INUM_QUANTUM;
604 		x += GFS2_INUM_QUANTUM;
605 		if (x < y)
606 			gfs2_consist_inode(m_ip);
607 		z = cpu_to_be64(x);
608 		gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
609 		*(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;
610 
611 		brelse(m_bh);
612 	}
613 
614 	*formal_ino = ir.ir_start++;
615 	ir.ir_length--;
616 
617 	gfs2_trans_add_bh(ip->i_gl, bh, 1);
618 	gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));
619 
620 out_brelse:
621 	brelse(bh);
622 out_end_trans:
623 	mutex_unlock(&sdp->sd_inum_mutex);
624 	gfs2_trans_end(sdp);
625 out:
626 	gfs2_glock_dq_uninit(&gh);
627 	return error;
628 }
629 
630 static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
631 {
632 	int error;
633 
634 	error = pick_formal_ino_1(sdp, inum);
635 	if (error <= 0)
636 		return error;
637 
638 	error = pick_formal_ino_2(sdp, inum);
639 
640 	return error;
641 }
642 
643 /**
644  * create_ok - OK to create a new on-disk inode here?
645  * @dip:  Directory in which dinode is to be created
646  * @name:  Name of new dinode
647  * @mode: The mode of the new dinode
648  *
649  * Returns: errno
650  */
651 
652 static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
653 		     unsigned int mode)
654 {
655 	int error;
656 
657 	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
658 	if (error)
659 		return error;
660 
661 	/*  Don't create entries in an unlinked directory  */
662 	if (!dip->i_inode.i_nlink)
663 		return -EPERM;
664 
665 	error = gfs2_dir_check(&dip->i_inode, name, NULL);
666 	switch (error) {
667 	case -ENOENT:
668 		error = 0;
669 		break;
670 	case 0:
671 		return -EEXIST;
672 	default:
673 		return error;
674 	}
675 
676 	if (dip->i_di.di_entries == (u32)-1)
677 		return -EFBIG;
678 	if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
679 		return -EMLINK;
680 
681 	return 0;
682 }
683 
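/*
 * munge_mode_uid_gid - choose the mode, uid and gid for a new inode,
 * implementing the "suiddir" mount option (new inodes inherit the owner of
 * a setuid directory) and the usual setgid directory group inheritance.
 */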
684 static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
685 			       unsigned int *uid, unsigned int *gid)
686 {
687 	if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
688 	    (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
689 		if (S_ISDIR(*mode))
690 			*mode |= S_ISUID;
691 		else if (dip->i_inode.i_uid != current->fsuid)
692 			*mode &= ~07111;
693 		*uid = dip->i_inode.i_uid;
694 	} else
695 		*uid = current->fsuid;
696 
697 	if (dip->i_inode.i_mode & S_ISGID) {
698 		if (S_ISDIR(*mode))
699 			*mode |= S_ISGID;
700 		*gid = dip->i_inode.i_gid;
701 	} else
702 		*gid = current->fsgid;
703 }
704 
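/*
 * alloc_dinode - reserve space in a resource group and allocate the disk
 * block for a new dinode, returning its block address and generation number.
 */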
705 static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
706 {
707 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
708 	int error;
709 
710 	gfs2_alloc_get(dip);
711 
712 	dip->i_alloc.al_requested = RES_DINODE;
713 	error = gfs2_inplace_reserve(dip);
714 	if (error)
715 		goto out;
716 
717 	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
718 	if (error)
719 		goto out_ipreserv;
720 
721 	*no_addr = gfs2_alloc_di(dip, generation);
722 
723 	gfs2_trans_end(sdp);
724 
725 out_ipreserv:
726 	gfs2_inplace_release(dip);
727 out:
728 	gfs2_alloc_put(dip);
729 	return error;
730 }
731 
732 /**
733  * init_dinode - Fill in a new dinode structure
734  * @dip: the directory this inode is being created in
735  * @gl: The glock covering the new inode
736  * @inum: the inode number
737  * @mode: the file permissions
738  * @uid: The uid of the new dinode
739  * @gid: The gid of the new dinode
740  *
741  */
742 
743 static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
744 			const struct gfs2_inum_host *inum, unsigned int mode,
745 			unsigned int uid, unsigned int gid,
746 			const u64 *generation, dev_t dev, struct buffer_head **bhp)
747 {
748 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
749 	struct gfs2_dinode *di;
750 	struct buffer_head *dibh;
751 	struct timespec tv = CURRENT_TIME;
752 
753 	dibh = gfs2_meta_new(gl, inum->no_addr);
754 	gfs2_trans_add_bh(gl, dibh, 1);
755 	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
756 	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
757 	di = (struct gfs2_dinode *)dibh->b_data;
758 
759 	di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
760 	di->di_num.no_addr = cpu_to_be64(inum->no_addr);
761 	di->di_mode = cpu_to_be32(mode);
762 	di->di_uid = cpu_to_be32(uid);
763 	di->di_gid = cpu_to_be32(gid);
764 	di->di_nlink = 0;
765 	di->di_size = 0;
766 	di->di_blocks = cpu_to_be64(1);
767 	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec);
768 	di->di_major = cpu_to_be32(MAJOR(dev));
769 	di->di_minor = cpu_to_be32(MINOR(dev));
770 	di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
771 	di->di_generation = cpu_to_be64(*generation);
772 	di->di_flags = 0;
773 
774 	if (S_ISREG(mode)) {
775 		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
776 		    gfs2_tune_get(sdp, gt_new_files_jdata))
777 			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
778 		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
779 		    gfs2_tune_get(sdp, gt_new_files_directio))
780 			di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
781 	} else if (S_ISDIR(mode)) {
782 		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
783 					    GFS2_DIF_INHERIT_DIRECTIO);
784 		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
785 					    GFS2_DIF_INHERIT_JDATA);
786 	}
787 
788 	di->__pad1 = 0;
789 	di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
790 	di->di_height = 0;
791 	di->__pad2 = 0;
792 	di->__pad3 = 0;
793 	di->di_depth = 0;
794 	di->di_entries = 0;
795 	memset(&di->__pad4, 0, sizeof(di->__pad4));
796 	di->di_eattr = 0;
797 	di->di_atime_nsec = cpu_to_be32(tv.tv_nsec);
798 	di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
799 	di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
800 	memset(&di->di_reserved, 0, sizeof(di->di_reserved));
801 
802 	set_buffer_uptodate(dibh);
803 
804 	*bhp = dibh;
805 }
806 
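/*
 * make_dinode - check quota for the new inode's owner and, within a
 * transaction, write out the initial dinode via init_dinode().
 */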
807 static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
808 		       unsigned int mode, const struct gfs2_inum_host *inum,
809 		       const u64 *generation, dev_t dev, struct buffer_head **bhp)
810 {
811 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
812 	unsigned int uid, gid;
813 	int error;
814 
815 	munge_mode_uid_gid(dip, &mode, &uid, &gid);
816 	gfs2_alloc_get(dip);
817 
818 	error = gfs2_quota_lock(dip, uid, gid);
819 	if (error)
820 		goto out;
821 
822 	error = gfs2_quota_check(dip, uid, gid);
823 	if (error)
824 		goto out_quota;
825 
826 	error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
827 	if (error)
828 		goto out_quota;
829 
830 	init_dinode(dip, gl, inum, mode, uid, gid, generation, dev, bhp);
831 	gfs2_quota_change(dip, +1, uid, gid);
832 	gfs2_trans_end(sdp);
833 
834 out_quota:
835 	gfs2_quota_unlock(dip);
836 out:
837 	gfs2_alloc_put(dip);
838 	return error;
839 }
840 
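/*
 * link_dinode - add the directory entry for the newly created inode and set
 * its link count to 1.  If the directory has to grow to hold the new entry,
 * a block reservation and a correspondingly larger transaction are used.
 */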
841 static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
842 		       struct gfs2_inode *ip)
843 {
844 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
845 	struct gfs2_alloc *al;
846 	int alloc_required;
847 	struct buffer_head *dibh;
848 	int error;
849 
850 	al = gfs2_alloc_get(dip);
851 
852 	error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
853 	if (error)
854 		goto fail;
855 
856 	error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
857 	if (alloc_required < 0)
858 		goto fail;
859 	if (alloc_required) {
860 		error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
861 		if (error)
862 			goto fail_quota_locks;
863 
864 		al->al_requested = sdp->sd_max_dirres;
865 
866 		error = gfs2_inplace_reserve(dip);
867 		if (error)
868 			goto fail_quota_locks;
869 
870 		error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
871 					 al->al_rgd->rd_length +
872 					 2 * RES_DINODE +
873 					 RES_STATFS + RES_QUOTA, 0);
874 		if (error)
875 			goto fail_ipreserv;
876 	} else {
877 		error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
878 		if (error)
879 			goto fail_quota_locks;
880 	}
881 
882 	error = gfs2_dir_add(&dip->i_inode, name, ip, IF2DT(ip->i_inode.i_mode));
883 	if (error)
884 		goto fail_end_trans;
885 
886 	error = gfs2_meta_inode_buffer(ip, &dibh);
887 	if (error)
888 		goto fail_end_trans;
889 	ip->i_inode.i_nlink = 1;
890 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
891 	gfs2_dinode_out(ip, dibh->b_data);
892 	brelse(dibh);
893 	return 0;
894 
895 fail_end_trans:
896 	gfs2_trans_end(sdp);
897 
898 fail_ipreserv:
899 	if (dip->i_alloc.al_rgd)
900 		gfs2_inplace_release(dip);
901 
902 fail_quota_locks:
903 	gfs2_quota_unlock(dip);
904 
905 fail:
906 	gfs2_alloc_put(dip);
907 	return error;
908 }
909 
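/*
 * gfs2_security_init - ask the LSM for the initial security label of a new
 * inode and store it as a GFS2_EATYPE_SECURITY extended attribute.
 * -EOPNOTSUPP from the LSM is not treated as an error.
 */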
910 static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
911 {
912 	int err;
913 	size_t len;
914 	void *value;
915 	char *name;
916 	struct gfs2_ea_request er;
917 
918 	err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
919 					   &name, &value, &len);
920 
921 	if (err) {
922 		if (err == -EOPNOTSUPP)
923 			return 0;
924 		return err;
925 	}
926 
927 	memset(&er, 0, sizeof(struct gfs2_ea_request));
928 
929 	er.er_type = GFS2_EATYPE_SECURITY;
930 	er.er_name = name;
931 	er.er_data = value;
932 	er.er_name_len = strlen(name);
933 	er.er_data_len = len;
934 
935 	err = gfs2_ea_set_i(ip, &er);
936 
937 	kfree(value);
938 	kfree(name);
939 
940 	return err;
941 }
942 
943 /**
944  * gfs2_createi - Create a new inode
945  * @ghs: An array of two holders
946  * @name: The name of the new file
947  * @mode: the permissions on the new inode
948  *
949  * @ghs[0] is an initialized holder for the directory
950  * @ghs[1] is the holder for the inode lock
951  *
952  * If the return value is not an error, the glocks on both the directory and the new
953  * file are held.  A transaction has been started and an inplace reservation
954  * is held, as well.
955  *
956  * Returns: An inode
957  */
958 
959 struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
960 			   unsigned int mode, dev_t dev)
961 {
962 	struct inode *inode = NULL;
963 	struct gfs2_inode *dip = ghs->gh_gl->gl_object;
964 	struct inode *dir = &dip->i_inode;
965 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
966 	struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
967 	int error;
968 	u64 generation;
969 	struct buffer_head *bh=NULL;
970 
971 	if (!name->len || name->len > GFS2_FNAMESIZE)
972 		return ERR_PTR(-ENAMETOOLONG);
973 
974 	gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
975 	error = gfs2_glock_nq(ghs);
976 	if (error)
977 		goto fail;
978 
979 	error = create_ok(dip, name, mode);
980 	if (error)
981 		goto fail_gunlock;
982 
983 	error = pick_formal_ino(sdp, &inum.no_formal_ino);
984 	if (error)
985 		goto fail_gunlock;
986 
987 	error = alloc_dinode(dip, &inum.no_addr, &generation);
988 	if (error)
989 		goto fail_gunlock;
990 
991 	error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
992 				  LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
993 	if (error)
994 		goto fail_gunlock;
995 
996 	error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev, &bh);
997 	if (error)
998 		goto fail_gunlock2;
999 
1000 	inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode),
1001 					inum.no_addr,
1002 					inum.no_formal_ino, 0);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		inode = NULL;
		goto fail_gunlock2;
	}
1005 
1006 	gfs2_inode_bh(GFS2_I(inode), bh);
1007 
1008 	error = gfs2_inode_refresh(GFS2_I(inode));
1009 	if (error)
1010 		goto fail_gunlock2;
1011 
1012 	error = gfs2_acl_create(dip, GFS2_I(inode));
1013 	if (error)
1014 		goto fail_gunlock2;
1015 
1016 	error = gfs2_security_init(dip, GFS2_I(inode));
1017 	if (error)
1018 		goto fail_gunlock2;
1019 
1020 	error = link_dinode(dip, name, GFS2_I(inode));
1021 	if (error)
1022 		goto fail_gunlock2;
1023 
1024 	if (!inode)
1025 		return ERR_PTR(-ENOMEM);
1026 	return inode;
1027 
1028 fail_gunlock2:
1029 	gfs2_glock_dq_uninit(ghs + 1);
1030 	if (inode)
1031 		iput(inode);
1032 fail_gunlock:
1033 	gfs2_glock_dq(ghs);
1034 fail:
1035 	return ERR_PTR(error);
1036 }
1037 
1038 /**
1039  * gfs2_rmdiri - Remove a directory
1040  * @dip: The parent directory of the directory to be removed
1041  * @name: The name of the directory to be removed
1042  * @ip: The GFS2 inode of the directory to be removed
1043  *
1044  * Assumes Glocks on dip and ip are held
1045  *
1046  * Returns: errno
1047  */
1048 
1049 int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
1050 		struct gfs2_inode *ip)
1051 {
1052 	struct qstr dotname;
1053 	int error;
1054 
1055 	if (ip->i_di.di_entries != 2) {
1056 		if (gfs2_consist_inode(ip))
1057 			gfs2_dinode_print(ip);
1058 		return -EIO;
1059 	}
1060 
1061 	error = gfs2_dir_del(dip, name);
1062 	if (error)
1063 		return error;
1064 
1065 	error = gfs2_change_nlink(dip, -1);
1066 	if (error)
1067 		return error;
1068 
1069 	gfs2_str2qstr(&dotname, ".");
1070 	error = gfs2_dir_del(ip, &dotname);
1071 	if (error)
1072 		return error;
1073 
1074 	gfs2_str2qstr(&dotname, "..");
1075 	error = gfs2_dir_del(ip, &dotname);
1076 	if (error)
1077 		return error;
1078 
1079 	/* The directory's nlink is 2 ("." plus the parent's entry), so it really should be decremented twice */
1080 	error = gfs2_change_nlink(ip, -1);
1081 	if (error)
1082 		return error;
1083 
1084 	error = gfs2_change_nlink(ip, -1);
1085 	if (error)
1086 		return error;
1087 
1088 	return error;
1089 }
1090 
1091 /**
1092  * gfs2_unlink_ok - check to see that an inode is still in a directory
1093  * @dip: the directory
1094  * @name: the name of the file
1095  * @ip: the inode
1096  *
1097  * Assumes that the lock on (at least) @dip is held.
1098  *
1099  * Returns: 0 if the parent/child relationship is correct, errno if it isn't
1100  */
1101 
1102 int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
1103 		   const struct gfs2_inode *ip)
1104 {
1105 	int error;
1106 
1107 	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
1108 		return -EPERM;
1109 
1110 	if ((dip->i_inode.i_mode & S_ISVTX) &&
1111 	    dip->i_inode.i_uid != current->fsuid &&
1112 	    ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER))
1113 		return -EPERM;
1114 
1115 	if (IS_APPEND(&dip->i_inode))
1116 		return -EPERM;
1117 
1118 	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
1119 	if (error)
1120 		return error;
1121 
1122 	error = gfs2_dir_check(&dip->i_inode, name, ip);
1123 	if (error)
1124 		return error;
1125 
1126 	return 0;
1127 }
1128 
1129 /**
1130  * gfs2_ok_to_move - check if it's ok to move a directory to another directory
1131  * @this: move this
1132  * @to: to here
1133  *
1134  * Follow @to back to the root and make sure we don't encounter @this
1135  * Assumes we already hold the rename lock.
1136  *
1137  * Returns: errno
1138  */
1139 
1140 int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
1141 {
1142 	struct inode *dir = &to->i_inode;
1143 	struct super_block *sb = dir->i_sb;
1144 	struct inode *tmp;
1145 	struct qstr dotdot;
1146 	int error = 0;
1147 
1148 	gfs2_str2qstr(&dotdot, "..");
1149 
1150 	igrab(dir);
1151 
1152 	for (;;) {
1153 		if (dir == &this->i_inode) {
1154 			error = -EINVAL;
1155 			break;
1156 		}
1157 		if (dir == sb->s_root->d_inode) {
1158 			error = 0;
1159 			break;
1160 		}
1161 
1162 		tmp = gfs2_lookupi(dir, &dotdot, 1, NULL);
1163 		if (IS_ERR(tmp)) {
1164 			error = PTR_ERR(tmp);
1165 			break;
1166 		}
1167 
1168 		iput(dir);
1169 		dir = tmp;
1170 	}
1171 
1172 	iput(dir);
1173 
1174 	return error;
1175 }
1176 
1177 /**
1178  * gfs2_readlinki - return the contents of a symlink
1179  * @ip: the symlink's inode
1180  * @buf: a pointer to the buffer to be filled
1181  * @len: a pointer to the length of @buf
1182  *
1183  * If @buf is too small, a piece of memory is kmalloc()ed and needs
1184  * to be freed by the caller.
1185  *
1186  * Returns: errno
1187  */
1188 
1189 int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
1190 {
1191 	struct gfs2_holder i_gh;
1192 	struct buffer_head *dibh;
1193 	unsigned int x;
1194 	int error;
1195 
1196 	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
1197 	error = gfs2_glock_nq_atime(&i_gh);
1198 	if (error) {
1199 		gfs2_holder_uninit(&i_gh);
1200 		return error;
1201 	}
1202 
1203 	if (!ip->i_di.di_size) {
1204 		gfs2_consist_inode(ip);
1205 		error = -EIO;
1206 		goto out;
1207 	}
1208 
1209 	error = gfs2_meta_inode_buffer(ip, &dibh);
1210 	if (error)
1211 		goto out;
1212 
1213 	x = ip->i_di.di_size + 1;
1214 	if (x > *len) {
1215 		*buf = kmalloc(x, GFP_KERNEL);
1216 		if (!*buf) {
1217 			error = -ENOMEM;
1218 			goto out_brelse;
1219 		}
1220 	}
1221 
1222 	memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
1223 	*len = x;
1224 
1225 out_brelse:
1226 	brelse(dibh);
1227 out:
1228 	gfs2_glock_dq_uninit(&i_gh);
1229 	return error;
1230 }
1231 
1232 /**
1233  * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
1234  *       conditionally update the inode's atime
1235  * @gh: the holder to acquire
1236  *
1237  * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap.
1238  * Updates it if the difference between the current time and the inode's
1239  * current atime is greater than an interval specified at mount time.
1240  *
1241  * Returns: errno
1242  */
1243 
1244 int gfs2_glock_nq_atime(struct gfs2_holder *gh)
1245 {
1246 	struct gfs2_glock *gl = gh->gh_gl;
1247 	struct gfs2_sbd *sdp = gl->gl_sbd;
1248 	struct gfs2_inode *ip = gl->gl_object;
1249 	s64 quantum = gfs2_tune_get(sdp, gt_atime_quantum);
1250 	unsigned int state;
1251 	int flags;
1252 	int error;
1253 	struct timespec tv = CURRENT_TIME;
1254 
1255 	if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
1256 	    gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
1257 	    gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
1258 		return -EINVAL;
1259 
1260 	state = gh->gh_state;
1261 	flags = gh->gh_flags;
1262 
1263 	error = gfs2_glock_nq(gh);
1264 	if (error)
1265 		return error;
1266 
1267 	if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
1268 	    (sdp->sd_vfs->s_flags & MS_RDONLY))
1269 		return 0;
1270 
1271 	if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
1272 		gfs2_glock_dq(gh);
1273 		gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
1274 				   gh);
1275 		error = gfs2_glock_nq(gh);
1276 		if (error)
1277 			return error;
1278 
1279 		/* Verify that atime hasn't been updated while we were
1280 		   trying to get exclusive lock. */
1281 
1282 		tv = CURRENT_TIME;
1283 		if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
1284 			struct buffer_head *dibh;
1285 			struct gfs2_dinode *di;
1286 
1287 			error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1288 			if (error == -EROFS)
1289 				return 0;
1290 			if (error)
1291 				goto fail;
1292 
1293 			error = gfs2_meta_inode_buffer(ip, &dibh);
1294 			if (error)
1295 				goto fail_end_trans;
1296 
1297 			ip->i_inode.i_atime = tv;
1298 
1299 			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1300 			di = (struct gfs2_dinode *)dibh->b_data;
1301 			di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
1302 			di->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
1303 			brelse(dibh);
1304 
1305 			gfs2_trans_end(sdp);
1306 		}
1307 
1308 		/* If someone else has asked for the glock,
1309 		   unlock and let them have it. Then reacquire
1310 		   in the original state. */
1311 		if (gfs2_glock_is_blocking(gl)) {
1312 			gfs2_glock_dq(gh);
1313 			gfs2_holder_reinit(state, flags, gh);
1314 			return gfs2_glock_nq(gh);
1315 		}
1316 	}
1317 
1318 	return 0;
1319 
1320 fail_end_trans:
1321 	gfs2_trans_end(sdp);
1322 fail:
1323 	gfs2_glock_dq(gh);
1324 	return error;
1325 }
1326 
1327 static int
1328 __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1329 {
1330 	struct buffer_head *dibh;
1331 	int error;
1332 
1333 	error = gfs2_meta_inode_buffer(ip, &dibh);
1334 	if (!error) {
1335 		error = inode_setattr(&ip->i_inode, attr);
1336 		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1337 		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1338 		gfs2_dinode_out(ip, dibh->b_data);
1339 		brelse(dibh);
1340 	}
1341 	return error;
1342 }
1343 
1344 /**
1345  * gfs2_setattr_simple - change attributes on an inode
1346  * @ip: The GFS2 inode
1347  * @attr: The attributes to set
1348  *
1349  * Called with a reference on the vnode.
1350  *
1351  * Returns: errno
1352  */
1353 
1354 int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1355 {
1356 	int error;
1357 
1358 	if (current->journal_info)
1359 		return __gfs2_setattr_simple(ip, attr);
1360 
1361 	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
1362 	if (error)
1363 		return error;
1364 
1365 	error = __gfs2_setattr_simple(ip, attr);
1366 	gfs2_trans_end(GFS2_SB(&ip->i_inode));
1367 	return error;
1368 }
1369 
1370 void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
1371 {
1372 	const struct gfs2_dinode_host *di = &ip->i_di;
1373 	struct gfs2_dinode *str = buf;
1374 
1375 	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
1376 	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
1377 	str->di_header.__pad0 = 0;
1378 	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
1379 	str->di_header.__pad1 = 0;
1380 	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
1381 	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
1382 	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
1383 	str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
1384 	str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
1385 	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
1386 	str->di_size = cpu_to_be64(di->di_size);
1387 	str->di_blocks = cpu_to_be64(di->di_blocks);
1388 	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
1389 	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
1390 	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);
1391 
1392 	str->di_goal_meta = cpu_to_be64(di->di_goal_meta);
1393 	str->di_goal_data = cpu_to_be64(di->di_goal_data);
1394 	str->di_generation = cpu_to_be64(di->di_generation);
1395 
1396 	str->di_flags = cpu_to_be32(di->di_flags);
1397 	str->di_height = cpu_to_be16(di->di_height);
1398 	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
1399 					     !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
1400 					     GFS2_FORMAT_DE : 0);
1401 	str->di_depth = cpu_to_be16(di->di_depth);
1402 	str->di_entries = cpu_to_be32(di->di_entries);
1403 
1404 	str->di_eattr = cpu_to_be64(di->di_eattr);
1405 	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
1406 	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
1407 	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
1408 }
1409 
1410 void gfs2_dinode_print(const struct gfs2_inode *ip)
1411 {
1412 	const struct gfs2_dinode_host *di = &ip->i_di;
1413 
1414 	printk(KERN_INFO "  no_formal_ino = %llu\n",
1415 	       (unsigned long long)ip->i_no_formal_ino);
1416 	printk(KERN_INFO "  no_addr = %llu\n",
1417 	       (unsigned long long)ip->i_no_addr);
1418 	printk(KERN_INFO "  di_size = %llu\n", (unsigned long long)di->di_size);
1419 	printk(KERN_INFO "  di_blocks = %llu\n",
1420 	       (unsigned long long)di->di_blocks);
1421 	printk(KERN_INFO "  di_goal_meta = %llu\n",
1422 	       (unsigned long long)di->di_goal_meta);
1423 	printk(KERN_INFO "  di_goal_data = %llu\n",
1424 	       (unsigned long long)di->di_goal_data);
1425 	printk(KERN_INFO "  di_flags = 0x%.8X\n", di->di_flags);
1426 	printk(KERN_INFO "  di_height = %u\n", di->di_height);
1427 	printk(KERN_INFO "  di_depth = %u\n", di->di_depth);
1428 	printk(KERN_INFO "  di_entries = %u\n", di->di_entries);
1429 	printk(KERN_INFO "  di_eattr = %llu\n",
1430 	       (unsigned long long)di->di_eattr);
1431 }
1432 
1433