/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/bio.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "xattr.h"

#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)

enum {
	Opt_lockproto,
	Opt_locktable,
	Opt_hostdata,
	Opt_spectator,
	Opt_ignore_local_fs,
	Opt_localflocks,
	Opt_localcaching,
	Opt_debug,
	Opt_nodebug,
	Opt_upgrade,
	Opt_acl,
	Opt_noacl,
	Opt_quota_off,
	Opt_quota_account,
	Opt_quota_on,
	Opt_quota,
	Opt_noquota,
	Opt_suiddir,
	Opt_nosuiddir,
	Opt_data_writeback,
	Opt_data_ordered,
	Opt_meta,
	Opt_discard,
	Opt_nodiscard,
	Opt_commit,
	Opt_err_withdraw,
	Opt_err_panic,
	Opt_statfs_quantum,
	Opt_statfs_percent,
	Opt_quota_quantum,
	Opt_barrier,
	Opt_nobarrier,
	Opt_error,
};

static const match_table_t tokens = {
	{Opt_lockproto, "lockproto=%s"},
	{Opt_locktable, "locktable=%s"},
	{Opt_hostdata, "hostdata=%s"},
	{Opt_spectator, "spectator"},
	{Opt_spectator, "norecovery"},
	{Opt_ignore_local_fs, "ignore_local_fs"},
	{Opt_localflocks, "localflocks"},
	{Opt_localcaching, "localcaching"},
	{Opt_debug, "debug"},
	{Opt_nodebug, "nodebug"},
	{Opt_upgrade, "upgrade"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_quota_off, "quota=off"},
	{Opt_quota_account, "quota=account"},
	{Opt_quota_on, "quota=on"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_suiddir, "suiddir"},
	{Opt_nosuiddir, "nosuiddir"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_meta, "meta"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_commit, "commit=%d"},
	{Opt_err_withdraw, "errors=withdraw"},
	{Opt_err_panic, "errors=panic"},
	{Opt_statfs_quantum, "statfs_quantum=%d"},
	{Opt_statfs_percent, "statfs_percent=%d"},
	{Opt_quota_quantum, "quota_quantum=%d"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_error, NULL}
};

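/*
 * Illustrative example (not part of the original source): a mount option
 * string such as
 *
 *	"lockproto=lock_dlm,locktable=mycluster:myfs,quota=on,commit=30"
 *
 * is split on ',' by gfs2_mount_args() below; each piece is matched
 * against the table above, so "quota=on" yields Opt_quota_on, while
 * "commit=30" yields Opt_commit with "30" captured in tmp[0] for
 * match_int().
 */
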
/**
 * gfs2_mount_args - Parse mount options
 * @args: The structure into which the parsed options will be written
 * @options: The options to parse
 *
 * Returns: errno
 */

int gfs2_mount_args(struct gfs2_args *args, char *options)
{
	char *o;
	int token;
	substring_t tmp[MAX_OPT_ARGS];
	int rv;

	/* Split the options into tokens with the "," character and
	   process them */

	while (1) {
		o = strsep(&options, ",");
		if (o == NULL)
			break;
		if (*o == '\0')
			continue;

		token = match_token(o, tokens, tmp);
		switch (token) {
		case Opt_lockproto:
			match_strlcpy(args->ar_lockproto, &tmp[0],
				      GFS2_LOCKNAME_LEN);
			break;
		case Opt_locktable:
			match_strlcpy(args->ar_locktable, &tmp[0],
				      GFS2_LOCKNAME_LEN);
			break;
		case Opt_hostdata:
			match_strlcpy(args->ar_hostdata, &tmp[0],
				      GFS2_LOCKNAME_LEN);
			break;
		case Opt_spectator:
			args->ar_spectator = 1;
			break;
		case Opt_ignore_local_fs:
			/* Retained for backwards compat only */
			break;
		case Opt_localflocks:
			args->ar_localflocks = 1;
			break;
		case Opt_localcaching:
			/* Retained for backwards compat only */
			break;
		case Opt_debug:
			if (args->ar_errors == GFS2_ERRORS_PANIC) {
				printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
				       "are mutually exclusive.\n");
				return -EINVAL;
			}
			args->ar_debug = 1;
			break;
		case Opt_nodebug:
			args->ar_debug = 0;
			break;
		case Opt_upgrade:
			/* Retained for backwards compat only */
			break;
		case Opt_acl:
			args->ar_posix_acl = 1;
			break;
		case Opt_noacl:
			args->ar_posix_acl = 0;
			break;
		case Opt_quota_off:
		case Opt_noquota:
			args->ar_quota = GFS2_QUOTA_OFF;
			break;
		case Opt_quota_account:
			args->ar_quota = GFS2_QUOTA_ACCOUNT;
			break;
		case Opt_quota_on:
		case Opt_quota:
			args->ar_quota = GFS2_QUOTA_ON;
			break;
		case Opt_suiddir:
			args->ar_suiddir = 1;
			break;
		case Opt_nosuiddir:
			args->ar_suiddir = 0;
			break;
		case Opt_data_writeback:
			args->ar_data = GFS2_DATA_WRITEBACK;
			break;
		case Opt_data_ordered:
			args->ar_data = GFS2_DATA_ORDERED;
			break;
		case Opt_meta:
			args->ar_meta = 1;
			break;
		case Opt_discard:
			args->ar_discard = 1;
			break;
		case Opt_nodiscard:
			args->ar_discard = 0;
			break;
		case Opt_commit:
			rv = match_int(&tmp[0], &args->ar_commit);
			if (rv || args->ar_commit <= 0) {
				printk(KERN_WARNING "GFS2: commit mount option requires a positive numeric argument\n");
				return rv ? rv : -EINVAL;
			}
			break;
		case Opt_statfs_quantum:
			rv = match_int(&tmp[0], &args->ar_statfs_quantum);
			if (rv || args->ar_statfs_quantum < 0) {
				printk(KERN_WARNING "GFS2: statfs_quantum mount option requires a non-negative numeric argument\n");
				return rv ? rv : -EINVAL;
			}
			break;
		case Opt_quota_quantum:
			rv = match_int(&tmp[0], &args->ar_quota_quantum);
			if (rv || args->ar_quota_quantum <= 0) {
				printk(KERN_WARNING "GFS2: quota_quantum mount option requires a positive numeric argument\n");
				return rv ? rv : -EINVAL;
			}
			break;
		case Opt_statfs_percent:
			rv = match_int(&tmp[0], &args->ar_statfs_percent);
			if (rv || args->ar_statfs_percent < 0 ||
			    args->ar_statfs_percent > 100) {
				printk(KERN_WARNING "GFS2: statfs_percent mount option requires a numeric argument between 0 and 100\n");
				return rv ? rv : -EINVAL;
			}
			break;
		case Opt_err_withdraw:
			args->ar_errors = GFS2_ERRORS_WITHDRAW;
			break;
		case Opt_err_panic:
			if (args->ar_debug) {
				printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
					"are mutually exclusive.\n");
				return -EINVAL;
			}
			args->ar_errors = GFS2_ERRORS_PANIC;
			break;
		case Opt_barrier:
			args->ar_nobarrier = 0;
			break;
		case Opt_nobarrier:
			args->ar_nobarrier = 1;
			break;
		case Opt_error:
		default:
			printk(KERN_WARNING "GFS2: invalid mount option: %s\n", o);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list, *head;
	struct gfs2_jdesc *jd;
	struct gfs2_journal_extent *jext;

	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

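	/*
	 * The journal descriptors were spliced onto the private list above,
	 * so they can be torn down below without sd_jindex_spin held.
	 */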
	while (!list_empty(&list)) {
		jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
		head = &jd->extent_list;
		while (!list_empty(head)) {
			jext = list_entry(head->next,
					  struct gfs2_journal_extent,
					  extent_list);
			list_del(&jext->extent_list);
			kfree(jext);
		}
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		kfree(jd);
	}
}

static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;
	int found = 0;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid) {
			found = 1;
			break;
		}
	}

	if (!found)
		jd = NULL;

	return jd;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}

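/*
 * Note on the limits checked below: a journal must be between 8MiB
 * (8 << 20) and 1GiB (1 << 30) in size, and jd_blocks is the journal
 * size in filesystem blocks. Illustrative numbers: with the common
 * 4KiB block size (sb_bsize_shift == 12), a 128MiB journal gives
 * jd_blocks == 32768.
 */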
int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	u64 size = i_size_read(jd->jd_inode);

	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, 1 << 30))
		return -EIO;

	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;

	if (gfs2_write_alloc_required(ip, 0, size)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_holder t_gh;
	struct gfs2_log_header_host head;
	int error;

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
	if (error)
		return error;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

	error = gfs2_find_jhead(sdp->sd_jdesc, &head);
	if (error)
		goto fail;

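	/*
	 * The journal head must carry the UNMOUNT flag, i.e. the journal
	 * was shut down cleanly (or has been replayed); otherwise the
	 * filesystem is inconsistent and must not go read-write.
	 */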
	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		error = -EIO;
		goto fail;
	}

	/*  Initialize the in-core log state from the journal head  */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (error)
		goto fail;

	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_glock_dq_uninit(&t_gh);

	return 0;

fail:
	t_gh.gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_uninit(&t_gh);

	return error;
}

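/*
 * On disk, each of the statfs files holds a struct gfs2_statfs_change
 * (big-endian 64-bit total/free/dinodes counters) immediately after the
 * dinode header; the helpers below convert between that layout and the
 * in-core gfs2_statfs_change_host.
 */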
void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

static void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}

int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	struct gfs2_holder gh;
	int error;
	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		error = gfs2_meta_inode_buffer(l_ip, &l_bh);
		if (error)
			goto out_m_bh;

		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, l_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

		brelse(l_bh);
	}

out_m_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct buffer_head *l_bh;
	s64 x, y;
	int need_sync = 0;
	int error;

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		return;

	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
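	/*
	 * Decide whether the accumulated local changes are large enough to
	 * be worth folding back into the master statfs file. Illustrative
	 * numbers: with statfs_percent=1 and a master free count of
	 * 1,000,000 blocks, a local delta of 10,000 or more free blocks in
	 * either direction (100 * |sc_free| >= 1 * 1,000,000) triggers the
	 * gfs2_wake_up_statfs() call below.
	 */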
	if (sdp->sd_args.ar_statfs_percent) {
		x = 100 * l_sc->sc_free;
		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
		if (x >= y || x <= -y)
			need_sync = 1;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	brelse(l_bh);
	if (need_sync)
		gfs2_wake_up_statfs(sdp);
}

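/*
 * Each node accumulates its allocation deltas in its own statfs_change
 * file (l_ip) so that ordinary allocations never contend on a
 * cluster-wide lock. update_statfs() below, called with the master
 * statfs glock held exclusively, folds those local deltas into the
 * master statfs file and zeroes the local file again.
 */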
void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
		   struct buffer_head *l_bh)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	spin_unlock(&sdp->sd_statfs_spin);

	gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
}

int gfs2_statfs_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh, *l_bh;
	int error;
	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		goto out_bh;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh2;

	update_statfs(sdp, m_bh, l_bh);
	sdp->sd_statfs_force_sync = 0;

	gfs2_trans_end(sdp);

out_bh2:
	brelse(l_bh);
out_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 * @t_gh: the hold on the transaction lock
 *
 * Returns: errno
 */

static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
				    struct gfs2_holder *t_gh)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
				   GL_NOCACHE, t_gh);
	if (error)
		goto out;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (error)
		gfs2_glock_dq_uninit(t_gh);

out:
	while (!list_empty(&list)) {
		lfcc = list_entry(list.next, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	return error;
}

655 
656 /**
657  * gfs2_freeze_fs - freezes the file system
658  * @sdp: the file system
659  *
660  * This function flushes data and meta data for all machines by
661  * acquiring the transaction log exclusively.  All journals are
662  * ensured to be in a clean state as well.
663  *
664  * Returns: errno
665  */
666 
667 int gfs2_freeze_fs(struct gfs2_sbd *sdp)
668 {
669 	int error = 0;
670 
671 	mutex_lock(&sdp->sd_freeze_lock);
672 
673 	if (!sdp->sd_freeze_count++) {
674 		error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
675 		if (error)
676 			sdp->sd_freeze_count--;
677 	}
678 
679 	mutex_unlock(&sdp->sd_freeze_lock);
680 
681 	return error;
682 }
683 
684 /**
685  * gfs2_unfreeze_fs - unfreezes the file system
686  * @sdp: the file system
687  *
688  * This function allows the file system to proceed by unlocking
689  * the exclusively held transaction lock.  Other GFS2 nodes are
690  * now free to acquire the lock shared and go on with their lives.
691  *
692  */
693 
694 void gfs2_unfreeze_fs(struct gfs2_sbd *sdp)
695 {
696 	mutex_lock(&sdp->sd_freeze_lock);
697 
698 	if (sdp->sd_freeze_count && !--sdp->sd_freeze_count)
699 		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
700 
701 	mutex_unlock(&sdp->sd_freeze_lock);
702 }
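
/*
 * sd_freeze_count allows freeze requests to nest: only the first
 * gfs2_freeze_fs() call actually takes sd_freeze_gh, and only the
 * matching final gfs2_unfreeze_fs() call drops it again.
 */

/**
 * gfs2_dinode_out - Write a dinode from the in-core inode to a buffer
 * @ip: The GFS2 inode
 * @buf: The on-disk dinode buffer to fill in
 */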
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
	str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
	str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = metamapping->backing_dev_info;
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
	if (bdi->dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
	else
		filemap_fdatawrite(metamapping);
	if (wbc->sync_mode == WB_SYNC_ALL)
		ret = filemap_fdatawait(metamapping);
	if (ret)
		mark_inode_dirty_sync(inode);
	return ret;
}

/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and transaction lock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
 */

static void gfs2_dirty_inode(struct inode *inode, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int need_unlock = 0;
	int need_endtrans = 0;
	int ret;

	if (!(flags & (I_DIRTY_DATASYNC|I_DIRTY_SYNC)))
		return;

	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret) {
			fs_err(sdp, "dirty_inode: glock %d\n", ret);
			return;
		}
		need_unlock = 1;
	}

	if (current->journal_info == NULL) {
		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
		if (ret) {
			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
			goto out;
		}
		need_endtrans = 1;
	}

	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_dinode_out(ip, bh->b_data);
		brelse(bh);
	}

	if (need_endtrans)
		gfs2_trans_end(sdp);
out:
	if (need_unlock)
		gfs2_glock_dq_uninit(&gh);
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	struct gfs2_holder t_gh;
	int error;

	flush_workqueue(gfs2_delete_workqueue);
	gfs2_quota_sync(sdp->sd_vfs, 0, 1);
	gfs2_statfs_sync(sdp->sd_vfs, 0);

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
				   &t_gh);
	if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		return error;

	gfs2_meta_syncfs(sdp);
	gfs2_log_shutdown(sdp);

	clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	if (t_gh.gh_gl)
		gfs2_glock_dq_uninit(&t_gh);

	gfs2_quota_cleanup(sdp);

	return error;
}

static int gfs2_umount_recovery_wait(void *word)
{
	schedule();
	return 0;
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 *
 */

static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;
	struct gfs2_jdesc *jd;

	/*  Unfreeze the filesystem, if we need to  */

	mutex_lock(&sdp->sd_freeze_lock);
	if (sdp->sd_freeze_count)
		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
	mutex_unlock(&sdp->sd_freeze_lock);

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
	smp_mb();

	/* Wait on outstanding recovery */
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    gfs2_umount_recovery_wait, TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	kthread_stop(sdp->sd_quotad_process);
	kthread_stop(sdp->sd_logd_process);

	if (!(sb->s_flags & MS_RDONLY)) {
		error = gfs2_make_fs_ro(sdp);
		if (error)
			gfs2_io_error(sdp);
	}
	/*  At this point, we're through modifying the disk  */

	/*  Release stuff  */

	iput(sdp->sd_jindex);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_trans_gl);

	if (!sdp->sd_args.ar_spectator) {
		gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		iput(sdp->sd_sc_inode);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/*  Take apart glock structures and buffer lists  */
	gfs2_gl_hash_clear(sdp);
	/*  Unmount the locking protocol  */
	gfs2_lm_unmount(sdp);

	/*  At this point, we're through participating in the lockspace  */
	gfs2_sys_fs_del(sdp);
}

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion of the sync
 *
 * Flushes the log to disk.
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	if (wait && sdp)
		gfs2_log_flush(sdp, NULL);
	return 0;
}

/**
 * gfs2_freeze - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_freeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;

	if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		return -EINVAL;

	for (;;) {
		error = gfs2_freeze_fs(sdp);
		if (!error)
			break;

		switch (error) {
		case -EBUSY:
			fs_err(sdp, "waiting for recovery before freeze\n");
			break;

		default:
			fs_err(sdp, "error freezing FS: %d\n", error);
			break;
		}

		fs_err(sdp, "retrying...\n");
		msleep(1000);
	}
	return 0;
}

/**
 * gfs2_unfreeze - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_unfreeze(struct super_block *sb)
{
	gfs2_unfreeze_fs(sb->s_fs_info);
	return 0;
}

/**
 * statfs_slow_fill - fill in the sc for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Walks every resource group, keeping a window of asynchronous glock
 * requests in flight. Any error (including a pending signal, which
 * yields -ERESTARTSYS) is returned to the caller.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;

	rgd_next = gfs2_rgrpd_get_first(sdp);

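	/*
	 * Keep up to 'slots' asynchronous glock requests in flight. Each
	 * pass over gha[] reaps requests that have completed, folds their
	 * rgrp counters into sc, and issues a new GL_ASYNC request for the
	 * next rgrp until none remain and every slot has drained.
	 */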
	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gh->gh_gl && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error)
						error = statfs_slow_fill(
							gh->gh_gl->gl_object, sc);
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gh->gh_gl)
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	kfree(gha);
	return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */

static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

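	/*
	 * The local deltas may not have been folded into the master file
	 * yet, so the combined values can be transiently out of range;
	 * clamp them to something sane before reporting.
	 */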
	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}

/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The dentry to stat
 * @buf: The kstatfs buffer to fill in
 *
 * Returns: 0 on success or error code
 */

static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
	else
		error = gfs2_statfs_i(sdp, &sc);

	if (error)
		return error;

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;

	return 0;
}

1170  * gfs2_remount_fs - called when the FS is remounted
1171  * @sb:  the filesystem
1172  * @flags:  the remount flags
1173  * @data:  extra data passed in (not used right now)
1174  *
1175  * Returns: errno
1176  */
1177 
1178 static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
1179 {
1180 	struct gfs2_sbd *sdp = sb->s_fs_info;
1181 	struct gfs2_args args = sdp->sd_args; /* Default to current settings */
1182 	struct gfs2_tune *gt = &sdp->sd_tune;
1183 	int error;
1184 
1185 	spin_lock(&gt->gt_spin);
1186 	args.ar_commit = gt->gt_logd_secs;
1187 	args.ar_quota_quantum = gt->gt_quota_quantum;
1188 	if (gt->gt_statfs_slow)
1189 		args.ar_statfs_quantum = 0;
1190 	else
1191 		args.ar_statfs_quantum = gt->gt_statfs_quantum;
1192 	spin_unlock(&gt->gt_spin);
1193 	error = gfs2_mount_args(&args, data);
1194 	if (error)
1195 		return error;
1196 
1197 	/* Not allowed to change locking details */
1198 	if (strcmp(args.ar_lockproto, sdp->sd_args.ar_lockproto) ||
1199 	    strcmp(args.ar_locktable, sdp->sd_args.ar_locktable) ||
1200 	    strcmp(args.ar_hostdata, sdp->sd_args.ar_hostdata))
1201 		return -EINVAL;
1202 
1203 	/* Some flags must not be changed */
1204 	if (args_neq(&args, &sdp->sd_args, spectator) ||
1205 	    args_neq(&args, &sdp->sd_args, localflocks) ||
1206 	    args_neq(&args, &sdp->sd_args, meta))
1207 		return -EINVAL;
1208 
1209 	if (sdp->sd_args.ar_spectator)
1210 		*flags |= MS_RDONLY;
1211 
1212 	if ((sb->s_flags ^ *flags) & MS_RDONLY) {
1213 		if (*flags & MS_RDONLY)
1214 			error = gfs2_make_fs_ro(sdp);
1215 		else
1216 			error = gfs2_make_fs_rw(sdp);
1217 		if (error)
1218 			return error;
1219 	}
1220 
1221 	sdp->sd_args = args;
1222 	if (sdp->sd_args.ar_posix_acl)
1223 		sb->s_flags |= MS_POSIXACL;
1224 	else
1225 		sb->s_flags &= ~MS_POSIXACL;
1226 	if (sdp->sd_args.ar_nobarrier)
1227 		set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1228 	else
1229 		clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1230 	spin_lock(&gt->gt_spin);
1231 	gt->gt_logd_secs = args.ar_commit;
1232 	gt->gt_quota_quantum = args.ar_quota_quantum;
1233 	if (args.ar_statfs_quantum) {
1234 		gt->gt_statfs_slow = 0;
1235 		gt->gt_statfs_quantum = args.ar_statfs_quantum;
1236 	}
1237 	else {
1238 		gt->gt_statfs_slow = 1;
1239 		gt->gt_statfs_quantum = 30;
1240 	}
1241 	spin_unlock(&gt->gt_spin);
1242 
1243 	gfs2_online_uevent(sdp);
1244 	return 0;
1245 }
1246 
/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */

static int gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (inode->i_nlink) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
			clear_nlink(inode);
	}
	return generic_drop_inode(inode);
}

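/*
 * Return 1 if d2 is an ancestor of (or the same dentry as) d1. Used by
 * gfs2_show_options() to detect mounts rooted inside the hidden metafs
 * directory tree.
 */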
static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
	do {
		if (d1 == d2)
			return 1;
		d1 = d1->d_parent;
	} while (!IS_ROOT(d1));
	return 0;
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @root: root of this (sub)tree
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	int val;

	if (is_ancestor(root, sdp->sd_master_dir))
		seq_printf(s, ",meta");
	if (args->ar_lockproto[0])
		seq_printf(s, ",lockproto=%s", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_printf(s, ",locktable=%s", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_printf(s, ",hostdata=%s", args->ar_hostdata);
	if (args->ar_spectator)
		seq_printf(s, ",spectator");
	if (args->ar_localflocks)
		seq_printf(s, ",localflocks");
	if (args->ar_debug)
		seq_printf(s, ",debug");
	if (args->ar_posix_acl)
		seq_printf(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_printf(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_printf(s, ",discard");
	val = sdp->sd_tune.gt_logd_secs;
	if (val != 30)
		seq_printf(s, ",commit=%d", val);
	val = sdp->sd_tune.gt_statfs_quantum;
	if (val != 30)
		seq_printf(s, ",statfs_quantum=%d", val);
	val = sdp->sd_tune.gt_quota_quantum;
	if (val != 60)
		seq_printf(s, ",quota_quantum=%d", val);
	if (args->ar_statfs_percent)
		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
		const char *state;

		switch (args->ar_errors) {
		case GFS2_ERRORS_WITHDRAW:
			state = "withdraw";
			break;
		case GFS2_ERRORS_PANIC:
			state = "panic";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",errors=%s", state);
	}
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		seq_printf(s, ",nobarrier");
	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
		seq_printf(s, ",demote_interface_used");
	return 0;
}

static void gfs2_final_release_pages(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_glock *gl = ip->i_gl;

	truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (atomic_read(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
	}
}

static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_qadata *qa;
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder gh;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	qa = gfs2_qadata_get(ip);
	if (!qa)
		return -ENOMEM;

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_qs;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_qs;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
				 sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_rg_gunlock;

	gfs2_free_di(rgd, ip);

	gfs2_final_release_pages(ip);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&gh);
out_qs:
	gfs2_quota_unhold(ip);
out:
	gfs2_qadata_put(ip);
	return error;
}

/**
 * gfs2_evict_inode - Remove an inode from cache
 * @inode: The inode to evict
 *
 * There are three cases to consider:
 * 1. i_nlink == 0, we are the final opener (and must deallocate)
 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
 * 3. i_nlink > 0
 *
 * If the fs is read-only, then we have to treat all cases as per #3
 * since we are unable to do any deallocation. The inode will be
 * deallocated by the next read/write node to attempt an allocation
 * in the same resource group.
 *
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */

static void gfs2_evict_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;

	if (inode->i_nlink || (sb->s_flags & MS_RDONLY))
		goto out;

	/* Must not read inode block until block type has been verified */
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
	if (unlikely(error)) {
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		goto out;
	}

	if (!test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
		error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
		if (error)
			goto out_truncate;
	}

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			goto out_truncate;
	}

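	/*
	 * Drop the shared iopen glock and try to retake it exclusively
	 * (LM_FLAG_TRY_1CB). If another node still has the inode open the
	 * try fails and we fall through to case 2; if we get the exclusive
	 * lock we are the last opener cluster-wide and may deallocate.
	 */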
	ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_wait(&ip->i_iopen_gh);
	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
	error = gfs2_glock_nq(&ip->i_iopen_gh);
	if (error)
		goto out_truncate;

	/* Case 1 starts here */

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		error = gfs2_dir_exhash_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	if (ip->i_eattr) {
		error = gfs2_ea_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	if (!gfs2_is_stuffed(ip)) {
		error = gfs2_file_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	error = gfs2_dinode_dealloc(ip);
	goto out_unlock;

out_truncate:
	gfs2_log_flush(sdp, ip->i_gl);
	write_inode_now(inode, 1);
	gfs2_ail_flush(ip->i_gl, 0);

	/* Case 2 starts here */
	error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_unlock;
	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	gfs2_trans_end(sdp);

out_unlock:
	/* Error path for case 1 */
	if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
		gfs2_glock_dq(&ip->i_iopen_gh);
	gfs2_holder_uninit(&ip->i_iopen_gh);
	gfs2_glock_dq_uninit(&gh);
	if (error && error != GLR_TRYFAILED && error != -EROFS)
		fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
out:
	/* Case 3 starts here */
	truncate_inode_pages(&inode->i_data, 0);
	gfs2_rs_delete(ip);
	clear_inode(inode);
	gfs2_dir_hash_inval(ip);
	ip->i_gl->gl_object = NULL;
	flush_delayed_work_sync(&ip->i_gl->gl_work);
	gfs2_glock_add_to_lru(ip->i_gl);
	gfs2_glock_put(ip->i_gl);
	ip->i_gl = NULL;
	if (ip->i_iopen_gh.gh_gl) {
		ip->i_iopen_gh.gh_gl->gl_object = NULL;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
	}
}

static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
	if (ip) {
		ip->i_flags = 0;
		ip->i_gl = NULL;
		ip->i_rgd = NULL;
		ip->i_res = NULL;
	}
	return &ip->i_inode;
}

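/*
 * Inodes are freed via an RCU callback so that lockless (RCU-walk) path
 * lookups which may still be referencing the struct inode can run to
 * completion before the memory is returned to the slab cache.
 */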
static void gfs2_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(gfs2_inode_cachep, inode);
}

static void gfs2_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, gfs2_i_callback);
}

const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.destroy_inode		= gfs2_destroy_inode,
	.write_inode		= gfs2_write_inode,
	.dirty_inode		= gfs2_dirty_inode,
	.evict_inode		= gfs2_evict_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_fs		= gfs2_freeze,
	.unfreeze_fs		= gfs2_unfreeze,
	.statfs			= gfs2_statfs,
	.remount_fs		= gfs2_remount_fs,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};