// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/kernel.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "xattr.h"
#include "lops.h"

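/*
 * The possible fates of an inode that is being evicted: delete the dinode,
 * leave it on disk (some other opener or node is responsible for it), or
 * defer the decision to the delete workqueue.  evict_should_delete() picks
 * one of these, and gfs2_evict_inode() acts on the result.
 */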
enum dinode_demise {
	SHOULD_DELETE_DINODE,
	SHOULD_NOT_DELETE_DINODE,
	SHOULD_DEFER_EVICTION,
};

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	/* Splice the whole jindex list onto the local list head */
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	sdp->sd_jdesc = NULL;
	while (!list_empty(&list)) {
		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
		gfs2_free_journal_extents(jd);
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		jd->jd_inode = NULL;
		kfree(jd);
	}
}

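/*
 * jdesc_find_i - find a journal descriptor by journal id
 *
 * The caller is expected to hold sd_jindex_spin; gfs2_jdesc_find() below
 * is the locked wrapper that most callers go through.
 */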
static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid)
			return jd;
	}
	return NULL;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}

int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	u64 size = i_size_read(jd->jd_inode);

	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
		return -EIO;

	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;

	if (gfs2_write_alloc_required(ip, 0, size)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	return 0;
}
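
/*
 * For illustration: gfs2_jdesc_check() above accepts journals between
 * 8 MiB (8 << 20) and 1 GiB (BIT(30)) in size.  Assuming a hypothetical
 * 4 KiB block size (sb_bsize_shift == 12), a 128 MiB journal gives
 * jd_blocks = (128 << 20) >> 12 = 32768 blocks.
 */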

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
	if (gfs2_withdrawn(sdp))
		return -EIO;

	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
	if (error) {
		gfs2_consist(sdp);
		return error;
	}

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		return -EIO;
	}

	/*  Initialize the log head  */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (!error && gfs2_withdrawn(sdp))
		error = -EIO;
	if (!error)
		set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
	return error;
}

void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}

int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!sdp->sd_args.ar_spectator)
		gfs2_statfs_change_in(l_sc, sdp->sd_sc_bh->b_data +
				      sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);

	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	s64 x, y;
	int need_sync = 0;

	gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, sdp->sd_sc_bh->b_data +
			       sizeof(struct gfs2_dinode));
	if (sdp->sd_args.ar_statfs_percent) {
		x = 100 * l_sc->sc_free;
		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
		if (x >= y || x <= -y)
			need_sync = 1;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	if (need_sync)
		gfs2_wake_up_statfs(sdp);
}
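
/*
 * A worked example of the ar_statfs_percent check above (all values
 * hypothetical): with statfs_percent=10 and m_sc->sc_free=1000 blocks, a
 * statfs sync is requested once the local free-space delta reaches +/-100
 * blocks, i.e. as soon as 100 * |l_sc->sc_free| >= 10 * 1000.
 */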

void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
	gfs2_trans_add_meta(m_ip->i_gl, m_bh);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(*l_sc));
	memset(sdp->sd_sc_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);
}
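
/*
 * update_statfs() above folds the per-node change record into the master
 * counters and then zeroes the record, both in memory (l_sc) and in the
 * on-disk local statfs block (sd_sc_bh), so that the two copies stay
 * consistent within a single transaction.
 */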

int gfs2_statfs_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		goto out;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out_unlock;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh;

	update_statfs(sdp, m_bh);
	sdp->sd_statfs_force_sync = 0;

	gfs2_trans_end(sdp);

out_bh:
	brelse(m_bh);
out_unlock:
	gfs2_glock_dq_uninit(&gh);
out:
	return error;
}

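/*
 * An lfcc ("lock fs check clean") tracks one journal glock held in shared
 * mode by gfs2_lock_fs_check_clean() below.
 */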
struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 *
 * Returns: errno
 */

static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error, error2;

	/*
	 * Grab all the journal glocks in shared mode so that no journal can
	 * be recovered while the filesystem is frozen.
	 */

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NOEXP | GL_NOPID,
				   &sdp->sd_freeze_gh);
	if (error)
		goto relock_shared;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh, false);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (!error)
		goto out;  /* success */

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);

relock_shared:
	error2 = gfs2_freeze_lock_shared(sdp);
	gfs2_assert_withdraw(sdp, !error2);

out:
	while (!list_empty(&list)) {
		lfcc = list_first_entry(&list, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	return error;
}

void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	const struct inode *inode = &ip->i_inode;
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(inode->i_mode);
	str->di_uid = cpu_to_be32(i_uid_read(inode));
	str->di_gid = cpu_to_be32(i_gid_read(inode));
	str->di_nlink = cpu_to_be32(inode->i_nlink);
	str->di_size = cpu_to_be64(i_size_read(inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
	str->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(inode_get_ctime(inode).tv_sec);

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(inode->i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(inode_get_ctime(inode).tv_nsec);
}
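
/*
 * Minimal usage sketch for gfs2_dinode_out(): the dinode is written into
 * its buffer under an active transaction, as gfs2_dirty_inode() does
 * further down in this file:
 *
 *	gfs2_trans_add_meta(ip->i_gl, bh);
 *	gfs2_dinode_out(ip, bh->b_data);
 *	brelse(bh);
 */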

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
	int ret = 0;
	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));

	if (flush_all)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_WRITE_INODE);
	if (bdi->wb.dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
	else
		filemap_fdatawrite(metamapping);
	if (flush_all)
		ret = filemap_fdatawait(metamapping);
	if (ret) {
		mark_inode_dirty_sync(inode);
	} else {
		spin_lock(&inode->i_lock);
		if (!(inode->i_flags & I_DIRTY))
			gfs2_ordered_del_inode(ip);
		spin_unlock(&inode->i_lock);
	}
	return ret;
}

/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and freeze glock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
 */

static void gfs2_dirty_inode(struct inode *inode, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int need_unlock = 0;
	int need_endtrans = 0;
	int ret;

	if (unlikely(!ip->i_gl)) {
		/* This can only happen during incomplete inode creation. */
		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
		return;
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return;
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret) {
			fs_err(sdp, "dirty_inode: glock %d\n", ret);
			gfs2_dump_glock(NULL, ip->i_gl, true);
			return;
		}
		need_unlock = 1;
	} else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
		return;

	if (current->journal_info == NULL) {
		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
		if (ret) {
			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
			goto out;
		}
		need_endtrans = 1;
	}

	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		gfs2_trans_add_meta(ip->i_gl, bh);
		gfs2_dinode_out(ip, bh->b_data);
		brelse(bh);
	}

	if (need_endtrans)
		gfs2_trans_end(sdp);
out:
	if (need_unlock)
		gfs2_glock_dq_uninit(&gh);
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 */

void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	if (!test_bit(SDF_DEACTIVATING, &sdp->sd_flags))
		gfs2_flush_delete_work(sdp);

	if (!log_write_allowed && current == sdp->sd_quotad_process)
		fs_warn(sdp, "The quotad daemon is withdrawing.\n");
	else if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;

	if (!log_write_allowed && current == sdp->sd_logd_process)
		fs_warn(sdp, "The logd daemon is withdrawing.\n");
	else if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;

	if (log_write_allowed) {
		gfs2_quota_sync(sdp->sd_vfs, 0);
		gfs2_statfs_sync(sdp->sd_vfs, 0);

		/*
		 * We do two log flushes here. The first one commits dirty
		 * inodes and rgrps to the journal, but queues up revokes to
		 * the ail list. The second flush writes out and removes the
		 * revokes.
		 *
		 * The first must be done before the FLUSH_SHUTDOWN code
		 * clears the LIVE flag, otherwise it will not be able to
		 * start a transaction to write its revokes, and the error
		 * will cause a withdraw of the file system.
		 */
		gfs2_log_flush(sdp, NULL, GFS2_LFC_MAKE_FS_RO);
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			       GFS2_LFC_MAKE_FS_RO);
		wait_event_timeout(sdp->sd_log_waitq,
				   gfs2_log_is_empty(sdp),
				   HZ * 5);
		gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
	} else {
		wait_event_timeout(sdp->sd_log_waitq,
				   gfs2_log_is_empty(sdp),
				   HZ * 5);
	}
	gfs2_quota_cleanup(sdp);

	if (!log_write_allowed)
		sdp->sd_vfs->s_flags |= SB_RDONLY;
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 *
 */

static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_jdesc *jd;

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
	smp_mb();

	/* Wait on outstanding recovery */
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	if (!sb_rdonly(sb))
		gfs2_make_fs_ro(sdp);
	WARN_ON(gfs2_withdrawing(sdp));

	/*  At this point, we're through modifying the disk  */

	/*  Release stuff  */

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);

	iput(sdp->sd_jindex);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_freeze_gl);

	if (!sdp->sd_args.ar_spectator) {
		if (gfs2_holder_initialized(&sdp->sd_journal_gh))
			gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
			gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		brelse(sdp->sd_sc_bh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		free_local_statfs_inodes(sdp);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/*  Take apart glock structures and buffer lists  */
	gfs2_gl_hash_clear(sdp);
	truncate_inode_pages_final(&sdp->sd_aspace);
	gfs2_delete_debugfs_file(sdp);
	/*  Unmount the locking protocol  */
	gfs2_lm_unmount(sdp);

	/*  At this point, we're through participating in the lockspace  */
	gfs2_sys_fs_del(sdp);
	free_sbd(sdp);
}

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion
 *
 * Flushes the log to disk.
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	gfs2_quota_sync(sb, -1);
	if (wait)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_SYNC_FS);
	return sdp->sd_log_error;
}

static int gfs2_freeze_locally(struct gfs2_sbd *sdp)
{
	struct super_block *sb = sdp->sd_vfs;
	int error;

	error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
	if (error)
		return error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
			       GFS2_LFC_FREEZE_GO_SYNC);
		if (gfs2_withdrawn(sdp)) {
			error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
			if (error)
				return error;
			return -EIO;
		}
	}
	return 0;
}
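
/*
 * gfs2_freeze_locally() is paired with gfs2_do_thaw() below: every
 * successful local freeze is eventually undone by a thaw, which also
 * reacquires the freeze glock in shared mode.  See gfs2_freeze_func(),
 * gfs2_freeze_super() and gfs2_thaw_super() for the callers.
 */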

static int gfs2_do_thaw(struct gfs2_sbd *sdp)
{
	struct super_block *sb = sdp->sd_vfs;
	int error;

	error = gfs2_freeze_lock_shared(sdp);
	if (error)
		goto fail;
	error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
	if (!error)
		return 0;

fail:
	fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n", error);
	gfs2_assert_withdraw(sdp, 0);
	return error;
}

void gfs2_freeze_func(struct work_struct *work)
{
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
	struct super_block *sb = sdp->sd_vfs;
	int error;

	mutex_lock(&sdp->sd_freeze_mutex);
	error = -EBUSY;
	if (test_bit(SDF_FROZEN, &sdp->sd_flags))
		goto freeze_failed;

	error = gfs2_freeze_locally(sdp);
	if (error)
		goto freeze_failed;

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
	set_bit(SDF_FROZEN, &sdp->sd_flags);

	error = gfs2_do_thaw(sdp);
	if (error)
		goto out;

	clear_bit(SDF_FROZEN, &sdp->sd_flags);
	goto out;

freeze_failed:
	fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", error);

out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	deactivate_super(sb);
}

/**
 * gfs2_freeze_super - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 * @who: freeze flags
 *
 */

static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error, error2;

	if (!mutex_trylock(&sdp->sd_freeze_mutex))
		return -EBUSY;
	error = -EBUSY;
	if (test_bit(SDF_FROZEN, &sdp->sd_flags))
		goto out;

	for (;;) {
		error = gfs2_freeze_locally(sdp);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			goto out;
		}

		error = gfs2_lock_fs_check_clean(sdp);
		if (!error)
			break;  /* success */

		/*
		 * Report why the freeze attempt failed while the error from
		 * gfs2_lock_fs_check_clean() is still in hand.
		 */
		if (error == -EBUSY)
			fs_err(sdp, "waiting for recovery before freeze\n");
		else if (error == -EIO)
			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
			       "to recovery error.\n");
		else
			fs_err(sdp, "error freezing FS: %d\n", error);

		error2 = gfs2_do_thaw(sdp);
		if (error2) {
			error = error2;
			goto out;
		}
		if (error == -EIO)
			goto out;

		fs_err(sdp, "retrying...\n");
		msleep(1000);
	}

out:
	if (!error) {
		set_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
		set_bit(SDF_FROZEN, &sdp->sd_flags);
	}
	mutex_unlock(&sdp->sd_freeze_mutex);
	return error;
}

/**
 * gfs2_thaw_super - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 * @who: freeze flags
 *
 */

static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;

	if (!mutex_trylock(&sdp->sd_freeze_mutex))
		return -EBUSY;
	error = -EINVAL;
	if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
		goto out;

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);

	error = gfs2_do_thaw(sdp);

	if (!error) {
		clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
		clear_bit(SDF_FROZEN, &sdp->sd_flags);
	}
out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	return error;
}

void gfs2_thaw_freeze_initiator(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
		goto out;

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);

out:
	mutex_unlock(&sdp->sd_freeze_mutex);
}

/**
 * statfs_slow_fill - fill in the sc for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Walks all resource groups, taking their glocks in shared mode
 * asynchronously, up to 64 at a time.  Errors are remembered and returned
 * once all outstanding requests have drained; a pending signal aborts the
 * scan with -ERESTARTSYS.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;
	for (x = 0; x < slots; x++)
		gfs2_holder_mark_uninitialized(gha + x);

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error) {
						struct gfs2_rgrpd *rgd =
							gfs2_glock2rgrp(gh->gh_gl);

						error = statfs_slow_fill(rgd, sc);
					}
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gfs2_holder_initialized(gh))
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	kfree(gha);
	return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */

static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}

/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The name of the link
 * @buf: The buffer
 *
 * Returns: 0 on success or error code
 */

static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
	else
		error = gfs2_statfs_i(sdp, &sc);

	if (error)
		return error;

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;

	return 0;
}
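
/*
 * Note on the numbers reported above: GFS2 creates dinodes on demand out
 * of free blocks, so every free block is a potential inode.  That is why
 * f_files is reported as sc_dinodes + sc_free and f_ffree as sc_free.
 */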

/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */

static int gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (inode->i_nlink &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			clear_nlink(inode);
	}

	/*
	 * If an inode's link count has dropped to zero while we are under
	 * memory pressure, defer deleting the inode to the delete workqueue.
	 * This avoids calling into DLM under memory pressure, which can
	 * deadlock.
	 */
	if (!inode->i_nlink &&
	    unlikely(current->flags & PF_MEMALLOC) &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		gfs2_glock_hold(gl);
		if (!gfs2_queue_try_to_evict(gl))
			gfs2_glock_queue_put(gl);
		return 0;
	}

	/*
	 * No longer cache inodes when trying to evict them all.
	 */
	if (test_bit(SDF_EVICTING, &sdp->sd_flags))
		return 1;

	return generic_drop_inode(inode);
}

static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
	do {
		if (d1 == d2)
			return 1;
		d1 = d1->d_parent;
	} while (!IS_ROOT(d1));
	return 0;
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @root: root of this (sub)tree
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;

	spin_lock(&sdp->sd_tune.gt_spin);
	logd_secs = sdp->sd_tune.gt_logd_secs;
	quota_quantum = sdp->sd_tune.gt_quota_quantum;
	statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
	statfs_slow = sdp->sd_tune.gt_statfs_slow;
	spin_unlock(&sdp->sd_tune.gt_spin);

	if (is_ancestor(root, sdp->sd_master_dir))
		seq_puts(s, ",meta");
	if (args->ar_lockproto[0])
		seq_show_option(s, "lockproto", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_show_option(s, "locktable", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_show_option(s, "hostdata", args->ar_hostdata);
	if (args->ar_spectator)
		seq_puts(s, ",spectator");
	if (args->ar_localflocks)
		seq_puts(s, ",localflocks");
	if (args->ar_debug)
		seq_puts(s, ",debug");
	if (args->ar_posix_acl)
		seq_puts(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		const char *state;

		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_puts(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		const char *state;

		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_puts(s, ",discard");
	if (logd_secs != 30)
		seq_printf(s, ",commit=%d", logd_secs);
	if (statfs_quantum != 30)
		seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
	else if (statfs_slow)
		seq_puts(s, ",statfs_quantum=0");
	if (quota_quantum != 60)
		seq_printf(s, ",quota_quantum=%d", quota_quantum);
	if (args->ar_statfs_percent)
		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
		const char *state;

		switch (args->ar_errors) {
		case GFS2_ERRORS_WITHDRAW:
			state = "withdraw";
			break;
		case GFS2_ERRORS_PANIC:
			state = "panic";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",errors=%s", state);
	}
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		seq_puts(s, ",nobarrier");
	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
		seq_puts(s, ",demote_interface_used");
	if (args->ar_rgrplvb)
		seq_puts(s, ",rgrplvb");
	if (args->ar_loccookie)
		seq_puts(s, ",loccookie");
	return 0;
}

static void gfs2_final_release_pages(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_glock *gl = ip->i_gl;

	if (unlikely(!gl)) {
		/* This can only happen during incomplete inode creation. */
		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
		return;
	}

	truncate_inode_pages(gfs2_glock2aspace(gl), 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (atomic_read(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
	}
}

static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder gh;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	gfs2_rindex_update(sdp);

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_qs;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NODE_SCOPE, &gh);
	if (error)
		goto out_qs;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
				 sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_rg_gunlock;

	gfs2_free_di(rgd, ip);

	gfs2_final_release_pages(ip);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&gh);
out_qs:
	gfs2_quota_unhold(ip);
	return error;
}

/**
 * gfs2_glock_put_eventually - put a glock, deferring under memory pressure
 * @gl:	The glock to put
 *
 * When under memory pressure, trigger a deferred glock put to make sure we
 * won't call into DLM and deadlock.  Otherwise, put the glock directly.
 */

static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
	if (current->flags & PF_MEMALLOC)
		gfs2_glock_queue_put(gl);
	else
		gfs2_glock_put(gl);
}

static bool gfs2_upgrade_iopen_glock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder *gh = &ip->i_iopen_gh;
	long timeout = 5 * HZ;
	int error;

	gh->gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_wait(gh);

	/*
	 * If there are no other lock holders, we will immediately get
	 * exclusive access to the iopen glock here.
	 *
	 * Otherwise, the other nodes holding the lock will be notified about
	 * our locking request.  If they do not have the inode open, they are
	 * expected to evict the cached inode and release the lock, allowing us
	 * to proceed.
	 *
	 * Otherwise, if they cannot evict the inode, they are expected to poke
	 * the inode glock (note: not the iopen glock).  We will notice that
	 * and stop waiting for the iopen glock immediately.  The other node(s)
	 * are then expected to take care of deleting the inode when they no
	 * longer use it.
	 *
	 * As a last resort, if another node keeps holding the iopen glock
	 * without showing any activity on the inode glock, we will eventually
	 * time out and fail the iopen glock upgrade.
	 *
	 * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
	 * locking request as an optimization to notify lock holders as soon as
	 * possible.  Without that flag, they'd be notified implicitly by the
	 * second locking request.
	 */

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error != GLR_TRYFAILED)
		return !error;

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error)
		return false;

	timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
		timeout);
	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
		gfs2_glock_dq(gh);
		return false;
	}
	return gfs2_glock_holder_ready(gh) == 0;
}

/**
 * evict_should_delete - determine whether the inode is eligible for deletion
 * @inode: The inode to evict
 * @gh: The glock holder structure
 *
 * This function determines whether the evicted inode is eligible to be deleted
 * and locks the inode glock.
 *
 * Returns: the fate of the dinode
 */
static enum dinode_demise evict_should_delete(struct inode *inode,
					      struct gfs2_holder *gh)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int ret;

	if (unlikely(test_bit(GIF_ALLOC_FAILED, &ip->i_flags)))
		goto should_delete;

	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
		return SHOULD_DEFER_EVICTION;

	/* Deletes should never happen under memory pressure anymore.  */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		return SHOULD_DEFER_EVICTION;

	/* Must not read inode block until block type has been verified */
	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
	if (unlikely(ret)) {
		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		return SHOULD_DEFER_EVICTION;
	}

	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
		return SHOULD_NOT_DELETE_DINODE;
	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
	if (ret)
		return SHOULD_NOT_DELETE_DINODE;

	ret = gfs2_instantiate(gh);
	if (ret)
		return SHOULD_NOT_DELETE_DINODE;

	/*
	 * The inode may have been recreated in the meantime.
	 */
	if (inode->i_nlink)
		return SHOULD_NOT_DELETE_DINODE;

should_delete:
	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
		if (!gfs2_upgrade_iopen_glock(inode)) {
			gfs2_holder_uninit(&ip->i_iopen_gh);
			return SHOULD_NOT_DELETE_DINODE;
		}
	}
	return SHOULD_DELETE_DINODE;
}

/**
 * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
 * @inode: The inode to evict
 */
static int evict_unlinked_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		ret = gfs2_dir_exhash_dealloc(ip);
		if (ret)
			goto out;
	}

	if (ip->i_eattr) {
		ret = gfs2_ea_dealloc(ip);
		if (ret)
			goto out;
	}

	if (!gfs2_is_stuffed(ip)) {
		ret = gfs2_file_dealloc(ip);
		if (ret)
			goto out;
	}

	/*
	 * As soon as we clear the bitmap for the dinode, gfs2_create_inode()
	 * can get called to recreate it, or even gfs2_inode_lookup() if the
	 * inode was recreated on another node in the meantime.
	 *
	 * However, inserting the new inode into the inode hash table will not
	 * succeed until the old inode is removed, and that only happens after
	 * ->evict_inode() returns.  The new inode is attached to its inode and
	 * iopen glocks after inserting it into the inode hash table, so at
	 * that point we can be sure that both glocks are unused.
	 */

	ret = gfs2_dinode_dealloc(ip);
	if (!ret && ip->i_gl)
		gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);

out:
	return ret;
}

/*
 * evict_linked_inode - evict an inode whose dinode has not been unlinked
 * @inode: The inode to evict
 */
static int evict_linked_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct address_space *metamapping;
	int ret;

	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_EVICT_INODE);
	metamapping = gfs2_glock2aspace(ip->i_gl);
	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
		filemap_fdatawrite(metamapping);
		filemap_fdatawait(metamapping);
	}
	write_inode_now(inode, 1);
	gfs2_ail_flush(ip->i_gl, 0);

	ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (ret)
		return ret;

	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(metamapping, 0);
	gfs2_trans_end(sdp);
	return 0;
}

/**
 * gfs2_evict_inode - Remove an inode from cache
 * @inode: The inode to evict
 *
 * There are three cases to consider:
 * 1. i_nlink == 0, we are the final opener (and must deallocate)
 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
 * 3. i_nlink > 0
 *
 * If the fs is read only, then we have to treat all cases as per #3
 * since we are unable to do any deallocation. The inode will be
 * deallocated by the next read/write node to attempt an allocation
 * in the same resource group.
 *
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */

static void gfs2_evict_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (inode->i_nlink || sb_rdonly(sb) || !ip->i_no_addr)
		goto out;

	/*
	 * In case of an incomplete mount, gfs2_evict_inode() may be called for
	 * system files without having an active journal to write to.  In that
	 * case, skip the filesystem evict.
	 */
	if (!sdp->sd_jdesc)
		goto out;

	gfs2_holder_mark_uninitialized(&gh);
	ret = evict_should_delete(inode, &gh);
	if (ret == SHOULD_DEFER_EVICTION)
		goto out;
	if (ret == SHOULD_DELETE_DINODE)
		ret = evict_unlinked_inode(inode);
	else
		ret = evict_linked_inode(inode);

	if (gfs2_rs_active(&ip->i_res))
		gfs2_rs_deltree(&ip->i_res);

	if (gfs2_holder_initialized(&gh))
		gfs2_glock_dq_uninit(&gh);
	if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
		fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
out:
	truncate_inode_pages_final(&inode->i_data);
	if (ip->i_qadata)
		gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
	gfs2_rs_deltree(&ip->i_res);
	gfs2_ordered_del_inode(ip);
	clear_inode(inode);
	gfs2_dir_hash_inval(ip);
	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		glock_clear_object(gl, ip);
		gfs2_glock_hold(gl);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		gfs2_glock_put_eventually(gl);
	}
	if (ip->i_gl) {
		glock_clear_object(ip->i_gl, ip);
		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
		gfs2_glock_add_to_lru(ip->i_gl);
		gfs2_glock_put_eventually(ip->i_gl);
		ip->i_gl = NULL;
	}
}

static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = alloc_inode_sb(sb, gfs2_inode_cachep, GFP_KERNEL);
	if (!ip)
		return NULL;
	ip->i_no_addr = 0;
	ip->i_flags = 0;
	ip->i_gl = NULL;
	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
	memset(&ip->i_res, 0, sizeof(ip->i_res));
	RB_CLEAR_NODE(&ip->i_res.rs_node);
	ip->i_rahead = 0;
	return &ip->i_inode;
}

static void gfs2_free_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
}

void free_local_statfs_inodes(struct gfs2_sbd *sdp)
{
	struct local_statfs_inode *lsi, *safe;

	/* Run through the statfs inodes list to iput and free memory */
	list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
			sdp->sd_sc_inode = NULL; /* belongs to this node */
		if (lsi->si_sc_inode)
			iput(lsi->si_sc_inode);
		list_del(&lsi->si_list);
		kfree(lsi);
	}
}

struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
				      unsigned int index)
{
	struct local_statfs_inode *lsi;

	/* Return the local (per node) statfs inode in the
	 * sdp->sd_sc_inodes_list corresponding to the 'index'. */
	list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == index)
			return lsi->si_sc_inode;
	}
	return NULL;
}

const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.free_inode		= gfs2_free_inode,
	.write_inode		= gfs2_write_inode,
	.dirty_inode		= gfs2_dirty_inode,
	.evict_inode		= gfs2_evict_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_super		= gfs2_freeze_super,
	.thaw_super		= gfs2_thaw_super,
	.statfs			= gfs2_statfs,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};