xref: /openbmc/linux/fs/gfs2/super.c (revision 9a32dd32)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
4  * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
5  */
6 
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 
9 #include <linux/bio.h>
10 #include <linux/sched/signal.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/statfs.h>
16 #include <linux/seq_file.h>
17 #include <linux/mount.h>
18 #include <linux/kthread.h>
19 #include <linux/delay.h>
20 #include <linux/gfs2_ondisk.h>
21 #include <linux/crc32.h>
22 #include <linux/time.h>
23 #include <linux/wait.h>
24 #include <linux/writeback.h>
25 #include <linux/backing-dev.h>
26 #include <linux/kernel.h>
27 
28 #include "gfs2.h"
29 #include "incore.h"
30 #include "bmap.h"
31 #include "dir.h"
32 #include "glock.h"
33 #include "glops.h"
34 #include "inode.h"
35 #include "log.h"
36 #include "meta_io.h"
37 #include "quota.h"
38 #include "recovery.h"
39 #include "rgrp.h"
40 #include "super.h"
41 #include "trans.h"
42 #include "util.h"
43 #include "sys.h"
44 #include "xattr.h"
45 #include "lops.h"
46 
47 enum dinode_demise {
48 	SHOULD_DELETE_DINODE,
49 	SHOULD_NOT_DELETE_DINODE,
50 	SHOULD_DEFER_EVICTION,
51 };
52 
53 /**
54  * gfs2_jindex_free - Clear all the journal index information
55  * @sdp: The GFS2 superblock
56  *
57  */
58 
59 void gfs2_jindex_free(struct gfs2_sbd *sdp)
60 {
61 	struct list_head list;
62 	struct gfs2_jdesc *jd;
63 
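	/*
	 * Splice the whole journal list onto a private list head under the
	 * spinlock so that the descriptors can be freed and their inodes
	 * iput without holding sd_jindex_spin.
	 */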
64 	spin_lock(&sdp->sd_jindex_spin);
65 	list_add(&list, &sdp->sd_jindex_list);
66 	list_del_init(&sdp->sd_jindex_list);
67 	sdp->sd_journals = 0;
68 	spin_unlock(&sdp->sd_jindex_spin);
69 
70 	sdp->sd_jdesc = NULL;
71 	while (!list_empty(&list)) {
72 		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
73 		gfs2_free_journal_extents(jd);
74 		list_del(&jd->jd_list);
75 		iput(jd->jd_inode);
76 		jd->jd_inode = NULL;
77 		kfree(jd);
78 	}
79 }
80 
81 static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
82 {
83 	struct gfs2_jdesc *jd;
84 
85 	list_for_each_entry(jd, head, jd_list) {
86 		if (jd->jd_jid == jid)
87 			return jd;
88 	}
89 	return NULL;
90 }
91 
92 struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
93 {
94 	struct gfs2_jdesc *jd;
95 
96 	spin_lock(&sdp->sd_jindex_spin);
97 	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
98 	spin_unlock(&sdp->sd_jindex_spin);
99 
100 	return jd;
101 }
102 
103 int gfs2_jdesc_check(struct gfs2_jdesc *jd)
104 {
105 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
106 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
107 	u64 size = i_size_read(jd->jd_inode);
108 
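	/* The journal must be between 8 MiB (8 << 20) and 1 GiB (BIT(30)) in size. */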
109 	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
110 		return -EIO;
111 
112 	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;
113 
114 	if (gfs2_write_alloc_required(ip, 0, size)) {
115 		gfs2_consist_inode(ip);
116 		return -EIO;
117 	}
118 
119 	return 0;
120 }
121 
122 /**
123  * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
124  * @sdp: the filesystem
125  *
126  * Returns: errno
127  */
128 
129 int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
130 {
131 	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
132 	struct gfs2_glock *j_gl = ip->i_gl;
133 	struct gfs2_log_header_host head;
134 	int error;
135 
136 	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
137 	if (gfs2_withdrawn(sdp))
138 		return -EIO;
139 
140 	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
141 	if (error) {
142 		gfs2_consist(sdp);
143 		return error;
144 	}
145 
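	/*
	 * The journal head must be an unmount log header, i.e. the journal
	 * was shut down cleanly; anything else means the filesystem is
	 * inconsistent.
	 */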
146 	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
147 		gfs2_consist(sdp);
148 		return -EIO;
149 	}
150 
151 	/*  Initialize the log sequence number and pointers from the journal head  */
152 	sdp->sd_log_sequence = head.lh_sequence + 1;
153 	gfs2_log_pointers_init(sdp, head.lh_blkno);
154 
155 	error = gfs2_quota_init(sdp);
156 	if (!error && gfs2_withdrawn(sdp))
157 		error = -EIO;
158 	if (!error)
159 		set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
160 	return error;
161 }
162 
163 void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
164 {
165 	const struct gfs2_statfs_change *str = buf;
166 
167 	sc->sc_total = be64_to_cpu(str->sc_total);
168 	sc->sc_free = be64_to_cpu(str->sc_free);
169 	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
170 }
171 
172 void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
173 {
174 	struct gfs2_statfs_change *str = buf;
175 
176 	str->sc_total = cpu_to_be64(sc->sc_total);
177 	str->sc_free = cpu_to_be64(sc->sc_free);
178 	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
179 }
180 
181 int gfs2_statfs_init(struct gfs2_sbd *sdp)
182 {
183 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
184 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
185 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
186 	struct buffer_head *m_bh;
187 	struct gfs2_holder gh;
188 	int error;
189 
190 	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
191 				   &gh);
192 	if (error)
193 		return error;
194 
195 	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
196 	if (error)
197 		goto out;
198 
199 	if (sdp->sd_args.ar_spectator) {
200 		spin_lock(&sdp->sd_statfs_spin);
201 		gfs2_statfs_change_in(m_sc, m_bh->b_data +
202 				      sizeof(struct gfs2_dinode));
203 		spin_unlock(&sdp->sd_statfs_spin);
204 	} else {
205 		spin_lock(&sdp->sd_statfs_spin);
206 		gfs2_statfs_change_in(m_sc, m_bh->b_data +
207 				      sizeof(struct gfs2_dinode));
208 		gfs2_statfs_change_in(l_sc, sdp->sd_sc_bh->b_data +
209 				      sizeof(struct gfs2_dinode));
210 		spin_unlock(&sdp->sd_statfs_spin);
211 
212 	}
213 
214 	brelse(m_bh);
215 out:
216 	gfs2_glock_dq_uninit(&gh);
217 	return 0;
218 }
219 
220 void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
221 			s64 dinodes)
222 {
223 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
224 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
225 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
226 	s64 x, y;
227 	int need_sync = 0;
228 
229 	gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
230 
231 	spin_lock(&sdp->sd_statfs_spin);
232 	l_sc->sc_total += total;
233 	l_sc->sc_free += free;
234 	l_sc->sc_dinodes += dinodes;
235 	gfs2_statfs_change_out(l_sc, sdp->sd_sc_bh->b_data +
236 			       sizeof(struct gfs2_dinode));
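	/*
	 * Once the accumulated local change in free blocks exceeds
	 * ar_statfs_percent percent of the master free count (in either
	 * direction), ask for the local changes to be synced back into the
	 * master statfs file.
	 */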
237 	if (sdp->sd_args.ar_statfs_percent) {
238 		x = 100 * l_sc->sc_free;
239 		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
240 		if (x >= y || x <= -y)
241 			need_sync = 1;
242 	}
243 	spin_unlock(&sdp->sd_statfs_spin);
244 
245 	if (need_sync)
246 		gfs2_wake_up_statfs(sdp);
247 }
248 
249 void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh)
250 {
251 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
252 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
253 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
254 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
255 
256 	gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
257 	gfs2_trans_add_meta(m_ip->i_gl, m_bh);
258 
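	/*
	 * Fold the local statfs deltas into the master counters and zero out
	 * the local statfs file.
	 */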
259 	spin_lock(&sdp->sd_statfs_spin);
260 	m_sc->sc_total += l_sc->sc_total;
261 	m_sc->sc_free += l_sc->sc_free;
262 	m_sc->sc_dinodes += l_sc->sc_dinodes;
263 	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
264 	memset(sdp->sd_sc_bh->b_data + sizeof(struct gfs2_dinode),
265 	       0, sizeof(struct gfs2_statfs_change));
266 	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
267 	spin_unlock(&sdp->sd_statfs_spin);
268 }
269 
270 int gfs2_statfs_sync(struct super_block *sb, int type)
271 {
272 	struct gfs2_sbd *sdp = sb->s_fs_info;
273 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
274 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
275 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
276 	struct gfs2_holder gh;
277 	struct buffer_head *m_bh;
278 	int error;
279 
280 	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
281 				   &gh);
282 	if (error)
283 		goto out;
284 
285 	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
286 	if (error)
287 		goto out_unlock;
288 
289 	spin_lock(&sdp->sd_statfs_spin);
290 	gfs2_statfs_change_in(m_sc, m_bh->b_data +
291 			      sizeof(struct gfs2_dinode));
292 	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
293 		spin_unlock(&sdp->sd_statfs_spin);
294 		goto out_bh;
295 	}
296 	spin_unlock(&sdp->sd_statfs_spin);
297 
298 	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
299 	if (error)
300 		goto out_bh;
301 
302 	update_statfs(sdp, m_bh);
303 	sdp->sd_statfs_force_sync = 0;
304 
305 	gfs2_trans_end(sdp);
306 
307 out_bh:
308 	brelse(m_bh);
309 out_unlock:
310 	gfs2_glock_dq_uninit(&gh);
311 out:
312 	return error;
313 }
314 
315 struct lfcc {
316 	struct list_head list;
317 	struct gfs2_holder gh;
318 };
319 
320 /**
321  * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
322  *                            journals are clean
323  * @sdp: the file system
324  *
325  * Returns: errno
326  */
327 
328 static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
329 {
330 	struct gfs2_inode *ip;
331 	struct gfs2_jdesc *jd;
332 	struct lfcc *lfcc;
333 	LIST_HEAD(list);
334 	struct gfs2_log_header_host lh;
335 	int error;
336 
337 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
338 		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
339 		if (!lfcc) {
340 			error = -ENOMEM;
341 			goto out;
342 		}
343 		ip = GFS2_I(jd->jd_inode);
344 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
345 		if (error) {
346 			kfree(lfcc);
347 			goto out;
348 		}
349 		list_add(&lfcc->list, &list);
350 	}
351 
352 	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
353 				   LM_FLAG_NOEXP | GL_NOPID,
354 				   &sdp->sd_freeze_gh);
355 	if (error)
356 		goto out;
357 
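	/* With the freeze glock held, check that all journals are clean. */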
358 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
359 		error = gfs2_jdesc_check(jd);
360 		if (error)
361 			break;
362 		error = gfs2_find_jhead(jd, &lh, false);
363 		if (error)
364 			break;
365 		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
366 			error = -EBUSY;
367 			break;
368 		}
369 	}
370 
371 	if (error)
372 		gfs2_freeze_unlock(&sdp->sd_freeze_gh);
373 
374 out:
375 	while (!list_empty(&list)) {
376 		lfcc = list_first_entry(&list, struct lfcc, list);
377 		list_del(&lfcc->list);
378 		gfs2_glock_dq_uninit(&lfcc->gh);
379 		kfree(lfcc);
380 	}
381 	return error;
382 }
383 
384 void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
385 {
386 	const struct inode *inode = &ip->i_inode;
387 	struct gfs2_dinode *str = buf;
388 
389 	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
390 	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
391 	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
392 	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
393 	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
394 	str->di_mode = cpu_to_be32(inode->i_mode);
395 	str->di_uid = cpu_to_be32(i_uid_read(inode));
396 	str->di_gid = cpu_to_be32(i_gid_read(inode));
397 	str->di_nlink = cpu_to_be32(inode->i_nlink);
398 	str->di_size = cpu_to_be64(i_size_read(inode));
399 	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
400 	str->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
401 	str->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
402 	str->di_ctime = cpu_to_be64(inode->i_ctime.tv_sec);
403 
404 	str->di_goal_meta = cpu_to_be64(ip->i_goal);
405 	str->di_goal_data = cpu_to_be64(ip->i_goal);
406 	str->di_generation = cpu_to_be64(ip->i_generation);
407 
408 	str->di_flags = cpu_to_be32(ip->i_diskflags);
409 	str->di_height = cpu_to_be16(ip->i_height);
410 	str->di_payload_format = cpu_to_be32(S_ISDIR(inode->i_mode) &&
411 					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
412 					     GFS2_FORMAT_DE : 0);
413 	str->di_depth = cpu_to_be16(ip->i_depth);
414 	str->di_entries = cpu_to_be32(ip->i_entries);
415 
416 	str->di_eattr = cpu_to_be64(ip->i_eattr);
417 	str->di_atime_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
418 	str->di_mtime_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
419 	str->di_ctime_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
420 }
421 
422 /**
423  * gfs2_write_inode - Make sure the inode is stable on the disk
424  * @inode: The inode
425  * @wbc: The writeback control structure
426  *
427  * Returns: errno
428  */
429 
430 static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
431 {
432 	struct gfs2_inode *ip = GFS2_I(inode);
433 	struct gfs2_sbd *sdp = GFS2_SB(inode);
434 	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
435 	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
436 	int ret = 0;
437 	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));
438 
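	/*
	 * For journaled-data inodes and WB_SYNC_ALL writeback, flush the log
	 * first so that all of the inode's journaled changes reach the journal
	 * before its metadata is written back and waited on below.
	 */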
439 	if (flush_all)
440 		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
441 			       GFS2_LOG_HEAD_FLUSH_NORMAL |
442 			       GFS2_LFC_WRITE_INODE);
443 	if (bdi->wb.dirty_exceeded)
444 		gfs2_ail1_flush(sdp, wbc);
445 	else
446 		filemap_fdatawrite(metamapping);
447 	if (flush_all)
448 		ret = filemap_fdatawait(metamapping);
449 	if (ret)
450 		mark_inode_dirty_sync(inode);
451 	else {
452 		spin_lock(&inode->i_lock);
453 		if (!(inode->i_flags & I_DIRTY))
454 			gfs2_ordered_del_inode(ip);
455 		spin_unlock(&inode->i_lock);
456 	}
457 	return ret;
458 }
459 
460 /**
461  * gfs2_dirty_inode - check for atime updates
462  * @inode: The inode in question
463  * @flags: The type of dirty
464  *
465  * Unfortunately it can be called under any combination of inode
466  * glock and transaction lock, so we have to check carefully.
467  *
468  * At the moment this deals only with atime - it should be possible
469  * to expand that role in future, once a review of the locking has
470  * been carried out.
471  */
472 
473 static void gfs2_dirty_inode(struct inode *inode, int flags)
474 {
475 	struct gfs2_inode *ip = GFS2_I(inode);
476 	struct gfs2_sbd *sdp = GFS2_SB(inode);
477 	struct buffer_head *bh;
478 	struct gfs2_holder gh;
479 	int need_unlock = 0;
480 	int need_endtrans = 0;
481 	int ret;
482 
483 	if (unlikely(!ip->i_gl)) {
484 		/* This can only happen during incomplete inode creation. */
485 		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
486 		return;
487 	}
488 
489 	if (unlikely(gfs2_withdrawn(sdp)))
490 		return;
491 	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
492 		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
493 		if (ret) {
494 			fs_err(sdp, "dirty_inode: glock %d\n", ret);
495 			gfs2_dump_glock(NULL, ip->i_gl, true);
496 			return;
497 		}
498 		need_unlock = 1;
499 	} else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
500 		return;
501 
502 	if (current->journal_info == NULL) {
503 		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
504 		if (ret) {
505 			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
506 			goto out;
507 		}
508 		need_endtrans = 1;
509 	}
510 
511 	ret = gfs2_meta_inode_buffer(ip, &bh);
512 	if (ret == 0) {
513 		gfs2_trans_add_meta(ip->i_gl, bh);
514 		gfs2_dinode_out(ip, bh->b_data);
515 		brelse(bh);
516 	}
517 
518 	if (need_endtrans)
519 		gfs2_trans_end(sdp);
520 out:
521 	if (need_unlock)
522 		gfs2_glock_dq_uninit(&gh);
523 }
524 
525 /**
526  * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
527  * @sdp: the filesystem
528  *
529  * Returns: errno
530  */
531 
532 void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
533 {
534 	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
535 
536 	if (!test_bit(SDF_DEACTIVATING, &sdp->sd_flags))
537 		gfs2_flush_delete_work(sdp);
538 
539 	if (!log_write_allowed && current == sdp->sd_quotad_process)
540 		fs_warn(sdp, "The quotad daemon is withdrawing.\n");
541 	else if (sdp->sd_quotad_process)
542 		kthread_stop(sdp->sd_quotad_process);
543 	sdp->sd_quotad_process = NULL;
544 
545 	if (!log_write_allowed && current == sdp->sd_logd_process)
546 		fs_warn(sdp, "The logd daemon is withdrawing.\n");
547 	else if (sdp->sd_logd_process)
548 		kthread_stop(sdp->sd_logd_process);
549 	sdp->sd_logd_process = NULL;
550 
551 	if (log_write_allowed) {
552 		gfs2_quota_sync(sdp->sd_vfs, 0);
553 		gfs2_statfs_sync(sdp->sd_vfs, 0);
554 
555 		/* We do two log flushes here. The first one commits dirty inodes
556 		 * and rgrps to the journal, but queues up revokes to the ail list.
557 		 * The second flush writes out and removes the revokes.
558 		 *
559 		 * The first must be done before the FLUSH_SHUTDOWN code
560 		 * clears the LIVE flag, otherwise it will not be able to start
561 		 * a transaction to write its revokes, and the error will cause
562 		 * a withdraw of the file system. */
563 		gfs2_log_flush(sdp, NULL, GFS2_LFC_MAKE_FS_RO);
564 		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
565 			       GFS2_LFC_MAKE_FS_RO);
566 		wait_event_timeout(sdp->sd_log_waitq,
567 				   gfs2_log_is_empty(sdp),
568 				   HZ * 5);
569 		gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
570 	} else {
571 		wait_event_timeout(sdp->sd_log_waitq,
572 				   gfs2_log_is_empty(sdp),
573 				   HZ * 5);
574 	}
575 	gfs2_quota_cleanup(sdp);
576 
577 	if (!log_write_allowed)
578 		sdp->sd_vfs->s_flags |= SB_RDONLY;
579 }
580 
581 /**
582  * gfs2_put_super - Unmount the filesystem
583  * @sb: The VFS superblock
584  *
585  */
586 
587 static void gfs2_put_super(struct super_block *sb)
588 {
589 	struct gfs2_sbd *sdp = sb->s_fs_info;
590 	struct gfs2_jdesc *jd;
591 
592 	/* No more recovery requests */
593 	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
594 	smp_mb();
595 
596 	/* Wait on outstanding recovery */
597 restart:
598 	spin_lock(&sdp->sd_jindex_spin);
599 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
600 		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
601 			continue;
602 		spin_unlock(&sdp->sd_jindex_spin);
603 		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
604 			    TASK_UNINTERRUPTIBLE);
605 		goto restart;
606 	}
607 	spin_unlock(&sdp->sd_jindex_spin);
608 
609 	if (!sb_rdonly(sb)) {
610 		gfs2_make_fs_ro(sdp);
611 	}
612 	WARN_ON(gfs2_withdrawing(sdp));
613 
614 	/*  At this point, we're through modifying the disk  */
615 
616 	/*  Release stuff  */
617 
618 	iput(sdp->sd_jindex);
619 	iput(sdp->sd_statfs_inode);
620 	iput(sdp->sd_rindex);
621 	iput(sdp->sd_quota_inode);
622 
623 	gfs2_glock_put(sdp->sd_rename_gl);
624 	gfs2_glock_put(sdp->sd_freeze_gl);
625 
626 	if (!sdp->sd_args.ar_spectator) {
627 		if (gfs2_holder_initialized(&sdp->sd_journal_gh))
628 			gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
629 		if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
630 			gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
631 		brelse(sdp->sd_sc_bh);
632 		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
633 		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
634 		free_local_statfs_inodes(sdp);
635 		iput(sdp->sd_qc_inode);
636 	}
637 
638 	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
639 	gfs2_clear_rgrpd(sdp);
640 	gfs2_jindex_free(sdp);
641 	/*  Take apart glock structures and buffer lists  */
642 	gfs2_gl_hash_clear(sdp);
643 	truncate_inode_pages_final(&sdp->sd_aspace);
644 	gfs2_delete_debugfs_file(sdp);
645 	/*  Unmount the locking protocol  */
646 	gfs2_lm_unmount(sdp);
647 
648 	/*  At this point, we're through participating in the lockspace  */
649 	gfs2_sys_fs_del(sdp);
650 	free_sbd(sdp);
651 }
652 
653 /**
654  * gfs2_sync_fs - sync the filesystem
655  * @sb: the superblock
656  * @wait: true to wait for completion
657  *
658  * Flushes the log to disk.
659  */
660 
661 static int gfs2_sync_fs(struct super_block *sb, int wait)
662 {
663 	struct gfs2_sbd *sdp = sb->s_fs_info;
664 
665 	gfs2_quota_sync(sb, -1);
666 	if (wait)
667 		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
668 			       GFS2_LFC_SYNC_FS);
669 	return sdp->sd_log_error;
670 }
671 
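/**
 * gfs2_freeze_func - work function that completes thawing of the filesystem
 * @work: the sd_freeze_work item embedded in the superblock
 *
 * Re-takes the freeze glock, thaws the VFS superblock, and then clears
 * SDF_FS_FROZEN and wakes up anybody waiting on that bit.  Failure to take
 * the lock or to thaw the superblock withdraws the filesystem.
 */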
672 void gfs2_freeze_func(struct work_struct *work)
673 {
674 	int error;
675 	struct gfs2_holder freeze_gh;
676 	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
677 	struct super_block *sb = sdp->sd_vfs;
678 
679 	atomic_inc(&sb->s_active);
680 	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
681 	if (error) {
682 		gfs2_assert_withdraw(sdp, 0);
683 	} else {
684 		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
685 		error = thaw_super(sb);
686 		if (error) {
687 			fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
688 				error);
689 			gfs2_assert_withdraw(sdp, 0);
690 		}
691 		gfs2_freeze_unlock(&freeze_gh);
692 	}
693 	deactivate_super(sb);
694 	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
695 	wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
696 	return;
697 }
698 
699 /**
700  * gfs2_freeze - prevent further writes to the filesystem
701  * @sb: the VFS structure for the filesystem
702  *
703  */
704 
705 static int gfs2_freeze(struct super_block *sb)
706 {
707 	struct gfs2_sbd *sdp = sb->s_fs_info;
708 	int error;
709 
710 	mutex_lock(&sdp->sd_freeze_mutex);
711 	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) {
712 		error = -EBUSY;
713 		goto out;
714 	}
715 
716 	for (;;) {
717 		if (gfs2_withdrawn(sdp)) {
718 			error = -EINVAL;
719 			goto out;
720 		}
721 
722 		error = gfs2_lock_fs_check_clean(sdp);
723 		if (!error)
724 			break;
725 
726 		if (error == -EBUSY)
727 			fs_err(sdp, "waiting for recovery before freeze\n");
728 		else if (error == -EIO) {
729 			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
730 			       "to recovery error.\n");
731 			goto out;
732 		} else {
733 			fs_err(sdp, "error freezing FS: %d\n", error);
734 		}
735 		fs_err(sdp, "retrying...\n");
736 		msleep(1000);
737 	}
738 	set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
739 out:
740 	mutex_unlock(&sdp->sd_freeze_mutex);
741 	return error;
742 }
743 
744 /**
745  * gfs2_unfreeze - reallow writes to the filesystem
746  * @sb: the VFS structure for the filesystem
747  *
748  */
749 
750 static int gfs2_unfreeze(struct super_block *sb)
751 {
752 	struct gfs2_sbd *sdp = sb->s_fs_info;
753 
754 	mutex_lock(&sdp->sd_freeze_mutex);
755 	if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
756 	    !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
757 		mutex_unlock(&sdp->sd_freeze_mutex);
758 		return -EINVAL;
759 	}
760 
761 	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
762 	mutex_unlock(&sdp->sd_freeze_mutex);
763 	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
764 }
765 
766 /**
767  * statfs_slow_fill - fill in the sg for a given RG
768  * @rgd: the RG
769  * @sc: the sc structure
770  *
771  * Returns: 0
772  */
773 
774 static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
775 			    struct gfs2_statfs_change_host *sc)
776 {
777 	gfs2_rgrp_verify(rgd);
778 	sc->sc_total += rgd->rd_data;
779 	sc->sc_free += rgd->rd_free;
780 	sc->sc_dinodes += rgd->rd_dinodes;
781 	return 0;
782 }
783 
784 /**
785  * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
786  * @sdp: the filesystem
787  * @sc: the sc info that will be returned
788  *
789  * Any error (other than a signal) will cause this routine to fall back
790  * to the synchronous version.
791  *
792  * FIXME: This really shouldn't busy wait like this.
793  *
794  * Returns: errno
795  */
796 
797 static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
798 {
799 	struct gfs2_rgrpd *rgd_next;
800 	struct gfs2_holder *gha, *gh;
801 	unsigned int slots = 64;
802 	unsigned int x;
803 	int done;
804 	int error = 0, err;
805 
806 	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
807 	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
808 	if (!gha)
809 		return -ENOMEM;
810 	for (x = 0; x < slots; x++)
811 		gfs2_holder_mark_uninitialized(gha + x);
812 
813 	rgd_next = gfs2_rgrpd_get_first(sdp);
814 
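	/*
	 * Poll a window of up to 64 asynchronous rgrp glock requests: as each
	 * lock is granted, fold that rgrp's counts into the result and issue
	 * the next request, until every resource group has been visited.
	 */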
815 	for (;;) {
816 		done = 1;
817 
818 		for (x = 0; x < slots; x++) {
819 			gh = gha + x;
820 
821 			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
822 				err = gfs2_glock_wait(gh);
823 				if (err) {
824 					gfs2_holder_uninit(gh);
825 					error = err;
826 				} else {
827 					if (!error) {
828 						struct gfs2_rgrpd *rgd =
829 							gfs2_glock2rgrp(gh->gh_gl);
830 
831 						error = statfs_slow_fill(rgd, sc);
832 					}
833 					gfs2_glock_dq_uninit(gh);
834 				}
835 			}
836 
837 			if (gfs2_holder_initialized(gh))
838 				done = 0;
839 			else if (rgd_next && !error) {
840 				error = gfs2_glock_nq_init(rgd_next->rd_gl,
841 							   LM_ST_SHARED,
842 							   GL_ASYNC,
843 							   gh);
844 				rgd_next = gfs2_rgrpd_get_next(rgd_next);
845 				done = 0;
846 			}
847 
848 			if (signal_pending(current))
849 				error = -ERESTARTSYS;
850 		}
851 
852 		if (done)
853 			break;
854 
855 		yield();
856 	}
857 
858 	kfree(gha);
859 	return error;
860 }
861 
862 /**
863  * gfs2_statfs_i - Do a statfs
864  * @sdp: the filesystem
865  * @sc: the sc structure
866  *
867  * Returns: errno
868  */
869 
870 static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
871 {
872 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
873 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
874 
875 	spin_lock(&sdp->sd_statfs_spin);
876 
877 	*sc = *m_sc;
878 	sc->sc_total += l_sc->sc_total;
879 	sc->sc_free += l_sc->sc_free;
880 	sc->sc_dinodes += l_sc->sc_dinodes;
881 
882 	spin_unlock(&sdp->sd_statfs_spin);
883 
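	/* Local deltas can push the totals out of range; clamp them. */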
884 	if (sc->sc_free < 0)
885 		sc->sc_free = 0;
886 	if (sc->sc_free > sc->sc_total)
887 		sc->sc_free = sc->sc_total;
888 	if (sc->sc_dinodes < 0)
889 		sc->sc_dinodes = 0;
890 
891 	return 0;
892 }
893 
894 /**
895  * gfs2_statfs - Gather and return stats about the filesystem
896  * @dentry: The name of the link
897  * @buf: The buffer
898  *
899  * Returns: 0 on success or error code
900  */
901 
902 static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
903 {
904 	struct super_block *sb = dentry->d_sb;
905 	struct gfs2_sbd *sdp = sb->s_fs_info;
906 	struct gfs2_statfs_change_host sc;
907 	int error;
908 
909 	error = gfs2_rindex_update(sdp);
910 	if (error)
911 		return error;
912 
913 	if (gfs2_tune_get(sdp, gt_statfs_slow))
914 		error = gfs2_statfs_slow(sdp, &sc);
915 	else
916 		error = gfs2_statfs_i(sdp, &sc);
917 
918 	if (error)
919 		return error;
920 
921 	buf->f_type = GFS2_MAGIC;
922 	buf->f_bsize = sdp->sd_sb.sb_bsize;
923 	buf->f_blocks = sc.sc_total;
924 	buf->f_bfree = sc.sc_free;
925 	buf->f_bavail = sc.sc_free;
926 	buf->f_files = sc.sc_dinodes + sc.sc_free;
927 	buf->f_ffree = sc.sc_free;
928 	buf->f_namelen = GFS2_FNAMESIZE;
929 
930 	return 0;
931 }
932 
933 /**
934  * gfs2_drop_inode - Drop an inode (test for remote unlink)
935  * @inode: The inode to drop
936  *
937  * If we've received a callback on an iopen lock then it's because a
938  * remote node tried to deallocate the inode but failed due to this node
939  * still having the inode open. Here we mark the link count zero
940  * since we know that it must have reached zero if the GLF_DEMOTE flag
941  * is set on the iopen glock. If we didn't do a disk read since the
942  * remote node removed the final link then we might otherwise miss
943  * this event. This check ensures that this node will deallocate the
944  * inode's blocks, or alternatively pass the baton on to another
945  * node for later deallocation.
946  */
947 
948 static int gfs2_drop_inode(struct inode *inode)
949 {
950 	struct gfs2_inode *ip = GFS2_I(inode);
951 	struct gfs2_sbd *sdp = GFS2_SB(inode);
952 
953 	if (inode->i_nlink &&
954 	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
955 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
956 		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
957 			clear_nlink(inode);
958 	}
959 
960 	/*
961 	 * When an inode's link count has dropped to zero under memory pressure,
962 	 * defer deleting the inode to the delete workqueue.  This avoids
963 	 * calling into DLM under memory pressure, which can deadlock.
964 	 */
965 	if (!inode->i_nlink &&
966 	    unlikely(current->flags & PF_MEMALLOC) &&
967 	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
968 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
969 
970 		gfs2_glock_hold(gl);
971 		if (!gfs2_queue_try_to_evict(gl))
972 			gfs2_glock_queue_put(gl);
973 		return 0;
974 	}
975 
976 	/*
977 	 * No longer cache inodes when trying to evict them all.
978 	 */
979 	if (test_bit(SDF_EVICTING, &sdp->sd_flags))
980 		return 1;
981 
982 	return generic_drop_inode(inode);
983 }
984 
985 static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
986 {
987 	do {
988 		if (d1 == d2)
989 			return 1;
990 		d1 = d1->d_parent;
991 	} while (!IS_ROOT(d1));
992 	return 0;
993 }
994 
995 /**
996  * gfs2_show_options - Show mount options for /proc/mounts
997  * @s: seq_file structure
998  * @root: root of this (sub)tree
999  *
1000  * Returns: 0 on success or error code
1001  */
1002 
1003 static int gfs2_show_options(struct seq_file *s, struct dentry *root)
1004 {
1005 	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
1006 	struct gfs2_args *args = &sdp->sd_args;
1007 	int val;
1008 
1009 	if (is_ancestor(root, sdp->sd_master_dir))
1010 		seq_puts(s, ",meta");
1011 	if (args->ar_lockproto[0])
1012 		seq_show_option(s, "lockproto", args->ar_lockproto);
1013 	if (args->ar_locktable[0])
1014 		seq_show_option(s, "locktable", args->ar_locktable);
1015 	if (args->ar_hostdata[0])
1016 		seq_show_option(s, "hostdata", args->ar_hostdata);
1017 	if (args->ar_spectator)
1018 		seq_puts(s, ",spectator");
1019 	if (args->ar_localflocks)
1020 		seq_puts(s, ",localflocks");
1021 	if (args->ar_debug)
1022 		seq_puts(s, ",debug");
1023 	if (args->ar_posix_acl)
1024 		seq_puts(s, ",acl");
1025 	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
1026 		char *state;
1027 		switch (args->ar_quota) {
1028 		case GFS2_QUOTA_OFF:
1029 			state = "off";
1030 			break;
1031 		case GFS2_QUOTA_ACCOUNT:
1032 			state = "account";
1033 			break;
1034 		case GFS2_QUOTA_ON:
1035 			state = "on";
1036 			break;
1037 		default:
1038 			state = "unknown";
1039 			break;
1040 		}
1041 		seq_printf(s, ",quota=%s", state);
1042 	}
1043 	if (args->ar_suiddir)
1044 		seq_puts(s, ",suiddir");
1045 	if (args->ar_data != GFS2_DATA_DEFAULT) {
1046 		char *state;
1047 		switch (args->ar_data) {
1048 		case GFS2_DATA_WRITEBACK:
1049 			state = "writeback";
1050 			break;
1051 		case GFS2_DATA_ORDERED:
1052 			state = "ordered";
1053 			break;
1054 		default:
1055 			state = "unknown";
1056 			break;
1057 		}
1058 		seq_printf(s, ",data=%s", state);
1059 	}
1060 	if (args->ar_discard)
1061 		seq_puts(s, ",discard");
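	/* Tunables are only shown when they differ from their default values. */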
1062 	val = sdp->sd_tune.gt_logd_secs;
1063 	if (val != 30)
1064 		seq_printf(s, ",commit=%d", val);
1065 	val = sdp->sd_tune.gt_statfs_quantum;
1066 	if (val != 30)
1067 		seq_printf(s, ",statfs_quantum=%d", val);
1068 	else if (sdp->sd_tune.gt_statfs_slow)
1069 		seq_puts(s, ",statfs_quantum=0");
1070 	val = sdp->sd_tune.gt_quota_quantum;
1071 	if (val != 60)
1072 		seq_printf(s, ",quota_quantum=%d", val);
1073 	if (args->ar_statfs_percent)
1074 		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
1075 	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
1076 		const char *state;
1077 
1078 		switch (args->ar_errors) {
1079 		case GFS2_ERRORS_WITHDRAW:
1080 			state = "withdraw";
1081 			break;
1082 		case GFS2_ERRORS_PANIC:
1083 			state = "panic";
1084 			break;
1085 		default:
1086 			state = "unknown";
1087 			break;
1088 		}
1089 		seq_printf(s, ",errors=%s", state);
1090 	}
1091 	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
1092 		seq_puts(s, ",nobarrier");
1093 	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
1094 		seq_puts(s, ",demote_interface_used");
1095 	if (args->ar_rgrplvb)
1096 		seq_puts(s, ",rgrplvb");
1097 	if (args->ar_loccookie)
1098 		seq_puts(s, ",loccookie");
1099 	return 0;
1100 }
1101 
1102 static void gfs2_final_release_pages(struct gfs2_inode *ip)
1103 {
1104 	struct inode *inode = &ip->i_inode;
1105 	struct gfs2_glock *gl = ip->i_gl;
1106 
1107 	if (unlikely(!gl)) {
1108 		/* This can only happen during incomplete inode creation. */
1109 		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
1110 		return;
1111 	}
1112 
1113 	truncate_inode_pages(gfs2_glock2aspace(gl), 0);
1114 	truncate_inode_pages(&inode->i_data, 0);
1115 
1116 	if (atomic_read(&gl->gl_revokes) == 0) {
1117 		clear_bit(GLF_LFLUSH, &gl->gl_flags);
1118 		clear_bit(GLF_DIRTY, &gl->gl_flags);
1119 	}
1120 }
1121 
1122 static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
1123 {
1124 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1125 	struct gfs2_rgrpd *rgd;
1126 	struct gfs2_holder gh;
1127 	int error;
1128 
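	/*
	 * All of the inode's data, indirect and extended attribute blocks must
	 * already have been freed; only the dinode block itself should remain.
	 */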
1129 	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
1130 		gfs2_consist_inode(ip);
1131 		return -EIO;
1132 	}
1133 
1134 	error = gfs2_rindex_update(sdp);
1135 	if (error)
1136 		return error;
1137 
1138 	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1139 	if (error)
1140 		return error;
1141 
1142 	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
1143 	if (!rgd) {
1144 		gfs2_consist_inode(ip);
1145 		error = -EIO;
1146 		goto out_qs;
1147 	}
1148 
1149 	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1150 				   LM_FLAG_NODE_SCOPE, &gh);
1151 	if (error)
1152 		goto out_qs;
1153 
1154 	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
1155 				 sdp->sd_jdesc->jd_blocks);
1156 	if (error)
1157 		goto out_rg_gunlock;
1158 
1159 	gfs2_free_di(rgd, ip);
1160 
1161 	gfs2_final_release_pages(ip);
1162 
1163 	gfs2_trans_end(sdp);
1164 
1165 out_rg_gunlock:
1166 	gfs2_glock_dq_uninit(&gh);
1167 out_qs:
1168 	gfs2_quota_unhold(ip);
1169 	return error;
1170 }
1171 
1172 /**
1173  * gfs2_glock_put_eventually
1174  * @gl:	The glock to put
1175  *
1176  * When under memory pressure, trigger a deferred glock put to make sure we
1177  * won't call into DLM and deadlock.  Otherwise, put the glock directly.
1178  */
1179 
1180 static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
1181 {
1182 	if (current->flags & PF_MEMALLOC)
1183 		gfs2_glock_queue_put(gl);
1184 	else
1185 		gfs2_glock_put(gl);
1186 }
1187 
1188 static bool gfs2_upgrade_iopen_glock(struct inode *inode)
1189 {
1190 	struct gfs2_inode *ip = GFS2_I(inode);
1191 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1192 	struct gfs2_holder *gh = &ip->i_iopen_gh;
1193 	long timeout = 5 * HZ;
1194 	int error;
1195 
1196 	gh->gh_flags |= GL_NOCACHE;
1197 	gfs2_glock_dq_wait(gh);
1198 
1199 	/*
1200 	 * If there are no other lock holders, we will immediately get
1201 	 * exclusive access to the iopen glock here.
1202 	 *
1203 	 * Otherwise, the other nodes holding the lock will be notified about
1204 	 * our locking request.  If they do not have the inode open, they are
1205 	 * expected to evict the cached inode and release the lock, allowing us
1206 	 * to proceed.
1207 	 *
1208 	 * Otherwise, if they cannot evict the inode, they are expected to poke
1209 	 * the inode glock (note: not the iopen glock).  We will notice that
1210 	 * and stop waiting for the iopen glock immediately.  The other node(s)
1211 	 * are then expected to take care of deleting the inode when they no
1212 	 * longer use it.
1213 	 *
1214 	 * As a last resort, if another node keeps holding the iopen glock
1215 	 * without showing any activity on the inode glock, we will eventually
1216 	 * time out and fail the iopen glock upgrade.
1217 	 *
1218 	 * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
1219 	 * locking request as an optimization to notify lock holders as soon as
1220 	 * possible.  Without that flag, they'd be notified implicitly by the
1221 	 * second locking request.
1222 	 */
1223 
1224 	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
1225 	error = gfs2_glock_nq(gh);
1226 	if (error != GLR_TRYFAILED)
1227 		return !error;
1228 
1229 	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
1230 	error = gfs2_glock_nq(gh);
1231 	if (error)
1232 		return false;
1233 
1234 	timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
1235 		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
1236 		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
1237 		timeout);
1238 	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
1239 		gfs2_glock_dq(gh);
1240 		return false;
1241 	}
1242 	return gfs2_glock_holder_ready(gh) == 0;
1243 }
1244 
1245 /**
1246  * evict_should_delete - determine whether the inode is eligible for deletion
1247  * @inode: The inode to evict
1248  * @gh: The glock holder structure
1249  *
1250  * This function determines whether the evicted inode is eligible to be deleted
1251  * and locks the inode glock.
1252  *
1253  * Returns: the fate of the dinode
1254  */
1255 static enum dinode_demise evict_should_delete(struct inode *inode,
1256 					      struct gfs2_holder *gh)
1257 {
1258 	struct gfs2_inode *ip = GFS2_I(inode);
1259 	struct super_block *sb = inode->i_sb;
1260 	struct gfs2_sbd *sdp = sb->s_fs_info;
1261 	int ret;
1262 
1263 	if (unlikely(test_bit(GIF_ALLOC_FAILED, &ip->i_flags)))
1264 		goto should_delete;
1265 
1266 	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
1267 		return SHOULD_DEFER_EVICTION;
1268 
1269 	/* Deletes should never happen under memory pressure anymore.  */
1270 	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
1271 		return SHOULD_DEFER_EVICTION;
1272 
1273 	/* Must not read inode block until block type has been verified */
1274 	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
1275 	if (unlikely(ret)) {
1276 		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
1277 		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1278 		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
1279 		return SHOULD_DEFER_EVICTION;
1280 	}
1281 
1282 	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
1283 		return SHOULD_NOT_DELETE_DINODE;
1284 	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
1285 	if (ret)
1286 		return SHOULD_NOT_DELETE_DINODE;
1287 
1288 	ret = gfs2_instantiate(gh);
1289 	if (ret)
1290 		return SHOULD_NOT_DELETE_DINODE;
1291 
1292 	/*
1293 	 * The inode may have been recreated in the meantime.
1294 	 */
1295 	if (inode->i_nlink)
1296 		return SHOULD_NOT_DELETE_DINODE;
1297 
1298 should_delete:
1299 	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
1300 	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
1301 		if (!gfs2_upgrade_iopen_glock(inode)) {
1302 			gfs2_holder_uninit(&ip->i_iopen_gh);
1303 			return SHOULD_NOT_DELETE_DINODE;
1304 		}
1305 	}
1306 	return SHOULD_DELETE_DINODE;
1307 }
1308 
1309 /**
1310  * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
1311  * @inode: The inode to evict
1312  */
1313 static int evict_unlinked_inode(struct inode *inode)
1314 {
1315 	struct gfs2_inode *ip = GFS2_I(inode);
1316 	int ret;
1317 
1318 	if (S_ISDIR(inode->i_mode) &&
1319 	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
1320 		ret = gfs2_dir_exhash_dealloc(ip);
1321 		if (ret)
1322 			goto out;
1323 	}
1324 
1325 	if (ip->i_eattr) {
1326 		ret = gfs2_ea_dealloc(ip);
1327 		if (ret)
1328 			goto out;
1329 	}
1330 
1331 	if (!gfs2_is_stuffed(ip)) {
1332 		ret = gfs2_file_dealloc(ip);
1333 		if (ret)
1334 			goto out;
1335 	}
1336 
1337 	if (ip->i_gl)
1338 		gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
1339 
1340 	/*
1341 	 * As soon as we clear the bitmap for the dinode, gfs2_create_inode()
1342 	 * can get called to recreate it, or even gfs2_inode_lookup() if the
1343 	 * inode was recreated on another node in the meantime.
1344 	 *
1345 	 * However, inserting the new inode into the inode hash table will not
1346 	 * succeed until the old inode is removed, and that only happens after
1347 	 * ->evict_inode() returns.  The new inode is attached to its inode and
1348 	 *  iopen glocks after inserting it into the inode hash table, so at
1349 	 *  that point we can be sure that both glocks are unused.
1350 	 */
1351 
1352 	ret = gfs2_dinode_dealloc(ip);
1353 out:
1354 	return ret;
1355 }
1356 
1357 /*
1358  * evict_linked_inode - evict an inode whose dinode has not been unlinked
1359  * @inode: The inode to evict
1360  */
1361 static int evict_linked_inode(struct inode *inode)
1362 {
1363 	struct super_block *sb = inode->i_sb;
1364 	struct gfs2_sbd *sdp = sb->s_fs_info;
1365 	struct gfs2_inode *ip = GFS2_I(inode);
1366 	struct address_space *metamapping;
1367 	int ret;
1368 
1369 	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
1370 		       GFS2_LFC_EVICT_INODE);
1371 	metamapping = gfs2_glock2aspace(ip->i_gl);
1372 	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
1373 		filemap_fdatawrite(metamapping);
1374 		filemap_fdatawait(metamapping);
1375 	}
1376 	write_inode_now(inode, 1);
1377 	gfs2_ail_flush(ip->i_gl, 0);
1378 
1379 	ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
1380 	if (ret)
1381 		return ret;
1382 
1383 	/* Needs to be done before glock release & also in a transaction */
1384 	truncate_inode_pages(&inode->i_data, 0);
1385 	truncate_inode_pages(metamapping, 0);
1386 	gfs2_trans_end(sdp);
1387 	return 0;
1388 }
1389 
1390 /**
1391  * gfs2_evict_inode - Remove an inode from cache
1392  * @inode: The inode to evict
1393  *
1394  * There are three cases to consider:
1395  * 1. i_nlink == 0, we are the final opener (and must deallocate)
1396  * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
1397  * 3. i_nlink > 0
1398  *
1399  * If the fs is read only, then we have to treat all cases as per #3
1400  * since we are unable to do any deallocation. The inode will be
1401  * deallocated by the next read/write node to attempt an allocation
1402  * in the same resource group.
1403  *
1404  * We have to (at the moment) hold the inode's main lock to cover
1405  * the gap between unlocking the shared lock on the iopen lock and
1406  * taking the exclusive lock. I'd rather do a shared -> exclusive
1407  * conversion on the iopen lock, but we can change that later. This
1408  * is safe, just less efficient.
1409  */
1410 
1411 static void gfs2_evict_inode(struct inode *inode)
1412 {
1413 	struct super_block *sb = inode->i_sb;
1414 	struct gfs2_sbd *sdp = sb->s_fs_info;
1415 	struct gfs2_inode *ip = GFS2_I(inode);
1416 	struct gfs2_holder gh;
1417 	int ret;
1418 
1419 	if (inode->i_nlink || sb_rdonly(sb) || !ip->i_no_addr)
1420 		goto out;
1421 
1422 	/*
1423 	 * In case of an incomplete mount, gfs2_evict_inode() may be called for
1424 	 * system files without having an active journal to write to.  In that
1425 	 * case, skip the filesystem evict.
1426 	 */
1427 	if (!sdp->sd_jdesc)
1428 		goto out;
1429 
1430 	gfs2_holder_mark_uninitialized(&gh);
1431 	ret = evict_should_delete(inode, &gh);
1432 	if (ret == SHOULD_DEFER_EVICTION)
1433 		goto out;
1434 	if (ret == SHOULD_DELETE_DINODE)
1435 		ret = evict_unlinked_inode(inode);
1436 	else
1437 		ret = evict_linked_inode(inode);
1438 
1439 	if (gfs2_rs_active(&ip->i_res))
1440 		gfs2_rs_deltree(&ip->i_res);
1441 
1442 	if (gfs2_holder_initialized(&gh))
1443 		gfs2_glock_dq_uninit(&gh);
1444 	if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
1445 		fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
1446 out:
1447 	truncate_inode_pages_final(&inode->i_data);
1448 	if (ip->i_qadata)
1449 		gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
1450 	gfs2_rs_deltree(&ip->i_res);
1451 	gfs2_ordered_del_inode(ip);
1452 	clear_inode(inode);
1453 	gfs2_dir_hash_inval(ip);
1454 	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
1455 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1456 
1457 		glock_clear_object(gl, ip);
1458 		gfs2_glock_hold(gl);
1459 		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1460 		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
1461 		gfs2_glock_put_eventually(gl);
1462 	}
1463 	if (ip->i_gl) {
1464 		glock_clear_object(ip->i_gl, ip);
1465 		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
1466 		gfs2_glock_add_to_lru(ip->i_gl);
1467 		gfs2_glock_put_eventually(ip->i_gl);
1468 		ip->i_gl = NULL;
1469 	}
1470 }
1471 
1472 static struct inode *gfs2_alloc_inode(struct super_block *sb)
1473 {
1474 	struct gfs2_inode *ip;
1475 
1476 	ip = alloc_inode_sb(sb, gfs2_inode_cachep, GFP_KERNEL);
1477 	if (!ip)
1478 		return NULL;
1479 	ip->i_no_addr = 0;
1480 	ip->i_flags = 0;
1481 	ip->i_gl = NULL;
1482 	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
1483 	memset(&ip->i_res, 0, sizeof(ip->i_res));
1484 	RB_CLEAR_NODE(&ip->i_res.rs_node);
1485 	ip->i_rahead = 0;
1486 	return &ip->i_inode;
1487 }
1488 
1489 static void gfs2_free_inode(struct inode *inode)
1490 {
1491 	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
1492 }
1493 
1494 extern void free_local_statfs_inodes(struct gfs2_sbd *sdp)
1495 {
1496 	struct local_statfs_inode *lsi, *safe;
1497 
1498 	/* Run through the statfs inodes list to iput and free memory */
1499 	list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
1500 		if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
1501 			sdp->sd_sc_inode = NULL; /* belongs to this node */
1502 		if (lsi->si_sc_inode)
1503 			iput(lsi->si_sc_inode);
1504 		list_del(&lsi->si_list);
1505 		kfree(lsi);
1506 	}
1507 }
1508 
1509 extern struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
1510 					     unsigned int index)
1511 {
1512 	struct local_statfs_inode *lsi;
1513 
1514 	/* Return the local (per node) statfs inode in the
1515 	 * sdp->sd_sc_inodes_list corresponding to the 'index'. */
1516 	list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
1517 		if (lsi->si_jid == index)
1518 			return lsi->si_sc_inode;
1519 	}
1520 	return NULL;
1521 }
1522 
1523 const struct super_operations gfs2_super_ops = {
1524 	.alloc_inode		= gfs2_alloc_inode,
1525 	.free_inode		= gfs2_free_inode,
1526 	.write_inode		= gfs2_write_inode,
1527 	.dirty_inode		= gfs2_dirty_inode,
1528 	.evict_inode		= gfs2_evict_inode,
1529 	.put_super		= gfs2_put_super,
1530 	.sync_fs		= gfs2_sync_fs,
1531 	.freeze_super		= gfs2_freeze,
1532 	.thaw_super		= gfs2_unfreeze,
1533 	.statfs			= gfs2_statfs,
1534 	.drop_inode		= gfs2_drop_inode,
1535 	.show_options		= gfs2_show_options,
1536 };
1537 
1538