// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/kernel.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "xattr.h"
#include "lops.h"

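/*
 * The possible fates of an inode being evicted, as decided by
 * evict_should_delete() and acted upon by gfs2_evict_inode() below.
 */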
enum dinode_demise {
	SHOULD_DELETE_DINODE,
	SHOULD_NOT_DELETE_DINODE,
	SHOULD_DEFER_EVICTION,
};

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	sdp->sd_jdesc = NULL;
	while (!list_empty(&list)) {
		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
		gfs2_free_journal_extents(jd);
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		jd->jd_inode = NULL;
		kfree(jd);
	}
}

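/*
 * jdesc_find_i - look up a journal descriptor by journal id
 *
 * The caller must hold sd_jindex_spin; gfs2_jdesc_find() below is the
 * locked wrapper around this helper.
 */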
static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;
	int found = 0;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid) {
			found = 1;
			break;
		}
	}

	if (!found)
		jd = NULL;

	return jd;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}

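/*
 * gfs2_jdesc_check - sanity-check a journal inode
 *
 * A usable journal must be between 8 MB (8 << 20) and 1 GB (BIT(30)) in
 * size and fully preallocated.  As a side effect, the journal size in
 * filesystem blocks is cached in jd_blocks.
 */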
int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	u64 size = i_size_read(jd->jd_inode);

	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
		return -EIO;

	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;

	if (gfs2_write_alloc_required(ip, 0, size)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	return 0;
}

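/*
 * init_threads - start the logd and quotad kernel threads
 *
 * If quotad fails to start, the already-running logd thread is stopped
 * again so that the caller sees an all-or-nothing result.
 */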
static int init_threads(struct gfs2_sbd *sdp)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
	if (IS_ERR(p)) {
		error = PTR_ERR(p);
		fs_err(sdp, "can't start logd thread: %d\n", error);
		return error;
	}
	sdp->sd_logd_process = p;

	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
	if (IS_ERR(p)) {
		error = PTR_ERR(p);
		fs_err(sdp, "can't start quotad thread: %d\n", error);
		goto fail;
	}
	sdp->sd_quotad_process = p;
	return 0;

fail:
	kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;
	return error;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_holder freeze_gh;
	struct gfs2_log_header_host head;
	int error;

	error = init_threads(sdp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
				   LM_FLAG_NOEXP | GL_EXACT,
				   &freeze_gh);
	if (error)
		goto fail_threads;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
	if (gfs2_withdrawn(sdp)) {
		error = -EIO;
		goto fail;
	}

	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
	if (error || gfs2_withdrawn(sdp))
		goto fail;

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		error = -EIO;
		goto fail;
	}

	/*  Initialize the log head from the last log header found  */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (error || gfs2_withdrawn(sdp))
		goto fail;

	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_glock_dq_uninit(&freeze_gh);

	return 0;

fail:
	gfs2_glock_dq_uninit(&freeze_gh);
fail_threads:
	if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;
	if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;
	return error;
}

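/*
 * The next two helpers convert a statfs change record between its
 * on-disk big-endian form (struct gfs2_statfs_change) and its in-core
 * host-endian form (struct gfs2_statfs_change_host).
 */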
void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}

int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		error = gfs2_meta_inode_buffer(l_ip, &l_bh);
		if (error)
			goto out_m_bh;

		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, l_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

		brelse(l_bh);
	}

out_m_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

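/*
 * gfs2_statfs_change - record a local change to the statfs counters
 *
 * Each node accumulates allocations and frees in its own local statfs
 * file so that not every block allocation has to touch the cluster-wide
 * master file.  With the statfs_percent mount option, a master sync is
 * requested once the accumulated local free-space delta reaches the
 * given percentage of the master free count: for example, with
 * statfs_percent=5 and a master sc_free of 1000 blocks, a local delta
 * of +/-50 blocks wakes up the statfs sync thread.
 */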
void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct buffer_head *l_bh;
	s64 x, y;
	int need_sync = 0;
	int error;

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		return;

	gfs2_trans_add_meta(l_ip->i_gl, l_bh);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
	if (sdp->sd_args.ar_statfs_percent) {
		x = 100 * l_sc->sc_free;
		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
		if (x >= y || x <= -y)
			need_sync = 1;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	brelse(l_bh);
	if (need_sync)
		gfs2_wake_up_statfs(sdp);
}

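/*
 * update_statfs - fold the local statfs changes into the master file
 *
 * The caller must hold both buffers and have a transaction open (see
 * gfs2_statfs_sync() below).  The local record is zeroed once its
 * contents have been added to the master.
 */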
void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
		   struct buffer_head *l_bh)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	gfs2_trans_add_meta(l_ip->i_gl, l_bh);
	gfs2_trans_add_meta(m_ip->i_gl, m_bh);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);
}

int gfs2_statfs_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh, *l_bh;
	int error;

	sb_start_write(sb);
	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		goto out;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out_unlock;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		goto out_bh;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh2;

	update_statfs(sdp, m_bh, l_bh);
	sdp->sd_statfs_force_sync = 0;

	gfs2_trans_end(sdp);

out_bh2:
	brelse(l_bh);
out_bh:
	brelse(m_bh);
out_unlock:
	gfs2_glock_dq_uninit(&gh);
out:
	sb_end_write(sb);
	return error;
}

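/*
 * lfcc ("lock fs check clean") tracks one journal glock held by
 * gfs2_lock_fs_check_clean() below.
 */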
struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 *
 * Returns: errno
 */

static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NOEXP, &sdp->sd_freeze_gh);
	if (error)
		goto out;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh, false);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (error)
		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);

out:
	while (!list_empty(&list)) {
		lfcc = list_first_entry(&list, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	return error;
}

void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
	str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
	str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
	int ret = 0;
	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));

	if (flush_all)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_WRITE_INODE);
	if (bdi->wb.dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
	else
		filemap_fdatawrite(metamapping);
	if (flush_all)
		ret = filemap_fdatawait(metamapping);
	if (ret)
		mark_inode_dirty_sync(inode);
	else {
		spin_lock(&inode->i_lock);
		if (!(inode->i_flags & I_DIRTY))
			gfs2_ordered_del_inode(ip);
		spin_unlock(&inode->i_lock);
	}
	return ret;
}

/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and transaction lock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
 */

static void gfs2_dirty_inode(struct inode *inode, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int need_unlock = 0;
	int need_endtrans = 0;
	int ret;

	if (!(flags & I_DIRTY_INODE))
		return;
	if (unlikely(gfs2_withdrawn(sdp)))
		return;
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret) {
			fs_err(sdp, "dirty_inode: glock %d\n", ret);
			gfs2_dump_glock(NULL, ip->i_gl, true);
			return;
		}
		need_unlock = 1;
	} else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
		return;

	if (current->journal_info == NULL) {
		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
		if (ret) {
			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
			goto out;
		}
		need_endtrans = 1;
	}

	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		gfs2_trans_add_meta(ip->i_gl, bh);
		gfs2_dinode_out(ip, bh->b_data);
		brelse(bh);
	}

	if (need_endtrans)
		gfs2_trans_end(sdp);
out:
	if (need_unlock)
		gfs2_glock_dq_uninit(&gh);
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	struct gfs2_holder freeze_gh;
	int error = 0;
	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_holder_mark_uninitialized(&freeze_gh);
	if (sdp->sd_freeze_gl &&
	    !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
		if (!log_write_allowed) {
			error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
						   LM_ST_SHARED, LM_FLAG_TRY |
						   LM_FLAG_NOEXP | GL_EXACT,
						   &freeze_gh);
			if (error == GLR_TRYFAILED)
				error = 0;
		} else {
			error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
						   LM_ST_SHARED,
						   LM_FLAG_NOEXP | GL_EXACT,
						   &freeze_gh);
			if (error && !gfs2_withdrawn(sdp))
				return error;
		}
	}

	gfs2_flush_delete_work(sdp);
	if (!log_write_allowed && current == sdp->sd_quotad_process)
		fs_warn(sdp, "The quotad daemon is withdrawing.\n");
	else if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;

	if (!log_write_allowed && current == sdp->sd_logd_process)
		fs_warn(sdp, "The logd daemon is withdrawing.\n");
	else if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;

	if (log_write_allowed) {
		gfs2_quota_sync(sdp->sd_vfs, 0);
		gfs2_statfs_sync(sdp->sd_vfs, 0);

		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			       GFS2_LFC_MAKE_FS_RO);
		wait_event(sdp->sd_reserving_log_wait,
			   atomic_read(&sdp->sd_reserving_log) == 0);
		gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) ==
				 sdp->sd_jdesc->jd_blocks);
	} else {
		wait_event_timeout(sdp->sd_reserving_log_wait,
				   atomic_read(&sdp->sd_reserving_log) == 0,
				   HZ * 5);
	}
	if (gfs2_holder_initialized(&freeze_gh))
		gfs2_glock_dq_uninit(&freeze_gh);

	gfs2_quota_cleanup(sdp);

	if (!log_write_allowed)
		sdp->sd_vfs->s_flags |= SB_RDONLY;

	return error;
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 *
 */

static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;
	struct gfs2_jdesc *jd;

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
	smp_mb();

	/* Wait on outstanding recovery */
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	if (!sb_rdonly(sb)) {
		error = gfs2_make_fs_ro(sdp);
		if (error)
			gfs2_io_error(sdp);
	}
	WARN_ON(gfs2_withdrawing(sdp));

	/*  At this point, we're through modifying the disk  */

	/*  Release stuff  */

	iput(sdp->sd_jindex);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_freeze_gl);

	if (!sdp->sd_args.ar_spectator) {
		if (gfs2_holder_initialized(&sdp->sd_journal_gh))
			gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
			gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		free_local_statfs_inodes(sdp);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/*  Take apart glock structures and buffer lists  */
	gfs2_gl_hash_clear(sdp);
	truncate_inode_pages_final(&sdp->sd_aspace);
	gfs2_delete_debugfs_file(sdp);
	/*  Unmount the locking protocol  */
	gfs2_lm_unmount(sdp);

	/*  At this point, we're through participating in the lockspace  */
	gfs2_sys_fs_del(sdp);
	free_sbd(sdp);
}

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion
 *
 * Flushes the log to disk.
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	gfs2_quota_sync(sb, -1);
	if (wait)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_SYNC_FS);
	return sdp->sd_log_error;
}

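/*
 * gfs2_freeze_func - work function that finishes a freeze/thaw cycle
 *
 * Runs as sd_freeze_work, which is queued from the freeze glock
 * machinery outside this file (see glops.c).  It retakes the freeze
 * glock in the shared state and thaws the filesystem, withdrawing if
 * either step fails.
 */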
void gfs2_freeze_func(struct work_struct *work)
{
	int error;
	struct gfs2_holder freeze_gh;
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
	struct super_block *sb = sdp->sd_vfs;

	atomic_inc(&sb->s_active);
	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
				   LM_FLAG_NOEXP | GL_EXACT, &freeze_gh);
	if (error) {
		fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error);
		gfs2_assert_withdraw(sdp, 0);
	} else {
		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
		error = thaw_super(sb);
		if (error) {
			fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
				error);
			gfs2_assert_withdraw(sdp, 0);
		}
		gfs2_glock_dq_uninit(&freeze_gh);
	}
	deactivate_super(sb);
	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
	wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
}

797 
798 /**
799  * gfs2_freeze - prevent further writes to the filesystem
800  * @sb: the VFS structure for the filesystem
801  *
802  */
803 
804 static int gfs2_freeze(struct super_block *sb)
805 {
806 	struct gfs2_sbd *sdp = sb->s_fs_info;
807 	int error = 0;
808 
809 	mutex_lock(&sdp->sd_freeze_mutex);
810 	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
811 		goto out;
812 
813 	for (;;) {
814 		if (gfs2_withdrawn(sdp)) {
815 			error = -EINVAL;
816 			goto out;
817 		}
818 
819 		error = gfs2_lock_fs_check_clean(sdp);
820 		if (!error)
821 			break;
822 
823 		if (error == -EBUSY)
824 			fs_err(sdp, "waiting for recovery before freeze\n");
825 		else if (error == -EIO) {
826 			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
827 			       "to recovery error.\n");
828 			goto out;
829 		} else {
830 			fs_err(sdp, "error freezing FS: %d\n", error);
831 		}
832 		fs_err(sdp, "retrying...\n");
833 		msleep(1000);
834 	}
835 	set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
836 out:
837 	mutex_unlock(&sdp->sd_freeze_mutex);
838 	return error;
839 }
840 
841 /**
842  * gfs2_unfreeze - reallow writes to the filesystem
843  * @sb: the VFS structure for the filesystem
844  *
845  */
846 
847 static int gfs2_unfreeze(struct super_block *sb)
848 {
849 	struct gfs2_sbd *sdp = sb->s_fs_info;
850 
851 	mutex_lock(&sdp->sd_freeze_mutex);
852         if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
853 	    !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
854 		mutex_unlock(&sdp->sd_freeze_mutex);
855                 return 0;
856 	}
857 
858 	gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
859 	mutex_unlock(&sdp->sd_freeze_mutex);
860 	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
861 }
862 
/**
 * statfs_slow_fill - fill in the sc for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;
	for (x = 0; x < slots; x++)
		gfs2_holder_mark_uninitialized(gha + x);

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error) {
						struct gfs2_rgrpd *rgd =
							gfs2_glock2rgrp(gh->gh_gl);

						error = statfs_slow_fill(rgd, sc);
					}
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gfs2_holder_initialized(gh))
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	kfree(gha);
	return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */

static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}

/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: a dentry in the filesystem being queried
 * @buf: the kstatfs buffer to fill in
 *
 * Returns: 0 on success or error code
 */

static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
	else
		error = gfs2_statfs_i(sdp, &sc);

	if (error)
		return error;

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;

	return 0;
}

/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */

static int gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) &&
	    inode->i_nlink &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			clear_nlink(inode);
	}

	/*
	 * When an inode's link count has dropped to zero under memory
	 * pressure, defer deleting the inode to the delete workqueue.  This
	 * avoids calling into DLM under memory pressure, which can deadlock.
	 */
	if (!inode->i_nlink &&
	    unlikely(current->flags & PF_MEMALLOC) &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		gfs2_glock_hold(gl);
		if (!gfs2_queue_delete_work(gl, 0))
			gfs2_glock_queue_put(gl);
		return false;
	}

	return generic_drop_inode(inode);
}

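/* Return 1 if d2 is d1 itself or an ancestor of d1, else 0 */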
static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
	do {
		if (d1 == d2)
			return 1;
		d1 = d1->d_parent;
	} while (!IS_ROOT(d1));
	return 0;
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @root: root of this (sub)tree
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	int val;

	if (is_ancestor(root, sdp->sd_master_dir))
		seq_puts(s, ",meta");
	if (args->ar_lockproto[0])
		seq_show_option(s, "lockproto", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_show_option(s, "locktable", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_show_option(s, "hostdata", args->ar_hostdata);
	if (args->ar_spectator)
		seq_puts(s, ",spectator");
	if (args->ar_localflocks)
		seq_puts(s, ",localflocks");
	if (args->ar_debug)
		seq_puts(s, ",debug");
	if (args->ar_posix_acl)
		seq_puts(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_puts(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_puts(s, ",discard");
	val = sdp->sd_tune.gt_logd_secs;
	if (val != 30)
		seq_printf(s, ",commit=%d", val);
	val = sdp->sd_tune.gt_statfs_quantum;
	if (val != 30)
		seq_printf(s, ",statfs_quantum=%d", val);
	else if (sdp->sd_tune.gt_statfs_slow)
		seq_puts(s, ",statfs_quantum=0");
	val = sdp->sd_tune.gt_quota_quantum;
	if (val != 60)
		seq_printf(s, ",quota_quantum=%d", val);
	if (args->ar_statfs_percent)
		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
		const char *state;

		switch (args->ar_errors) {
		case GFS2_ERRORS_WITHDRAW:
			state = "withdraw";
			break;
		case GFS2_ERRORS_PANIC:
			state = "panic";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",errors=%s", state);
	}
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		seq_puts(s, ",nobarrier");
	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
		seq_puts(s, ",demote_interface_used");
	if (args->ar_rgrplvb)
		seq_puts(s, ",rgrplvb");
	if (args->ar_loccookie)
		seq_puts(s, ",loccookie");
	return 0;
}

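/*
 * gfs2_final_release_pages - drop the cached pages of a dying inode
 *
 * Truncates both the inode's data pages and the glock's metadata
 * address space, and clears the glock's dirty and log-flush bits once
 * no revokes remain outstanding.
 */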
static void gfs2_final_release_pages(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_glock *gl = ip->i_gl;

	truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (atomic_read(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
	}
}

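/*
 * gfs2_dinode_dealloc - free the on-disk inode block itself
 *
 * By this point all data and metadata blocks must already have been
 * deallocated, so the inode should be down to exactly one block (the
 * dinode); anything else is reported as a consistency error.
 */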
static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder gh;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_qs;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_qs;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
				 sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_rg_gunlock;

	gfs2_free_di(rgd, ip);

	gfs2_final_release_pages(ip);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&gh);
out_qs:
	gfs2_quota_unhold(ip);
	return error;
}

/**
 * gfs2_glock_put_eventually
 * @gl:	The glock to put
 *
 * When under memory pressure, trigger a deferred glock put to make sure we
 * won't call into DLM and deadlock.  Otherwise, put the glock directly.
 */

static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
	if (current->flags & PF_MEMALLOC)
		gfs2_glock_queue_put(gl);
	else
		gfs2_glock_put(gl);
}

static bool gfs2_upgrade_iopen_glock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder *gh = &ip->i_iopen_gh;
	long timeout = 5 * HZ;
	int error;

	gh->gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_wait(gh);

	/*
	 * If there are no other lock holders, we'll get the lock immediately.
	 * Otherwise, the other nodes holding the lock will be notified about
	 * our locking request.  If they don't have the inode open, they'll
	 * evict the cached inode and release the lock.  Otherwise, if they
	 * poke the inode glock, we'll take this as an indication that they
	 * still need the iopen glock and that they'll take care of deleting
	 * the inode when they're done.  As a last resort, if another node
	 * keeps holding the iopen glock without showing any activity on the
	 * inode glock, we'll eventually time out.
	 *
	 * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
	 * locking request as an optimization to notify lock holders as soon as
	 * possible.  Without that flag, they'd be notified implicitly by the
	 * second locking request.
	 */

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error != GLR_TRYFAILED)
		return !error;

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error)
		return false;

	timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
		timeout);
	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
		gfs2_glock_dq(gh);
		return false;
	}
	return true;
}

/**
 * evict_should_delete - determine whether the inode is eligible for deletion
 * @inode: The inode to evict
 * @gh: The glock holder to use for the inode glock
 *
 * This function determines whether the evicted inode is eligible to be deleted
 * and locks the inode glock.
 *
 * Returns: the fate of the dinode
 */
static enum dinode_demise evict_should_delete(struct inode *inode,
					      struct gfs2_holder *gh)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int ret;

	if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
		BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
		goto should_delete;
	}

	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
		return SHOULD_DEFER_EVICTION;

	/* Deletes should never happen under memory pressure anymore.  */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		return SHOULD_DEFER_EVICTION;

	/* Must not read inode block until block type has been verified */
	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
	if (unlikely(ret)) {
		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		return SHOULD_DEFER_EVICTION;
	}

	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
		return SHOULD_NOT_DELETE_DINODE;
	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
	if (ret)
		return SHOULD_NOT_DELETE_DINODE;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		ret = gfs2_inode_refresh(ip);
		if (ret)
			return SHOULD_NOT_DELETE_DINODE;
	}

	/*
	 * The inode may have been recreated in the meantime.
	 */
	if (inode->i_nlink)
		return SHOULD_NOT_DELETE_DINODE;

should_delete:
	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
		if (!gfs2_upgrade_iopen_glock(inode)) {
			gfs2_holder_uninit(&ip->i_iopen_gh);
			return SHOULD_NOT_DELETE_DINODE;
		}
	}
	return SHOULD_DELETE_DINODE;
}

/**
 * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
 * @inode: The inode to evict
 */
static int evict_unlinked_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		ret = gfs2_dir_exhash_dealloc(ip);
		if (ret)
			goto out;
	}

	if (ip->i_eattr) {
		ret = gfs2_ea_dealloc(ip);
		if (ret)
			goto out;
	}

	if (!gfs2_is_stuffed(ip)) {
		ret = gfs2_file_dealloc(ip);
		if (ret)
			goto out;
	}

	/*
	 * We're about to clear the bitmap for the dinode, but as soon as we
	 * do, gfs2_create_inode can create another inode at the same block
	 * location and try to set gl_object again. We clear gl_object here so
	 * that subsequent inode creates don't see an old gl_object.
	 */
	glock_clear_object(ip->i_gl, ip);
	ret = gfs2_dinode_dealloc(ip);
	gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
out:
	return ret;
}

/**
 * evict_linked_inode - evict an inode whose dinode has not been unlinked
 * @inode: The inode to evict
 */
static int evict_linked_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct address_space *metamapping;
	int ret;

	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_EVICT_INODE);
	metamapping = gfs2_glock2aspace(ip->i_gl);
	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
		filemap_fdatawrite(metamapping);
		filemap_fdatawait(metamapping);
	}
	write_inode_now(inode, 1);
	gfs2_ail_flush(ip->i_gl, 0);

	ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (ret)
		return ret;

	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(metamapping, 0);
	gfs2_trans_end(sdp);
	return 0;
}

/**
 * gfs2_evict_inode - Remove an inode from cache
 * @inode: The inode to evict
 *
 * There are three cases to consider:
 * 1. i_nlink == 0, we are the final opener (and must deallocate)
 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
 * 3. i_nlink > 0
 *
 * If the fs is read only, then we have to treat all cases as per #3
 * since we are unable to do any deallocation. The inode will be
 * deallocated by the next read/write node to attempt an allocation
 * in the same resource group
 *
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */

static void gfs2_evict_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (test_bit(GIF_FREE_VFS_INODE, &ip->i_flags)) {
		clear_inode(inode);
		return;
	}

	if (inode->i_nlink || sb_rdonly(sb))
		goto out;

	gfs2_holder_mark_uninitialized(&gh);
	ret = evict_should_delete(inode, &gh);
	if (ret == SHOULD_DEFER_EVICTION)
		goto out;
	if (ret == SHOULD_DELETE_DINODE)
		ret = evict_unlinked_inode(inode);
	else
		ret = evict_linked_inode(inode);

	if (gfs2_rs_active(&ip->i_res))
		gfs2_rs_deltree(&ip->i_res);

	if (gfs2_holder_initialized(&gh)) {
		glock_clear_object(ip->i_gl, ip);
		gfs2_glock_dq_uninit(&gh);
	}
	if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
		fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
out:
	truncate_inode_pages_final(&inode->i_data);
	if (ip->i_qadata)
		gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
	gfs2_rs_delete(ip, NULL);
	gfs2_ordered_del_inode(ip);
	clear_inode(inode);
	gfs2_dir_hash_inval(ip);
	if (ip->i_gl) {
		glock_clear_object(ip->i_gl, ip);
		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
		gfs2_glock_add_to_lru(ip->i_gl);
		gfs2_glock_put_eventually(ip->i_gl);
		ip->i_gl = NULL;
	}
	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		glock_clear_object(gl, ip);
		if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
			ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
			gfs2_glock_dq(&ip->i_iopen_gh);
		}
		gfs2_glock_hold(gl);
		gfs2_holder_uninit(&ip->i_iopen_gh);
		gfs2_glock_put_eventually(gl);
	}
}

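/*
 * gfs2_alloc_inode and gfs2_free_inode are the ->alloc_inode and
 * ->free_inode super_operations, backing struct gfs2_inode with the
 * gfs2_inode_cachep slab cache.
 */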
static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
	if (!ip)
		return NULL;
	ip->i_flags = 0;
	ip->i_gl = NULL;
	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
	memset(&ip->i_res, 0, sizeof(ip->i_res));
	RB_CLEAR_NODE(&ip->i_res.rs_node);
	ip->i_rahead = 0;
	return &ip->i_inode;
}

static void gfs2_free_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
}

void free_local_statfs_inodes(struct gfs2_sbd *sdp)
{
	struct local_statfs_inode *lsi, *safe;

	/* Run through the statfs inodes list to iput and free memory */
	list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
			sdp->sd_sc_inode = NULL; /* belongs to this node */
		if (lsi->si_sc_inode)
			iput(lsi->si_sc_inode);
		list_del(&lsi->si_list);
		kfree(lsi);
	}
}

struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
				      unsigned int index)
{
	struct local_statfs_inode *lsi;

	/* Return the local (per node) statfs inode in the
	 * sdp->sd_sc_inodes_list corresponding to the 'index'. */
	list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == index)
			return lsi->si_sc_inode;
	}
	return NULL;
}


const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.free_inode		= gfs2_free_inode,
	.write_inode		= gfs2_write_inode,
	.dirty_inode		= gfs2_dirty_inode,
	.evict_inode		= gfs2_evict_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_super		= gfs2_freeze,
	.thaw_super		= gfs2_unfreeze,
	.statfs			= gfs2_statfs,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};