// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/kernel.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "xattr.h"
#include "lops.h"

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;

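	/*
	 * Move the whole journal list onto a private list head while holding
	 * sd_jindex_spin, so the entries can be freed without the lock held:
	 * list_add() hooks @list into the chain and list_del_init() detaches
	 * the now-empty original head.
	 */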
	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	sdp->sd_jdesc = NULL;
	while (!list_empty(&list)) {
		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
		gfs2_free_journal_extents(jd);
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		jd->jd_inode = NULL;
		kfree(jd);
	}
}

static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid)
			return jd;
	}

	return NULL;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}

int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	u64 size = i_size_read(jd->jd_inode);

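	/* A journal must be no smaller than 8 MiB and no larger than 1 GiB (BIT(30)). */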
	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
		return -EIO;

	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;

	if (gfs2_write_alloc_required(ip, 0, size)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	return 0;
}

static int init_threads(struct gfs2_sbd *sdp)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
	if (IS_ERR(p)) {
		error = PTR_ERR(p);
		fs_err(sdp, "can't start logd thread: %d\n", error);
		return error;
	}
	sdp->sd_logd_process = p;

	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
	if (IS_ERR(p)) {
		error = PTR_ERR(p);
		fs_err(sdp, "can't start quotad thread: %d\n", error);
		goto fail;
	}
	sdp->sd_quotad_process = p;
	return 0;

fail:
	kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;
	return error;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_holder freeze_gh;
	struct gfs2_log_header_host head;
	int error;

	error = init_threads(sdp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
				   &freeze_gh);
	if (error)
		goto fail_threads;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
	if (gfs2_withdrawn(sdp)) {
		error = -EIO;
		goto fail;
	}

	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
	if (error || gfs2_withdrawn(sdp))
		goto fail;

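	/*
	 * A cleanly shut down journal ends with a log header carrying the
	 * UNMOUNT flag; anything else means the log was not shut down
	 * properly and the filesystem is inconsistent.
	 */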
	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		error = -EIO;
		goto fail;
	}

	/*  Initialize the log head from the journal's last log header  */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (error || gfs2_withdrawn(sdp))
		goto fail;

	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_glock_dq_uninit(&freeze_gh);

	return 0;

fail:
	freeze_gh.gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_uninit(&freeze_gh);
fail_threads:
	if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;
	if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;
	return error;
}

void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

static void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}

int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		error = gfs2_meta_inode_buffer(l_ip, &l_bh);
		if (error)
			goto out_m_bh;

		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, l_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

		brelse(l_bh);
	}

out_m_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct buffer_head *l_bh;
	s64 x, y;
	int need_sync = 0;
	int error;

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		return;

	gfs2_trans_add_meta(l_ip->i_gl, l_bh);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
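	/*
	 * With statfs_percent set, wake up the statfs syncer once the local
	 * free-block delta reaches that percentage of the master free count,
	 * i.e. once |sc_free| * 100 >= m_sc->sc_free * ar_statfs_percent.
	 */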
	if (sdp->sd_args.ar_statfs_percent) {
		x = 100 * l_sc->sc_free;
		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
		if (x >= y || x <= -y)
			need_sync = 1;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	brelse(l_bh);
	if (need_sync)
		gfs2_wake_up_statfs(sdp);
}

void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
		   struct buffer_head *l_bh)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	gfs2_trans_add_meta(l_ip->i_gl, l_bh);
	gfs2_trans_add_meta(m_ip->i_gl, m_bh);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
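	/*
	 * The host structure mirrors the on-disk struct gfs2_statfs_change
	 * (three 64-bit counters), so clearing sizeof(struct
	 * gfs2_statfs_change) bytes resets the whole local change set.
	 */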
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);
}

int gfs2_statfs_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh, *l_bh;
	int error;

	sb_start_write(sb);
	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		goto out;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out_unlock;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		goto out_bh;

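	/* The transaction dirties two dinode buffers: master and local. */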
	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh2;

	update_statfs(sdp, m_bh, l_bh);
	sdp->sd_statfs_force_sync = 0;

	gfs2_trans_end(sdp);

out_bh2:
	brelse(l_bh);
out_bh:
	brelse(m_bh);
out_unlock:
	gfs2_glock_dq_uninit(&gh);
out:
	sb_end_write(sb);
	return error;
}

struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 *
 * Returns: errno
 */

static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error;

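	/*
	 * Grab a shared glock on every journal inode so that none of the
	 * journals can change while we check each log head for the clean
	 * (UNMOUNT) flag below.
	 */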
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
				   GL_NOCACHE, &sdp->sd_freeze_gh);
	if (error)
		goto out;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh, false);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (error)
		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);

out:
	while (!list_empty(&list)) {
		lfcc = list_first_entry(&list, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	return error;
}

void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
	str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
	str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

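	/* The in-core inode keeps a single allocation goal; it is written to
	   both the metadata and the data goal fields on disk. */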
	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
	int ret = 0;
	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));

	if (flush_all)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_WRITE_INODE);
	if (bdi->wb.dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
	else
		filemap_fdatawrite(metamapping);
	if (flush_all)
		ret = filemap_fdatawait(metamapping);
	if (ret)
		mark_inode_dirty_sync(inode);
	else {
		spin_lock(&inode->i_lock);
		if (!(inode->i_flags & I_DIRTY))
			gfs2_ordered_del_inode(ip);
		spin_unlock(&inode->i_lock);
	}
	return ret;
}

/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and transaction lock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
 */

static void gfs2_dirty_inode(struct inode *inode, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int need_unlock = 0;
	int need_endtrans = 0;
	int ret;

	if (!(flags & I_DIRTY_INODE))
		return;
	if (unlikely(gfs2_withdrawn(sdp)))
		return;
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret) {
			fs_err(sdp, "dirty_inode: glock %d\n", ret);
			return;
		}
		need_unlock = 1;
	} else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
		return;

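	/* If no transaction is attached to this context, open one for the
	   dinode update and end it again below. */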
	if (current->journal_info == NULL) {
		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
		if (ret) {
			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
			goto out;
		}
		need_endtrans = 1;
	}

	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		gfs2_trans_add_meta(ip->i_gl, bh);
		gfs2_dinode_out(ip, bh->b_data);
		brelse(bh);
	}

	if (need_endtrans)
		gfs2_trans_end(sdp);
out:
	if (need_unlock)
		gfs2_glock_dq_uninit(&gh);
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	struct gfs2_holder freeze_gh;
	int error = 0;
	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

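	/*
	 * SDF_JOURNAL_LIVE is cleared when the filesystem withdraws; in that
	 * case nothing must be written to the log below.
	 */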
	gfs2_holder_mark_uninitialized(&freeze_gh);
	if (sdp->sd_freeze_gl &&
	    !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
		if (!log_write_allowed) {
			error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
						   LM_ST_SHARED, GL_NOCACHE |
						   LM_FLAG_TRY, &freeze_gh);
			if (error == GLR_TRYFAILED)
				error = 0;
		} else {
			error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
						   LM_ST_SHARED, GL_NOCACHE,
						   &freeze_gh);
			if (error && !gfs2_withdrawn(sdp))
				return error;
		}
	}

	gfs2_flush_delete_work(sdp);
	if (!log_write_allowed && current == sdp->sd_quotad_process)
		fs_warn(sdp, "The quotad daemon is withdrawing.\n");
	else if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;

	if (!log_write_allowed && current == sdp->sd_logd_process)
		fs_warn(sdp, "The logd daemon is withdrawing.\n");
	else if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;

	if (log_write_allowed) {
		gfs2_quota_sync(sdp->sd_vfs, 0);
		gfs2_statfs_sync(sdp->sd_vfs, 0);

		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			       GFS2_LFC_MAKE_FS_RO);
		wait_event(sdp->sd_reserving_log_wait,
			   atomic_read(&sdp->sd_reserving_log) == 0);
		gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) ==
				 sdp->sd_jdesc->jd_blocks);
	} else {
		wait_event_timeout(sdp->sd_reserving_log_wait,
				   atomic_read(&sdp->sd_reserving_log) == 0,
				   HZ * 5);
	}
	if (gfs2_holder_initialized(&freeze_gh))
		gfs2_glock_dq_uninit(&freeze_gh);

	gfs2_quota_cleanup(sdp);

	if (!log_write_allowed)
		sdp->sd_vfs->s_flags |= SB_RDONLY;

	return error;
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 *
 */

static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;
	struct gfs2_jdesc *jd;

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
	smp_mb();

	/* Wait on outstanding recovery */
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	if (!sb_rdonly(sb)) {
		error = gfs2_make_fs_ro(sdp);
		if (error)
			gfs2_io_error(sdp);
	}
	/*  At this point, we're through modifying the disk  */

	/*  Release stuff  */

	iput(sdp->sd_jindex);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_freeze_gl);

	if (!sdp->sd_args.ar_spectator) {
		if (gfs2_holder_initialized(&sdp->sd_journal_gh))
			gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
			gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		iput(sdp->sd_sc_inode);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/*  Take apart glock structures and buffer lists  */
	gfs2_gl_hash_clear(sdp);
	gfs2_delete_debugfs_file(sdp);
	/*  Unmount the locking protocol  */
	gfs2_lm_unmount(sdp);

	/*  At this point, we're through participating in the lockspace  */
	gfs2_sys_fs_del(sdp);
}

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion
 *
 * Flushes the log to disk.
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	gfs2_quota_sync(sb, -1);
	if (wait)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_SYNC_FS);
	return sdp->sd_log_error;
}

void gfs2_freeze_func(struct work_struct *work)
{
	int error;
	struct gfs2_holder freeze_gh;
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
	struct super_block *sb = sdp->sd_vfs;

	atomic_inc(&sb->s_active);
	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
				   &freeze_gh);
	if (error) {
		fs_info(sdp, "GFS2: couldn't get freeze lock: %d\n", error);
		gfs2_assert_withdraw(sdp, 0);
	} else {
		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
		error = thaw_super(sb);
		if (error) {
			fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
				error);
			gfs2_assert_withdraw(sdp, 0);
		}
		if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			freeze_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&freeze_gh);
	}
	deactivate_super(sb);
	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
	wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
}

/**
 * gfs2_freeze - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_freeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error = 0;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
		goto out;

	for (;;) {
		if (gfs2_withdrawn(sdp)) {
			error = -EINVAL;
			goto out;
		}

		error = gfs2_lock_fs_check_clean(sdp);
		if (!error)
			break;

		if (error == -EBUSY)
			fs_err(sdp, "waiting for recovery before freeze\n");
		else if (error == -EIO) {
			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
			       "to recovery error.\n");
			goto out;
		} else {
			fs_err(sdp, "error freezing FS: %d\n", error);
		}
		fs_err(sdp, "retrying...\n");
		msleep(1000);
	}
	set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	return error;
}

/**
 * gfs2_unfreeze - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_unfreeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
	    !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
		mutex_unlock(&sdp->sd_freeze_mutex);
		return 0;
	}

	gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
	mutex_unlock(&sdp->sd_freeze_mutex);
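	/* Wait for gfs2_freeze_func to finish thawing and to clear
	   SDF_FS_FROZEN before returning. */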
	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
}

/**
 * statfs_slow_fill - fill in the sc structure for a given rgrp
 * @rgd: the resource group
 * @sc: the sc structure
 *
 * Returns: 0 on success
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;
	for (x = 0; x < slots; x++)
		gfs2_holder_mark_uninitialized(gha + x);

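	/*
	 * Keep up to 64 asynchronous shared locking requests in flight and
	 * harvest the ones that have completed on each pass, until every
	 * resource group has been counted.
	 */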
900 
901 	for (;;) {
902 		done = 1;
903 
904 		for (x = 0; x < slots; x++) {
905 			gh = gha + x;
906 
907 			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
908 				err = gfs2_glock_wait(gh);
909 				if (err) {
910 					gfs2_holder_uninit(gh);
911 					error = err;
912 				} else {
913 					if (!error) {
914 						struct gfs2_rgrpd *rgd =
915 							gfs2_glock2rgrp(gh->gh_gl);
916 
917 						error = statfs_slow_fill(rgd, sc);
918 					}
919 					gfs2_glock_dq_uninit(gh);
920 				}
921 			}
922 
923 			if (gfs2_holder_initialized(gh))
924 				done = 0;
925 			else if (rgd_next && !error) {
926 				error = gfs2_glock_nq_init(rgd_next->rd_gl,
927 							   LM_ST_SHARED,
928 							   GL_ASYNC,
929 							   gh);
930 				rgd_next = gfs2_rgrpd_get_next(rgd_next);
931 				done = 0;
932 			}
933 
934 			if (signal_pending(current))
935 				error = -ERESTARTSYS;
936 		}
937 
938 		if (done)
939 			break;
940 
941 		yield();
942 	}
943 
944 	kfree(gha);
945 	return error;
946 }
947 
/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */

static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}

/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The dentry to stat
 * @buf: The buffer
 *
 * Returns: 0 on success or error code
 */

static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
	else
		error = gfs2_statfs_i(sdp, &sc);

	if (error)
		return error;

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
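	/* Any free block can be allocated as a dinode, so free blocks count
	   toward both the free space and the free inode totals. */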
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;

	return 0;
}

/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */

static int gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) &&
	    inode->i_nlink &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			clear_nlink(inode);
	}

	/*
	 * When an inode's link count has dropped to zero under memory
	 * pressure, defer deleting the inode to the delete workqueue.  This
	 * avoids calling into DLM under memory pressure, which can deadlock.
	 */
	if (!inode->i_nlink &&
	    unlikely(current->flags & PF_MEMALLOC) &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		gfs2_glock_hold(gl);
		if (!gfs2_queue_delete_work(gl, 0))
			gfs2_glock_queue_put(gl);
		return false;
	}

	return generic_drop_inode(inode);
}

static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
	do {
		if (d1 == d2)
			return 1;
		d1 = d1->d_parent;
	} while (!IS_ROOT(d1));
	return 0;
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @root: root of this (sub)tree
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	int val;

	if (is_ancestor(root, sdp->sd_master_dir))
		seq_puts(s, ",meta");
	if (args->ar_lockproto[0])
		seq_show_option(s, "lockproto", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_show_option(s, "locktable", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_show_option(s, "hostdata", args->ar_hostdata);
	if (args->ar_spectator)
		seq_puts(s, ",spectator");
	if (args->ar_localflocks)
		seq_puts(s, ",localflocks");
	if (args->ar_debug)
		seq_puts(s, ",debug");
	if (args->ar_posix_acl)
		seq_puts(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_puts(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_puts(s, ",discard");
	val = sdp->sd_tune.gt_logd_secs;
	if (val != 30)
		seq_printf(s, ",commit=%d", val);
	val = sdp->sd_tune.gt_statfs_quantum;
	if (val != 30)
		seq_printf(s, ",statfs_quantum=%d", val);
	else if (sdp->sd_tune.gt_statfs_slow)
		seq_puts(s, ",statfs_quantum=0");
	val = sdp->sd_tune.gt_quota_quantum;
	if (val != 60)
		seq_printf(s, ",quota_quantum=%d", val);
	if (args->ar_statfs_percent)
		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
		const char *state;

		switch (args->ar_errors) {
		case GFS2_ERRORS_WITHDRAW:
			state = "withdraw";
			break;
		case GFS2_ERRORS_PANIC:
			state = "panic";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",errors=%s", state);
	}
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		seq_puts(s, ",nobarrier");
	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
		seq_puts(s, ",demote_interface_used");
	if (args->ar_rgrplvb)
		seq_puts(s, ",rgrplvb");
	if (args->ar_loccookie)
		seq_puts(s, ",loccookie");
	return 0;
}

static void gfs2_final_release_pages(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_glock *gl = ip->i_gl;

	truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (atomic_read(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
	}
}

static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder gh;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_qs;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_qs;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
				 sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_rg_gunlock;

	gfs2_free_di(rgd, ip);

	gfs2_final_release_pages(ip);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&gh);
out_qs:
	gfs2_quota_unhold(ip);
	return error;
}

/**
 * gfs2_glock_put_eventually - Put a glock, deferring it under memory pressure
 * @gl:	The glock to put
 *
 * When under memory pressure, trigger a deferred glock put to make sure we
 * won't call into DLM and deadlock.  Otherwise, put the glock directly.
 */

static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
	if (current->flags & PF_MEMALLOC)
		gfs2_glock_queue_put(gl);
	else
		gfs2_glock_put(gl);
}

static bool gfs2_upgrade_iopen_glock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder *gh = &ip->i_iopen_gh;
	long timeout = 5 * HZ;
	int error;

	gh->gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_wait(gh);

	/*
	 * If there are no other lock holders, we'll get the lock immediately.
	 * Otherwise, the other nodes holding the lock will be notified about
	 * our locking request.  If they don't have the inode open, they'll
	 * evict the cached inode and release the lock.  Otherwise, if they
	 * poke the inode glock, we'll take this as an indication that they
	 * still need the iopen glock and that they'll take care of deleting
	 * the inode when they're done.  As a last resort, if another node
	 * keeps holding the iopen glock without showing any activity on the
	 * inode glock, we'll eventually time out.
	 *
	 * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
	 * locking request as an optimization to notify lock holders as soon as
	 * possible.  Without that flag, they'd be notified implicitly by the
	 * second locking request.
	 */

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error != GLR_TRYFAILED)
		return !error;

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error)
		return false;

	timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
		timeout);
	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
		gfs2_glock_dq(gh);
		return false;
	}
	return true;
}

/**
 * gfs2_evict_inode - Remove an inode from cache
 * @inode: The inode to evict
 *
 * There are three cases to consider:
 * 1. i_nlink == 0, we are the final opener (and must deallocate)
 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
 * 3. i_nlink > 0
 *
 * If the fs is read only, then we have to treat all cases as per #3
 * since we are unable to do any deallocation. The inode will be
 * deallocated by the next read/write node to attempt an allocation
 * in the same resource group.
 *
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */

static void gfs2_evict_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	struct address_space *metamapping;
	int error;

	if (test_bit(GIF_FREE_VFS_INODE, &ip->i_flags)) {
		clear_inode(inode);
		return;
	}

	if (inode->i_nlink || sb_rdonly(sb))
		goto out;

	if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
		BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
		gfs2_holder_mark_uninitialized(&gh);
		goto out_delete;
	}

	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
		goto out;

	/* Deletes should never happen under memory pressure anymore.  */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		goto out;

	/* Must not read inode block until block type has been verified */
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
	if (unlikely(error)) {
		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		goto out;
	}

	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
		goto out_truncate;
	error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
	if (error)
		goto out_truncate;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			goto out_truncate;
	}

	/*
	 * The inode may have been recreated in the meantime.
	 */
	if (inode->i_nlink)
		goto out_truncate;

out_delete:
	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
		if (!gfs2_upgrade_iopen_glock(inode)) {
			gfs2_holder_uninit(&ip->i_iopen_gh);
			goto out_truncate;
		}
	}

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		error = gfs2_dir_exhash_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	if (ip->i_eattr) {
		error = gfs2_ea_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	if (!gfs2_is_stuffed(ip)) {
		error = gfs2_file_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	/* We're about to clear the bitmap for the dinode, but as soon as we
	   do, gfs2_create_inode can create another inode at the same block
	   location and try to set gl_object again. We clear gl_object here so
	   that subsequent inode creates don't see an old gl_object. */
	glock_clear_object(ip->i_gl, ip);
	error = gfs2_dinode_dealloc(ip);
	gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
	goto out_unlock;

out_truncate:
	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_EVICT_INODE);
	metamapping = gfs2_glock2aspace(ip->i_gl);
	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
		filemap_fdatawrite(metamapping);
		filemap_fdatawait(metamapping);
	}
	write_inode_now(inode, 1);
	gfs2_ail_flush(ip->i_gl, 0);

	error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_unlock;
	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(metamapping, 0);
	gfs2_trans_end(sdp);

out_unlock:
	if (gfs2_rs_active(&ip->i_res))
		gfs2_rs_deltree(&ip->i_res);

	if (gfs2_holder_initialized(&gh)) {
		glock_clear_object(ip->i_gl, ip);
		gfs2_glock_dq_uninit(&gh);
	}
	if (error && error != GLR_TRYFAILED && error != -EROFS)
		fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
out:
	truncate_inode_pages_final(&inode->i_data);
	if (ip->i_qadata)
		gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
	gfs2_rs_delete(ip, NULL);
	gfs2_ordered_del_inode(ip);
	clear_inode(inode);
	gfs2_dir_hash_inval(ip);
	if (ip->i_gl) {
		glock_clear_object(ip->i_gl, ip);
		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
		gfs2_glock_add_to_lru(ip->i_gl);
		gfs2_glock_put_eventually(ip->i_gl);
		ip->i_gl = NULL;
	}
	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		glock_clear_object(gl, ip);
		if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
			ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
			gfs2_glock_dq(&ip->i_iopen_gh);
		}
		gfs2_glock_hold(gl);
		gfs2_holder_uninit(&ip->i_iopen_gh);
		gfs2_glock_put_eventually(gl);
	}
}

static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
	if (!ip)
		return NULL;
	ip->i_flags = 0;
	ip->i_gl = NULL;
	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
	memset(&ip->i_res, 0, sizeof(ip->i_res));
	RB_CLEAR_NODE(&ip->i_res.rs_node);
	ip->i_rahead = 0;
	return &ip->i_inode;
}

static void gfs2_free_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
}

const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.free_inode		= gfs2_free_inode,
	.write_inode		= gfs2_write_inode,
	.dirty_inode		= gfs2_dirty_inode,
	.evict_inode		= gfs2_evict_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_super		= gfs2_freeze,
	.thaw_super		= gfs2_unfreeze,
	.statfs			= gfs2_statfs,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};
1519