// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/kernel.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "xattr.h"
#include "lops.h"

enum dinode_demise {
	SHOULD_DELETE_DINODE,
	SHOULD_NOT_DELETE_DINODE,
	SHOULD_DEFER_EVICTION,
};

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	sdp->sd_jdesc = NULL;
	while (!list_empty(&list)) {
		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
		gfs2_free_journal_extents(jd);
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		jd->jd_inode = NULL;
		kfree(jd);
	}
}

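/* Find a journal descriptor by journal id; the caller holds sd_jindex_spin. */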
static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid)
			return jd;
	}
	return NULL;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}

int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	u64 size = i_size_read(jd->jd_inode);

	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
		return -EIO;

	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;

	if (gfs2_write_alloc_required(ip, 0, size)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
	if (gfs2_withdrawing_or_withdrawn(sdp))
		return -EIO;

	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
	if (error) {
		gfs2_consist(sdp);
		return error;
	}

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		return -EIO;
	}

	/* Initialize some head of the log stuff */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (!error && gfs2_withdrawing_or_withdrawn(sdp))
		error = -EIO;
	if (!error)
		set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
	return error;
}

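/*
 * Helpers to convert statfs change records between their on-disk
 * (big-endian) and in-core representations.
 */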
void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}

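/*
 * Read the master statfs data (and, unless mounted as a spectator, the
 * local statfs data) from disk into memory.
 */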
int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, sdp->sd_sc_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

	}

	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return 0;
}

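/**
 * gfs2_statfs_change - record an allocation change in the local statfs file
 * @sdp: the filesystem
 * @total: the change in the number of total blocks
 * @free: the change in the number of free blocks
 * @dinodes: the change in the number of dinode blocks
 */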
void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	s64 x, y;
	int need_sync = 0;

	gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, sdp->sd_sc_bh->b_data +
			       sizeof(struct gfs2_dinode));
	if (sdp->sd_args.ar_statfs_percent) {
		x = 100 * l_sc->sc_free;
		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
		if (x >= y || x <= -y)
			need_sync = 1;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	if (need_sync)
		gfs2_wake_up_statfs(sdp);
}

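/* Fold the local statfs changes into the master statfs file and reset them. */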
void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
	gfs2_trans_add_meta(m_ip->i_gl, m_bh);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(sdp->sd_sc_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);
}

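/* Sync the accumulated local statfs changes into the master statfs file. */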
int gfs2_statfs_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		goto out;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out_unlock;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh;

	update_statfs(sdp, m_bh);
	sdp->sd_statfs_force_sync = 0;

	gfs2_trans_end(sdp);

out_bh:
	brelse(m_bh);
out_unlock:
	gfs2_glock_dq_uninit(&gh);
out:
	return error;
}

struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 *
 * Returns: errno
 */

static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error, error2;

	/*
	 * Grab all the journal glocks in SH mode. We are *probably* doing
	 * that to prevent recovery.
	 */

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NOEXP | GL_NOPID,
				   &sdp->sd_freeze_gh);
	if (error)
		goto relock_shared;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh, false);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (!error)
		goto out;  /* success */

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);

relock_shared:
	error2 = gfs2_freeze_lock_shared(sdp);
	gfs2_assert_withdraw(sdp, !error2);

out:
	while (!list_empty(&list)) {
		lfcc = list_first_entry(&list, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	return error;
}

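/* Write the in-core inode fields out to an on-disk (big-endian) dinode buffer. */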
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	const struct inode *inode = &ip->i_inode;
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(inode->i_mode);
	str->di_uid = cpu_to_be32(i_uid_read(inode));
	str->di_gid = cpu_to_be32(i_gid_read(inode));
	str->di_nlink = cpu_to_be32(inode->i_nlink);
	str->di_size = cpu_to_be64(i_size_read(inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
	str->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(inode_get_ctime(inode).tv_sec);

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(inode->i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(inode_get_ctime(inode).tv_nsec);
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
	int ret = 0;
	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));

	if (flush_all)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_WRITE_INODE);
	if (bdi->wb.dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
	else
		filemap_fdatawrite(metamapping);
	if (flush_all)
		ret = filemap_fdatawait(metamapping);
	if (ret)
		mark_inode_dirty_sync(inode);
	else {
		spin_lock(&inode->i_lock);
		if (!(inode->i_flags & I_DIRTY))
			gfs2_ordered_del_inode(ip);
		spin_unlock(&inode->i_lock);
	}
	return ret;
}

/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and freeze glock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
 */

static void gfs2_dirty_inode(struct inode *inode, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int need_unlock = 0;
	int need_endtrans = 0;
	int ret;

	if (unlikely(!ip->i_gl)) {
		/* This can only happen during incomplete inode creation. */
		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
		return;
	}

	if (gfs2_withdrawing_or_withdrawn(sdp))
		return;
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret) {
			fs_err(sdp, "dirty_inode: glock %d\n", ret);
			gfs2_dump_glock(NULL, ip->i_gl, true);
			return;
		}
		need_unlock = 1;
	} else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
		return;

	if (current->journal_info == NULL) {
		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
		if (ret) {
			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
			goto out;
		}
		need_endtrans = 1;
	}

	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		gfs2_trans_add_meta(ip->i_gl, bh);
		gfs2_dinode_out(ip, bh->b_data);
		brelse(bh);
	}

	if (need_endtrans)
		gfs2_trans_end(sdp);
out:
	if (need_unlock)
		gfs2_glock_dq_uninit(&gh);
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 */

void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	if (!test_bit(SDF_KILL, &sdp->sd_flags))
		gfs2_flush_delete_work(sdp);

	gfs2_destroy_threads(sdp);

	if (log_write_allowed) {
		gfs2_quota_sync(sdp->sd_vfs, 0);
		gfs2_statfs_sync(sdp->sd_vfs, 0);

		/* We do two log flushes here. The first one commits dirty inodes
		 * and rgrps to the journal, but queues up revokes to the ail list.
		 * The second flush writes out and removes the revokes.
		 *
		 * The first must be done before the FLUSH_SHUTDOWN code
		 * clears the LIVE flag, otherwise it will not be able to start
		 * a transaction to write its revokes, and the error will cause
		 * a withdraw of the file system. */
		gfs2_log_flush(sdp, NULL, GFS2_LFC_MAKE_FS_RO);
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			       GFS2_LFC_MAKE_FS_RO);
		wait_event_timeout(sdp->sd_log_waitq,
				   gfs2_log_is_empty(sdp),
				   HZ * 5);
		gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
	}
	gfs2_quota_cleanup(sdp);
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 *
 */

static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_jdesc *jd;

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
	smp_mb();

	/* Wait on outstanding recovery */
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	if (!sb_rdonly(sb))
		gfs2_make_fs_ro(sdp);
	else {
		if (gfs2_withdrawing_or_withdrawn(sdp))
			gfs2_destroy_threads(sdp);

		gfs2_quota_cleanup(sdp);
	}

	WARN_ON(gfs2_withdrawing(sdp));

	/* At this point, we're through modifying the disk */

	/* Release stuff */

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);

	iput(sdp->sd_jindex);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_freeze_gl);

	if (!sdp->sd_args.ar_spectator) {
		if (gfs2_holder_initialized(&sdp->sd_journal_gh))
			gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
			gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		brelse(sdp->sd_sc_bh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		free_local_statfs_inodes(sdp);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/* Take apart glock structures and buffer lists */
	gfs2_gl_hash_clear(sdp);
	truncate_inode_pages_final(&sdp->sd_aspace);
	gfs2_delete_debugfs_file(sdp);

	gfs2_sys_fs_del(sdp);
	free_sbd(sdp);
}

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion
 *
 * Flushes the log to disk.
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	gfs2_quota_sync(sb, -1);
	if (wait)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_SYNC_FS);
	return sdp->sd_log_error;
}

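/* Freeze the filesystem at the VFS level and flush the journal. */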
static int gfs2_freeze_locally(struct gfs2_sbd *sdp)
{
	struct super_block *sb = sdp->sd_vfs;
	int error;

	error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
	if (error)
		return error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
			       GFS2_LFC_FREEZE_GO_SYNC);
		if (gfs2_withdrawing_or_withdrawn(sdp)) {
			error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
			if (error)
				return error;
			return -EIO;
		}
	}
	return 0;
}

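/* Reacquire the freeze glock in shared mode and thaw the filesystem at the VFS level. */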
static int gfs2_do_thaw(struct gfs2_sbd *sdp)
{
	struct super_block *sb = sdp->sd_vfs;
	int error;

	error = gfs2_freeze_lock_shared(sdp);
	if (error)
		goto fail;
	error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
	if (!error)
		return 0;

fail:
	fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n", error);
	gfs2_assert_withdraw(sdp, 0);
	return error;
}

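/*
 * Work function for sd_freeze_work: freeze locally so the journal gets
 * flushed, drop the freeze glock, then thaw the VFS level again.
 */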
void gfs2_freeze_func(struct work_struct *work)
{
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
	struct super_block *sb = sdp->sd_vfs;
	int error;

	mutex_lock(&sdp->sd_freeze_mutex);
	error = -EBUSY;
	if (test_bit(SDF_FROZEN, &sdp->sd_flags))
		goto freeze_failed;

	error = gfs2_freeze_locally(sdp);
	if (error)
		goto freeze_failed;

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
	set_bit(SDF_FROZEN, &sdp->sd_flags);

	error = gfs2_do_thaw(sdp);
	if (error)
		goto out;

	clear_bit(SDF_FROZEN, &sdp->sd_flags);
	goto out;

freeze_failed:
	fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", error);

out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	deactivate_super(sb);
}

/**
 * gfs2_freeze_super - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;

	if (!mutex_trylock(&sdp->sd_freeze_mutex))
		return -EBUSY;
	error = -EBUSY;
	if (test_bit(SDF_FROZEN, &sdp->sd_flags))
		goto out;

	for (;;) {
		error = gfs2_freeze_locally(sdp);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			goto out;
		}

		error = gfs2_lock_fs_check_clean(sdp);
		if (!error)
			break;	/* success */

		error = gfs2_do_thaw(sdp);
		if (error)
			goto out;

		if (error == -EBUSY)
			fs_err(sdp, "waiting for recovery before freeze\n");
		else if (error == -EIO) {
			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
			       "to recovery error.\n");
			goto out;
		} else {
			fs_err(sdp, "error freezing FS: %d\n", error);
		}
		fs_err(sdp, "retrying...\n");
		msleep(1000);
	}

out:
	if (!error) {
		set_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
		set_bit(SDF_FROZEN, &sdp->sd_flags);
	}
	mutex_unlock(&sdp->sd_freeze_mutex);
	return error;
}

/**
 * gfs2_thaw_super - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;

	if (!mutex_trylock(&sdp->sd_freeze_mutex))
		return -EBUSY;
	error = -EINVAL;
	if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
		goto out;

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);

	error = gfs2_do_thaw(sdp);

	if (!error) {
		clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
		clear_bit(SDF_FROZEN, &sdp->sd_flags);
	}
out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	return error;
}

void gfs2_thaw_freeze_initiator(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
		goto out;

	gfs2_freeze_unlock(&sdp->sd_freeze_gh);

out:
	mutex_unlock(&sdp->sd_freeze_mutex);
}

/**
 * statfs_slow_fill - fill in the sg for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success, -ESTALE if the LVB is invalid
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;
	for (x = 0; x < slots; x++)
		gfs2_holder_mark_uninitialized(gha + x);

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error) {
						struct gfs2_rgrpd *rgd =
							gfs2_glock2rgrp(gh->gh_gl);

						error = statfs_slow_fill(rgd, sc);
					}
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gfs2_holder_initialized(gh))
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	kfree(gha);
	return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */

static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}

/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The name of the link
 * @buf: The buffer
 *
 * Returns: 0 on success or error code
 */

static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
	else
		error = gfs2_statfs_i(sdp, &sc);

	if (error)
		return error;

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;

	return 0;
}

/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */

static int gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (inode->i_nlink &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			clear_nlink(inode);
	}

	/*
	 * When an inode's link count has dropped to zero while we are under
	 * memory pressure, defer deleting the inode to the delete workqueue.
	 * This avoids calling into DLM under memory pressure, which can
	 * deadlock.
	 */
	if (!inode->i_nlink &&
	    unlikely(current->flags & PF_MEMALLOC) &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		gfs2_glock_hold(gl);
		if (!gfs2_queue_try_to_evict(gl))
			gfs2_glock_queue_put(gl);
		return 0;
	}

	/*
	 * No longer cache inodes when trying to evict them all.
	 */
	if (test_bit(SDF_EVICTING, &sdp->sd_flags))
		return 1;

	return generic_drop_inode(inode);
}

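/* Return 1 if @d2 is @d1 or one of @d1's ancestors. */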
static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
	do {
		if (d1 == d2)
			return 1;
		d1 = d1->d_parent;
	} while (!IS_ROOT(d1));
	return 0;
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @root: root of this (sub)tree
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;

	spin_lock(&sdp->sd_tune.gt_spin);
	logd_secs = sdp->sd_tune.gt_logd_secs;
	quota_quantum = sdp->sd_tune.gt_quota_quantum;
	statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
	statfs_slow = sdp->sd_tune.gt_statfs_slow;
	spin_unlock(&sdp->sd_tune.gt_spin);

	if (is_ancestor(root, sdp->sd_master_dir))
		seq_puts(s, ",meta");
	if (args->ar_lockproto[0])
		seq_show_option(s, "lockproto", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_show_option(s, "locktable", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_show_option(s, "hostdata", args->ar_hostdata);
	if (args->ar_spectator)
		seq_puts(s, ",spectator");
	if (args->ar_localflocks)
		seq_puts(s, ",localflocks");
	if (args->ar_debug)
		seq_puts(s, ",debug");
	if (args->ar_posix_acl)
		seq_puts(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		case GFS2_QUOTA_QUIET:
			state = "quiet";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_puts(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_puts(s, ",discard");
	if (logd_secs != 30)
		seq_printf(s, ",commit=%d", logd_secs);
	if (statfs_quantum != 30)
		seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
	else if (statfs_slow)
		seq_puts(s, ",statfs_quantum=0");
	if (quota_quantum != 60)
		seq_printf(s, ",quota_quantum=%d", quota_quantum);
	if (args->ar_statfs_percent)
		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
		const char *state;

		switch (args->ar_errors) {
		case GFS2_ERRORS_WITHDRAW:
			state = "withdraw";
			break;
		case GFS2_ERRORS_PANIC:
			state = "panic";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",errors=%s", state);
	}
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		seq_puts(s, ",nobarrier");
	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
		seq_puts(s, ",demote_interface_used");
	if (args->ar_rgrplvb)
		seq_puts(s, ",rgrplvb");
	if (args->ar_loccookie)
		seq_puts(s, ",loccookie");
	return 0;
}

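/* Release the page cache pages of an inode that is being deallocated. */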
static void gfs2_final_release_pages(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_glock *gl = ip->i_gl;

	if (unlikely(!gl)) {
		/* This can only happen during incomplete inode creation. */
		BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
		return;
	}

	truncate_inode_pages(gfs2_glock2aspace(gl), 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (atomic_read(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
	}
}

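/* Deallocate the dinode block itself once all of the inode's data has been freed. */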
static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder gh;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	gfs2_rindex_update(sdp);

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_qs;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NODE_SCOPE, &gh);
	if (error)
		goto out_qs;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
				 sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_rg_gunlock;

	gfs2_free_di(rgd, ip);

	gfs2_final_release_pages(ip);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&gh);
out_qs:
	gfs2_quota_unhold(ip);
	return error;
}

/**
 * gfs2_glock_put_eventually
 * @gl: The glock to put
 *
 * When under memory pressure, trigger a deferred glock put to make sure we
 * won't call into DLM and deadlock. Otherwise, put the glock directly.
 */

static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
	if (current->flags & PF_MEMALLOC)
		gfs2_glock_queue_put(gl);
	else
		gfs2_glock_put(gl);
}

static bool gfs2_upgrade_iopen_glock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder *gh = &ip->i_iopen_gh;
	long timeout = 5 * HZ;
	int error;

	gh->gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_wait(gh);

	/*
	 * If there are no other lock holders, we will immediately get
	 * exclusive access to the iopen glock here.
	 *
	 * Otherwise, the other nodes holding the lock will be notified about
	 * our locking request. If they do not have the inode open, they are
	 * expected to evict the cached inode and release the lock, allowing us
	 * to proceed.
	 *
	 * Otherwise, if they cannot evict the inode, they are expected to poke
	 * the inode glock (note: not the iopen glock). We will notice that
	 * and stop waiting for the iopen glock immediately. The other node(s)
	 * are then expected to take care of deleting the inode when they no
	 * longer use it.
	 *
	 * As a last resort, if another node keeps holding the iopen glock
	 * without showing any activity on the inode glock, we will eventually
	 * time out and fail the iopen glock upgrade.
	 *
	 * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
	 * locking request as an optimization to notify lock holders as soon as
	 * possible. Without that flag, they'd be notified implicitly by the
	 * second locking request.
	 */

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error != GLR_TRYFAILED)
		return !error;

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error)
		return false;

	timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
		timeout);
	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
		gfs2_glock_dq(gh);
		return false;
	}
	return gfs2_glock_holder_ready(gh) == 0;
}

/**
 * evict_should_delete - determine whether the inode is eligible for deletion
 * @inode: The inode to evict
 * @gh: The glock holder structure
 *
 * This function determines whether the evicted inode is eligible to be deleted
 * and locks the inode glock.
 *
 * Returns: the fate of the dinode
 */
static enum dinode_demise evict_should_delete(struct inode *inode,
					      struct gfs2_holder *gh)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int ret;

	if (unlikely(test_bit(GIF_ALLOC_FAILED, &ip->i_flags)))
		goto should_delete;

	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
		return SHOULD_DEFER_EVICTION;

	/* Deletes should never happen under memory pressure anymore. */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		return SHOULD_DEFER_EVICTION;

	/* Must not read inode block until block type has been verified */
	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
	if (unlikely(ret)) {
		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		return SHOULD_DEFER_EVICTION;
	}

	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
		return SHOULD_NOT_DELETE_DINODE;
	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
	if (ret)
		return SHOULD_NOT_DELETE_DINODE;

	ret = gfs2_instantiate(gh);
	if (ret)
		return SHOULD_NOT_DELETE_DINODE;

	/*
	 * The inode may have been recreated in the meantime.
	 */
	if (inode->i_nlink)
		return SHOULD_NOT_DELETE_DINODE;

should_delete:
	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
		if (!gfs2_upgrade_iopen_glock(inode)) {
			gfs2_holder_uninit(&ip->i_iopen_gh);
			return SHOULD_NOT_DELETE_DINODE;
		}
	}
	return SHOULD_DELETE_DINODE;
}

/**
 * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
 * @inode: The inode to evict
 */
static int evict_unlinked_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		ret = gfs2_dir_exhash_dealloc(ip);
		if (ret)
			goto out;
	}

	if (ip->i_eattr) {
		ret = gfs2_ea_dealloc(ip);
		if (ret)
			goto out;
	}

	if (!gfs2_is_stuffed(ip)) {
		ret = gfs2_file_dealloc(ip);
		if (ret)
			goto out;
	}

	/*
	 * As soon as we clear the bitmap for the dinode, gfs2_create_inode()
	 * can get called to recreate it, or even gfs2_inode_lookup() if the
	 * inode was recreated on another node in the meantime.
	 *
	 * However, inserting the new inode into the inode hash table will not
	 * succeed until the old inode is removed, and that only happens after
	 * ->evict_inode() returns. The new inode is attached to its inode and
	 * iopen glocks after inserting it into the inode hash table, so at
	 * that point we can be sure that both glocks are unused.
	 */

	ret = gfs2_dinode_dealloc(ip);
	if (!ret && ip->i_gl)
		gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);

out:
	return ret;
}

/*
 * evict_linked_inode - evict an inode whose dinode has not been unlinked
 * @inode: The inode to evict
 */
static int evict_linked_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct address_space *metamapping;
	int ret;

	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_EVICT_INODE);
	metamapping = gfs2_glock2aspace(ip->i_gl);
	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
		filemap_fdatawrite(metamapping);
		filemap_fdatawait(metamapping);
	}
	write_inode_now(inode, 1);
	gfs2_ail_flush(ip->i_gl, 0);

	ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (ret)
		return ret;

	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(metamapping, 0);
	gfs2_trans_end(sdp);
	return 0;
}

/**
 * gfs2_evict_inode - Remove an inode from cache
 * @inode: The inode to evict
 *
 * There are three cases to consider:
 * 1. i_nlink == 0, we are final opener (and must deallocate)
 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
 * 3. i_nlink > 0
 *
 * If the fs is read only, then we have to treat all cases as per #3
 * since we are unable to do any deallocation. The inode will be
 * deallocated by the next read/write node to attempt an allocation
 * in the same resource group.
 *
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */

static void gfs2_evict_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (inode->i_nlink || sb_rdonly(sb) || !ip->i_no_addr)
		goto out;

	/*
	 * In case of an incomplete mount, gfs2_evict_inode() may be called for
	 * system files without having an active journal to write to. In that
	 * case, skip the filesystem evict.
	 */
	if (!sdp->sd_jdesc)
		goto out;

	gfs2_holder_mark_uninitialized(&gh);
	ret = evict_should_delete(inode, &gh);
	if (ret == SHOULD_DEFER_EVICTION)
		goto out;
	if (ret == SHOULD_DELETE_DINODE)
		ret = evict_unlinked_inode(inode);
	else
		ret = evict_linked_inode(inode);

	if (gfs2_rs_active(&ip->i_res))
		gfs2_rs_deltree(&ip->i_res);

	if (gfs2_holder_initialized(&gh))
		gfs2_glock_dq_uninit(&gh);
	if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
		fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
out:
	truncate_inode_pages_final(&inode->i_data);
	if (ip->i_qadata)
		gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
	gfs2_rs_deltree(&ip->i_res);
	gfs2_ordered_del_inode(ip);
	clear_inode(inode);
	gfs2_dir_hash_inval(ip);
	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		glock_clear_object(gl, ip);
		gfs2_glock_hold(gl);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		gfs2_glock_put_eventually(gl);
	}
	if (ip->i_gl) {
		glock_clear_object(ip->i_gl, ip);
		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
		gfs2_glock_add_to_lru(ip->i_gl);
		gfs2_glock_put_eventually(ip->i_gl);
		rcu_assign_pointer(ip->i_gl, NULL);
	}
}

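/* Allocate and minimally initialize a new in-core GFS2 inode. */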
static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = alloc_inode_sb(sb, gfs2_inode_cachep, GFP_KERNEL);
	if (!ip)
		return NULL;
	ip->i_no_addr = 0;
	ip->i_flags = 0;
	ip->i_gl = NULL;
	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
	memset(&ip->i_res, 0, sizeof(ip->i_res));
	RB_CLEAR_NODE(&ip->i_res.rs_node);
	ip->i_rahead = 0;
	return &ip->i_inode;
}

static void gfs2_free_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
}

void free_local_statfs_inodes(struct gfs2_sbd *sdp)
{
	struct local_statfs_inode *lsi, *safe;

	/* Run through the statfs inodes list to iput and free memory */
	list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
			sdp->sd_sc_inode = NULL; /* belongs to this node */
		if (lsi->si_sc_inode)
			iput(lsi->si_sc_inode);
		list_del(&lsi->si_list);
		kfree(lsi);
	}
}

struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
				      unsigned int index)
{
	struct local_statfs_inode *lsi;

	/* Return the local (per node) statfs inode in the
	 * sdp->sd_sc_inodes_list corresponding to the 'index'. */
	list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == index)
			return lsi->si_sc_inode;
	}
	return NULL;
}

const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.free_inode		= gfs2_free_inode,
	.write_inode		= gfs2_write_inode,
	.dirty_inode		= gfs2_dirty_inode,
	.evict_inode		= gfs2_evict_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_super		= gfs2_freeze_super,
	.thaw_super		= gfs2_thaw_super,
	.statfs			= gfs2_statfs,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};