// SPDX-License-Identifier: GPL-2.0+
/*
 * Dummy inodes to buffer blocks for garbage collection
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi.
 * Revised by Ryusuke Konishi.
 *
 */
/*
 * This file implements the cache of on-disk blocks to be moved in
 * garbage collection.  The disk blocks are held by dummy inodes
 * (called gcinodes), and this file provides a lookup function for the
 * dummy inodes and a read function for their buffers.
 *
 * Buffers and pages held by the dummy inodes are released each time
 * after they are copied to a new log.  Dirty blocks made on the
 * current generation and the blocks to be moved by GC never overlap,
 * because the dirty blocks form a new generation; they must instead
 * be written out individually.
 */
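
/*
 * The functions below are typically used in this order:
 * nilfs_init_gcinode() prepares a dummy inode,
 * nilfs_gccache_submit_read_data() and nilfs_gccache_submit_read_node()
 * pull the blocks to be moved into its page caches,
 * nilfs_gccache_wait_and_mark_dirty() waits for the reads and redirties
 * the buffers so that they are written into a new log, and
 * nilfs_remove_all_gcinodes() discards the dummy inodes afterwards.
 */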

#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include "nilfs.h"
#include "btree.h"
#include "btnode.h"
#include "page.h"
#include "mdt.h"
#include "dat.h"
#include "ifile.h"

/*
 * nilfs_gccache_submit_read_data() - add data buffer and submit read request
 * @inode - gc inode
 * @blkoff - dummy offset treated as the key for the page cache
 * @pbn - physical block number of the block
 * @vbn - virtual block number of the block, 0 for non-virtual block
 * @out_bh - indirect pointer to a buffer_head struct to receive the results
 *
 * Description: nilfs_gccache_submit_read_data() registers the data buffer
 * specified by @pbn to the GC pagecache with the key @blkoff.
 * This function sets @vbn (@pbn if @vbn is zero) in b_blocknr of the buffer.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The block specified with @pbn does not exist.
 */
int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
                                   sector_t pbn, __u64 vbn,
                                   struct buffer_head **out_bh)
{
        struct buffer_head *bh;
        int err;

        bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
        if (unlikely(!bh))
                return -ENOMEM;

        if (buffer_uptodate(bh))
                goto out;

        if (pbn == 0) {
                struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

                err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn);
                if (unlikely(err)) /* -EIO, -ENOMEM, -ENOENT */
                        goto failed;
        }

        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                goto out;
        }

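        /*
         * Map the buffer straight to the underlying device so that the
         * read submitted below targets the physical block @pbn.
         */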
        if (!buffer_mapped(bh)) {
                bh->b_bdev = inode->i_sb->s_bdev;
                set_buffer_mapped(bh);
        }
        bh->b_blocknr = pbn;
        bh->b_end_io = end_buffer_read_sync;
        get_bh(bh);
        submit_bh(REQ_OP_READ, bh);
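        /*
         * Once the read is in flight, record the virtual block number in
         * b_blocknr, as noted in the function comment above.
         */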
        if (vbn)
                bh->b_blocknr = vbn;
 out:
        err = 0;
        *out_bh = bh;

 failed:
        unlock_page(bh->b_page);
        put_page(bh->b_page);
        if (unlikely(err))
                brelse(bh);
        return err;
}
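
/*
 * Hypothetical illustration (not part of nilfs2): read one data block into
 * the GC cache and redirty it once the read completes.  The helper name and
 * its error handling are assumptions made for this sketch only.
 */
static int __maybe_unused nilfs_gccache_example_copy_data(struct inode *inode,
                                                          sector_t blkoff,
                                                          sector_t pbn,
                                                          __u64 vbn)
{
        struct buffer_head *bh;
        int err;

        err = nilfs_gccache_submit_read_data(inode, blkoff, pbn, vbn, &bh);
        if (err)
                return err;     /* -EIO, -ENOMEM, or -ENOENT */

        err = nilfs_gccache_wait_and_mark_dirty(bh);
        brelse(bh);
        return err;
}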

/*
 * nilfs_gccache_submit_read_node() - add node buffer and submit read request
 * @inode - gc inode
 * @pbn - physical block number for the block
 * @vbn - virtual block number for the block
 * @out_bh - indirect pointer to a buffer_head struct to receive the results
 *
 * Description: nilfs_gccache_submit_read_node() registers the node buffer
 * specified by @vbn to the GC pagecache. @pbn can be supplied by the
 * caller to avoid translation of the disk block address.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
                                   __u64 vbn, struct buffer_head **out_bh)
{
        struct inode *btnc_inode = NILFS_I(inode)->i_assoc_inode;
        int ret;

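        /*
         * Node blocks live in the btree node cache attached to the gcinode;
         * the buffer is keyed by @vbn when it is nonzero, by @pbn otherwise.
         */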
        ret = nilfs_btnode_submit_block(btnc_inode->i_mapping, vbn ? : pbn, pbn,
                                        REQ_OP_READ, out_bh, &pbn);
        if (ret == -EEXIST) /* internal code (cache hit) */
                ret = 0;
        return ret;
}
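
/*
 * Hypothetical illustration (not part of nilfs2): the same pairing for a
 * btree node block; the helper name is an assumption for this sketch only.
 */
static int __maybe_unused nilfs_gccache_example_copy_node(struct inode *inode,
                                                          sector_t pbn,
                                                          __u64 vbn)
{
        struct buffer_head *bh;
        int err;

        err = nilfs_gccache_submit_read_node(inode, pbn, vbn, &bh);
        if (err)
                return err;

        err = nilfs_gccache_wait_and_mark_dirty(bh);
        brelse(bh);
        return err;
}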
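/*
 * nilfs_gccache_wait_and_mark_dirty() - wait for a GC buffer read and
 * redirty it
 * @bh - buffer head submitted by one of the gccache read functions
 *
 * Description: nilfs_gccache_wait_and_mark_dirty() waits for the read on
 * @bh to complete and, if the block arrived intact, marks the buffer dirty
 * so that it is written into a new log.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error, or a broken btree node block was read.
 *
 * %-EEXIST - The buffer is already dirty.
 */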
int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
{
        wait_on_buffer(bh);
        if (!buffer_uptodate(bh)) {
                struct inode *inode = bh->b_folio->mapping->host;

                nilfs_err(inode->i_sb,
                          "I/O error reading %s block for GC (ino=%lu, vblocknr=%llu)",
                          buffer_nilfs_node(bh) ? "node" : "data",
                          inode->i_ino, (unsigned long long)bh->b_blocknr);
                return -EIO;
        }
        if (buffer_dirty(bh))
                return -EEXIST;

        if (buffer_nilfs_node(bh) && nilfs_btree_broken_node_block(bh)) {
                clear_buffer_uptodate(bh);
                return -EIO;
        }
        mark_buffer_dirty(bh);
        return 0;
}
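/*
 * nilfs_init_gcinode() - initialize a dummy inode for GC block buffering
 * @inode - gc inode
 *
 * Description: nilfs_init_gcinode() sets up @inode as a regular-file dummy
 * inode with GFP_NOFS page allocations and a GC-mode bmap, and attaches the
 * btree node cache used to buffer node blocks.
 *
 * Return Value: 0 on success, or a negative error code if attaching the
 * btree node cache fails.
 */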
int nilfs_init_gcinode(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);

        inode->i_mode = S_IFREG;
        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
        inode->i_mapping->a_ops = &empty_aops;

        ii->i_flags = 0;
        nilfs_bmap_init_gc(ii->i_bmap);

        return nilfs_attach_btree_node_cache(inode);
}

/**
 * nilfs_remove_all_gcinodes() - remove all unprocessed gc inodes
 * @nilfs: NILFS filesystem instance
 */
void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
{
        struct list_head *head = &nilfs->ns_gc_inodes;
        struct nilfs_inode_info *ii;

        while (!list_empty(head)) {
                ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
                list_del_init(&ii->i_dirty);
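                /*
                 * Discard buffered data pages and btree node buffers,
                 * then drop the gcinode itself.
                 */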
                truncate_inode_pages(&ii->vfs_inode.i_data, 0);
                nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
                iput(&ii->vfs_inode);
        }
}