// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs iostat support
 *
 * Copyright 2021 Google LLC
 * Author: Daeho Jeong <daehojeong@google.com>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/seq_file.h>

#include "f2fs.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_IOSTAT_CTXS	128
static struct kmem_cache *bio_iostat_ctx_cache;
static mempool_t *bio_iostat_ctx_pool;

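/*
 * Dump the accumulated per-type I/O byte counters through the seq_file
 * interface, grouped into [WRITE], [READ] and [OTHER] sections. Prints
 * nothing unless iostat has been enabled for this superblock.
 */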
int __maybe_unused iostat_info_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	time64_t now = ktime_get_real_seconds();

	if (!sbi->iostat_enable)
		return 0;

	seq_printf(seq, "time:		%-16llu\n", now);

	/* print app write IOs */
	seq_puts(seq, "[WRITE]\n");
	seq_printf(seq, "app buffered data:	%-16llu\n",
				sbi->rw_iostat[APP_BUFFERED_IO]);
	seq_printf(seq, "app direct data:	%-16llu\n",
				sbi->rw_iostat[APP_DIRECT_IO]);
	seq_printf(seq, "app mapped data:	%-16llu\n",
				sbi->rw_iostat[APP_MAPPED_IO]);
	seq_printf(seq, "app buffered cdata:	%-16llu\n",
				sbi->rw_iostat[APP_BUFFERED_CDATA_IO]);
	seq_printf(seq, "app mapped cdata:	%-16llu\n",
				sbi->rw_iostat[APP_MAPPED_CDATA_IO]);

	/* print fs write IOs */
	seq_printf(seq, "fs data:		%-16llu\n",
				sbi->rw_iostat[FS_DATA_IO]);
	seq_printf(seq, "fs cdata:		%-16llu\n",
				sbi->rw_iostat[FS_CDATA_IO]);
	seq_printf(seq, "fs node:		%-16llu\n",
				sbi->rw_iostat[FS_NODE_IO]);
	seq_printf(seq, "fs meta:		%-16llu\n",
				sbi->rw_iostat[FS_META_IO]);
	seq_printf(seq, "fs gc data:		%-16llu\n",
				sbi->rw_iostat[FS_GC_DATA_IO]);
	seq_printf(seq, "fs gc node:		%-16llu\n",
				sbi->rw_iostat[FS_GC_NODE_IO]);
	seq_printf(seq, "fs cp data:		%-16llu\n",
				sbi->rw_iostat[FS_CP_DATA_IO]);
	seq_printf(seq, "fs cp node:		%-16llu\n",
				sbi->rw_iostat[FS_CP_NODE_IO]);
	seq_printf(seq, "fs cp meta:		%-16llu\n",
				sbi->rw_iostat[FS_CP_META_IO]);

	/* print app read IOs */
	seq_puts(seq, "[READ]\n");
	seq_printf(seq, "app buffered data:	%-16llu\n",
				sbi->rw_iostat[APP_BUFFERED_READ_IO]);
	seq_printf(seq, "app direct data:	%-16llu\n",
				sbi->rw_iostat[APP_DIRECT_READ_IO]);
	seq_printf(seq, "app mapped data:	%-16llu\n",
				sbi->rw_iostat[APP_MAPPED_READ_IO]);
	seq_printf(seq, "app buffered cdata:	%-16llu\n",
				sbi->rw_iostat[APP_BUFFERED_CDATA_READ_IO]);
	seq_printf(seq, "app mapped cdata:	%-16llu\n",
				sbi->rw_iostat[APP_MAPPED_CDATA_READ_IO]);

	/* print fs read IOs */
	seq_printf(seq, "fs data:		%-16llu\n",
				sbi->rw_iostat[FS_DATA_READ_IO]);
	seq_printf(seq, "fs gc data:		%-16llu\n",
				sbi->rw_iostat[FS_GDATA_READ_IO]);
	seq_printf(seq, "fs cdata:		%-16llu\n",
				sbi->rw_iostat[FS_CDATA_READ_IO]);
	seq_printf(seq, "fs node:		%-16llu\n",
				sbi->rw_iostat[FS_NODE_READ_IO]);
	seq_printf(seq, "fs meta:		%-16llu\n",
				sbi->rw_iostat[FS_META_READ_IO]);

	/* print other IOs */
	seq_puts(seq, "[OTHER]\n");
	seq_printf(seq, "fs discard:		%-16llu\n",
				sbi->rw_iostat[FS_DISCARD]);

	return 0;
}

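/*
 * Snapshot and reset the latency table under iostat_lat_lock, converting
 * the summed and peak values from jiffies to milliseconds and computing
 * the average per bio, then emit the snapshot via the
 * f2fs_iostat_latency tracepoint outside the lock.
 */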
static inline void __record_iostat_latency(struct f2fs_sb_info *sbi)
{
	int io, idx;
	unsigned int cnt;
	struct f2fs_iostat_latency iostat_lat[MAX_IO_TYPE][NR_PAGE_TYPE];
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	unsigned long flags;

	spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
	for (idx = 0; idx < MAX_IO_TYPE; idx++) {
		for (io = 0; io < NR_PAGE_TYPE; io++) {
			cnt = io_lat->bio_cnt[idx][io];
			iostat_lat[idx][io].peak_lat =
			   jiffies_to_msecs(io_lat->peak_lat[idx][io]);
			iostat_lat[idx][io].cnt = cnt;
			iostat_lat[idx][io].avg_lat = cnt ?
			   jiffies_to_msecs(io_lat->sum_lat[idx][io]) / cnt : 0;
			io_lat->sum_lat[idx][io] = 0;
			io_lat->peak_lat[idx][io] = 0;
			io_lat->bio_cnt[idx][io] = 0;
		}
	}
	spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);

	trace_f2fs_iostat_latency(sbi, iostat_lat);
}

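/*
 * At most once per iostat_period_ms, compute the per-type byte deltas
 * since the previous period, publish them via the f2fs_iostat tracepoint,
 * and flush the latency statistics. The lockless time check is repeated
 * under iostat_lock so concurrent callers emit only one sample per period.
 */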
static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi)
{
	unsigned long long iostat_diff[NR_IO_TYPE];
	int i;
	unsigned long flags;

	if (time_is_after_jiffies(sbi->iostat_next_period))
		return;

	/* Need to double-check under the lock */
	spin_lock_irqsave(&sbi->iostat_lock, flags);
	if (time_is_after_jiffies(sbi->iostat_next_period)) {
		spin_unlock_irqrestore(&sbi->iostat_lock, flags);
		return;
	}
	sbi->iostat_next_period = jiffies +
				msecs_to_jiffies(sbi->iostat_period_ms);

	for (i = 0; i < NR_IO_TYPE; i++) {
		iostat_diff[i] = sbi->rw_iostat[i] -
				sbi->prev_rw_iostat[i];
		sbi->prev_rw_iostat[i] = sbi->rw_iostat[i];
	}
	spin_unlock_irqrestore(&sbi->iostat_lock, flags);

	trace_f2fs_iostat(sbi, iostat_diff);

	__record_iostat_latency(sbi);
}

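/* Zero every byte counter and the whole latency table. */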
void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
{
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	int i;

	spin_lock_irq(&sbi->iostat_lock);
	for (i = 0; i < NR_IO_TYPE; i++) {
		sbi->rw_iostat[i] = 0;
		sbi->prev_rw_iostat[i] = 0;
	}
	spin_unlock_irq(&sbi->iostat_lock);

	spin_lock_irq(&sbi->iostat_lat_lock);
	memset(io_lat, 0, sizeof(struct iostat_lat_info));
	spin_unlock_irq(&sbi->iostat_lat_lock);
}

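/*
 * Account @io_bytes to the counter for @type. Buffered and direct
 * application I/O is also folded into the APP_WRITE_IO/APP_READ_IO
 * aggregates, and I/O on compressed files is mirrored into the
 * corresponding CDATA counters.
 */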
void f2fs_update_iostat(struct f2fs_sb_info *sbi, struct inode *inode,
			enum iostat_type type, unsigned long long io_bytes)
{
	unsigned long flags;

	if (!sbi->iostat_enable)
		return;

	spin_lock_irqsave(&sbi->iostat_lock, flags);
	sbi->rw_iostat[type] += io_bytes;

	if (type == APP_BUFFERED_IO || type == APP_DIRECT_IO)
		sbi->rw_iostat[APP_WRITE_IO] += io_bytes;

	if (type == APP_BUFFERED_READ_IO || type == APP_DIRECT_READ_IO)
		sbi->rw_iostat[APP_READ_IO] += io_bytes;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (inode && f2fs_compressed_file(inode)) {
		if (type == APP_BUFFERED_IO)
			sbi->rw_iostat[APP_BUFFERED_CDATA_IO] += io_bytes;

		if (type == APP_BUFFERED_READ_IO)
			sbi->rw_iostat[APP_BUFFERED_CDATA_READ_IO] += io_bytes;

		if (type == APP_MAPPED_READ_IO)
			sbi->rw_iostat[APP_MAPPED_CDATA_READ_IO] += io_bytes;

		if (type == APP_MAPPED_IO)
			sbi->rw_iostat[APP_MAPPED_CDATA_IO] += io_bytes;

		if (type == FS_DATA_READ_IO)
			sbi->rw_iostat[FS_CDATA_READ_IO] += io_bytes;

		if (type == FS_DATA_IO)
			sbi->rw_iostat[FS_CDATA_IO] += io_bytes;
	}
#endif

	spin_unlock_irqrestore(&sbi->iostat_lock, flags);

	f2fs_record_iostat(sbi);
}

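/*
 * Fold one completed bio into the latency table: classify it as read,
 * sync write or async write, then update the summed, counted and peak
 * latency (in jiffies since submission) for its page type. META_FLUSH
 * and above are accounted as META.
 */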
static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
				int rw, bool is_sync)
{
	unsigned long ts_diff;
	unsigned int iotype = iostat_ctx->type;
	struct f2fs_sb_info *sbi = iostat_ctx->sbi;
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	int idx;
	unsigned long flags;

	if (!sbi->iostat_enable)
		return;

	ts_diff = jiffies - iostat_ctx->submit_ts;
	if (iotype >= META_FLUSH)
		iotype = META;

	if (rw == 0) {
		idx = READ_IO;
	} else {
		if (is_sync)
			idx = WRITE_SYNC_IO;
		else
			idx = WRITE_ASYNC_IO;
	}

	spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
	io_lat->sum_lat[idx][iotype] += ts_diff;
	io_lat->bio_cnt[idx][iotype]++;
	if (ts_diff > io_lat->peak_lat[idx][iotype])
		io_lat->peak_lat[idx][iotype] = ts_diff;
	spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);
}

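/*
 * Bio completion path: restore the ->bi_private value that the iostat
 * context displaced (the post-read context for reads, the sbi for
 * writes), record the bio's latency and return the context to the pool.
 */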
void iostat_update_and_unbind_ctx(struct bio *bio, int rw)
{
	struct bio_iostat_ctx *iostat_ctx = bio->bi_private;
	bool is_sync = bio->bi_opf & REQ_SYNC;

	if (rw == 0)
		bio->bi_private = iostat_ctx->post_read_ctx;
	else
		bio->bi_private = iostat_ctx->sbi;
	__update_iostat_latency(iostat_ctx, rw, is_sync);
	mempool_free(iostat_ctx, bio_iostat_ctx_pool);
}

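/*
 * Interpose a bio_iostat_ctx between @bio and its current private data
 * so that completion can compute latency; submit_ts and type are zeroed
 * here and set later at bio submission time.
 */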
void iostat_alloc_and_bind_ctx(struct f2fs_sb_info *sbi,
		struct bio *bio, struct bio_post_read_ctx *ctx)
{
	struct bio_iostat_ctx *iostat_ctx;

	/* Due to the mempool, this never fails. */
	iostat_ctx = mempool_alloc(bio_iostat_ctx_pool, GFP_NOFS);
	iostat_ctx->sbi = sbi;
	iostat_ctx->submit_ts = 0;
	iostat_ctx->type = 0;
	iostat_ctx->post_read_ctx = ctx;
	bio->bi_private = iostat_ctx;
}

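/*
 * Module-wide setup: create the bio_iostat_ctx slab cache and back it
 * with a mempool of NUM_PREALLOC_IOSTAT_CTXS objects so that per-bio
 * context allocation cannot fail.
 */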
int __init f2fs_init_iostat_processing(void)
{
	bio_iostat_ctx_cache =
		kmem_cache_create("f2fs_bio_iostat_ctx",
				  sizeof(struct bio_iostat_ctx), 0, 0, NULL);
	if (!bio_iostat_ctx_cache)
		goto fail;
	bio_iostat_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_IOSTAT_CTXS,
					 bio_iostat_ctx_cache);
	if (!bio_iostat_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_iostat_ctx_cache);
fail:
	return -ENOMEM;
}

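/* Module-wide teardown: release the mempool and the slab cache. */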
void f2fs_destroy_iostat_processing(void)
{
	mempool_destroy(bio_iostat_ctx_pool);
	kmem_cache_destroy(bio_iostat_ctx_cache);
}

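/*
 * Per-superblock setup: initialize the iostat locks and defaults and
 * allocate the latency table (iostat starts disabled).
 */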
int f2fs_init_iostat(struct f2fs_sb_info *sbi)
{
	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	spin_lock_init(&sbi->iostat_lat_lock);
	sbi->iostat_enable = false;
	sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS;
	sbi->iostat_io_lat = f2fs_kzalloc(sbi, sizeof(struct iostat_lat_info),
					GFP_KERNEL);
	if (!sbi->iostat_io_lat)
		return -ENOMEM;

	return 0;
}

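/* Per-superblock teardown: free the latency table. */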
void f2fs_destroy_iostat(struct f2fs_sb_info *sbi)
{
	kfree(sbi->iostat_io_lat);
}