// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include "swap.h"

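/*
 * Common completion path for swap write bios: on error, redirty the
 * page so its contents are not lost, then end writeback.  Shared by
 * the synchronous (on-stack bio) and asynchronous write paths.
 */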
static void __end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
		set_page_dirty(page);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
}

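/* bio ->bi_end_io for asynchronous swap writes: complete, then drop the bio. */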
static void end_swap_bio_write(struct bio *bio)
{
	__end_swap_bio_write(bio);
	bio_put(bio);
}

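/*
 * Common completion path for swap read bios: mark the page up to date
 * on success, report the error otherwise, then unlock the page.
 */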
static void __end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
	} else {
		SetPageUptodate(page);
	}
	unlock_page(page);
}

static void end_swap_bio_read(struct bio *bio)
{
	__end_swap_bio_read(bio);
	bio_put(bio);
}

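/*
 * Walk the swap file block by block with bmap() and record each
 * physically contiguous, PAGE_SIZE-aligned run of blocks as a swap
 * extent.  Returns the number of extents added, or -EINVAL if the
 * file has holes (unmapped blocks), which swap cannot tolerate.
 */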
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int ret;

	if (folio_free_swap(folio)) {
		folio_unlock(folio);
		return 0;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(&folio->page);
	if (ret) {
		folio_mark_dirty(folio);
		folio_unlock(folio);
		return ret;
	}
	if (frontswap_store(&folio->page) == 0) {
		folio_start_writeback(folio);
		folio_unlock(folio);
		folio_end_writeback(folio);
		return 0;
	}
	__swap_writepage(&folio->page, wbc);
	return 0;
}

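/*
 * Account a swap-out in vmstat: a THP counts as one THP_SWPOUT event
 * plus one PSWPOUT event per subpage.
 */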
static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, thp_nr_pages(page));
}

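/*
 * Associate the swap-out bio with the block cgroup that corresponds to
 * the page's memory cgroup, so the I/O is charged to the right cgroup.
 */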
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)		do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

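/*
 * One in-flight unit of swap I/O to a file (SWP_FS_OPS): a kiocb plus
 * up to SWAP_CLUSTER_MAX page-sized segments, batched up and submitted
 * together through the filesystem's ->swap_rw().
 */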
struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
	int			len;
};
static mempool_t *sio_pool;

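/*
 * Allocate the shared sio mempool on first use.  Racing callers may
 * each create a pool; cmpxchg() installs exactly one and the losers
 * destroy theirs, so no lock is needed.
 */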
int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}

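/* ki_complete callback for a batched swap write through ->swap_rw(). */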
static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != sio->len) {
		/*
		 * In the case of swap-over-nfs, this can be a
		 * temporary failure if the system has limited
		 * memory for allocating transmit buffers.
		 * Mark the pages dirty and clear PG_reclaim to
		 * avoid folio_rotate_reclaimable(), and rate-limit
		 * the messages.  Unlike the normal direct-to-bio
		 * case, do not set PageError, since the failure
		 * may be temporary.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, page_file_offset(page));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	} else {
		for (p = 0; p < sio->pages; p++)
			count_swpout_vm_event(sio->bvec[p].bv_page);
	}

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}

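/*
 * Write a page to a swap file.  Consecutive pages at contiguous file
 * offsets are gathered into one swap_iocb via the wbc->swap_plug hook
 * and submitted in a single ->swap_rw() call by swap_write_unplug().
 */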
static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
{
	struct swap_iocb *sio = NULL;
	struct swap_info_struct *sis = page_swap_info(page);
	struct file *swap_file = sis->swap_file;
	loff_t pos = page_file_offset(page);

	set_page_writeback(page);
	unlock_page(page);
	if (wbc->swap_plug)
		sio = *wbc->swap_plug;
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_page(&sio->bvec[sio->pages], page, thp_size(page), 0);
	sio->len += thp_size(page);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (wbc->swap_plug)
		*wbc->swap_plug = sio;
}

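/*
 * Synchronous write to a swap partition: use an on-stack bio and wait
 * for it to complete before returning.
 */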
static void swap_writepage_bdev_sync(struct page *page,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1,
		 REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
	bio.bi_iter.bi_sector = swap_page_sector(page);
	__bio_add_page(&bio, page, thp_size(page), 0);

	bio_associate_blkg_from_page(&bio, page);
	count_swpout_vm_event(page);

	set_page_writeback(page);
	unlock_page(page);

	submit_bio_wait(&bio);
	__end_swap_bio_write(&bio);
}

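/* Asynchronous write to a swap partition: completion runs in end_swap_bio_write(). */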
static void swap_writepage_bdev_async(struct page *page,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_write;
	__bio_add_page(bio, page, thp_size(page), 0);

	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);
}

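/*
 * Dispatch a swap-cache page to the appropriate write path: file-backed
 * swap (SWP_FS_OPS), synchronous bdev, or asynchronous bdev.
 */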
void __swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		swap_writepage_fs(page, wbc);
	else if (sis->flags & SWP_SYNCHRONOUS_IO)
		swap_writepage_bdev_sync(page, wbc, sis);
	else
		swap_writepage_bdev_async(page, wbc, sis);
}

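/*
 * Submit a plugged batch of file-backed swap writes through ->swap_rw().
 * If the filesystem completed it synchronously, run the completion here.
 */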
void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}

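/* ki_complete callback for a batched swap read through ->swap_rw(). */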
static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == sio->len) {
		for (p = 0; p < sio->pages; p++) {
			struct page *page = sio->bvec[p].bv_page;

			SetPageUptodate(page);
			unlock_page(page);
		}
		count_vm_events(PSWPIN, sio->pages);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct page *page = sio->bvec[p].bv_page;

			SetPageError(page);
			unlock_page(page);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}

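/*
 * Read a page from a swap file, batching contiguous reads into one
 * swap_iocb via *plug, mirroring swap_writepage_fs().
 */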
static void swap_readpage_fs(struct page *page,
			     struct swap_iocb **plug)
{
	struct swap_info_struct *sis = page_swap_info(page);
	struct swap_iocb *sio = NULL;
	loff_t pos = page_file_offset(page);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_page(&sio->bvec[sio->pages], page, thp_size(page), 0);
	sio->len += thp_size(page);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}

static void swap_readpage_bdev_sync(struct page *page,
		struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = swap_page_sector(page);
	__bio_add_page(&bio, page, thp_size(page), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	get_task_struct(current);
	count_vm_event(PSWPIN);
	submit_bio_wait(&bio);
	__end_swap_bio_read(&bio);
	put_task_struct(current);
}

static void swap_readpage_bdev_async(struct page *page,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_read;
	__bio_add_page(bio, page, thp_size(page), 0);
	count_vm_event(PSWPIN);
	submit_bio(bio);
}

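/*
 * Read a page from swap: via frontswap, the filesystem ->swap_rw()
 * path, or the block device, accounting the submission as a memory
 * stall for pages that were part of the workingset.
 */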
void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = page_swap_info(page);
	bool workingset = PageWorkingset(page);
	unsigned long pflags;
	bool in_thrashing;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall and delay. When the device
	 * is congested, or the submitting cgroup IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
	}
	delayacct_swapin_start();

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
	} else if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_readpage_fs(page, plug);
	} else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
		swap_readpage_bdev_sync(page, sis);
	} else {
		swap_readpage_bdev_async(page, sis);
	}

	if (workingset) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
	delayacct_swapin_end();
}

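/* Submit a plugged batch of file-backed swap reads through ->swap_rw(). */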
void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}
545