// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>

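/*
 * Completion handler for swap-out bios.  On success the page's writeback
 * bit is simply cleared; on error the page is redirtied so reclaim does
 * not drop data that never reached the swap device.
 */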
void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim so that folio_rotate_reclaimable()
		 * does not move the page to the tail of the LRU.
		 */
		set_page_dirty(page);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

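/*
 * Completion handler for swap-in bios.  Marks the page up-to-date on
 * success, unlocks it either way, and wakes the task that submitted a
 * synchronous (polled) read, if there is one.
 */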
static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
out:
	unlock_page(page);
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	if (waiter) {
		blk_wake_io_task(waiter);
		put_task_struct(waiter);
	}
}

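/*
 * Build the swap extent tree for a filesystem-backed swap file by probing
 * the file block-by-block with bmap().  A page of swap space is only used
 * where blocks_per_page file blocks are contiguous and PAGE_SIZE-aligned
 * on disk; misaligned or discontiguous runs are skipped, and a hole in
 * the file fails the whole activation.  Returns the number of extents
 * added, or a negative errno.
 */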
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
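	/*
	 * Example: with 4KiB pages on a filesystem using 1KiB blocks,
	 * blkbits == 10 and blocks_per_page == 4, so each iteration
	 * checks that four consecutive file blocks map to four
	 * consecutive, 4-block-aligned blocks on disk.
	 */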
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * The first block must be PAGE_SIZE-aligned on disk;
		 * blocks_per_page is a power of two, so the mask test
		 * below checks exactly that.
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(page);
	if (ret) {
		set_page_dirty(page);
		unlock_page(page);
		goto out;
	}
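	/*
	 * If frontswap (e.g. zswap) accepts the page, no block I/O is
	 * needed; still run the writeback state machine so callers see
	 * a normally-completed writeback.
	 */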
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}

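/*
 * vmstat accounting for a swap-out: a THP counts once as THP_SWPOUT and
 * contributes one PSWPOUT event per subpage.
 */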
static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, thp_nr_pages(page));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
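/*
 * Charge the swap-out I/O to the blkcg that corresponds to the memory
 * cgroup owning the page, so block-layer throttling sees the right
 * cgroup.  Without CONFIG_MEMCG and CONFIG_BLK_CGROUP this is a no-op.
 */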
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)		do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

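/*
 * Write one page to swap.  Three paths, tried in order:
 *  - SWP_FS_OPS (e.g. swap over NFS): a synchronous direct_IO write
 *    through the filesystem's address_space operations;
 *  - bdev_write_page(): the block driver's synchronous rw_page method,
 *    if it provides one;
 *  - otherwise a regular bio, completed by end_write_func.
 */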
int __swap_writepage(struct page *page, struct writeback_control *wbc,
		bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct bio_vec bv = {
			.bv_page = page,
			.bv_len  = PAGE_SIZE,
			.bv_offset = 0
		};
		struct iov_iter from;

		iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(&kiocb, &from);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty, keep it off the
			 * folio_rotate_reclaimable() path, and rate-limit
			 * the messages, but do not flag PageError as the
			 * normal direct-to-bio case would, since the
			 * failure may be temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
					   page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

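	/* Try the driver's synchronous rw_page method before falling back to a bio. */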
	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_swpout_vm_event(page);
		return 0;
	}

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_write_func;
	bio_add_page(bio, page, thp_size(page), 0);

	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);

	return 0;
}

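/*
 * Read one page back in from swap.  Frontswap is tried first, then the
 * filesystem's ->readpage for SWP_FS_OPS, then the driver's synchronous
 * rw_page method for SWP_SYNCHRONOUS_IO devices, and finally a regular
 * bio.  With @synchronous, the bio is polled to completion instead of
 * being completed asynchronously.
 */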
int swap_readpage(struct page *page, bool synchronous)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	bool workingset = PageWorkingset(page);
	unsigned long pflags;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall.  When the device is
	 * congested, or the submitting cgroup is IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset)
		psi_memstall_enter(&pflags);
	delayacct_swapin_start();

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret)
			count_vm_event(PSWPIN);
		goto out;
	}

	if (sis->flags & SWP_SYNCHRONOUS_IO) {
		ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
		if (!ret) {
			count_vm_event(PSWPIN);
			goto out;
		}
	}

	ret = 0;
	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_page(bio, page, thp_size(page), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer
	 * may attempt to access it in the page-fault retry-time check.
	 */
	if (synchronous) {
		bio->bi_opf |= REQ_POLLED;
		get_task_struct(current);
		bio->bi_private = current;
	}
	count_vm_event(PSWPIN);
	bio_get(bio);
	submit_bio(bio);
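	/*
	 * For the synchronous case, wait here: end_swap_bio_read() clears
	 * ->bi_private and wakes us once the read completes.  bio_poll()
	 * reaps polled completions directly; if nothing completed, sleep.
	 */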
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		if (!bio_poll(bio, NULL, 0))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);

out:
	if (workingset)
		psi_memstall_leave(&pflags);
	delayacct_swapin_end();
	return ret;
}

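/*
 * Dirtying a swap-cache folio: filesystem-backed swap (SWP_FS_OPS) must
 * go through the filesystem's own dirty_folio so its write path keeps
 * working; block-device swap needs nothing beyond the folio's dirty flag.
 */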
bool swap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct swap_info_struct *sis = swp_swap_info(folio_swap_entry(folio));

	if (data_race(sis->flags & SWP_FS_OPS)) {
		const struct address_space_operations *aops;

		mapping = sis->swap_file->f_mapping;
		aops = mapping->a_ops;

		VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
		return aops->dirty_folio(mapping, folio);
	} else {
		return noop_dirty_folio(mapping, folio);
	}
}