// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
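
/*
 * Completion handler for swap write bios.  On I/O error, redirty the
 * page so reclaim does not drop data that never reached the swap
 * device, and clear PG_reclaim so folio_rotate_reclaimable() leaves
 * the page alone; writeback is ended either way.
 */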
void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
		set_page_dirty(page);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}
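
/*
 * Tell the block driver that the swap slot backing @page can be freed
 * once its swap count drops to one, so an in-memory swap device such
 * as zram can release its copy.  Only applies to SWP_BLKDEV devices
 * whose driver implements ->swap_slot_free_notify().
 */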
static void swap_slot_free_notify(struct page *page)
{
	struct swap_info_struct *sis;
	struct gendisk *disk;
	swp_entry_t entry;

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (unlikely(!PageSwapCache(page)))
		return;

	sis = page_swap_info(page);
	if (data_race(!(sis->flags & SWP_BLKDEV)))
		return;

	/*
	 * The swap subsystem performs lazy swap slot freeing,
	 * expecting that the page will be swapped out again.
	 * So we can avoid an unnecessary write if the page
	 * isn't redirtied.
	 * This is good for real swap storage because we can
	 * reduce unnecessary I/O and enhance wear-leveling
	 * if an SSD is used as the swap device.
	 * But if an in-memory swap device (e.g. zram) is used,
	 * this causes a duplicated copy between uncompressed
	 * data in VM-owned memory and compressed data in
	 * zram-owned memory.  So let's free zram-owned memory
	 * and make the VM-owned decompressed page *dirty*,
	 * so the page should be swapped out somewhere again if
	 * we again wish to reclaim it.
	 */
	disk = sis->bdev->bd_disk;
	entry.val = page_private(page);
	if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
		unsigned long offset;

		offset = swp_offset(entry);

		SetPageDirty(page);
		disk->fops->swap_slot_free_notify(sis->bdev,
				offset);
	}
}
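
/*
 * For illustration only: a block driver opts in to the notification
 * above by providing ->swap_slot_free_notify in its
 * block_device_operations, along these lines (hypothetical driver,
 * not part of this file):
 *
 *	static void myswap_slot_free_notify(struct block_device *bdev,
 *					    unsigned long offset)
 *	{
 *		// release the device-side copy of the slot at @offset
 *	}
 *
 *	static const struct block_device_operations myswap_fops = {
 *		.swap_slot_free_notify	= myswap_slot_free_notify,
 *	};
 */

/*
 * Completion handler for swap read bios.  Mark the page up to date on
 * success, unlock it, and wake the task (if any) polling for
 * synchronous completion in swap_readpage().
 */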
static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
	swap_slot_free_notify(page);
out:
	unlock_page(page);
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	if (waiter) {
		blk_wake_io_task(waiter);
		put_task_struct(waiter);
	}
}
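
/*
 * Build the swap extent tree for a regular swap file by probing its
 * blocks with bmap(): each PAGE_SIZE run must be contiguous and
 * PAGE_SIZE-aligned on disk.  Returns the number of extents added, or
 * a negative error (-EINVAL if the file has holes).
 */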
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(page);
	if (ret) {
		set_page_dirty(page);
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}
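
/* Account a swap-out in vmstat; THP swap-out has its own event. */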
static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, thp_nr_pages(page));
}
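
/*
 * Attribute the swap-out bio to the blkcg associated with the page's
 * memcg, so the I/O is charged and throttled against the right cgroup.
 */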
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)		do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */
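
/*
 * Issue the swap-out I/O for @page: through ->direct_IO() for
 * SWP_FS_OPS swap files, through bdev_write_page() when the block
 * driver supports it, otherwise by submitting a bio that completes in
 * @end_write_func.
 */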
int __swap_writepage(struct page *page, struct writeback_control *wbc,
		bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct bio_vec bv = {
			.bv_page = page,
			.bv_len  = PAGE_SIZE,
			.bv_offset = 0
		};
		struct iov_iter from;

		iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(&kiocb, &from);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty and avoid
			 * folio_rotate_reclaimable(), but rate-limit the
			 * messages and do not flag PageError as in the
			 * normal direct-to-bio case, since the failure
			 * may be temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
					   page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_swpout_vm_event(page);
		return 0;
	}

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_write_func;
	bio_add_page(bio, page, thp_size(page), 0);

	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);

	return 0;
}
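
/*
 * Read a page in from swap: frontswap first, then ->readpage() for
 * SWP_FS_OPS swap files, then bdev_read_page() for SWP_SYNCHRONOUS_IO
 * devices, falling back to a bio.  With @synchronous set, the caller
 * polls the (REQ_POLLED) bio until end_swap_bio_read() clears
 * bi_private instead of returning before the I/O completes.
 */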
int swap_readpage(struct page *page, bool synchronous)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	bool workingset = PageWorkingset(page);
	unsigned long pflags;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall. When the device is congested,
	 * or the submitting cgroup IO-throttled, submission can be a
	 * significant part of overall IO time.
	 */
	if (workingset)
		psi_memstall_enter(&pflags);
	delayacct_swapin_start();

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret)
			count_vm_event(PSWPIN);
		goto out;
	}

	if (sis->flags & SWP_SYNCHRONOUS_IO) {
		ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
		if (!ret) {
			if (trylock_page(page)) {
				swap_slot_free_notify(page);
				unlock_page(page);
			}

			count_vm_event(PSWPIN);
			goto out;
		}
	}

	ret = 0;
	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_page(bio, page, thp_size(page), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	if (synchronous) {
		bio->bi_opf |= REQ_POLLED;
		get_task_struct(current);
		bio->bi_private = current;
	}
	count_vm_event(PSWPIN);
	bio_get(bio);
	submit_bio(bio);
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		if (!bio_poll(bio, NULL, 0))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);

out:
	if (workingset)
		psi_memstall_leave(&pflags);
	delayacct_swapin_end();
	return ret;
}
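
/*
 * Dirty a swap cache folio: SWP_FS_OPS swap files need the backing
 * filesystem's ->dirty_folio(); everything else just sets the dirty
 * flag via noop_dirty_folio().
 */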
bool swap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct swap_info_struct *sis = swp_swap_info(folio_swap_entry(folio));

	if (data_race(sis->flags & SWP_FS_OPS)) {
		const struct address_space_operations *aops;

		mapping = sis->swap_file->f_mapping;
		aops = mapping->a_ops;

		VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
		return aops->dirty_folio(mapping, folio);
	} else {
		return noop_dirty_folio(mapping, folio);
	}
}
458