// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include "swap.h"

static void end_swap_bio_write(struct bio *bio)
{
        struct page *page = bio_first_page_all(bio);

        if (bio->bi_status) {
                SetPageError(page);
                /*
                 * We failed to write the page out to swap-space.
                 * Re-dirty the page in order to avoid it being reclaimed.
                 * Also print a dire warning that things will go BAD (tm)
                 * very quickly.
                 *
                 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
                 */
                set_page_dirty(page);
                pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
                                     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                                     (unsigned long long)bio->bi_iter.bi_sector);
                ClearPageReclaim(page);
        }
        end_page_writeback(page);
        bio_put(bio);
}

static void __end_swap_bio_read(struct bio *bio)
{
        struct page *page = bio_first_page_all(bio);

        if (bio->bi_status) {
                SetPageError(page);
                ClearPageUptodate(page);
                pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
                                     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                                     (unsigned long long)bio->bi_iter.bi_sector);
        } else {
                SetPageUptodate(page);
        }
        unlock_page(page);
}

static void end_swap_bio_read(struct bio *bio)
{
        __end_swap_bio_read(bio);
        bio_put(bio);
}
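
/*
 * generic_swapfile_activate - map a swap file's blocks into swap extents
 * @sis: swap device being activated
 * @swap_file: the swap file itself
 * @span: set to the span of the file's swap extents, in pages
 *
 * Walk the file with bmap(), probing one PAGE_SIZE worth of blocks at a
 * time, and record each physically contiguous, page-aligned run via
 * add_swap_extent().  Returns the number of extents added, or a negative
 * error; a file with holes fails with -EINVAL.
 */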
int generic_swapfile_activate(struct swap_info_struct *sis,
                              struct file *swap_file,
                              sector_t *span)
{
        struct address_space *mapping = swap_file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned blocks_per_page;
        unsigned long page_no;
        unsigned blkbits;
        sector_t probe_block;
        sector_t last_block;
        sector_t lowest_block = -1;
        sector_t highest_block = 0;
        int nr_extents = 0;
        int ret;

        blkbits = inode->i_blkbits;
        blocks_per_page = PAGE_SIZE >> blkbits;

        /*
         * Map all the blocks into the extent tree.  This code doesn't try
         * to be very smart.
         */
        probe_block = 0;
        page_no = 0;
        last_block = i_size_read(inode) >> blkbits;
        while ((probe_block + blocks_per_page) <= last_block &&
               page_no < sis->max) {
                unsigned block_in_page;
                sector_t first_block;

                cond_resched();

                first_block = probe_block;
                ret = bmap(inode, &first_block);
                if (ret || !first_block)
                        goto bad_bmap;

                /*
                 * It must be PAGE_SIZE aligned on-disk
                 */
                if (first_block & (blocks_per_page - 1)) {
                        probe_block++;
                        goto reprobe;
                }

                for (block_in_page = 1; block_in_page < blocks_per_page;
                     block_in_page++) {
                        sector_t block;

                        block = probe_block + block_in_page;
                        ret = bmap(inode, &block);
                        if (ret || !block)
                                goto bad_bmap;

                        if (block != first_block + block_in_page) {
                                /* Discontiguity */
                                probe_block++;
                                goto reprobe;
                        }
                }

                first_block >>= (PAGE_SHIFT - blkbits);
                if (page_no) {  /* exclude the header page */
                        if (first_block < lowest_block)
                                lowest_block = first_block;
                        if (first_block > highest_block)
                                highest_block = first_block;
                }

                /*
                 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
                 */
                ret = add_swap_extent(sis, page_no, 1, first_block);
                if (ret < 0)
                        goto out;
                nr_extents += ret;
                page_no++;
                probe_block += blocks_per_page;
reprobe:
                continue;
        }
        ret = nr_extents;
        *span = 1 + highest_block - lowest_block;
        if (page_no == 0)
                page_no = 1;    /* force Empty message */
        sis->max = page_no;
        sis->pages = page_no - 1;
        sis->highest_bit = page_no - 1;
out:
        return ret;
bad_bmap:
        pr_err("swapon: swapfile has holes\n");
        ret = -EINVAL;
        goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
        struct folio *folio = page_folio(page);
        int ret;

        if (folio_free_swap(folio)) {
                folio_unlock(folio);
                return 0;
        }
        /*
         * Arch code may have to preserve more data than just the page
         * contents, e.g. memory tags.
         */
        ret = arch_prepare_to_swap(&folio->page);
        if (ret) {
                folio_mark_dirty(folio);
                folio_unlock(folio);
                return ret;
        }
        if (frontswap_store(&folio->page) == 0) {
                folio_start_writeback(folio);
                folio_unlock(folio);
                folio_end_writeback(folio);
                return 0;
        }
        __swap_writepage(&folio->page, wbc);
        return 0;
}

static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        if (unlikely(PageTransHuge(page)))
                count_vm_event(THP_SWPOUT);
#endif
        count_vm_events(PSWPOUT, thp_nr_pages(page));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
        struct cgroup_subsys_state *css;
        struct mem_cgroup *memcg;

        memcg = page_memcg(page);
        if (!memcg)
                return;

        rcu_read_lock();
        css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
        bio_associate_blkg_from_css(bio, css);
        rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page) do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */
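
/*
 * A swap_iocb batches up to SWAP_CLUSTER_MAX pages of I/O against a swap
 * file (SWP_FS_OPS) into a single ->swap_rw() call.  sio_pool_init()
 * creates the backing mempool on first use; the cmpxchg() resolves the
 * race when two callers initialize it concurrently, so only one pool
 * survives.
 */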
struct swap_iocb {
        struct kiocb            iocb;
        struct bio_vec          bvec[SWAP_CLUSTER_MAX];
        int                     pages;
        int                     len;
};
static mempool_t *sio_pool;

int sio_pool_init(void)
{
        if (!sio_pool) {
                mempool_t *pool = mempool_create_kmalloc_pool(
                        SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
                if (cmpxchg(&sio_pool, NULL, pool))
                        mempool_destroy(pool);
        }
        if (!sio_pool)
                return -ENOMEM;
        return 0;
}

static void sio_write_complete(struct kiocb *iocb, long ret)
{
        struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
        struct page *page = sio->bvec[0].bv_page;
        int p;

        if (ret != sio->len) {
                /*
                 * In the case of swap-over-nfs, this can be a
                 * temporary failure if the system has limited
                 * memory for allocating transmit buffers.
                 * Mark the page dirty and avoid
                 * folio_rotate_reclaimable(), and rate-limit the
                 * messages, but do not flag PageError as in the
                 * normal direct-to-bio case, since the failure
                 * may be temporary.
                 */
                pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
                                   ret, page_file_offset(page));
                for (p = 0; p < sio->pages; p++) {
                        page = sio->bvec[p].bv_page;
                        set_page_dirty(page);
                        ClearPageReclaim(page);
                }
        } else {
                for (p = 0; p < sio->pages; p++)
                        count_swpout_vm_event(sio->bvec[p].bv_page);
        }

        for (p = 0; p < sio->pages; p++)
                end_page_writeback(sio->bvec[p].bv_page);

        mempool_free(sio, sio_pool);
}
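
/*
 * Queue a page for writeout to a swap file.  If the caller supplied a
 * swap_plug in @wbc, the page is appended to the pending swap_iocb so
 * that file-offset-contiguous pages go out as one ->swap_rw() call; the
 * sio is flushed when it fills up, when the next page is not contiguous,
 * or when the plug is finally run via swap_write_unplug().
 */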
static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
{
        struct swap_iocb *sio = NULL;
        struct swap_info_struct *sis = page_swap_info(page);
        struct file *swap_file = sis->swap_file;
        loff_t pos = page_file_offset(page);

        set_page_writeback(page);
        unlock_page(page);
        if (wbc->swap_plug)
                sio = *wbc->swap_plug;
        if (sio) {
                if (sio->iocb.ki_filp != swap_file ||
                    sio->iocb.ki_pos + sio->len != pos) {
                        swap_write_unplug(sio);
                        sio = NULL;
                }
        }
        if (!sio) {
                sio = mempool_alloc(sio_pool, GFP_NOIO);
                init_sync_kiocb(&sio->iocb, swap_file);
                sio->iocb.ki_complete = sio_write_complete;
                sio->iocb.ki_pos = pos;
                sio->pages = 0;
                sio->len = 0;
        }
        sio->bvec[sio->pages].bv_page = page;
        sio->bvec[sio->pages].bv_len = thp_size(page);
        sio->bvec[sio->pages].bv_offset = 0;
        sio->len += thp_size(page);
        sio->pages += 1;
        if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
                swap_write_unplug(sio);
                sio = NULL;
        }
        if (wbc->swap_plug)
                *wbc->swap_plug = sio;
}

static void swap_writepage_bdev(struct page *page,
                struct writeback_control *wbc, struct swap_info_struct *sis)
{
        struct bio *bio;

        if (!bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc)) {
                count_swpout_vm_event(page);
                return;
        }

        bio = bio_alloc(sis->bdev, 1,
                        REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
                        GFP_NOIO);
        bio->bi_iter.bi_sector = swap_page_sector(page);
        bio->bi_end_io = end_swap_bio_write;
        bio_add_page(bio, page, thp_size(page), 0);

        bio_associate_blkg_from_page(bio, page);
        count_swpout_vm_event(page);
        set_page_writeback(page);
        unlock_page(page);
        submit_bio(bio);
}

void __swap_writepage(struct page *page, struct writeback_control *wbc)
{
        struct swap_info_struct *sis = page_swap_info(page);

        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        /*
         * ->flags can be updated non-atomically (scan_swap_map_slots),
         * but that will never affect SWP_FS_OPS, so the data_race
         * is safe.
         */
        if (data_race(sis->flags & SWP_FS_OPS))
                swap_writepage_fs(page, wbc);
        else
                swap_writepage_bdev(page, wbc, sis);
}

void swap_write_unplug(struct swap_iocb *sio)
{
        struct iov_iter from;
        struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
        int ret;

        iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
        ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
        if (ret != -EIOCBQUEUED)
                sio_write_complete(&sio->iocb, ret);
}

static void sio_read_complete(struct kiocb *iocb, long ret)
{
        struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
        int p;

        if (ret == sio->len) {
                for (p = 0; p < sio->pages; p++) {
                        struct page *page = sio->bvec[p].bv_page;

                        SetPageUptodate(page);
                        unlock_page(page);
                }
                count_vm_events(PSWPIN, sio->pages);
        } else {
                for (p = 0; p < sio->pages; p++) {
                        struct page *page = sio->bvec[p].bv_page;

                        SetPageError(page);
                        ClearPageUptodate(page);
                        unlock_page(page);
                }
                pr_alert_ratelimited("Read-error on swap-device\n");
        }
        mempool_free(sio, sio_pool);
}
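
/*
 * Read a page in from a swap file, mirroring the write-side plugging:
 * file-offset-contiguous pages accumulate in a swap_iocb hanging off
 * @plug and are submitted as a single ->swap_rw() call by
 * swap_read_unplug().
 */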
static void swap_readpage_fs(struct page *page,
                             struct swap_iocb **plug)
{
        struct swap_info_struct *sis = page_swap_info(page);
        struct swap_iocb *sio = NULL;
        loff_t pos = page_file_offset(page);

        if (plug)
                sio = *plug;
        if (sio) {
                if (sio->iocb.ki_filp != sis->swap_file ||
                    sio->iocb.ki_pos + sio->len != pos) {
                        swap_read_unplug(sio);
                        sio = NULL;
                }
        }
        if (!sio) {
                sio = mempool_alloc(sio_pool, GFP_KERNEL);
                init_sync_kiocb(&sio->iocb, sis->swap_file);
                sio->iocb.ki_pos = pos;
                sio->iocb.ki_complete = sio_read_complete;
                sio->pages = 0;
                sio->len = 0;
        }
        sio->bvec[sio->pages].bv_page = page;
        sio->bvec[sio->pages].bv_len = thp_size(page);
        sio->bvec[sio->pages].bv_offset = 0;
        sio->len += thp_size(page);
        sio->pages += 1;
        if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
                swap_read_unplug(sio);
                sio = NULL;
        }
        if (plug)
                *plug = sio;
}

static void swap_readpage_bdev_sync(struct page *page,
                struct swap_info_struct *sis)
{
        struct bio_vec bv;
        struct bio bio;

        if ((sis->flags & SWP_SYNCHRONOUS_IO) &&
            !bdev_read_page(sis->bdev, swap_page_sector(page), page)) {
                count_vm_event(PSWPIN);
                return;
        }

        bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
        bio.bi_iter.bi_sector = swap_page_sector(page);
        bio_add_page(&bio, page, thp_size(page), 0);
        /*
         * Keep this task valid during swap readpage because the oom killer may
         * attempt to access it in the page fault retry time check.
         */
        get_task_struct(current);
        count_vm_event(PSWPIN);
        submit_bio_wait(&bio);
        __end_swap_bio_read(&bio);
        put_task_struct(current);
}

static void swap_readpage_bdev_async(struct page *page,
                struct swap_info_struct *sis)
{
        struct bio *bio;

        if ((sis->flags & SWP_SYNCHRONOUS_IO) &&
            !bdev_read_page(sis->bdev, swap_page_sector(page), page)) {
                count_vm_event(PSWPIN);
                return;
        }

        bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
        bio->bi_iter.bi_sector = swap_page_sector(page);
        bio->bi_end_io = end_swap_bio_read;
        bio_add_page(bio, page, thp_size(page), 0);
        count_vm_event(PSWPIN);
        submit_bio(bio);
}
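
/*
 * swap_readpage - read a page of swap in from its backing store
 * @page: page to read in; must be locked and not uptodate
 * @synchronous: wait for the read to complete (bdev case only)
 * @plug: optional plug for batching reads from SWP_FS_OPS swap files
 *
 * Submission time is counted as a memory stall (psi) and as thrashing
 * delay (delayacct) when the page was part of the workingset.
 */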
void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
{
        struct swap_info_struct *sis = page_swap_info(page);
        bool workingset = PageWorkingset(page);
        unsigned long pflags;
        bool in_thrashing;

        VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageUptodate(page), page);

        /*
         * Count submission time as memory stall and delay. When the device
         * is congested, or the submitting cgroup IO-throttled, submission
         * can be a significant part of overall IO time.
         */
        if (workingset) {
                delayacct_thrashing_start(&in_thrashing);
                psi_memstall_enter(&pflags);
        }
        delayacct_swapin_start();

        if (frontswap_load(page) == 0) {
                SetPageUptodate(page);
                unlock_page(page);
        } else if (data_race(sis->flags & SWP_FS_OPS)) {
                swap_readpage_fs(page, plug);
        } else if (synchronous) {
                swap_readpage_bdev_sync(page, sis);
        } else {
                swap_readpage_bdev_async(page, sis);
        }

        if (workingset) {
                delayacct_thrashing_end(&in_thrashing);
                psi_memstall_leave(&pflags);
        }
        delayacct_swapin_end();
}

void __swap_read_unplug(struct swap_iocb *sio)
{
        struct iov_iter from;
        struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
        int ret;

        iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
        ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
        if (ret != -EIOCBQUEUED)
                sio_read_complete(&sio->iocb, ret);
}