// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include "swap.h"

static void __end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
		set_page_dirty(page);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
}

static void end_swap_bio_write(struct bio *bio)
{
	__end_swap_bio_write(bio);
	bio_put(bio);
}

static void __end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
	} else {
		SetPageUptodate(page);
	}
	unlock_page(page);
}

static void end_swap_bio_read(struct bio *bio)
{
	__end_swap_bio_read(bio);
	bio_put(bio);
}

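/*
 * generic_swapfile_activate - build the swap extent tree for a swapfile.
 *
 * Walk the swap file with bmap(), requiring each PAGE_SIZE worth of blocks
 * to be contiguous and PAGE_SIZE-aligned on disk, and record each such run
 * with add_swap_extent().  Returns the number of extents added (or a
 * negative errno) and reports the on-disk span through *span.
 */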
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int ret;

	if (folio_free_swap(folio)) {
		folio_unlock(folio);
		return 0;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(&folio->page);
	if (ret) {
		folio_mark_dirty(folio);
		folio_unlock(folio);
		return ret;
	}
	if (frontswap_store(&folio->page) == 0) {
		folio_start_writeback(folio);
		folio_unlock(folio);
		folio_end_writeback(folio);
		return 0;
	}
	__swap_writepage(&folio->page, wbc);
	return 0;
}

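/*
 * Account a swap-out in vmstat.  A THP is counted once as THP_SWPOUT and
 * once per subpage as PSWPOUT.
 */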
static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, thp_nr_pages(page));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)		do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
	int			len;
};
static mempool_t *sio_pool;

int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}

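/*
 * Completion callback for swap-out submitted through ->swap_rw() (SWP_FS_OPS).
 * On a short or failed write, redirty every page in the request so it is not
 * reclaimed; on success, account each page as a swap-out.  Either way, end
 * writeback and return the swap_iocb to the mempool.
 */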
static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != sio->len) {
		/*
		 * In the case of swap-over-nfs, this can be a
		 * temporary failure if the system has limited
		 * memory for allocating transmit buffers.
		 * Mark the page dirty and avoid
		 * folio_rotate_reclaimable().  Rate-limit the
		 * messages, but do not flag PageError as in the
		 * normal direct-to-bio case, since the failure
		 * could be temporary.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, page_file_offset(page));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	} else {
		for (p = 0; p < sio->pages; p++)
			count_swpout_vm_event(sio->bvec[p].bv_page);
	}

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}

static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
{
	struct swap_iocb *sio = NULL;
	struct swap_info_struct *sis = page_swap_info(page);
	struct file *swap_file = sis->swap_file;
	loff_t pos = page_file_offset(page);

	set_page_writeback(page);
	unlock_page(page);
	if (wbc->swap_plug)
		sio = *wbc->swap_plug;
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
		sio->len = 0;
	}
	sio->bvec[sio->pages].bv_page = page;
	sio->bvec[sio->pages].bv_len = thp_size(page);
	sio->bvec[sio->pages].bv_offset = 0;
	sio->len += thp_size(page);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (wbc->swap_plug)
		*wbc->swap_plug = sio;
}

static void swap_writepage_bdev_sync(struct page *page,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1,
		 REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
	bio.bi_iter.bi_sector = swap_page_sector(page);
	bio_add_page(&bio, page, thp_size(page), 0);

	bio_associate_blkg_from_page(&bio, page);
	count_swpout_vm_event(page);

	set_page_writeback(page);
	unlock_page(page);

	submit_bio_wait(&bio);
	__end_swap_bio_write(&bio);
}

static void swap_writepage_bdev_async(struct page *page,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_write;
	bio_add_page(bio, page, thp_size(page), 0);

	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);
}

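/*
 * Dispatch a swap-out: through the filesystem's ->swap_rw() for SWP_FS_OPS
 * swapfiles, synchronously for SWP_SYNCHRONOUS_IO block devices, and as an
 * asynchronous bio otherwise.
 */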
void __swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		swap_writepage_fs(page, wbc);
	else if (sis->flags & SWP_SYNCHRONOUS_IO)
		swap_writepage_bdev_sync(page, wbc, sis);
	else
		swap_writepage_bdev_async(page, wbc, sis);
}

void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}

static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == sio->len) {
		for (p = 0; p < sio->pages; p++) {
			struct page *page = sio->bvec[p].bv_page;

			SetPageUptodate(page);
			unlock_page(page);
		}
		count_vm_events(PSWPIN, sio->pages);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct page *page = sio->bvec[p].bv_page;

			SetPageError(page);
			ClearPageUptodate(page);
			unlock_page(page);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}

static void swap_readpage_fs(struct page *page,
			     struct swap_iocb **plug)
{
	struct swap_info_struct *sis = page_swap_info(page);
	struct swap_iocb *sio = NULL;
	loff_t pos = page_file_offset(page);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
		sio->len = 0;
	}
	sio->bvec[sio->pages].bv_page = page;
	sio->bvec[sio->pages].bv_len = thp_size(page);
	sio->bvec[sio->pages].bv_offset = 0;
	sio->len += thp_size(page);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}

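/*
 * Synchronous swap-in from a block device: issue the bio and wait for it to
 * complete in the caller's context.
 */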
static void swap_readpage_bdev_sync(struct page *page,
		struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = swap_page_sector(page);
	bio_add_page(&bio, page, thp_size(page), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	get_task_struct(current);
	count_vm_event(PSWPIN);
	submit_bio_wait(&bio);
	__end_swap_bio_read(&bio);
	put_task_struct(current);
}

static void swap_readpage_bdev_async(struct page *page,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_page(bio, page, thp_size(page), 0);
	count_vm_event(PSWPIN);
	submit_bio(bio);
}

void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = page_swap_info(page);
	bool workingset = PageWorkingset(page);
	unsigned long pflags;
	bool in_thrashing;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall and delay. When the device
	 * is congested, or the submitting cgroup IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
	}
	delayacct_swapin_start();

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
	} else if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_readpage_fs(page, plug);
	} else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
		swap_readpage_bdev_sync(page, sis);
	} else {
		swap_readpage_bdev_async(page, sis);
	}

	if (workingset) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
	delayacct_swapin_end();
}

void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}