/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
		page->mapping = NULL;
		unlock_page(page);
	}
	page_cache_release(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = list_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		page_cache_release(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
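/*
 * Illustrative sketch (not part of this file): read_cache_pages() is
 * typically driven from a filesystem's ->readpages() implementation, which
 * hands over its page list together with a filler callback that starts I/O
 * on a single page.  The callback and context names below are hypothetical:
 *
 *	static int example_filler(void *data, struct page *page)
 *	{
 *		struct example_read_ctx *ctx = data;
 *
 *		// queue or submit I/O for this one page; return 0 on success
 *		return example_start_read(ctx, page);
 *	}
 *
 *	static int example_readpages(struct file *filp,
 *				     struct address_space *mapping,
 *				     struct list_head *pages, unsigned nr_pages)
 *	{
 *		struct example_read_ctx ctx = { .file = filp };
 *
 *		return read_cache_pages(mapping, pages, example_filler, &ctx);
 *	}
 */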
static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	struct blk_plug plug;
	unsigned page_idx;
	int ret;

	blk_start_plug(&plug);

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
		}
		page_cache_release(page);
	}
	ret = 0;

out:
	blk_finish_plug(&plug);

	return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 */
int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page))
			continue;

		page = page_cache_alloc_readahead(mapping);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ret++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}
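/*
 * Worked example (illustrative): with nr_to_read == 16 and lookahead_size == 8,
 * the preallocation loop above sets PG_readahead on the page at offset + 8,
 * i.e. the first page of the asynchronous tail of the window.  With
 * lookahead_size == 0 (as passed by force_page_cache_readahead() below) no
 * marker is set, and with lookahead_size == nr_to_read the very first page of
 * the window is marked.
 */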
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	nr_to_read = max_sane_readahead(nr_to_read);
	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk, 0);
		if (err < 0)
			return err;

		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return 0;
}

#define MAX_READAHEAD   ((512*4096)/PAGE_CACHE_SIZE)
/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	return min(nr, MAX_READAHEAD);
}

/*
 * Set the initial window size, round to next power of 2 and square
 * for small size, x 4 for medium, and x 2 for large
 * for 128k (32 page) max ra
 * 1-8 page = 32k initial, > 8 page = 128k initial
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (cur < max / 16)
		newsize = 4 * cur;
	else
		newsize = 2 * cur;

	return min(newsize, max);
}
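/*
 * Worked example (illustrative, for a 128k / 32 page maximum window):
 * a sequential stream that starts with a 4 page read gets an initial window
 * of get_init_ra_size(4, 32) = 8 pages; subsequent sequential hits ramp the
 * window up via get_next_ra_size() to 16 and then 32 pages, after which it
 * stays capped at max.
 */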
/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator. The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

/*
 * Count contiguously cached pages from @offset-1 to @offset-@max,
 * this count is a conservative estimation of
 * 	- length of the sequential read sequence, or
 * 	- thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   pgoff_t offset, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = page_cache_prev_hole(mapping, offset - 1, max);
	rcu_read_unlock();

	return offset - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t offset,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, offset, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= offset)
		size *= 2;

	ra->start = offset;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
		   struct file_ra_state *ra, struct file *filp,
		   bool hit_readahead_marker, pgoff_t offset,
		   unsigned long req_size)
{
	unsigned long max = max_sane_readahead(ra->ra_pages);
	pgoff_t prev_offset;

	/*
	 * start of file
	 */
	if (!offset)
		goto initial_readahead;

	/*
	 * It's the expected callback offset, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((offset == (ra->start + ra->size - ra->async_size) ||
	     offset == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = page_cache_next_hole(mapping, offset + 1, max);
		rcu_read_unlock();

		if (!start || start - offset > max)
			return 0;

		ra->start = start;
		ra->size = start - offset;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 * trivial case: (offset - prev_offset) == 1
	 * unaligned reads: (offset - prev_offset) == 0
	 */
	prev_offset = (unsigned long long)ra->prev_pos >> PAGE_CACHE_SHIFT;
	if (offset - prev_offset <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(mapping, ra, offset, req_size, max))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);

initial_readahead:
	ra->start = offset;
	ra->size = get_init_ra_size(req_size, max);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 */
	if (offset == ra->start && ra->size == ra->async_size) {
		ra->async_size = get_next_ra_size(ra, max);
		ra->size += ra->async_size;
	}

	return ra_submit(ra, mapping, filp);
}
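/*
 * Illustrative call pattern (a sketch, not part of this file): a buffered
 * read path such as do_generic_file_read() in mm/filemap.c is expected to
 * drive the two entry points below roughly like this (variable names are
 * illustrative):
 *
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		// cache miss: start synchronous readahead, then look again
 *		page_cache_sync_readahead(mapping, ra, filp, index, req_pages);
 *		page = find_get_page(mapping, index);
 *	} else if (PageReadahead(page)) {
 *		// hit the PG_readahead marker: top up the window asynchronously
 *		page_cache_async_readahead(mapping, ra, filp, page,
 *					   index, req_pages);
 *	}
 */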
/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/* be dumb */
	if (filp && (filp->f_mode & FMODE_RANDOM)) {
		force_page_cache_readahead(mapping, filp, offset, req_size);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t offset,
			   unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (bdi_read_congested(mapping->backing_dev_info))
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);

static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
	     pgoff_t index, unsigned long nr)
{
	if (!mapping || !mapping->a_ops)
		return -EINVAL;

	return force_page_cache_readahead(mapping, filp, index, nr);
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (f.file) {
		if (f.file->f_mode & FMODE_READ) {
			struct address_space *mapping = f.file->f_mapping;
			pgoff_t start = offset >> PAGE_CACHE_SHIFT;
			pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
			unsigned long len = end - start + 1;
			ret = do_readahead(mapping, f.file, start, len);
		}
		fdput(f);
	}
	return ret;
}
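/*
 * Userspace usage sketch (illustrative, not part of the kernel build): the
 * syscall above is exposed by glibc as readahead(2) when _GNU_SOURCE is
 * defined.  A program can, for example, prefetch the first megabyte of a
 * file (the filename is hypothetical):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int fd = open("data.bin", O_RDONLY);
 *	if (fd >= 0) {
 *		// best-effort prefetch; errors can usually be ignored
 *		readahead(fd, 0, 1024 * 1024);
 *	}
 */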