/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
	.ra_pages	= (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

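/*
 * Example (illustrative, assuming the usual VM_MAX_READAHEAD of 128 kbytes
 * and a 4K PAGE_CACHE_SIZE): the default ra_pages above works out to
 * (128 * 1024) / 4096 = 32 pages, i.e. a 128K maximum readahead window.
 * The matching VM_MIN_READAHEAD of 16 kbytes gives a 4-page minimum.
 */
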
/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_page = -1;
}

/*
 * Return max readahead size for this inode in number-of-pages.
 */
static inline unsigned long get_max_readahead(struct file_ra_state *ra)
{
	return ra->ra_pages;
}

static inline unsigned long get_min_readahead(struct file_ra_state *ra)
{
	return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
}

static inline void reset_ahead_window(struct file_ra_state *ra)
{
	/*
	 * ... but preserve ahead_start + ahead_size value,
	 * see 'recheck:' label in page_cache_readahead().
	 * Note: We never use ->ahead_size as rvalue without
	 * checking ->ahead_start != 0 first.
	 */
	ra->ahead_size += ra->ahead_start;
	ra->ahead_start = 0;
}

static inline void ra_off(struct file_ra_state *ra)
{
	ra->start = 0;
	ra->flags = 0;
	ra->size = 0;
	reset_ahead_window(ra);
	return;
}

/*
 * Set the initial window size: round the request up to the next power of
 * two, then scale it - x4 for small sizes, x2 for medium sizes - and cap
 * the result at the max readahead (128k, i.e. 32 pages, by default).
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;
	return newsize;
}

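/*
 * Worked example (assuming a 4K PAGE_CACHE_SIZE, so max = 32 pages):
 * a 1-page request rounds to 1 and is quadrupled to a 4-page (16K)
 * window; a 3-page request rounds to 4 and is doubled to 8 pages (32K);
 * an 8-page request becomes 16 pages (64K); anything above 8 pages goes
 * straight to the 32-page (128K) maximum.
 */
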
/*
 * Set the new window size.  This is called only when I/O is to be submitted,
 * not for each call to readahead.  If a cache miss occurred, reduce the next
 * I/O size, else increase it depending on how close to max we are.
 */
static inline unsigned long get_next_ra_size(struct file_ra_state *ra)
{
	unsigned long max = get_max_readahead(ra);
	unsigned long min = get_min_readahead(ra);
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (ra->flags & RA_FLAG_MISS) {
		ra->flags &= ~RA_FLAG_MISS;
		newsize = max((cur - 2), min);
	} else if (cur < max / 16) {
		newsize = 4 * cur;
	} else {
		newsize = 2 * cur;
	}
	return min(newsize, max);
}

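/*
 * Worked example (again assuming max = 32 pages, min = 4 pages): a
 * 4-page current window doubles to 8, then 16, then is capped at 32 on
 * successive submissions.  After a cache miss with cur = 16, the next
 * window shrinks to max(16 - 2, 4) = 14 pages.  Only a window smaller
 * than max / 16 (here, a single page) grows fourfold.
 */
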
#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	struct pagevec lru_pvec;
	int ret = 0;

	pagevec_init(&lru_pvec, 0);

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
		ret = filler(data, page);
		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);
		if (ret) {
			while (!list_empty(pages)) {
				struct page *victim;

				victim = list_to_page(pages);
				list_del(&victim->lru);
				page_cache_release(victim);
			}
			break;
		}
	}
	pagevec_lru_add(&lru_pvec);
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);

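/*
 * A minimal sketch (not part of the original file) of how a filesystem
 * might drive read_cache_pages(): the filler callback typically just
 * hands each page to the filesystem's own per-page read routine.
 * my_fs_readpage() below is a hypothetical stand-in for that routine.
 */
#if 0
static int my_fs_filler(void *data, struct page *page)
{
	struct file *filp = data;

	return my_fs_readpage(filp, page);	/* hypothetical */
}

/* usage: err = read_cache_pages(mapping, &page_list, my_fs_filler, filp); */
#endif
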
static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	struct pagevec lru_pvec;
	int ret;

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		goto out;
	}

	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else
			page_cache_release(page);
	}
	pagevec_lru_add(&lru_pvec);
	ret = 0;
out:
	return ret;
}

/*
 * Readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 * start:	Page index at which we started the readahead
 * size:	Number of pages in that read
 *              Together, start and size form the "current window".
 * prev_page:   The page which the readahead algorithm most-recently inspected.
 *              It is mainly used to detect sequential file reading.
 *              If page_cache_readahead sees that it is again being called for
 *              a page which it just looked at, it can return immediately without
 *              making any state changes.
 * ahead_start,
 * ahead_size:  Together, these form the "ahead window".
 * ra_pages:	The externally controlled max readahead for this fd.
 *
 * When readahead is in the off state (size == 0), readahead is disabled.
 * In this state, prev_page is used to detect the resumption of sequential I/O.
 *
 * The readahead code manages two windows - the "current" and the "ahead"
 * windows.  The intent is that while the application is walking the pages
 * in the current window, I/O is underway on the ahead window.  When the
 * current window is fully traversed, it is replaced by the ahead window
 * and the ahead window is invalidated.  When this copying happens, the
 * new current window's pages are probably still locked.  So
 * we submit a new batch of I/O immediately, creating a new ahead window.
 *
 * So:
 *
 *   ----|----------------|----------------|-----
 *       ^start           ^start+size
 *                        ^ahead_start     ^ahead_start+ahead_size
 *
 *         ^ When this page is read, we submit I/O for the
 *           ahead window.
 *
 * A `readahead hit' occurs when a read request is made against a page which is
 * the next sequential page.  Ahead window calculations are done only when it
 * is time to submit a new IO.  The code ramps up the size aggressively at
 * first, but slows down as it approaches max_readahead.
 *
 * Any seek/random IO will result in readahead being turned off.  It will
 * resume at the first sequential access.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial size
 * based on I/O request size and the max_readahead.
 *
 * page_cache_readahead() is to be called for every read request, rather than
 * when it is time to perform readahead.  It is called only once for the entire
 * I/O regardless of size unless readahead is unable to start enough I/O to
 * satisfy the request (I/O request > max_readahead).
 */

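/*
 * To make the two-window scheme concrete (illustrative numbers, assuming
 * max = 32 pages): suppose the current window covers pages 0-31 and the
 * ahead window covers pages 32-63.  While the application walks pages
 * 0-31, I/O on pages 32-63 proceeds in the background.  When the read
 * pointer crosses into page 32, the ahead window is promoted to being
 * the current window (start = 32, size = 32) and a new ahead window is
 * submitted for pages 64-95.
 */
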
/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them all for I/O.  This avoids the very
 * bad behaviour which would occur if page allocations are causing VM
 * writeback.  We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	read_lock_irq(&mapping->tree_lock);
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		if (page)
			continue;

		read_unlock_irq(&mapping->tree_lock);
		page = page_cache_alloc_cold(mapping);
		read_lock_irq(&mapping->tree_lock);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		ret++;
	}
	read_unlock_irq(&mapping->tree_lock);

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}

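/*
 * A minimal sketch (not part of the original file) of a typical caller:
 * the sys_readahead() path clamps the request with max_sane_readahead()
 * before forcing the I/O, roughly like this.
 */
#if 0
static int example_readahead(struct file *filp, pgoff_t index,
			     unsigned long nr)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
		return -EINVAL;

	return force_page_cache_readahead(mapping, filp, index,
					  max_sane_readahead(nr));
}
#endif
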
/*
 * Check how effective readahead is being.  If the amount of started IO is
 * less than expected then the file is partly or fully in pagecache and
 * readahead isn't helping.
 */
static inline int check_ra_success(struct file_ra_state *ra,
			unsigned long nr_to_read, unsigned long actual)
{
	if (actual == 0) {
		ra->cache_hit += nr_to_read;
		if (ra->cache_hit >= VM_MAX_CACHE_HIT) {
			ra_off(ra);
			ra->flags |= RA_FLAG_INCACHE;
			return 0;
		}
	} else {
		ra->cache_hit = 0;
	}
	return 1;
}

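/*
 * Example: with the usual VM_MAX_CACHE_HIT of 256 pages, readahead turns
 * itself off (RA_FLAG_INCACHE) once a full megabyte of 4K pages has been
 * found already cached with no I/O started, and it stays off until
 * handle_ra_miss() below clears the flag.
 */
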
/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	if (bdi_read_congested(mapping->backing_dev_info))
		return -1;

	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
}

/*
 * Read 'nr_to_read' pages starting at page 'offset'.  If the flag 'block'
 * is set, wait until the read completes; otherwise attempt to read without
 * blocking.
 * Returns 1 ('success') if the read was issued without switching off
 * readahead mode; otherwise returns 0 ('failure').
 */
static int
blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			struct file_ra_state *ra, int block)
{
	int actual;

	if (!block && bdi_read_congested(mapping->backing_dev_info))
		return 0;

	actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read);

	return check_ra_success(ra, nr_to_read, actual);
}

static int make_ahead_window(struct address_space *mapping, struct file *filp,
				struct file_ra_state *ra, int force)
{
	int block, ret;

	ra->ahead_size = get_next_ra_size(ra);
	ra->ahead_start = ra->start + ra->size;

	block = force || (ra->prev_page >= ra->ahead_start);
	ret = blockable_page_cache_readahead(mapping, filp,
			ra->ahead_start, ra->ahead_size, ra, block);

	if (!ret && !force) {
		/* A read failure in blocking mode implies pages are
		 * all cached.  So we can safely assume we have taken
		 * care of all the pages requested in this call.
		 * A read failure in non-blocking mode implies we are
		 * reading more pages than requested in this call.  So
		 * we safely assume we have taken care of all the pages
		 * requested in this call.
		 *
		 * Just reset the ahead window in case we failed due to
		 * congestion.  The ahead window will in any case be closed
		 * if we failed due to excessive page cache hits.
		 */
		reset_ahead_window(ra);
	}

	return ret;
}

/**
 * page_cache_readahead - generic adaptive readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
 * @req_size: hint: total size of the read which the caller is performing in
 *            PAGE_CACHE_SIZE units
 *
 * page_cache_readahead() is the main function.  It performs the adaptive
 * readahead window size management and submits the readahead I/O.
 *
 * Note that @filp is purely used for passing on to the ->readpage[s]()
 * handler: it may refer to a different file from @mapping (so we may not use
 * @filp->f_mapping or @filp->f_dentry->d_inode here).
 * Also, @ra may not be equal to &@filp->f_ra.
 */
unsigned long
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
		     struct file *filp, pgoff_t offset, unsigned long req_size)
{
	unsigned long max, newsize;
	int sequential;

	/*
	 * If the request starts at the page we looked at last time (i.e. the
	 * application is doing sub-page-sized reads), step past it: we avoid
	 * doing extra work and bogusly perturbing the readahead window
	 * expansion logic.
	 */
	if (offset == ra->prev_page && --req_size)
		++offset;

	/* Note that prev_page == -1 if it is a first read */
	sequential = (offset == ra->prev_page + 1);
	ra->prev_page = offset;

	max = get_max_readahead(ra);
	newsize = min(req_size, max);

	/* No readahead or sub-page sized read or file already in cache */
	if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE))
		goto out;

	ra->prev_page += newsize - 1;

	/*
	 * Special case - first read at the start of the file, or the first
	 * sequential access.  We'll assume it's a whole-file read and grow
	 * the window fast.
	 */
	if (sequential && ra->size == 0) {
		ra->size = get_init_ra_size(newsize, max);
		ra->start = offset;
		if (!blockable_page_cache_readahead(mapping, filp, offset,
							 ra->size, ra, 1))
			goto out;

		/*
		 * If the request size is larger than our max readahead, we
		 * at least want to be sure that we get 2 IOs in flight and
		 * we know that we will definitely need the new I/O.
		 * Once we do this, subsequent calls should be able to
		 * overlap IOs, thus preventing stalls.  So issue the ahead
		 * window immediately.
		 */
		if (req_size >= max)
			make_ahead_window(mapping, filp, ra, 1);

		goto out;
	}

	/*
	 * Now handle the random case:
	 * partial page reads and the first access were handled above,
	 * so if this is not the next sequential page it is a random read.
	 */
	if (!sequential) {
		ra_off(ra);
		blockable_page_cache_readahead(mapping, filp, offset,
				 newsize, ra, 1);
		goto out;
	}

	/*
	 * If we get here we are doing sequential IO and this was not the first
	 * occurrence (i.e. we have an existing window)
	 */
	if (ra->ahead_start == 0) {	 /* no ahead window yet */
		if (!make_ahead_window(mapping, filp, ra, 0))
			goto recheck;
	}

	/*
	 * Already have an ahead window, check if we crossed into it.
	 * If so, shift windows and issue a new ahead window.
	 * Only return the #pages that are in the current window, so that
	 * we get called back on the first page of the ahead window which
	 * will allow us to submit more IO.
	 */
	if (ra->prev_page >= ra->ahead_start) {
		ra->start = ra->ahead_start;
		ra->size = ra->ahead_size;
		make_ahead_window(mapping, filp, ra, 0);
recheck:
		/* prev_page shouldn't overrun the ahead window */
		ra->prev_page = min(ra->prev_page,
			ra->ahead_start + ra->ahead_size - 1);
	}

out:
	return ra->prev_page + 1;
}
EXPORT_SYMBOL_GPL(page_cache_readahead);

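/*
 * A minimal sketch (not part of the original file) of how a generic read
 * path is expected to drive page_cache_readahead(): once per read
 * request, passing the requested length in pages as the @req_size hint.
 */
#if 0
static void example_read_hint(struct address_space *mapping,
			      struct file_ra_state *ra, struct file *filp,
			      pgoff_t index, pgoff_t last_index)
{
	/* one call covers the whole request, per the design notes above */
	page_cache_readahead(mapping, ra, filp, index, last_index - index);
}
#endif
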
/*
 * handle_ra_miss() is called when it is known that a page which should have
 * been present in the pagecache (we just did some readahead there) was in fact
 * not found.  This will happen if it was evicted by the VM (readahead
 * thrashing).
 *
 * Turn on the cache miss flag in the RA struct; this will cause the RA code
 * to reduce the RA size on the next read.
 */
void handle_ra_miss(struct address_space *mapping,
		struct file_ra_state *ra, pgoff_t offset)
{
	ra->flags |= RA_FLAG_MISS;
	ra->flags &= ~RA_FLAG_INCACHE;
	ra->cache_hit = 0;
}

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long free;

	__get_zone_counts(&active, &inactive, &free, NODE_DATA(numa_node_id()));
	return min(nr, (inactive + free) / 2);
}
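
/*
 * Example: if the node currently has 6000 inactive and 2000 free pages,
 * a request to read ahead 10000 pages is clamped to (6000 + 2000) / 2 =
 * 4000 pages, so readahead never pins more than about half of the
 * easily-reclaimable memory.
 */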
586