xref: /openbmc/linux/mm/filemap.c (revision 1b36955c)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   *	linux/mm/filemap.c
4   *
5   * Copyright (C) 1994-1999  Linus Torvalds
6   */
7  
8  /*
9   * This file handles the generic file mmap semantics used by
10   * most "normal" filesystems (but you don't /have/ to use this:
11   * the NFS filesystem used to do this differently, for example)
12   */
13  #include <linux/export.h>
14  #include <linux/compiler.h>
15  #include <linux/dax.h>
16  #include <linux/fs.h>
17  #include <linux/sched/signal.h>
18  #include <linux/uaccess.h>
19  #include <linux/capability.h>
20  #include <linux/kernel_stat.h>
21  #include <linux/gfp.h>
22  #include <linux/mm.h>
23  #include <linux/swap.h>
24  #include <linux/swapops.h>
25  #include <linux/syscalls.h>
26  #include <linux/mman.h>
27  #include <linux/pagemap.h>
28  #include <linux/file.h>
29  #include <linux/uio.h>
30  #include <linux/error-injection.h>
31  #include <linux/hash.h>
32  #include <linux/writeback.h>
33  #include <linux/backing-dev.h>
34  #include <linux/pagevec.h>
35  #include <linux/security.h>
36  #include <linux/cpuset.h>
37  #include <linux/hugetlb.h>
38  #include <linux/memcontrol.h>
39  #include <linux/shmem_fs.h>
40  #include <linux/rmap.h>
41  #include <linux/delayacct.h>
42  #include <linux/psi.h>
43  #include <linux/ramfs.h>
44  #include <linux/page_idle.h>
45  #include <linux/migrate.h>
46  #include <linux/pipe_fs_i.h>
47  #include <linux/splice.h>
48  #include <asm/pgalloc.h>
49  #include <asm/tlbflush.h>
50  #include "internal.h"
51  
52  #define CREATE_TRACE_POINTS
53  #include <trace/events/filemap.h>
54  
55  /*
56   * FIXME: remove all knowledge of the buffer layer from the core VM
57   */
58  #include <linux/buffer_head.h> /* for try_to_free_buffers */
59  
60  #include <asm/mman.h>
61  
62  #include "swap.h"
63  
64  /*
65   * Shared mappings implemented 30.11.1994. It's not fully working yet,
66   * though.
67   *
68   * Shared mappings now work. 15.8.1995  Bruno.
69   *
70   * finished 'unifying' the page and buffer cache and SMP-threaded the
71   * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
72   *
73   * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
74   */
75  
76  /*
77   * Lock ordering:
78   *
79   *  ->i_mmap_rwsem		(truncate_pagecache)
80   *    ->private_lock		(__free_pte->block_dirty_folio)
81   *      ->swap_lock		(exclusive_swap_page, others)
82   *        ->i_pages lock
83   *
84   *  ->i_rwsem
85   *    ->invalidate_lock		(acquired by fs in truncate path)
86   *      ->i_mmap_rwsem		(truncate->unmap_mapping_range)
87   *
88   *  ->mmap_lock
89   *    ->i_mmap_rwsem
90   *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
91   *        ->i_pages lock	(arch-dependent flush_dcache_mmap_lock)
92   *
93   *  ->mmap_lock
94   *    ->invalidate_lock		(filemap_fault)
95   *      ->lock_page		(filemap_fault, access_process_vm)
96   *
97   *  ->i_rwsem			(generic_perform_write)
98   *    ->mmap_lock		(fault_in_readable->do_page_fault)
99   *
100   *  bdi->wb.list_lock
101   *    sb_lock			(fs/fs-writeback.c)
102   *    ->i_pages lock		(__sync_single_inode)
103   *
104   *  ->i_mmap_rwsem
105   *    ->anon_vma.lock		(vma_merge)
106   *
107   *  ->anon_vma.lock
108   *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
109   *
110   *  ->page_table_lock or pte_lock
111   *    ->swap_lock		(try_to_unmap_one)
112   *    ->private_lock		(try_to_unmap_one)
113   *    ->i_pages lock		(try_to_unmap_one)
114   *    ->lruvec->lru_lock	(follow_page->mark_page_accessed)
115   *    ->lruvec->lru_lock	(check_pte_range->isolate_lru_page)
116   *    ->private_lock		(page_remove_rmap->set_page_dirty)
117   *    ->i_pages lock		(page_remove_rmap->set_page_dirty)
118   *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
119   *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
120   *    ->memcg->move_lock	(page_remove_rmap->folio_memcg_lock)
121   *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
122   *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
123   *    ->private_lock		(zap_pte_range->block_dirty_folio)
124   *
125   * ->i_mmap_rwsem
126   *   ->tasklist_lock            (memory_failure, collect_procs_ao)
127   */
128  
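/*
 * A minimal, illustrative sketch of a truncate-style path honouring the
 * lock order documented above: ->i_rwsem, then the mapping's
 * ->invalidate_lock, with ->i_mmap_rwsem taken inside unmap_mapping_range().
 * The function name and the @newsize argument are made up for the example.
 */
static void example_truncate_lock_order(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;

	inode_lock(inode);				/* ->i_rwsem */
	filemap_invalidate_lock(mapping);		/* ->invalidate_lock */
	/* unmap_mapping_range() takes ->i_mmap_rwsem internally */
	unmap_mapping_range(mapping, round_up(newsize, PAGE_SIZE), 0, 1);
	truncate_inode_pages(mapping, newsize);
	filemap_invalidate_unlock(mapping);
	inode_unlock(inode);
}
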
129  static void page_cache_delete(struct address_space *mapping,
130  				   struct folio *folio, void *shadow)
131  {
132  	XA_STATE(xas, &mapping->i_pages, folio->index);
133  	long nr = 1;
134  
135  	mapping_set_update(&xas, mapping);
136  
137  	/* hugetlb pages are represented by a single entry in the xarray */
138  	if (!folio_test_hugetlb(folio)) {
139  		xas_set_order(&xas, folio->index, folio_order(folio));
140  		nr = folio_nr_pages(folio);
141  	}
142  
143  	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
144  
145  	xas_store(&xas, shadow);
146  	xas_init_marks(&xas);
147  
148  	folio->mapping = NULL;
149  	/* Leave page->index set: truncation lookup relies upon it */
150  	mapping->nrpages -= nr;
151  }
152  
153  static void filemap_unaccount_folio(struct address_space *mapping,
154  		struct folio *folio)
155  {
156  	long nr;
157  
158  	VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
159  	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
160  		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
161  			 current->comm, folio_pfn(folio));
162  		dump_page(&folio->page, "still mapped when deleted");
163  		dump_stack();
164  		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
165  
166  		if (mapping_exiting(mapping) && !folio_test_large(folio)) {
167  			int mapcount = page_mapcount(&folio->page);
168  
169  			if (folio_ref_count(folio) >= mapcount + 2) {
170  				/*
171  				 * All vmas have already been torn down, so it's
172  				 * a good bet that actually the page is unmapped
173  				 * and we'd rather not leak it: if we're wrong,
174  				 * another bad page check should catch it later.
175  				 */
176  				page_mapcount_reset(&folio->page);
177  				folio_ref_sub(folio, mapcount);
178  			}
179  		}
180  	}
181  
182  	/* hugetlb folios do not participate in page cache accounting. */
183  	if (folio_test_hugetlb(folio))
184  		return;
185  
186  	nr = folio_nr_pages(folio);
187  
188  	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
189  	if (folio_test_swapbacked(folio)) {
190  		__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
191  		if (folio_test_pmd_mappable(folio))
192  			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
193  	} else if (folio_test_pmd_mappable(folio)) {
194  		__lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
195  		filemap_nr_thps_dec(mapping);
196  	}
197  
198  	/*
199  	 * At this point the folio must have been either written back or
200  	 * cleaned by truncate.  A dirty folio here signals a bug and, on
201  	 * ordinary filesystems, a loss of unwritten data.
202  	 *
203  	 * It is harmless on in-memory filesystems like tmpfs, though, and
204  	 * can also occur when a driver that did get_user_pages() sets the
205  	 * page dirty before putting it while the inode is being evicted.
206  	 *
207  	 * The code below fixes the dirty accounting after removing the
208  	 * folio entirely, but leaves the dirty flag set: that has no effect
209  	 * on a truncated folio and will be cleared anyway before the folio
210  	 * is returned to the buddy allocator.
211  	 */
212  	if (WARN_ON_ONCE(folio_test_dirty(folio) &&
213  			 mapping_can_writeback(mapping)))
214  		folio_account_cleaned(folio, inode_to_wb(mapping->host));
215  }
216  
217  /*
218   * Delete a page from the page cache and free it. Caller has to make
219   * sure the page is locked and that nobody else uses it - or that usage
220   * is safe.  The caller must hold the i_pages lock.
221   */
222  void __filemap_remove_folio(struct folio *folio, void *shadow)
223  {
224  	struct address_space *mapping = folio->mapping;
225  
226  	trace_mm_filemap_delete_from_page_cache(folio);
227  	filemap_unaccount_folio(mapping, folio);
228  	page_cache_delete(mapping, folio, shadow);
229  }
230  
231  void filemap_free_folio(struct address_space *mapping, struct folio *folio)
232  {
233  	void (*free_folio)(struct folio *);
234  	int refs = 1;
235  
236  	free_folio = mapping->a_ops->free_folio;
237  	if (free_folio)
238  		free_folio(folio);
239  
240  	if (folio_test_large(folio) && !folio_test_hugetlb(folio))
241  		refs = folio_nr_pages(folio);
242  	folio_put_refs(folio, refs);
243  }
244  
245  /**
246   * filemap_remove_folio - Remove folio from page cache.
247   * @folio: The folio.
248   *
249   * This must be called only on folios that are locked and have been
250   * verified to be in the page cache.  It will never put the folio into
251   * the free list because the caller has a reference on the folio.
252   */
253  void filemap_remove_folio(struct folio *folio)
254  {
255  	struct address_space *mapping = folio->mapping;
256  
257  	BUG_ON(!folio_test_locked(folio));
258  	spin_lock(&mapping->host->i_lock);
259  	xa_lock_irq(&mapping->i_pages);
260  	__filemap_remove_folio(folio, NULL);
261  	xa_unlock_irq(&mapping->i_pages);
262  	if (mapping_shrinkable(mapping))
263  		inode_add_lru(mapping->host);
264  	spin_unlock(&mapping->host->i_lock);
265  
266  	filemap_free_folio(mapping, folio);
267  }
268  
269  /*
270   * page_cache_delete_batch - delete several folios from page cache
271   * @mapping: the mapping to which folios belong
272   * @fbatch: batch of folios to delete
273   *
274   * The function walks over mapping->i_pages and removes folios passed in
275   * @fbatch from the mapping. The function expects @fbatch to be sorted
276   * by page index and is optimised for it to be dense.
277   * It tolerates holes in @fbatch (mapping entries at those indices are not
278   * modified).
279   *
280   * The function expects the i_pages lock to be held.
281   */
282  static void page_cache_delete_batch(struct address_space *mapping,
283  			     struct folio_batch *fbatch)
284  {
285  	XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
286  	long total_pages = 0;
287  	int i = 0;
288  	struct folio *folio;
289  
290  	mapping_set_update(&xas, mapping);
291  	xas_for_each(&xas, folio, ULONG_MAX) {
292  		if (i >= folio_batch_count(fbatch))
293  			break;
294  
295  		/* A swap/dax/shadow entry got inserted? Skip it. */
296  		if (xa_is_value(folio))
297  			continue;
298  		/*
299  		 * A page got inserted in our range? Skip it. We have our
300  		 * pages locked so they are protected from being removed.
301  		 * If we see a page whose index is higher than ours, it
302  		 * means our page has been removed, which shouldn't be
303  		 * possible because we're holding the PageLock.
304  		 */
305  		if (folio != fbatch->folios[i]) {
306  			VM_BUG_ON_FOLIO(folio->index >
307  					fbatch->folios[i]->index, folio);
308  			continue;
309  		}
310  
311  		WARN_ON_ONCE(!folio_test_locked(folio));
312  
313  		folio->mapping = NULL;
314  		/* Leave folio->index set: truncation lookup relies on it */
315  
316  		i++;
317  		xas_store(&xas, NULL);
318  		total_pages += folio_nr_pages(folio);
319  	}
320  	mapping->nrpages -= total_pages;
321  }
322  
323  void delete_from_page_cache_batch(struct address_space *mapping,
324  				  struct folio_batch *fbatch)
325  {
326  	int i;
327  
328  	if (!folio_batch_count(fbatch))
329  		return;
330  
331  	spin_lock(&mapping->host->i_lock);
332  	xa_lock_irq(&mapping->i_pages);
333  	for (i = 0; i < folio_batch_count(fbatch); i++) {
334  		struct folio *folio = fbatch->folios[i];
335  
336  		trace_mm_filemap_delete_from_page_cache(folio);
337  		filemap_unaccount_folio(mapping, folio);
338  	}
339  	page_cache_delete_batch(mapping, fbatch);
340  	xa_unlock_irq(&mapping->i_pages);
341  	if (mapping_shrinkable(mapping))
342  		inode_add_lru(mapping->host);
343  	spin_unlock(&mapping->host->i_lock);
344  
345  	for (i = 0; i < folio_batch_count(fbatch); i++)
346  		filemap_free_folio(mapping, fbatch->folios[i]);
347  }
348  
349  int filemap_check_errors(struct address_space *mapping)
350  {
351  	int ret = 0;
352  	/* Check for outstanding write errors */
353  	if (test_bit(AS_ENOSPC, &mapping->flags) &&
354  	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
355  		ret = -ENOSPC;
356  	if (test_bit(AS_EIO, &mapping->flags) &&
357  	    test_and_clear_bit(AS_EIO, &mapping->flags))
358  		ret = -EIO;
359  	return ret;
360  }
361  EXPORT_SYMBOL(filemap_check_errors);
362  
363  static int filemap_check_and_keep_errors(struct address_space *mapping)
364  {
365  	/* Check for outstanding write errors */
366  	if (test_bit(AS_EIO, &mapping->flags))
367  		return -EIO;
368  	if (test_bit(AS_ENOSPC, &mapping->flags))
369  		return -ENOSPC;
370  	return 0;
371  }
372  
373  /**
374   * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
375   * @mapping:	address space structure to write
376   * @wbc:	the writeback_control controlling the writeout
377   *
378   * Call writepages on the mapping using the provided wbc to control the
379   * writeout.
380   *
381   * Return: %0 on success, negative error code otherwise.
382   */
383  int filemap_fdatawrite_wbc(struct address_space *mapping,
384  			   struct writeback_control *wbc)
385  {
386  	int ret;
387  
388  	if (!mapping_can_writeback(mapping) ||
389  	    !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
390  		return 0;
391  
392  	wbc_attach_fdatawrite_inode(wbc, mapping->host);
393  	ret = do_writepages(mapping, wbc);
394  	wbc_detach_inode(wbc);
395  	return ret;
396  }
397  EXPORT_SYMBOL(filemap_fdatawrite_wbc);
398  
399  /**
400   * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
401   * @mapping:	address space structure to write
402   * @start:	offset in bytes where the range starts
403   * @end:	offset in bytes where the range ends (inclusive)
404   * @sync_mode:	enable synchronous operation
405   *
406   * Start writeback against all of a mapping's dirty pages that lie
407   * within the byte offsets <start, end> inclusive.
408   *
409   * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
410   * opposed to a regular memory cleansing writeback.  The difference between
411   * these two operations is that if a dirty page/buffer is encountered, it must
412   * be waited upon, and not just skipped over.
413   *
414   * Return: %0 on success, negative error code otherwise.
415   */
416  int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
417  				loff_t end, int sync_mode)
418  {
419  	struct writeback_control wbc = {
420  		.sync_mode = sync_mode,
421  		.nr_to_write = LONG_MAX,
422  		.range_start = start,
423  		.range_end = end,
424  	};
425  
426  	return filemap_fdatawrite_wbc(mapping, &wbc);
427  }
428  
429  static inline int __filemap_fdatawrite(struct address_space *mapping,
430  	int sync_mode)
431  {
432  	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
433  }
434  
435  int filemap_fdatawrite(struct address_space *mapping)
436  {
437  	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
438  }
439  EXPORT_SYMBOL(filemap_fdatawrite);
440  
441  int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
442  				loff_t end)
443  {
444  	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
445  }
446  EXPORT_SYMBOL(filemap_fdatawrite_range);
447  
448  /**
449   * filemap_flush - mostly a non-blocking flush
450   * @mapping:	target address_space
451   *
452   * This is a mostly non-blocking flush.  Not suitable for data-integrity
453   * purposes - I/O may not be started against all dirty pages.
454   *
455   * Return: %0 on success, negative error code otherwise.
456   */
457  int filemap_flush(struct address_space *mapping)
458  {
459  	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
460  }
461  EXPORT_SYMBOL(filemap_flush);
462  
463  /**
464   * filemap_range_has_page - check if a page exists in range.
465   * @mapping:           address space within which to check
466   * @start_byte:        offset in bytes where the range starts
467   * @end_byte:          offset in bytes where the range ends (inclusive)
468   *
469   * Find at least one page in the supplied range.  This is usually used to
470   * check whether direct writes to this range will trigger writeback.
471   *
472   * Return: %true if at least one page exists in the specified range,
473   * %false otherwise.
474   */
475  bool filemap_range_has_page(struct address_space *mapping,
476  			   loff_t start_byte, loff_t end_byte)
477  {
478  	struct folio *folio;
479  	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
480  	pgoff_t max = end_byte >> PAGE_SHIFT;
481  
482  	if (end_byte < start_byte)
483  		return false;
484  
485  	rcu_read_lock();
486  	for (;;) {
487  		folio = xas_find(&xas, max);
488  		if (xas_retry(&xas, folio))
489  			continue;
490  		/* Shadow entries don't count */
491  		if (xa_is_value(folio))
492  			continue;
493  		/*
494  		 * We don't need to try to pin this page; we're about to
495  		 * release the RCU lock anyway.  It is enough to know that
496  		 * there was a page here recently.
497  		 */
498  		break;
499  	}
500  	rcu_read_unlock();
501  
502  	return folio != NULL;
503  }
504  EXPORT_SYMBOL(filemap_range_has_page);
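
/*
 * Illustrative sketch of a typical caller: before issuing a direct write,
 * check whether any cached pages exist in the target range and, only if so,
 * flush them so the direct I/O sees stable data.  The helper name and its
 * arguments are assumptions made for the example.
 */
static int example_dio_prepare(struct address_space *mapping,
			       loff_t pos, size_t count)
{
	/* Nothing cached in the target range: no flush needed. */
	if (!filemap_range_has_page(mapping, pos, pos + count - 1))
		return 0;

	return filemap_write_and_wait_range(mapping, pos, pos + count - 1);
}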
505  
506  static void __filemap_fdatawait_range(struct address_space *mapping,
507  				     loff_t start_byte, loff_t end_byte)
508  {
509  	pgoff_t index = start_byte >> PAGE_SHIFT;
510  	pgoff_t end = end_byte >> PAGE_SHIFT;
511  	struct folio_batch fbatch;
512  	unsigned nr_folios;
513  
514  	folio_batch_init(&fbatch);
515  
516  	while (index <= end) {
517  		unsigned i;
518  
519  		nr_folios = filemap_get_folios_tag(mapping, &index, end,
520  				PAGECACHE_TAG_WRITEBACK, &fbatch);
521  
522  		if (!nr_folios)
523  			break;
524  
525  		for (i = 0; i < nr_folios; i++) {
526  			struct folio *folio = fbatch.folios[i];
527  
528  			folio_wait_writeback(folio);
529  			folio_clear_error(folio);
530  		}
531  		folio_batch_release(&fbatch);
532  		cond_resched();
533  	}
534  }
535  
536  /**
537   * filemap_fdatawait_range - wait for writeback to complete
538   * @mapping:		address space structure to wait for
539   * @start_byte:		offset in bytes where the range starts
540   * @end_byte:		offset in bytes where the range ends (inclusive)
541   *
542   * Walk the list of under-writeback pages of the given address space
543   * in the given range and wait for all of them.  Check error status of
544   * the address space and return it.
545   *
546   * Since the error status of the address space is cleared by this function,
547   * callers are responsible for checking the return value and handling and/or
548   * reporting the error.
549   *
550   * Return: error status of the address space.
551   */
552  int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
553  			    loff_t end_byte)
554  {
555  	__filemap_fdatawait_range(mapping, start_byte, end_byte);
556  	return filemap_check_errors(mapping);
557  }
558  EXPORT_SYMBOL(filemap_fdatawait_range);
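
/*
 * Illustrative sketch of the two-step pattern the functions above enable:
 * queue writeback for a byte range, overlap other work with the I/O, then
 * wait and collect any error.  Names are made up for the example.
 */
static int example_write_then_wait(struct address_space *mapping,
				   loff_t start, loff_t end)
{
	/* Queue WB_SYNC_ALL writeback for the range without waiting. */
	int err = filemap_fdatawrite_range(mapping, start, end);

	if (err)
		return err;

	/* ... other work can overlap with the writeback here ... */

	/* Wait for the pages and return the address space's error status. */
	return filemap_fdatawait_range(mapping, start, end);
}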
559  
560  /**
561   * filemap_fdatawait_range_keep_errors - wait for writeback to complete
562   * @mapping:		address space structure to wait for
563   * @start_byte:		offset in bytes where the range starts
564   * @end_byte:		offset in bytes where the range ends (inclusive)
565   *
566   * Walk the list of under-writeback pages of the given address space in the
567   * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
568   * this function does not clear error status of the address space.
569   *
570   * Use this function if callers don't handle errors themselves.  Expected
571   * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
572   * fsfreeze(8)
573   */
574  int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
575  		loff_t start_byte, loff_t end_byte)
576  {
577  	__filemap_fdatawait_range(mapping, start_byte, end_byte);
578  	return filemap_check_and_keep_errors(mapping);
579  }
580  EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);
581  
582  /**
583   * file_fdatawait_range - wait for writeback to complete
584   * @file:		file pointing to address space structure to wait for
585   * @start_byte:		offset in bytes where the range starts
586   * @end_byte:		offset in bytes where the range ends (inclusive)
587   *
588   * Walk the list of under-writeback pages of the address space that file
589   * refers to, in the given range and wait for all of them.  Check error
590   * status of the address space vs. the file->f_wb_err cursor and return it.
591   *
592   * Since the error status of the file is advanced by this function,
593   * callers are responsible for checking the return value and handling and/or
594   * reporting the error.
595   *
596   * Return: error status of the address space vs. the file->f_wb_err cursor.
597   */
598  int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
599  {
600  	struct address_space *mapping = file->f_mapping;
601  
602  	__filemap_fdatawait_range(mapping, start_byte, end_byte);
603  	return file_check_and_advance_wb_err(file);
604  }
605  EXPORT_SYMBOL(file_fdatawait_range);
606  
607  /**
608   * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
609   * @mapping: address space structure to wait for
610   *
611   * Walk the list of under-writeback pages of the given address space
612   * and wait for all of them.  Unlike filemap_fdatawait(), this function
613   * does not clear error status of the address space.
614   *
615   * Use this function if callers don't handle errors themselves.  Expected
616   * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
617   * fsfreeze(8)
618   *
619   * Return: error status of the address space.
620   */
621  int filemap_fdatawait_keep_errors(struct address_space *mapping)
622  {
623  	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
624  	return filemap_check_and_keep_errors(mapping);
625  }
626  EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
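
/*
 * Illustrative sketch of the call site described above: a sync(2)-style
 * flusher that waits for writeback on an inode but deliberately leaves the
 * AS_EIO/AS_ENOSPC flags in place so the file's owner still sees the error.
 * The helper name is an assumption for the example.
 */
static void example_sync_one_inode(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;

	filemap_fdatawrite(mapping);
	filemap_fdatawait_keep_errors(mapping);
}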
627  
628  /* Returns true if writeback might be needed or already in progress. */
629  static bool mapping_needs_writeback(struct address_space *mapping)
630  {
631  	return mapping->nrpages;
632  }
633  
634  bool filemap_range_has_writeback(struct address_space *mapping,
635  				 loff_t start_byte, loff_t end_byte)
636  {
637  	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
638  	pgoff_t max = end_byte >> PAGE_SHIFT;
639  	struct folio *folio;
640  
641  	if (end_byte < start_byte)
642  		return false;
643  
644  	rcu_read_lock();
645  	xas_for_each(&xas, folio, max) {
646  		if (xas_retry(&xas, folio))
647  			continue;
648  		if (xa_is_value(folio))
649  			continue;
650  		if (folio_test_dirty(folio) || folio_test_locked(folio) ||
651  				folio_test_writeback(folio))
652  			break;
653  	}
654  	rcu_read_unlock();
655  	return folio != NULL;
656  }
657  EXPORT_SYMBOL_GPL(filemap_range_has_writeback);
658  
659  /**
660   * filemap_write_and_wait_range - write out & wait on a file range
661   * @mapping:	the address_space for the pages
662   * @lstart:	offset in bytes where the range starts
663   * @lend:	offset in bytes where the range ends (inclusive)
664   *
665   * Write out and wait upon file offsets lstart->lend, inclusive.
666   *
667   * Note that @lend is inclusive (describes the last byte to be written) so
668   * that this function can be used to write to the very end-of-file (end = -1).
669   *
670   * Return: error status of the address space.
671   */
672  int filemap_write_and_wait_range(struct address_space *mapping,
673  				 loff_t lstart, loff_t lend)
674  {
675  	int err = 0, err2;
676  
677  	if (lend < lstart)
678  		return 0;
679  
680  	if (mapping_needs_writeback(mapping)) {
681  		err = __filemap_fdatawrite_range(mapping, lstart, lend,
682  						 WB_SYNC_ALL);
683  		/*
684  		 * Even if the above returned an error, the pages may have
685  		 * been partially written (e.g. -ENOSPC), so we wait for them.
686  		 * But -EIO is a special case: it may indicate that something
687  		 * far worse (e.g. a bug) happened, so we avoid waiting for it.
688  		 */
689  		if (err != -EIO)
690  			__filemap_fdatawait_range(mapping, lstart, lend);
691  	}
692  	err2 = filemap_check_errors(mapping);
693  	if (!err)
694  		err = err2;
695  	return err;
696  }
697  EXPORT_SYMBOL(filemap_write_and_wait_range);
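
/*
 * Minimal fsync-style sketch showing the common use of
 * filemap_write_and_wait_range(): flush the byte range the caller asked
 * about, then do whatever metadata work the filesystem needs.  The helper
 * name is illustrative; a real ->fsync() would also flush metadata.
 */
static int example_fsync_data(struct file *file, loff_t start, loff_t end)
{
	struct inode *inode = file_inode(file);
	int err;

	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	/* ... journal / metadata flush would follow here ... */
	return 0;
}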
698  
699  void __filemap_set_wb_err(struct address_space *mapping, int err)
700  {
701  	errseq_t eseq = errseq_set(&mapping->wb_err, err);
702  
703  	trace_filemap_set_wb_err(mapping, eseq);
704  }
705  EXPORT_SYMBOL(__filemap_set_wb_err);
706  
707  /**
708   * file_check_and_advance_wb_err - report wb error (if any) that was previously
709   * 				   unreported and advance wb_err to the current one
710   * @file: struct file on which the error is being reported
711   *
712   * When userland calls fsync (or something like nfsd does the equivalent), we
713   * want to report any writeback errors that occurred since the last fsync (or
714   * since the file was opened if there haven't been any).
715   *
716   * Grab the wb_err from the mapping. If it matches what we have in the file,
717   * then just quickly return 0. The file is all caught up.
718   *
719   * If it doesn't match, then take the mapping value, set the "seen" flag in
720   * it and try to swap it into place. If it works, or another task beat us
721   * to it with the new value, then update the f_wb_err and return the error
722   * portion. The error at this point must be reported via proper channels
723   * (a la fsync, or an NFS COMMIT operation, etc.).
724   *
725   * While we handle mapping->wb_err with atomic operations, the f_wb_err
726   * value is protected by the f_lock since we must ensure that it reflects
727   * the latest value swapped in for this file descriptor.
728   *
729   * Return: %0 on success, negative error code otherwise.
730   */
731  int file_check_and_advance_wb_err(struct file *file)
732  {
733  	int err = 0;
734  	errseq_t old = READ_ONCE(file->f_wb_err);
735  	struct address_space *mapping = file->f_mapping;
736  
737  	/* Locklessly handle the common case where nothing has changed */
738  	if (errseq_check(&mapping->wb_err, old)) {
739  		/* Something changed, must use slow path */
740  		spin_lock(&file->f_lock);
741  		old = file->f_wb_err;
742  		err = errseq_check_and_advance(&mapping->wb_err,
743  						&file->f_wb_err);
744  		trace_file_check_and_advance_wb_err(file, old);
745  		spin_unlock(&file->f_lock);
746  	}
747  
748  	/*
749  	 * We're mostly using this function as a drop-in replacement for
750  	 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
751  	 * that the legacy code would have had on these flags.
752  	 */
753  	clear_bit(AS_EIO, &mapping->flags);
754  	clear_bit(AS_ENOSPC, &mapping->flags);
755  	return err;
756  }
757  EXPORT_SYMBOL(file_check_and_advance_wb_err);
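
/*
 * Hedged sketch of an ->fsync() that drives writeback itself and then
 * reports errors through the per-file f_wb_err cursor, so each open file
 * description sees a given writeback error exactly once.  The helper name
 * is made up for the example.
 */
static int example_fsync_report_errors(struct file *file,
				       loff_t start, loff_t end)
{
	struct address_space *mapping = file->f_mapping;
	int err, err2;

	err = filemap_fdatawrite_range(mapping, start, end);
	/* Wait even after most errors, but do not consume the error flags. */
	if (err != -EIO)
		filemap_fdatawait_range_keep_errors(mapping, start, end);

	/* Let the errseq_t cursor decide what this file descriptor is told. */
	err2 = file_check_and_advance_wb_err(file);
	return err ? err : err2;
}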
758  
759  /**
760   * file_write_and_wait_range - write out & wait on a file range
761   * @file:	file pointing to address_space with pages
762   * @lstart:	offset in bytes where the range starts
763   * @lend:	offset in bytes where the range ends (inclusive)
764   *
765   * Write out and wait upon file offsets lstart->lend, inclusive.
766   *
767   * Note that @lend is inclusive (describes the last byte to be written) so
768   * that this function can be used to write to the very end-of-file (end = -1).
769   *
770   * After writing out and waiting on the data, we check and advance the
771   * f_wb_err cursor to the latest value, and return any errors detected there.
772   *
773   * Return: %0 on success, negative error code otherwise.
774   */
775  int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
776  {
777  	int err = 0, err2;
778  	struct address_space *mapping = file->f_mapping;
779  
780  	if (lend < lstart)
781  		return 0;
782  
783  	if (mapping_needs_writeback(mapping)) {
784  		err = __filemap_fdatawrite_range(mapping, lstart, lend,
785  						 WB_SYNC_ALL);
786  		/* See comment of filemap_write_and_wait() */
787  		if (err != -EIO)
788  			__filemap_fdatawait_range(mapping, lstart, lend);
789  	}
790  	err2 = file_check_and_advance_wb_err(file);
791  	if (!err)
792  		err = err2;
793  	return err;
794  }
795  EXPORT_SYMBOL(file_write_and_wait_range);
796  
797  /**
798   * replace_page_cache_folio - replace a pagecache folio with a new one
799   * @old:	folio to be replaced
800   * @new:	folio to replace with
801   *
802   * This function replaces a folio in the pagecache with a new one.  On
803   * success it acquires the pagecache reference for the new folio and
804   * drops it for the old folio.  Both the old and new folios must be
805   * locked.  This function does not add the new folio to the LRU, the
806   * caller must do that.
807   *
808   * The remove + add is atomic.  This function cannot fail.
809   */
810  void replace_page_cache_folio(struct folio *old, struct folio *new)
811  {
812  	struct address_space *mapping = old->mapping;
813  	void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
814  	pgoff_t offset = old->index;
815  	XA_STATE(xas, &mapping->i_pages, offset);
816  
817  	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
818  	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
819  	VM_BUG_ON_FOLIO(new->mapping, new);
820  
821  	folio_get(new);
822  	new->mapping = mapping;
823  	new->index = offset;
824  
825  	mem_cgroup_migrate(old, new);
826  
827  	xas_lock_irq(&xas);
828  	xas_store(&xas, new);
829  
830  	old->mapping = NULL;
831  	/* hugetlb pages do not participate in page cache accounting. */
832  	if (!folio_test_hugetlb(old))
833  		__lruvec_stat_sub_folio(old, NR_FILE_PAGES);
834  	if (!folio_test_hugetlb(new))
835  		__lruvec_stat_add_folio(new, NR_FILE_PAGES);
836  	if (folio_test_swapbacked(old))
837  		__lruvec_stat_sub_folio(old, NR_SHMEM);
838  	if (folio_test_swapbacked(new))
839  		__lruvec_stat_add_folio(new, NR_SHMEM);
840  	xas_unlock_irq(&xas);
841  	if (free_folio)
842  		free_folio(old);
843  	folio_put(old);
844  }
845  EXPORT_SYMBOL_GPL(replace_page_cache_folio);
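
/*
 * Illustrative sketch only: the rough shape of a caller (e.g. a driver
 * migrating cached data into a freshly allocated folio).  How the new folio
 * is filled and marked uptodate is elided; the name, GFP choice and error
 * handling are assumptions made for the example.
 */
static int example_replace_folio(struct folio *old)
{
	struct folio *new = folio_alloc(GFP_KERNEL, folio_order(old));

	if (!new)
		return -ENOMEM;

	folio_lock(old);
	__folio_set_locked(new);	/* @new is not yet visible to others */
	/* ... copy or regenerate the contents of @old into @new ... */
	replace_page_cache_folio(old, new);
	folio_add_lru(new);		/* the caller must add @new to the LRU */
	folio_unlock(new);
	folio_unlock(old);
	folio_put(new);			/* drop the allocation reference */
	return 0;
}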
846  
847  noinline int __filemap_add_folio(struct address_space *mapping,
848  		struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
849  {
850  	XA_STATE(xas, &mapping->i_pages, index);
851  	int huge = folio_test_hugetlb(folio);
852  	bool charged = false;
853  	long nr = 1;
854  
855  	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
856  	VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
857  	mapping_set_update(&xas, mapping);
858  
859  	if (!huge) {
860  		int error = mem_cgroup_charge(folio, NULL, gfp);
861  		VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
862  		if (error)
863  			return error;
864  		charged = true;
865  		xas_set_order(&xas, index, folio_order(folio));
866  		nr = folio_nr_pages(folio);
867  	}
868  
869  	gfp &= GFP_RECLAIM_MASK;
870  	folio_ref_add(folio, nr);
871  	folio->mapping = mapping;
872  	folio->index = xas.xa_index;
873  
874  	do {
875  		unsigned int order = xa_get_order(xas.xa, xas.xa_index);
876  		void *entry, *old = NULL;
877  
878  		if (order > folio_order(folio))
879  			xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
880  					order, gfp);
881  		xas_lock_irq(&xas);
882  		xas_for_each_conflict(&xas, entry) {
883  			old = entry;
884  			if (!xa_is_value(entry)) {
885  				xas_set_err(&xas, -EEXIST);
886  				goto unlock;
887  			}
888  		}
889  
890  		if (old) {
891  			if (shadowp)
892  				*shadowp = old;
893  			/* entry may have been split before we acquired lock */
894  			order = xa_get_order(xas.xa, xas.xa_index);
895  			if (order > folio_order(folio)) {
896  				/* How to handle large swap entries? */
897  				BUG_ON(shmem_mapping(mapping));
898  				xas_split(&xas, old, order);
899  				xas_reset(&xas);
900  			}
901  		}
902  
903  		xas_store(&xas, folio);
904  		if (xas_error(&xas))
905  			goto unlock;
906  
907  		mapping->nrpages += nr;
908  
909  		/* hugetlb pages do not participate in page cache accounting */
910  		if (!huge) {
911  			__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
912  			if (folio_test_pmd_mappable(folio))
913  				__lruvec_stat_mod_folio(folio,
914  						NR_FILE_THPS, nr);
915  		}
916  unlock:
917  		xas_unlock_irq(&xas);
918  	} while (xas_nomem(&xas, gfp));
919  
920  	if (xas_error(&xas))
921  		goto error;
922  
923  	trace_mm_filemap_add_to_page_cache(folio);
924  	return 0;
925  error:
926  	if (charged)
927  		mem_cgroup_uncharge(folio);
928  	folio->mapping = NULL;
929  	/* Leave page->index set: truncation relies upon it */
930  	folio_put_refs(folio, nr);
931  	return xas_error(&xas);
932  }
933  ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
934  
935  int filemap_add_folio(struct address_space *mapping, struct folio *folio,
936  				pgoff_t index, gfp_t gfp)
937  {
938  	void *shadow = NULL;
939  	int ret;
940  
941  	__folio_set_locked(folio);
942  	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
943  	if (unlikely(ret))
944  		__folio_clear_locked(folio);
945  	else {
946  		/*
947  		 * The folio might have been evicted from cache only
948  		 * recently, in which case it should be activated like
949  		 * any other repeatedly accessed folio.
950  		 * The exception is folios getting rewritten; evicting other
951  		 * data from the working set, only to cache data that will
952  		 * get overwritten with something else, is a waste of memory.
953  		 */
954  		WARN_ON_ONCE(folio_test_active(folio));
955  		if (!(gfp & __GFP_WRITE) && shadow)
956  			workingset_refault(folio, shadow);
957  		folio_add_lru(folio);
958  	}
959  	return ret;
960  }
961  EXPORT_SYMBOL_GPL(filemap_add_folio);
962  
963  #ifdef CONFIG_NUMA
964  struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
965  {
966  	int n;
967  	struct folio *folio;
968  
969  	if (cpuset_do_page_mem_spread()) {
970  		unsigned int cpuset_mems_cookie;
971  		do {
972  			cpuset_mems_cookie = read_mems_allowed_begin();
973  			n = cpuset_mem_spread_node();
974  			folio = __folio_alloc_node(gfp, order, n);
975  		} while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
976  
977  		return folio;
978  	}
979  	return folio_alloc(gfp, order);
980  }
981  EXPORT_SYMBOL(filemap_alloc_folio);
982  #endif
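
/*
 * Hedged sketch of the usual way a new page cache folio is created:
 * allocate it with filemap_alloc_folio(), insert it with
 * filemap_add_folio(), and treat -EEXIST as "someone else got there first".
 * The function name is an assumption for the example.
 */
static struct folio *example_create_folio(struct address_space *mapping,
					  pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping);
	struct folio *folio = filemap_alloc_folio(gfp, 0);
	int err;

	if (!folio)
		return ERR_PTR(-ENOMEM);

	err = filemap_add_folio(mapping, folio, index, gfp);
	if (err) {
		folio_put(folio);	/* drop the allocation reference */
		return ERR_PTR(err);	/* often -EEXIST: already cached */
	}

	/* On success the folio is locked, referenced and on the LRU. */
	return folio;
}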
983  
984  /*
985   * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
986   *
987   * Lock the invalidate_lock exclusively for each passed mapping that is not NULL.
988   *
989   * @mapping1: the first mapping to lock
990   * @mapping2: the second mapping to lock
991   */
992  void filemap_invalidate_lock_two(struct address_space *mapping1,
993  				 struct address_space *mapping2)
994  {
995  	if (mapping1 > mapping2)
996  		swap(mapping1, mapping2);
997  	if (mapping1)
998  		down_write(&mapping1->invalidate_lock);
999  	if (mapping2 && mapping1 != mapping2)
1000  		down_write_nested(&mapping2->invalidate_lock, 1);
1001  }
1002  EXPORT_SYMBOL(filemap_invalidate_lock_two);
1003  
1004  /*
1005   * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
1006   *
1007   * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
1008   *
1009   * @mapping1: the first mapping to unlock
1010   * @mapping2: the second mapping to unlock
1011   */
1012  void filemap_invalidate_unlock_two(struct address_space *mapping1,
1013  				   struct address_space *mapping2)
1014  {
1015  	if (mapping1)
1016  		up_write(&mapping1->invalidate_lock);
1017  	if (mapping2 && mapping1 != mapping2)
1018  		up_write(&mapping2->invalidate_lock);
1019  }
1020  EXPORT_SYMBOL(filemap_invalidate_unlock_two);
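
/*
 * Illustrative sketch: an operation that invalidates page cache on two
 * files at once (say, an exchange-range style ioctl) takes both
 * invalidate_locks through the helpers above so the ordering is consistent.
 * Names are made up for the example.
 */
static void example_invalidate_two(struct inode *src, struct inode *dst)
{
	struct address_space *m1 = src->i_mapping;
	struct address_space *m2 = dst->i_mapping;

	filemap_invalidate_lock_two(m1, m2);
	/* ... truncate or invalidate page cache on both mappings ... */
	filemap_invalidate_unlock_two(m1, m2);
}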
1021  
1022  /*
1023   * In order to wait for pages to become available there must be
1024   * waitqueues associated with pages.  We use a hash table of
1025   * waitqueues: all waiters for pages that hash to the same bucket
1026   * share one queue, everybody on that queue is woken when any of
1027   * those pages becomes available, and each woken context re-checks
1028   * that the page it cares about really did become available.  This
1029   * saves space at the cost of "thundering herd" phenomena during
1030   * rare hash collisions.
1031   */
1032  #define PAGE_WAIT_TABLE_BITS 8
1033  #define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
1034  static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
1035  
1036  static wait_queue_head_t *folio_waitqueue(struct folio *folio)
1037  {
1038  	return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
1039  }
1040  
1041  void __init pagecache_init(void)
1042  {
1043  	int i;
1044  
1045  	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
1046  		init_waitqueue_head(&folio_wait_table[i]);
1047  
1048  	page_writeback_init();
1049  }
1050  
1051  /*
1052   * The page wait code treats the "wait->flags" somewhat unusually, because
1053   * we have multiple different kinds of waits, not just the usual "exclusive"
1054   * one.
1055   *
1056   * We have:
1057   *
1058   *  (a) no special bits set:
1059   *
1060   *	We're just waiting for the bit to be released, and when a waker
1061   *	calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
1062   *	and remove it from the wait queue.
1063   *
1064   *	Simple and straightforward.
1065   *
1066   *  (b) WQ_FLAG_EXCLUSIVE:
1067   *
1068   *	The waiter is waiting to get the lock, and only one waiter should
1069   *	be woken up to avoid any thundering herd behavior. We'll set the
1070   *	WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
1071   *
1072   *	This is the traditional exclusive wait.
1073   *
1074   *  (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
1075   *
1076   *	The waiter is waiting to get the bit, and additionally wants the
1077   *	lock to be transferred to it for fair lock behavior. If the lock
1078   *	cannot be taken, we stop walking the wait queue without waking
1079   *	the waiter.
1080   *
1081   *	This is the "fair lock handoff" case, and in addition to setting
1082   *	WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
1083   *	that it now has the lock.
1084   */
1085  static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
1086  {
1087  	unsigned int flags;
1088  	struct wait_page_key *key = arg;
1089  	struct wait_page_queue *wait_page
1090  		= container_of(wait, struct wait_page_queue, wait);
1091  
1092  	if (!wake_page_match(wait_page, key))
1093  		return 0;
1094  
1095  	/*
1096  	 * If it's a lock handoff wait, we get the bit for it, and
1097  	 * stop walking (and do not wake it up) if we can't.
1098  	 */
1099  	flags = wait->flags;
1100  	if (flags & WQ_FLAG_EXCLUSIVE) {
1101  		if (test_bit(key->bit_nr, &key->folio->flags))
1102  			return -1;
1103  		if (flags & WQ_FLAG_CUSTOM) {
1104  			if (test_and_set_bit(key->bit_nr, &key->folio->flags))
1105  				return -1;
1106  			flags |= WQ_FLAG_DONE;
1107  		}
1108  	}
1109  
1110  	/*
1111  	 * We are holding the wait-queue lock, but the waiter that
1112  	 * is waiting for this will be checking the flags without
1113  	 * any locking.
1114  	 *
1115  	 * So update the flags atomically, and wake up the waiter
1116  	 * afterwards to avoid any races. This store-release pairs
1117  	 * with the load-acquire in folio_wait_bit_common().
1118  	 */
1119  	smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
1120  	wake_up_state(wait->private, mode);
1121  
1122  	/*
1123  	 * Ok, we have successfully done what we're waiting for,
1124  	 * and we can unconditionally remove the wait entry.
1125  	 *
1126  	 * Note that this pairs with the "finish_wait()" in the
1127  	 * waiter, and has to be the absolute last thing we do.
1128  	 * After this list_del_init(&wait->entry) the wait entry
1129  	 * might be de-allocated and the process might even have
1130  	 * exited.
1131  	 */
1132  	list_del_init_careful(&wait->entry);
1133  	return (flags & WQ_FLAG_EXCLUSIVE) != 0;
1134  }
1135  
1136  static void folio_wake_bit(struct folio *folio, int bit_nr)
1137  {
1138  	wait_queue_head_t *q = folio_waitqueue(folio);
1139  	struct wait_page_key key;
1140  	unsigned long flags;
1141  	wait_queue_entry_t bookmark;
1142  
1143  	key.folio = folio;
1144  	key.bit_nr = bit_nr;
1145  	key.page_match = 0;
1146  
1147  	bookmark.flags = 0;
1148  	bookmark.private = NULL;
1149  	bookmark.func = NULL;
1150  	INIT_LIST_HEAD(&bookmark.entry);
1151  
1152  	spin_lock_irqsave(&q->lock, flags);
1153  	__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
1154  
1155  	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
1156  		/*
1157  		 * Take a breather from holding the lock, and
1158  		 * allow waiters whose wakeup finished asynchronously
1159  		 * to acquire the lock and remove themselves
1160  		 * from the wait queue.
1161  		 */
1162  		spin_unlock_irqrestore(&q->lock, flags);
1163  		cpu_relax();
1164  		spin_lock_irqsave(&q->lock, flags);
1165  		__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
1166  	}
1167  
1168  	/*
1169  	 * It's possible to miss clearing waiters here, when we woke our page
1170  	 * waiters, but the hashed waitqueue has waiters for other pages on it.
1171  	 * That's okay, it's a rare case. The next waker will clear it.
1172  	 *
1173  	 * Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE,
1174  	 * other), the flag may be cleared in the course of freeing the page;
1175  	 * but that is not required for correctness.
1176  	 */
1177  	if (!waitqueue_active(q) || !key.page_match)
1178  		folio_clear_waiters(folio);
1179  
1180  	spin_unlock_irqrestore(&q->lock, flags);
1181  }
1182  
1183  static void folio_wake(struct folio *folio, int bit)
1184  {
1185  	if (!folio_test_waiters(folio))
1186  		return;
1187  	folio_wake_bit(folio, bit);
1188  }
1189  
1190  /*
1191   * A choice of three behaviors for folio_wait_bit_common():
1192   */
1193  enum behavior {
1194  	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
1195  			 * __folio_lock() waiting on then setting PG_locked.
1196  			 */
1197  	SHARED,		/* Hold ref to page and check the bit when woken, like
1198  			 * folio_wait_writeback() waiting on PG_writeback.
1199  			 */
1200  	DROP,		/* Drop ref to page before wait, no check when woken,
1201  			 * like folio_put_wait_locked() on PG_locked.
1202  			 */
1203  };
1204  
1205  /*
1206   * Attempt to check (or get) the folio flag, and mark us done
1207   * if successful.
1208   */
1209  static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
1210  					struct wait_queue_entry *wait)
1211  {
1212  	if (wait->flags & WQ_FLAG_EXCLUSIVE) {
1213  		if (test_and_set_bit(bit_nr, &folio->flags))
1214  			return false;
1215  	} else if (test_bit(bit_nr, &folio->flags))
1216  		return false;
1217  
1218  	wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
1219  	return true;
1220  }
1221  
1222  /* How many times do we accept lock stealing from under a waiter? */
1223  int sysctl_page_lock_unfairness = 5;
1224  
1225  static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
1226  		int state, enum behavior behavior)
1227  {
1228  	wait_queue_head_t *q = folio_waitqueue(folio);
1229  	int unfairness = sysctl_page_lock_unfairness;
1230  	struct wait_page_queue wait_page;
1231  	wait_queue_entry_t *wait = &wait_page.wait;
1232  	bool thrashing = false;
1233  	unsigned long pflags;
1234  	bool in_thrashing;
1235  
1236  	if (bit_nr == PG_locked &&
1237  	    !folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1238  		delayacct_thrashing_start(&in_thrashing);
1239  		psi_memstall_enter(&pflags);
1240  		thrashing = true;
1241  	}
1242  
1243  	init_wait(wait);
1244  	wait->func = wake_page_function;
1245  	wait_page.folio = folio;
1246  	wait_page.bit_nr = bit_nr;
1247  
1248  repeat:
1249  	wait->flags = 0;
1250  	if (behavior == EXCLUSIVE) {
1251  		wait->flags = WQ_FLAG_EXCLUSIVE;
1252  		if (--unfairness < 0)
1253  			wait->flags |= WQ_FLAG_CUSTOM;
1254  	}
1255  
1256  	/*
1257  	 * Do one last check whether we can get the
1258  	 * page bit synchronously.
1259  	 *
1260  	 * Do the folio_set_waiters() marking before that
1261  	 * to let any waker we _just_ missed know they
1262  	 * need to wake us up (otherwise they'll never
1263  	 * even go to the slow case that looks at the
1264  	 * page queue), and add ourselves to the wait
1265  	 * queue if we need to sleep.
1266  	 *
1267  	 * This part needs to be done under the queue
1268  	 * lock to avoid races.
1269  	 */
1270  	spin_lock_irq(&q->lock);
1271  	folio_set_waiters(folio);
1272  	if (!folio_trylock_flag(folio, bit_nr, wait))
1273  		__add_wait_queue_entry_tail(q, wait);
1274  	spin_unlock_irq(&q->lock);
1275  
1276  	/*
1277  	 * From now on, all the logic will be based on
1278  	 * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
1279  	 * see whether the page bit testing has already
1280  	 * been done by the wake function.
1281  	 *
1282  	 * We can drop our reference to the folio.
1283  	 */
1284  	if (behavior == DROP)
1285  		folio_put(folio);
1286  
1287  	/*
1288  	 * Note that until the "finish_wait()", or until
1289  	 * we see the WQ_FLAG_WOKEN flag, we need to
1290  	 * be very careful with the 'wait->flags', because
1291  	 * we may race with a waker that sets them.
1292  	 */
1293  	for (;;) {
1294  		unsigned int flags;
1295  
1296  		set_current_state(state);
1297  
1298  		/* Loop until we've been woken or interrupted */
1299  		flags = smp_load_acquire(&wait->flags);
1300  		if (!(flags & WQ_FLAG_WOKEN)) {
1301  			if (signal_pending_state(state, current))
1302  				break;
1303  
1304  			io_schedule();
1305  			continue;
1306  		}
1307  
1308  		/* If we were non-exclusive, we're done */
1309  		if (behavior != EXCLUSIVE)
1310  			break;
1311  
1312  		/* If the waker got the lock for us, we're done */
1313  		if (flags & WQ_FLAG_DONE)
1314  			break;
1315  
1316  		/*
1317  		 * Otherwise, if we're getting the lock, we need to
1318  		 * try to get it ourselves.
1319  		 *
1320  		 * And if that fails, we'll have to retry this all.
1321  		 */
1322  		if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))
1323  			goto repeat;
1324  
1325  		wait->flags |= WQ_FLAG_DONE;
1326  		break;
1327  	}
1328  
1329  	/*
1330  	 * If a signal happened, this 'finish_wait()' may remove the last
1331  	 * waiter from the wait-queues, but the folio waiters bit will remain
1332  	 * set. That's ok. The next wakeup will take care of it, and trying
1333  	 * to do it here would be difficult and prone to races.
1334  	 */
1335  	finish_wait(q, wait);
1336  
1337  	if (thrashing) {
1338  		delayacct_thrashing_end(&in_thrashing);
1339  		psi_memstall_leave(&pflags);
1340  	}
1341  
1342  	/*
1343  	 * NOTE! The wait->flags weren't stable until we've done the
1344  	 * 'finish_wait()', and we could have exited the loop above due
1345  	 * to a signal, and had a wakeup event happen after the signal
1346  	 * test but before the 'finish_wait()'.
1347  	 *
1348  	 * So only after the finish_wait() can we reliably determine
1349  	 * if we got woken up or not, so we can now figure out the final
1350  	 * return value based on that state without races.
1351  	 *
1352  	 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
1353  	 * waiter, but an exclusive one requires WQ_FLAG_DONE.
1354  	 */
1355  	if (behavior == EXCLUSIVE)
1356  		return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
1357  
1358  	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
1359  }
1360  
1361  #ifdef CONFIG_MIGRATION
1362  /**
1363   * migration_entry_wait_on_locked - Wait for a migration entry to be removed
1364   * @entry: migration swap entry.
1365   * @ptl: already locked ptl. This function will drop the lock.
1366   *
1367   * Wait for a migration entry referencing the given page to be removed. This is
1368   * equivalent to put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE) except
1369   * this can be called without taking a reference on the page. Instead this
1370   * should be called while holding the ptl for the migration entry referencing
1371   * the page.
1372   *
1373   * Returns after unlocking the ptl.
1374   *
1375   * This follows the same logic as folio_wait_bit_common() so see the comments
1376   * there.
1377   */
1378  void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
1379  	__releases(ptl)
1380  {
1381  	struct wait_page_queue wait_page;
1382  	wait_queue_entry_t *wait = &wait_page.wait;
1383  	bool thrashing = false;
1384  	unsigned long pflags;
1385  	bool in_thrashing;
1386  	wait_queue_head_t *q;
1387  	struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
1388  
1389  	q = folio_waitqueue(folio);
1390  	if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1391  		delayacct_thrashing_start(&in_thrashing);
1392  		psi_memstall_enter(&pflags);
1393  		thrashing = true;
1394  	}
1395  
1396  	init_wait(wait);
1397  	wait->func = wake_page_function;
1398  	wait_page.folio = folio;
1399  	wait_page.bit_nr = PG_locked;
1400  	wait->flags = 0;
1401  
1402  	spin_lock_irq(&q->lock);
1403  	folio_set_waiters(folio);
1404  	if (!folio_trylock_flag(folio, PG_locked, wait))
1405  		__add_wait_queue_entry_tail(q, wait);
1406  	spin_unlock_irq(&q->lock);
1407  
1408  	/*
1409  	 * If a migration entry exists for the page the migration path must hold
1410  	 * a valid reference to the page, and it must take the ptl to remove the
1411  	 * migration entry. So the page is valid until the ptl is dropped.
1412  	 */
1413  	spin_unlock(ptl);
1414  
1415  	for (;;) {
1416  		unsigned int flags;
1417  
1418  		set_current_state(TASK_UNINTERRUPTIBLE);
1419  
1420  		/* Loop until we've been woken or interrupted */
1421  		flags = smp_load_acquire(&wait->flags);
1422  		if (!(flags & WQ_FLAG_WOKEN)) {
1423  			if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))
1424  				break;
1425  
1426  			io_schedule();
1427  			continue;
1428  		}
1429  		break;
1430  	}
1431  
1432  	finish_wait(q, wait);
1433  
1434  	if (thrashing) {
1435  		delayacct_thrashing_end(&in_thrashing);
1436  		psi_memstall_leave(&pflags);
1437  	}
1438  }
1439  #endif
1440  
1441  void folio_wait_bit(struct folio *folio, int bit_nr)
1442  {
1443  	folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
1444  }
1445  EXPORT_SYMBOL(folio_wait_bit);
1446  
1447  int folio_wait_bit_killable(struct folio *folio, int bit_nr)
1448  {
1449  	return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
1450  }
1451  EXPORT_SYMBOL(folio_wait_bit_killable);
1452  
1453  /**
1454   * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
1455   * @folio: The folio to wait for.
1456   * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
1457   *
1458   * The caller should hold a reference on @folio.  They expect the page to
1459   * become unlocked relatively soon, but do not wish to hold up migration
1460   * (for example) by holding the reference while waiting for the folio to
1461   * come unlocked.  After this function returns, the caller should not
1462   * dereference @folio.
1463   *
1464   * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
1465   */
1466  static int folio_put_wait_locked(struct folio *folio, int state)
1467  {
1468  	return folio_wait_bit_common(folio, PG_locked, state, DROP);
1469  }
1470  
1471  /**
1472   * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
1473   * @folio: Folio defining the wait queue of interest
1474   * @waiter: Waiter to add to the queue
1475   *
1476   * Add an arbitrary @waiter to the wait queue for the nominated @folio.
1477   */
1478  void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
1479  {
1480  	wait_queue_head_t *q = folio_waitqueue(folio);
1481  	unsigned long flags;
1482  
1483  	spin_lock_irqsave(&q->lock, flags);
1484  	__add_wait_queue_entry_tail(q, waiter);
1485  	folio_set_waiters(folio);
1486  	spin_unlock_irqrestore(&q->lock, flags);
1487  }
1488  EXPORT_SYMBOL_GPL(folio_add_wait_queue);
1489  
1490  #ifndef clear_bit_unlock_is_negative_byte
1491  
1492  /*
1493   * PG_waiters is the high bit in the same byte as PG_lock.
1494   *
1495   * On x86 (and on many other architectures), we can clear PG_lock and
1496   * test the sign bit at the same time. But if the architecture does
1497   * not support that special operation, we just do this all by hand
1498   * instead.
1499   *
1500   * The read of PG_waiters has to be after (or concurrently with) PG_locked
1501   * being cleared, but a memory barrier should be unnecessary since it is
1502   * in the same byte as PG_locked.
1503   */
1504  static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
1505  {
1506  	clear_bit_unlock(nr, mem);
1507  	/* smp_mb__after_atomic(); */
1508  	return test_bit(PG_waiters, mem);
1509  }
1510  
1511  #endif
1512  
1513  /**
1514   * folio_unlock - Unlock a locked folio.
1515   * @folio: The folio.
1516   *
1517   * Unlocks the folio and wakes up any thread sleeping on the page lock.
1518   *
1519   * Context: May be called from interrupt or process context.  May not be
1520   * called from NMI context.
1521   */
1522  void folio_unlock(struct folio *folio)
1523  {
1524  	/* Bit 7 allows x86 to check the byte's sign bit */
1525  	BUILD_BUG_ON(PG_waiters != 7);
1526  	BUILD_BUG_ON(PG_locked > 7);
1527  	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1528  	if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0)))
1529  		folio_wake_bit(folio, PG_locked);
1530  }
1531  EXPORT_SYMBOL(folio_unlock);
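
/*
 * Minimal sketch of the lock/unlock pairing that folio_unlock() completes:
 * take the folio lock, revalidate that the folio still belongs to the
 * expected mapping, do the locked work, then unlock to wake any waiters
 * queued by the code above.  The helper name is illustrative.
 */
static bool example_locked_work(struct folio *folio,
				struct address_space *mapping)
{
	folio_lock(folio);
	if (folio->mapping != mapping) {	/* truncated or migrated away */
		folio_unlock(folio);
		return false;
	}
	/* ... operate on the locked folio ... */
	folio_unlock(folio);
	return true;
}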
1532  
1533  /**
1534   * folio_end_private_2 - Clear PG_private_2 and wake any waiters.
1535   * @folio: The folio.
1536   *
1537   * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for
1538   * it.  The folio reference held for PG_private_2 being set is released.
1539   *
1540   * This is, for example, used when a netfs folio is being written to a local
1541   * disk cache, thereby allowing writes to the cache for the same folio to be
1542   * serialised.
1543   */
1544  void folio_end_private_2(struct folio *folio)
1545  {
1546  	VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio);
1547  	clear_bit_unlock(PG_private_2, folio_flags(folio, 0));
1548  	folio_wake_bit(folio, PG_private_2);
1549  	folio_put(folio);
1550  }
1551  EXPORT_SYMBOL(folio_end_private_2);
1552  
1553  /**
1554   * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
1555   * @folio: The folio to wait on.
1556   *
1557   * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio.
1558   */
1559  void folio_wait_private_2(struct folio *folio)
1560  {
1561  	while (folio_test_private_2(folio))
1562  		folio_wait_bit(folio, PG_private_2);
1563  }
1564  EXPORT_SYMBOL(folio_wait_private_2);
1565  
1566  /**
1567   * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
1568   * @folio: The folio to wait on.
1569   *
1570   * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio or until a
1571   * fatal signal is received by the calling task.
1572   *
1573   * Return:
1574   * - 0 if successful.
1575   * - -EINTR if a fatal signal was encountered.
1576   */
1577  int folio_wait_private_2_killable(struct folio *folio)
1578  {
1579  	int ret = 0;
1580  
1581  	while (folio_test_private_2(folio)) {
1582  		ret = folio_wait_bit_killable(folio, PG_private_2);
1583  		if (ret < 0)
1584  			break;
1585  	}
1586  
1587  	return ret;
1588  }
1589  EXPORT_SYMBOL(folio_wait_private_2_killable);
1590  
1591  /**
1592   * folio_end_writeback - End writeback against a folio.
1593   * @folio: The folio.
1594   */
1595  void folio_end_writeback(struct folio *folio)
1596  {
1597  	/*
1598  	 * folio_test_clear_reclaim() could be used here, but it is an
1599  	 * atomic operation and overkill in this particular case.  The
1600  	 * occasional failure to shuffle a folio marked for immediate
1601  	 * reclaim is too mild a downside to justify taking an atomic
1602  	 * operation penalty at the end of every folio writeback.
1603  	 */
1604  	if (folio_test_reclaim(folio)) {
1605  		folio_clear_reclaim(folio);
1606  		folio_rotate_reclaimable(folio);
1607  	}
1608  
1609  	/*
1610  	 * Writeback does not hold a folio reference of its own, relying
1611  	 * on truncation to wait for the clearing of PG_writeback.
1612  	 * But here we must make sure that the folio is not freed and
1613  	 * reused before the folio_wake().
1614  	 */
1615  	folio_get(folio);
1616  	if (!__folio_end_writeback(folio))
1617  		BUG();
1618  
1619  	smp_mb__after_atomic();
1620  	folio_wake(folio, PG_writeback);
1621  	acct_reclaim_writeback(folio);
1622  	folio_put(folio);
1623  }
1624  EXPORT_SYMBOL(folio_end_writeback);
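
/*
 * Usage sketch (illustrative only): a filesystem's write-completion path
 * typically records any error on the mapping and then clears writeback,
 * which also wakes anyone sleeping in folio_wait_writeback():
 *
 *	if (err)
 *		mapping_set_error(folio->mapping, err);
 *	folio_end_writeback(folio);
 */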
1625  
1626  /**
1627   * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
1628   * @folio: The folio to lock
1629   */
1630  void __folio_lock(struct folio *folio)
1631  {
1632  	folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
1633  				EXCLUSIVE);
1634  }
1635  EXPORT_SYMBOL(__folio_lock);
1636  
1637  int __folio_lock_killable(struct folio *folio)
1638  {
1639  	return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
1640  					EXCLUSIVE);
1641  }
1642  EXPORT_SYMBOL_GPL(__folio_lock_killable);
1643  
1644  static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
1645  {
1646  	struct wait_queue_head *q = folio_waitqueue(folio);
1647  	int ret = 0;
1648  
1649  	wait->folio = folio;
1650  	wait->bit_nr = PG_locked;
1651  
1652  	spin_lock_irq(&q->lock);
1653  	__add_wait_queue_entry_tail(q, &wait->wait);
1654  	folio_set_waiters(folio);
1655  	ret = !folio_trylock(folio);
1656  	/*
1657  	 * If we were successful now, we know we're still on the
1658  	 * waitqueue as we're still under the lock. This means it's
1659  	 * safe to remove and return success; we know the callback
1660  	 * isn't going to trigger.
1661  	 */
1662  	if (!ret)
1663  		__remove_wait_queue(q, &wait->wait);
1664  	else
1665  		ret = -EIOCBQUEUED;
1666  	spin_unlock_irq(&q->lock);
1667  	return ret;
1668  }
1669  
1670  /*
1671   * Return values:
1672   * true - folio is locked; mmap_lock is still held.
1673   * false - folio is not locked.
1674   *     mmap_lock has been released (mmap_read_unlock()), unless flags had both
1675   *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
1676   *     which case mmap_lock is still held.
1677   *
1678   * If neither ALLOW_RETRY nor KILLABLE are set, will always return true
1679   * with the folio locked and the mmap_lock unperturbed.
1680   */
1681  bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
1682  			 unsigned int flags)
1683  {
1684  	if (fault_flag_allow_retry_first(flags)) {
1685  		/*
1686  		 * CAUTION! In this case, mmap_lock is not released
1687  		 * even though we return false.
1688  		 */
1689  		if (flags & FAULT_FLAG_RETRY_NOWAIT)
1690  			return false;
1691  
1692  		mmap_read_unlock(mm);
1693  		if (flags & FAULT_FLAG_KILLABLE)
1694  			folio_wait_locked_killable(folio);
1695  		else
1696  			folio_wait_locked(folio);
1697  		return false;
1698  	}
1699  	if (flags & FAULT_FLAG_KILLABLE) {
1700  		bool ret;
1701  
1702  		ret = __folio_lock_killable(folio);
1703  		if (ret) {
1704  			mmap_read_unlock(mm);
1705  			return false;
1706  		}
1707  	} else {
1708  		__folio_lock(folio);
1709  	}
1710  
1711  	return true;
1712  }
1713  
1714  /**
1715   * page_cache_next_miss() - Find the next gap in the page cache.
1716   * @mapping: Mapping.
1717   * @index: Index.
1718   * @max_scan: Maximum range to search.
1719   *
1720   * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
1721   * gap with the lowest index.
1722   *
1723   * This function may be called under the rcu_read_lock.  However, this will
1724   * not atomically search a snapshot of the cache at a single point in time.
1725   * For example, if a gap is created at index 5, then subsequently a gap is
1726   * created at index 10, page_cache_next_miss() covering both indices may
1727   * return 10 if called under the rcu_read_lock.
1728   *
1729   * Return: The index of the gap if found, otherwise an index outside the
1730   * range specified (in which case 'return - index >= max_scan' will be true).
1731   * In the rare case of index wrap-around, 0 will be returned.
1732   */
1733  pgoff_t page_cache_next_miss(struct address_space *mapping,
1734  			     pgoff_t index, unsigned long max_scan)
1735  {
1736  	XA_STATE(xas, &mapping->i_pages, index);
1737  
1738  	while (max_scan--) {
1739  		void *entry = xas_next(&xas);
1740  		if (!entry || xa_is_value(entry))
1741  			break;
1742  		if (xas.xa_index == 0)
1743  			break;
1744  	}
1745  
1746  	return xas.xa_index;
1747  }
1748  EXPORT_SYMBOL(page_cache_next_miss);
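
/*
 * Usage sketch (illustrative only): readahead-style callers use the gap
 * search to see whether the slots after index are already cached, relying
 * on the documented "return - index >= max_scan" convention:
 *
 *	pgoff_t gap = page_cache_next_miss(mapping, index, max_scan);
 *
 *	if (gap - index >= max_scan)
 *		... no gap found; the scanned range is fully cached ...
 */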
1749  
1750  /**
1751   * page_cache_prev_miss() - Find the previous gap in the page cache.
1752   * @mapping: Mapping.
1753   * @index: Index.
1754   * @max_scan: Maximum range to search.
1755   *
1756   * Search the range [max(index - max_scan + 1, 0), index] for the
1757   * gap with the highest index.
1758   *
1759   * This function may be called under the rcu_read_lock.  However, this will
1760   * not atomically search a snapshot of the cache at a single point in time.
1761   * For example, if a gap is created at index 10, then subsequently a gap is
1762   * created at index 5, page_cache_prev_miss() covering both indices may
1763   * return 5 if called under the rcu_read_lock.
1764   *
1765   * Return: The index of the gap if found, otherwise an index outside the
1766   * range specified (in which case 'index - return >= max_scan' will be true).
1767   * In the rare case of wrap-around, ULONG_MAX will be returned.
1768   */
1769  pgoff_t page_cache_prev_miss(struct address_space *mapping,
1770  			     pgoff_t index, unsigned long max_scan)
1771  {
1772  	XA_STATE(xas, &mapping->i_pages, index);
1773  
1774  	while (max_scan--) {
1775  		void *entry = xas_prev(&xas);
1776  		if (!entry || xa_is_value(entry))
1777  			break;
1778  		if (xas.xa_index == ULONG_MAX)
1779  			break;
1780  	}
1781  
1782  	return xas.xa_index;
1783  }
1784  EXPORT_SYMBOL(page_cache_prev_miss);
1785  
1786  /*
1787   * Lockless page cache protocol:
1788   * On the lookup side:
1789   * 1. Load the folio from i_pages
1790   * 2. Increment the refcount if it's not zero
1791   * 3. If the folio is not found by xas_reload(), put the refcount and retry
1792   *
1793   * On the removal side:
1794   * A. Freeze the page (by zeroing the refcount if nobody else has a reference)
1795   * B. Remove the page from i_pages
1796   * C. Return the page to the page allocator
1797   *
1798   * This means that any page may have its reference count temporarily
1799   * increased by a speculative page cache (or fast GUP) lookup as it can
1800   * be allocated by another user before the RCU grace period expires.
1801   * Because the refcount temporarily acquired here may end up being the
1802   * last refcount on the page, any page allocation must be freeable by
1803   * folio_put().
1804   */
1805  
1806  /*
1807   * filemap_get_entry - Get a page cache entry.
1808   * @mapping: the address_space to search
1809   * @index: The page cache index.
1810   *
1811   * Looks up the page cache entry at @mapping & @index.  If it is a folio,
1812   * it is returned with an increased refcount.  If it is a shadow entry
1813   * of a previously evicted folio, or a swap entry from shmem/tmpfs,
1814   * it is returned without further action.
1815   *
1816   * Return: The folio, swap or shadow entry, or %NULL if nothing is found.
1817   */
1818  void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
1819  {
1820  	XA_STATE(xas, &mapping->i_pages, index);
1821  	struct folio *folio;
1822  
1823  	rcu_read_lock();
1824  repeat:
1825  	xas_reset(&xas);
1826  	folio = xas_load(&xas);
1827  	if (xas_retry(&xas, folio))
1828  		goto repeat;
1829  	/*
1830  	 * A shadow entry of a recently evicted page, or a swap entry from
1831  	 * shmem/tmpfs.  Return it without attempting to raise page count.
1832  	 */
1833  	if (!folio || xa_is_value(folio))
1834  		goto out;
1835  
1836  	if (!folio_try_get_rcu(folio))
1837  		goto repeat;
1838  
1839  	if (unlikely(folio != xas_reload(&xas))) {
1840  		folio_put(folio);
1841  		goto repeat;
1842  	}
1843  out:
1844  	rcu_read_unlock();
1845  
1846  	return folio;
1847  }
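
/*
 * Usage sketch (illustrative only): callers must distinguish value entries
 * (shadow/swap) from real folios; only a folio carries the extra reference
 * taken by the lookup:
 *
 *	entry = filemap_get_entry(mapping, index);
 *	if (!entry)
 *		... nothing cached at this index ...
 *	else if (xa_is_value(entry))
 *		... shadow or swap entry, no reference to drop ...
 *	else {
 *		struct folio *folio = entry;
 *		... use the folio, then folio_put(folio) ...
 *	}
 */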
1848  
1849  /**
1850   * __filemap_get_folio - Find and get a reference to a folio.
1851   * @mapping: The address_space to search.
1852   * @index: The page index.
1853   * @fgp_flags: %FGP flags modify how the folio is returned.
1854   * @gfp: Memory allocation flags to use if %FGP_CREAT is specified.
1855   *
1856   * Looks up the page cache entry at @mapping & @index.
1857   *
1858   * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
1859   * if the %GFP flags specified for %FGP_CREAT are atomic.
1860   *
1861   * If this function returns a folio, it is returned with an increased refcount.
1862   *
1863   * Return: The found folio or an ERR_PTR() otherwise.
1864   */
1865  struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
1866  		fgf_t fgp_flags, gfp_t gfp)
1867  {
1868  	struct folio *folio;
1869  
1870  repeat:
1871  	folio = filemap_get_entry(mapping, index);
1872  	if (xa_is_value(folio))
1873  		folio = NULL;
1874  	if (!folio)
1875  		goto no_page;
1876  
1877  	if (fgp_flags & FGP_LOCK) {
1878  		if (fgp_flags & FGP_NOWAIT) {
1879  			if (!folio_trylock(folio)) {
1880  				folio_put(folio);
1881  				return ERR_PTR(-EAGAIN);
1882  			}
1883  		} else {
1884  			folio_lock(folio);
1885  		}
1886  
1887  		/* Has the page been truncated? */
1888  		if (unlikely(folio->mapping != mapping)) {
1889  			folio_unlock(folio);
1890  			folio_put(folio);
1891  			goto repeat;
1892  		}
1893  		VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
1894  	}
1895  
1896  	if (fgp_flags & FGP_ACCESSED)
1897  		folio_mark_accessed(folio);
1898  	else if (fgp_flags & FGP_WRITE) {
1899  		/* Clear idle flag for buffer write */
1900  		if (folio_test_idle(folio))
1901  			folio_clear_idle(folio);
1902  	}
1903  
1904  	if (fgp_flags & FGP_STABLE)
1905  		folio_wait_stable(folio);
1906  no_page:
1907  	if (!folio && (fgp_flags & FGP_CREAT)) {
1908  		unsigned order = FGF_GET_ORDER(fgp_flags);
1909  		int err;
1910  
1911  		if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
1912  			gfp |= __GFP_WRITE;
1913  		if (fgp_flags & FGP_NOFS)
1914  			gfp &= ~__GFP_FS;
1915  		if (fgp_flags & FGP_NOWAIT) {
1916  			gfp &= ~GFP_KERNEL;
1917  			gfp |= GFP_NOWAIT | __GFP_NOWARN;
1918  		}
1919  		if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
1920  			fgp_flags |= FGP_LOCK;
1921  
1922  		if (!mapping_large_folio_support(mapping))
1923  			order = 0;
1924  		if (order > MAX_PAGECACHE_ORDER)
1925  			order = MAX_PAGECACHE_ORDER;
1926  		/* If we're not aligned, allocate a smaller folio */
1927  		if (index & ((1UL << order) - 1))
1928  			order = __ffs(index);
1929  
1930  		do {
1931  			gfp_t alloc_gfp = gfp;
1932  
1933  			err = -ENOMEM;
1934  			if (order == 1)
1935  				order = 0;
1936  			if (order > 0)
1937  				alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
1938  			folio = filemap_alloc_folio(alloc_gfp, order);
1939  			if (!folio)
1940  				continue;
1941  
1942  			/* Init accessed so we avoid an atomic mark_page_accessed() later */
1943  			if (fgp_flags & FGP_ACCESSED)
1944  				__folio_set_referenced(folio);
1945  
1946  			err = filemap_add_folio(mapping, folio, index, gfp);
1947  			if (!err)
1948  				break;
1949  			folio_put(folio);
1950  			folio = NULL;
1951  		} while (order-- > 0);
1952  
1953  		if (err == -EEXIST)
1954  			goto repeat;
1955  		if (err)
1956  			return ERR_PTR(err);
1957  		/*
1958  		 * filemap_add_folio locks the page, and for mmap
1959  		 * we expect an unlocked page.
1960  		 */
1961  		if (folio && (fgp_flags & FGP_FOR_MMAP))
1962  			folio_unlock(folio);
1963  	}
1964  
1965  	if (!folio)
1966  		return ERR_PTR(-ENOENT);
1967  	return folio;
1968  }
1969  EXPORT_SYMBOL(__filemap_get_folio);
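
/*
 * Usage sketch (illustrative only): a typical buffered-write helper looks
 * up or creates the folio covering a given index, locked, using the
 * mapping's own GFP mask:
 *
 *	folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
 *				    mapping_gfp_mask(mapping));
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	... fill or modify the locked folio ...
 *	folio_unlock(folio);
 *	folio_put(folio);
 */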
1970  
1971  static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
1972  		xa_mark_t mark)
1973  {
1974  	struct folio *folio;
1975  
1976  retry:
1977  	if (mark == XA_PRESENT)
1978  		folio = xas_find(xas, max);
1979  	else
1980  		folio = xas_find_marked(xas, max, mark);
1981  
1982  	if (xas_retry(xas, folio))
1983  		goto retry;
1984  	/*
1985  	 * A shadow entry of a recently evicted page, a swap
1986  	 * entry from shmem/tmpfs or a DAX entry.  Return it
1987  	 * without attempting to raise page count.
1988  	 */
1989  	if (!folio || xa_is_value(folio))
1990  		return folio;
1991  
1992  	if (!folio_try_get_rcu(folio))
1993  		goto reset;
1994  
1995  	if (unlikely(folio != xas_reload(xas))) {
1996  		folio_put(folio);
1997  		goto reset;
1998  	}
1999  
2000  	return folio;
2001  reset:
2002  	xas_reset(xas);
2003  	goto retry;
2004  }
2005  
2006  /**
2007   * find_get_entries - gang pagecache lookup
2008   * @mapping:	The address_space to search
2009   * @start:	The starting page cache index
2010   * @end:	The final page index (inclusive).
2011   * @fbatch:	Where the resulting entries are placed.
2012   * @indices:	The cache indices corresponding to the entries in @fbatch
2013   *
2014   * find_get_entries() will search for and return a batch of entries in
2015   * the mapping.  The entries are placed in @fbatch.  find_get_entries()
2016   * takes a reference on any actual folios it returns.
2017   *
2018   * The entries have ascending indexes.  The indices may not be consecutive
2019   * due to not-present entries or large folios.
2020   *
2021   * Any shadow entries of evicted folios, or swap entries from
2022   * shmem/tmpfs, are included in the returned array.
2023   *
2024   * Return: The number of entries which were found.
2025   */
2026  unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
2027  		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2028  {
2029  	XA_STATE(xas, &mapping->i_pages, *start);
2030  	struct folio *folio;
2031  
2032  	rcu_read_lock();
2033  	while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2034  		indices[fbatch->nr] = xas.xa_index;
2035  		if (!folio_batch_add(fbatch, folio))
2036  			break;
2037  	}
2038  	rcu_read_unlock();
2039  
2040  	if (folio_batch_count(fbatch)) {
2041  		unsigned long nr = 1;
2042  		int idx = folio_batch_count(fbatch) - 1;
2043  
2044  		folio = fbatch->folios[idx];
2045  		if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
2046  			nr = folio_nr_pages(folio);
2047  		*start = indices[idx] + nr;
2048  	}
2049  	return folio_batch_count(fbatch);
2050  }
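
/*
 * Usage sketch (illustrative only): truncate-style callers drain a range in
 * batches; value entries (shadow/swap) carry no reference and must be weeded
 * out of the batch before the folio references are dropped:
 *
 *	struct folio_batch fbatch;
 *	pgoff_t indices[PAGEVEC_SIZE];
 *
 *	folio_batch_init(&fbatch);
 *	while (find_get_entries(mapping, &start, end, &fbatch, indices)) {
 *		... handle each entry, checking xa_is_value() ...
 *		... drop value entries, then folio_batch_release(&fbatch) ...
 *	}
 */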
2051  
2052  /**
2053   * find_lock_entries - Find a batch of pagecache entries.
2054   * @mapping:	The address_space to search.
2055   * @start:	The starting page cache index.
2056   * @end:	The final page index (inclusive).
2057   * @fbatch:	Where the resulting entries are placed.
2058   * @indices:	The cache indices of the entries in @fbatch.
2059   *
2060   * find_lock_entries() will return a batch of entries from @mapping.
2061   * Swap, shadow and DAX entries are included.  Folios are returned
2062   * locked and with an incremented refcount.  Folios which are locked
2063   * by somebody else or under writeback are skipped.  Folios which are
2064   * partially outside the range are not returned.
2065   *
2066   * The entries have ascending indexes.  The indices may not be consecutive
2067   * due to not-present entries, large folios, folios which could not be
2068   * locked or folios under writeback.
2069   *
2070   * Return: The number of entries which were found.
2071   */
2072  unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
2073  		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2074  {
2075  	XA_STATE(xas, &mapping->i_pages, *start);
2076  	struct folio *folio;
2077  
2078  	rcu_read_lock();
2079  	while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
2080  		if (!xa_is_value(folio)) {
2081  			if (folio->index < *start)
2082  				goto put;
2083  			if (folio->index + folio_nr_pages(folio) - 1 > end)
2084  				goto put;
2085  			if (!folio_trylock(folio))
2086  				goto put;
2087  			if (folio->mapping != mapping ||
2088  			    folio_test_writeback(folio))
2089  				goto unlock;
2090  			VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
2091  					folio);
2092  		}
2093  		indices[fbatch->nr] = xas.xa_index;
2094  		if (!folio_batch_add(fbatch, folio))
2095  			break;
2096  		continue;
2097  unlock:
2098  		folio_unlock(folio);
2099  put:
2100  		folio_put(folio);
2101  	}
2102  	rcu_read_unlock();
2103  
2104  	if (folio_batch_count(fbatch)) {
2105  		unsigned long nr = 1;
2106  		int idx = folio_batch_count(fbatch) - 1;
2107  
2108  		folio = fbatch->folios[idx];
2109  		if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
2110  			nr = folio_nr_pages(folio);
2111  		*start = indices[idx] + nr;
2112  	}
2113  	return folio_batch_count(fbatch);
2114  }
2115  
2116  /**
2117   * filemap_get_folios - Get a batch of folios
2118   * @mapping:	The address_space to search
2119   * @start:	The starting page index
2120   * @end:	The final page index (inclusive)
2121   * @fbatch:	The batch to fill.
2122   *
2123   * Search for and return a batch of folios in the mapping starting at
2124   * index @start and up to index @end (inclusive).  The folios are returned
2125   * in @fbatch with an elevated reference count.
2126   *
2127   * The first folio may start before @start; if it does, it will contain
2128   * @start.  The final folio may extend beyond @end; if it does, it will
2129   * contain @end.  The folios have ascending indices.  There may be gaps
2130   * between the folios if there are indices which have no folio in the
2131   * page cache.  If folios are added to or removed from the page cache
2132   * while this is running, they may or may not be found by this call.
2133   *
2134   * Return: The number of folios which were found.
2135   * We also update @start to index the next folio for the traversal.
2136   */
2137  unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
2138  		pgoff_t end, struct folio_batch *fbatch)
2139  {
2140  	XA_STATE(xas, &mapping->i_pages, *start);
2141  	struct folio *folio;
2142  
2143  	rcu_read_lock();
2144  	while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2145  		/* Skip over shadow, swap and DAX entries */
2146  		if (xa_is_value(folio))
2147  			continue;
2148  		if (!folio_batch_add(fbatch, folio)) {
2149  			unsigned long nr = folio_nr_pages(folio);
2150  
2151  			if (folio_test_hugetlb(folio))
2152  				nr = 1;
2153  			*start = folio->index + nr;
2154  			goto out;
2155  		}
2156  	}
2157  
2158  	/*
2159  	 * We come here when there is no page beyond @end. We take care to not
2160  	 * overflow the index @start as it confuses some of the callers. This
2161  	 * breaks the iteration when there is a page at index -1 but that is
2162  	 * already broken anyway.
2163  	 */
2164  	if (end == (pgoff_t)-1)
2165  		*start = (pgoff_t)-1;
2166  	else
2167  		*start = end + 1;
2168  out:
2169  	rcu_read_unlock();
2170  
2171  	return folio_batch_count(fbatch);
2172  }
2173  EXPORT_SYMBOL(filemap_get_folios);
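
/*
 * Usage sketch (illustrative only): walking every folio cached in a range,
 * one batch at a time; the call itself advances *start:
 *
 *	struct folio_batch fbatch;
 *	unsigned int i, nr;
 *
 *	folio_batch_init(&fbatch);
 *	while ((nr = filemap_get_folios(mapping, &start, end, &fbatch))) {
 *		for (i = 0; i < nr; i++)
 *			... inspect fbatch.folios[i] ...
 *		folio_batch_release(&fbatch);
 *	}
 */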
2174  
2175  static inline
2176  bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
2177  {
2178  	if (!folio_test_large(folio) || folio_test_hugetlb(folio))
2179  		return false;
2180  	if (index >= max)
2181  		return false;
2182  	return index < folio->index + folio_nr_pages(folio) - 1;
2183  }
2184  
2185  /**
2186   * filemap_get_folios_contig - Get a batch of contiguous folios
2187   * @mapping:	The address_space to search
2188   * @start:	The starting page index
2189   * @end:	The final page index (inclusive)
2190   * @fbatch:	The batch to fill
2191   *
2192   * filemap_get_folios_contig() works exactly like filemap_get_folios(),
2193   * except the returned folios are guaranteed to be contiguous. This may
2194   * not return all contiguous folios if the batch gets filled up.
2195   *
2196   * Return: The number of folios found.
2197   * Also update @start to be positioned for traversal of the next folio.
2198   */
2199  
2200  unsigned filemap_get_folios_contig(struct address_space *mapping,
2201  		pgoff_t *start, pgoff_t end, struct folio_batch *fbatch)
2202  {
2203  	XA_STATE(xas, &mapping->i_pages, *start);
2204  	unsigned long nr;
2205  	struct folio *folio;
2206  
2207  	rcu_read_lock();
2208  
2209  	for (folio = xas_load(&xas); folio && xas.xa_index <= end;
2210  			folio = xas_next(&xas)) {
2211  		if (xas_retry(&xas, folio))
2212  			continue;
2213  		/*
2214  		 * If the entry has been swapped out, we can stop looking.
2215  		 * No current caller is looking for DAX entries.
2216  		 */
2217  		if (xa_is_value(folio))
2218  			goto update_start;
2219  
2220  		if (!folio_try_get_rcu(folio))
2221  			goto retry;
2222  
2223  		if (unlikely(folio != xas_reload(&xas)))
2224  			goto put_folio;
2225  
2226  		if (!folio_batch_add(fbatch, folio)) {
2227  			nr = folio_nr_pages(folio);
2228  
2229  			if (folio_test_hugetlb(folio))
2230  				nr = 1;
2231  			*start = folio->index + nr;
2232  			goto out;
2233  		}
2234  		continue;
2235  put_folio:
2236  		folio_put(folio);
2237  
2238  retry:
2239  		xas_reset(&xas);
2240  	}
2241  
2242  update_start:
2243  	nr = folio_batch_count(fbatch);
2244  
2245  	if (nr) {
2246  		folio = fbatch->folios[nr - 1];
2247  		if (folio_test_hugetlb(folio))
2248  			*start = folio->index + 1;
2249  		else
2250  			*start = folio->index + folio_nr_pages(folio);
2251  	}
2252  out:
2253  	rcu_read_unlock();
2254  	return folio_batch_count(fbatch);
2255  }
2256  EXPORT_SYMBOL(filemap_get_folios_contig);
2257  
2258  /**
2259   * filemap_get_folios_tag - Get a batch of folios matching @tag
2260   * @mapping:    The address_space to search
2261   * @start:      The starting page index
2262   * @end:        The final page index (inclusive)
2263   * @tag:        The tag index
2264   * @fbatch:     The batch to fill
2265   *
2266   * Same as filemap_get_folios(), but only returning folios tagged with @tag.
2267   *
2268   * Return: The number of folios found.
2269   * Also update @start to index the next folio for traversal.
2270   */
2271  unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
2272  			pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch)
2273  {
2274  	XA_STATE(xas, &mapping->i_pages, *start);
2275  	struct folio *folio;
2276  
2277  	rcu_read_lock();
2278  	while ((folio = find_get_entry(&xas, end, tag)) != NULL) {
2279  		/*
2280  		 * Shadow entries should never be tagged, but this iteration
2281  		 * is lockless so there is a window for page reclaim to evict
2282  		 * a page we saw tagged. Skip over it.
2283  		 */
2284  		if (xa_is_value(folio))
2285  			continue;
2286  		if (!folio_batch_add(fbatch, folio)) {
2287  			unsigned long nr = folio_nr_pages(folio);
2288  
2289  			if (folio_test_hugetlb(folio))
2290  				nr = 1;
2291  			*start = folio->index + nr;
2292  			goto out;
2293  		}
2294  	}
2295  	/*
2296  	 * We come here when there is no page beyond @end. We take care to not
2297  	 * overflow the index @start as it confuses some of the callers. This
2298  	 * breaks the iteration when there is a page at index -1 but that is
2299  	 * already broken anyway.
2300  	 */
2301  	if (end == (pgoff_t)-1)
2302  		*start = (pgoff_t)-1;
2303  	else
2304  		*start = end + 1;
2305  out:
2306  	rcu_read_unlock();
2307  
2308  	return folio_batch_count(fbatch);
2309  }
2310  EXPORT_SYMBOL(filemap_get_folios_tag);
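
/*
 * Usage sketch (illustrative only): writeback implementations commonly pull
 * dirty folios in batches via the xarray dirty tag:
 *
 *	nr = filemap_get_folios_tag(mapping, &index, end,
 *				    PAGECACHE_TAG_DIRTY, &fbatch);
 *	for (i = 0; i < nr; i++)
 *		... lock and write back fbatch.folios[i] ...
 *	folio_batch_release(&fbatch);
 */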
2311  
2312  /*
2313   * CD/DVDs are error prone. When a medium error occurs, the driver may fail
2314   * a _large_ part of the i/o request. Imagine the worst scenario:
2315   *
2316   *      ---R__________________________________________B__________
2317   *         ^ reading here                             ^ bad block(assume 4k)
2318   *
2319   * read(R) => miss => readahead(R...B) => media error => frustrating retries
2320   * => failing the whole request => read(R) => read(R+1) =>
2321   * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
2322   * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
2323   * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
2324   *
2325   * It is going insane. Fix it by quickly scaling down the readahead size.
2326   */
2327  static void shrink_readahead_size_eio(struct file_ra_state *ra)
2328  {
2329  	ra->ra_pages /= 4;
2330  }
2331  
2332  /*
2333   * filemap_get_read_batch - Get a batch of folios for read
2334   *
2335   * Get a batch of folios which represent a contiguous range of bytes in
2336   * the file.  No exceptional entries will be returned.  If @index is in
2337   * the middle of a folio, the entire folio will be returned.  The last
2338   * folio in the batch may have the readahead flag set or the uptodate flag
2339   * clear so that the caller can take the appropriate action.
2340   */
2341  static void filemap_get_read_batch(struct address_space *mapping,
2342  		pgoff_t index, pgoff_t max, struct folio_batch *fbatch)
2343  {
2344  	XA_STATE(xas, &mapping->i_pages, index);
2345  	struct folio *folio;
2346  
2347  	rcu_read_lock();
2348  	for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
2349  		if (xas_retry(&xas, folio))
2350  			continue;
2351  		if (xas.xa_index > max || xa_is_value(folio))
2352  			break;
2353  		if (xa_is_sibling(folio))
2354  			break;
2355  		if (!folio_try_get_rcu(folio))
2356  			goto retry;
2357  
2358  		if (unlikely(folio != xas_reload(&xas)))
2359  			goto put_folio;
2360  
2361  		if (!folio_batch_add(fbatch, folio))
2362  			break;
2363  		if (!folio_test_uptodate(folio))
2364  			break;
2365  		if (folio_test_readahead(folio))
2366  			break;
2367  		xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1);
2368  		continue;
2369  put_folio:
2370  		folio_put(folio);
2371  retry:
2372  		xas_reset(&xas);
2373  	}
2374  	rcu_read_unlock();
2375  }
2376  
2377  static int filemap_read_folio(struct file *file, filler_t filler,
2378  		struct folio *folio)
2379  {
2380  	bool workingset = folio_test_workingset(folio);
2381  	unsigned long pflags;
2382  	int error;
2383  
2384  	/*
2385  	 * A previous I/O error may have been due to temporary failures,
2386  	 * e.g. multipath errors.  PG_error will be set again if read_folio
2387  	 * fails.
2388  	 */
2389  	folio_clear_error(folio);
2390  
2391  	/* Start the actual read. The read will unlock the page. */
2392  	if (unlikely(workingset))
2393  		psi_memstall_enter(&pflags);
2394  	error = filler(file, folio);
2395  	if (unlikely(workingset))
2396  		psi_memstall_leave(&pflags);
2397  	if (error)
2398  		return error;
2399  
2400  	error = folio_wait_locked_killable(folio);
2401  	if (error)
2402  		return error;
2403  	if (folio_test_uptodate(folio))
2404  		return 0;
2405  	if (file)
2406  		shrink_readahead_size_eio(&file->f_ra);
2407  	return -EIO;
2408  }
2409  
2410  static bool filemap_range_uptodate(struct address_space *mapping,
2411  		loff_t pos, size_t count, struct folio *folio,
2412  		bool need_uptodate)
2413  {
2414  	if (folio_test_uptodate(folio))
2415  		return true;
2416  	/* pipes can't handle partially uptodate pages */
2417  	if (need_uptodate)
2418  		return false;
2419  	if (!mapping->a_ops->is_partially_uptodate)
2420  		return false;
2421  	if (mapping->host->i_blkbits >= folio_shift(folio))
2422  		return false;
2423  
2424  	if (folio_pos(folio) > pos) {
2425  		count -= folio_pos(folio) - pos;
2426  		pos = 0;
2427  	} else {
2428  		pos -= folio_pos(folio);
2429  	}
2430  
2431  	return mapping->a_ops->is_partially_uptodate(folio, pos, count);
2432  }
2433  
2434  static int filemap_update_page(struct kiocb *iocb,
2435  		struct address_space *mapping, size_t count,
2436  		struct folio *folio, bool need_uptodate)
2437  {
2438  	int error;
2439  
2440  	if (iocb->ki_flags & IOCB_NOWAIT) {
2441  		if (!filemap_invalidate_trylock_shared(mapping))
2442  			return -EAGAIN;
2443  	} else {
2444  		filemap_invalidate_lock_shared(mapping);
2445  	}
2446  
2447  	if (!folio_trylock(folio)) {
2448  		error = -EAGAIN;
2449  		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
2450  			goto unlock_mapping;
2451  		if (!(iocb->ki_flags & IOCB_WAITQ)) {
2452  			filemap_invalidate_unlock_shared(mapping);
2453  			/*
2454  			 * This is where we usually end up waiting for a
2455  			 * previously submitted readahead to finish.
2456  			 */
2457  			folio_put_wait_locked(folio, TASK_KILLABLE);
2458  			return AOP_TRUNCATED_PAGE;
2459  		}
2460  		error = __folio_lock_async(folio, iocb->ki_waitq);
2461  		if (error)
2462  			goto unlock_mapping;
2463  	}
2464  
2465  	error = AOP_TRUNCATED_PAGE;
2466  	if (!folio->mapping)
2467  		goto unlock;
2468  
2469  	error = 0;
2470  	if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio,
2471  				   need_uptodate))
2472  		goto unlock;
2473  
2474  	error = -EAGAIN;
2475  	if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
2476  		goto unlock;
2477  
2478  	error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
2479  			folio);
2480  	goto unlock_mapping;
2481  unlock:
2482  	folio_unlock(folio);
2483  unlock_mapping:
2484  	filemap_invalidate_unlock_shared(mapping);
2485  	if (error == AOP_TRUNCATED_PAGE)
2486  		folio_put(folio);
2487  	return error;
2488  }
2489  
2490  static int filemap_create_folio(struct file *file,
2491  		struct address_space *mapping, pgoff_t index,
2492  		struct folio_batch *fbatch)
2493  {
2494  	struct folio *folio;
2495  	int error;
2496  
2497  	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
2498  	if (!folio)
2499  		return -ENOMEM;
2500  
2501  	/*
2502  	 * Protect against truncate / hole punch. Grabbing invalidate_lock
2503  	 * here ensures we cannot instantiate and bring new pagecache
2504  	 * folios uptodate after evicting page cache during truncate
2505  	 * and before actually freeing blocks.	Note that we could
2506  	 * release invalidate_lock after inserting the folio into
2507  	 * the page cache as the locked folio would then be enough to
2508  	 * synchronize with hole punching. But there are code paths
2509  	 * such as filemap_update_page() filling in partially uptodate
2510  	 * pages or ->readahead() that need to hold invalidate_lock
2511  	 * while mapping blocks for IO so let's hold the lock here as
2512  	 * well to keep locking rules simple.
2513  	 */
2514  	filemap_invalidate_lock_shared(mapping);
2515  	error = filemap_add_folio(mapping, folio, index,
2516  			mapping_gfp_constraint(mapping, GFP_KERNEL));
2517  	if (error == -EEXIST)
2518  		error = AOP_TRUNCATED_PAGE;
2519  	if (error)
2520  		goto error;
2521  
2522  	error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
2523  	if (error)
2524  		goto error;
2525  
2526  	filemap_invalidate_unlock_shared(mapping);
2527  	folio_batch_add(fbatch, folio);
2528  	return 0;
2529  error:
2530  	filemap_invalidate_unlock_shared(mapping);
2531  	folio_put(folio);
2532  	return error;
2533  }
2534  
2535  static int filemap_readahead(struct kiocb *iocb, struct file *file,
2536  		struct address_space *mapping, struct folio *folio,
2537  		pgoff_t last_index)
2538  {
2539  	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);
2540  
2541  	if (iocb->ki_flags & IOCB_NOIO)
2542  		return -EAGAIN;
2543  	page_cache_async_ra(&ractl, folio, last_index - folio->index);
2544  	return 0;
2545  }
2546  
2547  static int filemap_get_pages(struct kiocb *iocb, size_t count,
2548  		struct folio_batch *fbatch, bool need_uptodate)
2549  {
2550  	struct file *filp = iocb->ki_filp;
2551  	struct address_space *mapping = filp->f_mapping;
2552  	struct file_ra_state *ra = &filp->f_ra;
2553  	pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
2554  	pgoff_t last_index;
2555  	struct folio *folio;
2556  	int err = 0;
2557  
2558  	/* "last_index" is the index of the page beyond the end of the read */
2559  	last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE);
2560  retry:
2561  	if (fatal_signal_pending(current))
2562  		return -EINTR;
2563  
2564  	filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
2565  	if (!folio_batch_count(fbatch)) {
2566  		if (iocb->ki_flags & IOCB_NOIO)
2567  			return -EAGAIN;
2568  		page_cache_sync_readahead(mapping, ra, filp, index,
2569  				last_index - index);
2570  		filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
2571  	}
2572  	if (!folio_batch_count(fbatch)) {
2573  		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
2574  			return -EAGAIN;
2575  		err = filemap_create_folio(filp, mapping,
2576  				iocb->ki_pos >> PAGE_SHIFT, fbatch);
2577  		if (err == AOP_TRUNCATED_PAGE)
2578  			goto retry;
2579  		return err;
2580  	}
2581  
2582  	folio = fbatch->folios[folio_batch_count(fbatch) - 1];
2583  	if (folio_test_readahead(folio)) {
2584  		err = filemap_readahead(iocb, filp, mapping, folio, last_index);
2585  		if (err)
2586  			goto err;
2587  	}
2588  	if (!folio_test_uptodate(folio)) {
2589  		if ((iocb->ki_flags & IOCB_WAITQ) &&
2590  		    folio_batch_count(fbatch) > 1)
2591  			iocb->ki_flags |= IOCB_NOWAIT;
2592  		err = filemap_update_page(iocb, mapping, count, folio,
2593  					  need_uptodate);
2594  		if (err)
2595  			goto err;
2596  	}
2597  
2598  	return 0;
2599  err:
2600  	if (err < 0)
2601  		folio_put(folio);
2602  	if (likely(--fbatch->nr))
2603  		return 0;
2604  	if (err == AOP_TRUNCATED_PAGE)
2605  		goto retry;
2606  	return err;
2607  }
2608  
2609  static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
2610  {
2611  	unsigned int shift = folio_shift(folio);
2612  
2613  	return (pos1 >> shift == pos2 >> shift);
2614  }
2615  
2616  /**
2617   * filemap_read - Read data from the page cache.
2618   * @iocb: The iocb to read.
2619   * @iter: Destination for the data.
2620   * @already_read: Number of bytes already read by the caller.
2621   *
2622   * Copies data from the page cache.  If the data is not currently present,
2623   * uses the readahead and read_folio address_space operations to fetch it.
2624   *
2625   * Return: Total number of bytes copied, including those already read by
2626   * the caller.  If an error happens before any bytes are copied, returns
2627   * a negative error number.
2628   */
2629  ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
2630  		ssize_t already_read)
2631  {
2632  	struct file *filp = iocb->ki_filp;
2633  	struct file_ra_state *ra = &filp->f_ra;
2634  	struct address_space *mapping = filp->f_mapping;
2635  	struct inode *inode = mapping->host;
2636  	struct folio_batch fbatch;
2637  	int i, error = 0;
2638  	bool writably_mapped;
2639  	loff_t isize, end_offset;
2640  
2641  	if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
2642  		return 0;
2643  	if (unlikely(!iov_iter_count(iter)))
2644  		return 0;
2645  
2646  	iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
2647  	folio_batch_init(&fbatch);
2648  
2649  	do {
2650  		cond_resched();
2651  
2652  		/*
2653  		 * If we've already successfully copied some data, then we
2654  		 * can no longer safely return -EIOCBQUEUED. Hence mark
2655  		 * an async read NOWAIT at that point.
2656  		 */
2657  		if ((iocb->ki_flags & IOCB_WAITQ) && already_read)
2658  			iocb->ki_flags |= IOCB_NOWAIT;
2659  
2660  		if (unlikely(iocb->ki_pos >= i_size_read(inode)))
2661  			break;
2662  
2663  		error = filemap_get_pages(iocb, iter->count, &fbatch, false);
2664  		if (error < 0)
2665  			break;
2666  
2667  		/*
2668  		 * i_size must be checked after we know the pages are Uptodate.
2669  		 *
2670  		 * Checking i_size after the uptodate check allows us to calculate
2671  		 * the correct value for "nr", which means the zero-filled
2672  		 * part of the page is not copied back to userspace (unless
2673  		 * another truncate extends the file - this is desired though).
2674  		 */
2675  		isize = i_size_read(inode);
2676  		if (unlikely(iocb->ki_pos >= isize))
2677  			goto put_folios;
2678  		end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
2679  
2680  		/*
2681  		 * Once we start copying data, we don't want to be touching any
2682  		 * cachelines that might be contended:
2683  		 */
2684  		writably_mapped = mapping_writably_mapped(mapping);
2685  
2686  		/*
2687  		 * When a read accesses the same folio several times, only
2688  		 * mark it as accessed the first time.
2689  		 */
2690  		if (!pos_same_folio(iocb->ki_pos, ra->prev_pos - 1,
2691  							fbatch.folios[0]))
2692  			folio_mark_accessed(fbatch.folios[0]);
2693  
2694  		for (i = 0; i < folio_batch_count(&fbatch); i++) {
2695  			struct folio *folio = fbatch.folios[i];
2696  			size_t fsize = folio_size(folio);
2697  			size_t offset = iocb->ki_pos & (fsize - 1);
2698  			size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
2699  					     fsize - offset);
2700  			size_t copied;
2701  
2702  			if (end_offset < folio_pos(folio))
2703  				break;
2704  			if (i > 0)
2705  				folio_mark_accessed(folio);
2706  			/*
2707  			 * If users can be writing to this folio using arbitrary
2708  			 * virtual addresses, take care of potential aliasing
2709  			 * before reading the folio on the kernel side.
2710  			 */
2711  			if (writably_mapped)
2712  				flush_dcache_folio(folio);
2713  
2714  			copied = copy_folio_to_iter(folio, offset, bytes, iter);
2715  
2716  			already_read += copied;
2717  			iocb->ki_pos += copied;
2718  			ra->prev_pos = iocb->ki_pos;
2719  
2720  			if (copied < bytes) {
2721  				error = -EFAULT;
2722  				break;
2723  			}
2724  		}
2725  put_folios:
2726  		for (i = 0; i < folio_batch_count(&fbatch); i++)
2727  			folio_put(fbatch.folios[i]);
2728  		folio_batch_init(&fbatch);
2729  	} while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);
2730  
2731  	file_accessed(filp);
2732  
2733  	return already_read ? already_read : error;
2734  }
2735  EXPORT_SYMBOL_GPL(filemap_read);
2736  
2737  int kiocb_write_and_wait(struct kiocb *iocb, size_t count)
2738  {
2739  	struct address_space *mapping = iocb->ki_filp->f_mapping;
2740  	loff_t pos = iocb->ki_pos;
2741  	loff_t end = pos + count - 1;
2742  
2743  	if (iocb->ki_flags & IOCB_NOWAIT) {
2744  		if (filemap_range_needs_writeback(mapping, pos, end))
2745  			return -EAGAIN;
2746  		return 0;
2747  	}
2748  
2749  	return filemap_write_and_wait_range(mapping, pos, end);
2750  }
2751  
2752  int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
2753  {
2754  	struct address_space *mapping = iocb->ki_filp->f_mapping;
2755  	loff_t pos = iocb->ki_pos;
2756  	loff_t end = pos + count - 1;
2757  	int ret;
2758  
2759  	if (iocb->ki_flags & IOCB_NOWAIT) {
2760  		/* we could block if there are any pages in the range */
2761  		if (filemap_range_has_page(mapping, pos, end))
2762  			return -EAGAIN;
2763  	} else {
2764  		ret = filemap_write_and_wait_range(mapping, pos, end);
2765  		if (ret)
2766  			return ret;
2767  	}
2768  
2769  	/*
2770  	 * After a write we want buffered reads to be sure to go to disk to get
2771  	 * the new data.  We invalidate clean cached pages from the region we're
2772  	 * about to write.  We do this *before* the write so that we can return
2773  	 * without clobbering -EIOCBQUEUED from ->direct_IO().
2774  	 */
2775  	return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
2776  					     end >> PAGE_SHIFT);
2777  }
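
/*
 * Usage sketch (illustrative only): a direct-I/O write path invalidates the
 * cached range up front so that later buffered reads see the new data:
 *
 *	ret = kiocb_invalidate_pages(iocb, count);
 *	if (ret)
 *		return ret;	(-EAGAIN for IOCB_NOWAIT with cached pages)
 *	... submit the direct write ...
 */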
2778  
2779  /**
2780   * generic_file_read_iter - generic filesystem read routine
2781   * @iocb:	kernel I/O control block
2782   * @iter:	destination for the data read
2783   *
2784   * This is the "read_iter()" routine for all filesystems
2785   * that can use the page cache directly.
2786   *
2787   * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
2788   * be returned when no data can be read without waiting for I/O requests
2789   * to complete; it doesn't prevent readahead.
2790   *
2791   * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
2792   * requests shall be made for the read or for readahead.  When no data
2793   * can be read, -EAGAIN shall be returned.  When readahead would be
2794   * triggered, a partial, possibly empty read shall be returned.
2795   *
2796   * Return:
2797   * * number of bytes copied, even for partial reads
2798   * * negative error code (or 0 if IOCB_NOIO) if nothing was read
2799   */
2800  ssize_t
2801  generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2802  {
2803  	size_t count = iov_iter_count(iter);
2804  	ssize_t retval = 0;
2805  
2806  	if (!count)
2807  		return 0; /* skip atime */
2808  
2809  	if (iocb->ki_flags & IOCB_DIRECT) {
2810  		struct file *file = iocb->ki_filp;
2811  		struct address_space *mapping = file->f_mapping;
2812  		struct inode *inode = mapping->host;
2813  
2814  		retval = kiocb_write_and_wait(iocb, count);
2815  		if (retval < 0)
2816  			return retval;
2817  		file_accessed(file);
2818  
2819  		retval = mapping->a_ops->direct_IO(iocb, iter);
2820  		if (retval >= 0) {
2821  			iocb->ki_pos += retval;
2822  			count -= retval;
2823  		}
2824  		if (retval != -EIOCBQUEUED)
2825  			iov_iter_revert(iter, count - iov_iter_count(iter));
2826  
2827  		/*
2828  		 * Btrfs can have a short DIO read if we encounter
2829  		 * compressed extents, so if there was an error, or if
2830  		 * we've already read everything we wanted to, or if
2831  		 * there was a short read because we hit EOF, go ahead
2832  		 * and return.  Otherwise fall through to buffered I/O for
2833  		 * the rest of the read.  Buffered reads will not work for
2834  		 * DAX files, so don't bother trying.
2835  		 */
2836  		if (retval < 0 || !count || IS_DAX(inode))
2837  			return retval;
2838  		if (iocb->ki_pos >= i_size_read(inode))
2839  			return retval;
2840  	}
2841  
2842  	return filemap_read(iocb, iter, retval);
2843  }
2844  EXPORT_SYMBOL(generic_file_read_iter);
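
/*
 * Usage sketch (illustrative only; "examplefs" is a hypothetical filesystem):
 * filesystems that use the page cache directly typically wire this up
 * verbatim in their file_operations:
 *
 *	const struct file_operations examplefs_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.mmap		= generic_file_mmap,
 *		.splice_read	= filemap_splice_read,
 *	};
 */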
2845  
2846  /*
2847   * Splice subpages from a folio into a pipe.
2848   */
2849  size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
2850  			      struct folio *folio, loff_t fpos, size_t size)
2851  {
2852  	struct page *page;
2853  	size_t spliced = 0, offset = offset_in_folio(folio, fpos);
2854  
2855  	page = folio_page(folio, offset / PAGE_SIZE);
2856  	size = min(size, folio_size(folio) - offset);
2857  	offset %= PAGE_SIZE;
2858  
2859  	while (spliced < size &&
2860  	       !pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
2861  		struct pipe_buffer *buf = pipe_head_buf(pipe);
2862  		size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced);
2863  
2864  		*buf = (struct pipe_buffer) {
2865  			.ops	= &page_cache_pipe_buf_ops,
2866  			.page	= page,
2867  			.offset	= offset,
2868  			.len	= part,
2869  		};
2870  		folio_get(folio);
2871  		pipe->head++;
2872  		page++;
2873  		spliced += part;
2874  		offset = 0;
2875  	}
2876  
2877  	return spliced;
2878  }
2879  
2880  /**
2881   * filemap_splice_read -  Splice data from a file's pagecache into a pipe
2882   * @in: The file to read from
2883   * @ppos: Pointer to the file position to read from
2884   * @pipe: The pipe to splice into
2885   * @len: The amount to splice
2886   * @flags: The SPLICE_F_* flags
2887   *
2888   * This function gets folios from a file's pagecache and splices them into the
2889   * pipe.  Readahead will be called as necessary to fill more folios.  This may
2890   * be used for blockdevs also.
2891   *
2892   * Return: On success, the number of bytes read will be returned and *@ppos
2893   * will be updated if appropriate; 0 will be returned if there is no more data
2894   * to be read; -EAGAIN will be returned if the pipe had no space, and some
2895   * other negative error code will be returned on error.  A short read may occur
2896   * if the pipe has insufficient space, we reach the end of the data or we hit a
2897   * hole.
2898   */
2899  ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
2900  			    struct pipe_inode_info *pipe,
2901  			    size_t len, unsigned int flags)
2902  {
2903  	struct folio_batch fbatch;
2904  	struct kiocb iocb;
2905  	size_t total_spliced = 0, used, npages;
2906  	loff_t isize, end_offset;
2907  	bool writably_mapped;
2908  	int i, error = 0;
2909  
2910  	if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes))
2911  		return 0;
2912  
2913  	init_sync_kiocb(&iocb, in);
2914  	iocb.ki_pos = *ppos;
2915  
2916  	/* Work out how much data we can actually add into the pipe */
2917  	used = pipe_occupancy(pipe->head, pipe->tail);
2918  	npages = max_t(ssize_t, pipe->max_usage - used, 0);
2919  	len = min_t(size_t, len, npages * PAGE_SIZE);
2920  
2921  	folio_batch_init(&fbatch);
2922  
2923  	do {
2924  		cond_resched();
2925  
2926  		if (*ppos >= i_size_read(in->f_mapping->host))
2927  			break;
2928  
2929  		iocb.ki_pos = *ppos;
2930  		error = filemap_get_pages(&iocb, len, &fbatch, true);
2931  		if (error < 0)
2932  			break;
2933  
2934  		/*
2935  		 * i_size must be checked after we know the pages are Uptodate.
2936  		 *
2937  		 * Checking i_size after the uptodate check allows us to calculate
2938  		 * the correct value for "nr", which means the zero-filled
2939  		 * part of the page is not copied back to userspace (unless
2940  		 * another truncate extends the file - this is desired though).
2941  		 */
2942  		isize = i_size_read(in->f_mapping->host);
2943  		if (unlikely(*ppos >= isize))
2944  			break;
2945  		end_offset = min_t(loff_t, isize, *ppos + len);
2946  
2947  		/*
2948  		 * Once we start copying data, we don't want to be touching any
2949  		 * cachelines that might be contended:
2950  		 */
2951  		writably_mapped = mapping_writably_mapped(in->f_mapping);
2952  
2953  		for (i = 0; i < folio_batch_count(&fbatch); i++) {
2954  			struct folio *folio = fbatch.folios[i];
2955  			size_t n;
2956  
2957  			if (folio_pos(folio) >= end_offset)
2958  				goto out;
2959  			folio_mark_accessed(folio);
2960  
2961  			/*
2962  			 * If users can be writing to this folio using arbitrary
2963  			 * virtual addresses, take care of potential aliasing
2964  			 * before reading the folio on the kernel side.
2965  			 */
2966  			if (writably_mapped)
2967  				flush_dcache_folio(folio);
2968  
2969  			n = min_t(loff_t, len, isize - *ppos);
2970  			n = splice_folio_into_pipe(pipe, folio, *ppos, n);
2971  			if (!n)
2972  				goto out;
2973  			len -= n;
2974  			total_spliced += n;
2975  			*ppos += n;
2976  			in->f_ra.prev_pos = *ppos;
2977  			if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
2978  				goto out;
2979  		}
2980  
2981  		folio_batch_release(&fbatch);
2982  	} while (len);
2983  
2984  out:
2985  	folio_batch_release(&fbatch);
2986  	file_accessed(in);
2987  
2988  	return total_spliced ? total_spliced : error;
2989  }
2990  EXPORT_SYMBOL(filemap_splice_read);
2991  
2992  static inline loff_t folio_seek_hole_data(struct xa_state *xas,
2993  		struct address_space *mapping, struct folio *folio,
2994  		loff_t start, loff_t end, bool seek_data)
2995  {
2996  	const struct address_space_operations *ops = mapping->a_ops;
2997  	size_t offset, bsz = i_blocksize(mapping->host);
2998  
2999  	if (xa_is_value(folio) || folio_test_uptodate(folio))
3000  		return seek_data ? start : end;
3001  	if (!ops->is_partially_uptodate)
3002  		return seek_data ? end : start;
3003  
3004  	xas_pause(xas);
3005  	rcu_read_unlock();
3006  	folio_lock(folio);
3007  	if (unlikely(folio->mapping != mapping))
3008  		goto unlock;
3009  
3010  	offset = offset_in_folio(folio, start) & ~(bsz - 1);
3011  
3012  	do {
3013  		if (ops->is_partially_uptodate(folio, offset, bsz) ==
3014  							seek_data)
3015  			break;
3016  		start = (start + bsz) & ~(bsz - 1);
3017  		offset += bsz;
3018  	} while (offset < folio_size(folio));
3019  unlock:
3020  	folio_unlock(folio);
3021  	rcu_read_lock();
3022  	return start;
3023  }
3024  
3025  static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
3026  {
3027  	if (xa_is_value(folio))
3028  		return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
3029  	return folio_size(folio);
3030  }
3031  
3032  /**
3033   * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.
3034   * @mapping: Address space to search.
3035   * @start: First byte to consider.
3036   * @end: Limit of search (exclusive).
3037   * @whence: Either SEEK_HOLE or SEEK_DATA.
3038   *
3039   * If the page cache knows which blocks contain holes and which blocks
3040   * contain data, your filesystem can use this function to implement
3041   * SEEK_HOLE and SEEK_DATA.  This is useful for filesystems which are
3042   * entirely memory-based such as tmpfs, and filesystems which support
3043   * unwritten extents.
3044   *
3045   * Return: The requested offset on success, or -ENXIO if @whence specifies
3046   * SEEK_DATA and there is no data after @start.  There is an implicit hole
3047   * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
3048   * and @end contain data.
3049   */
3050  loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
3051  		loff_t end, int whence)
3052  {
3053  	XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
3054  	pgoff_t max = (end - 1) >> PAGE_SHIFT;
3055  	bool seek_data = (whence == SEEK_DATA);
3056  	struct folio *folio;
3057  
3058  	if (end <= start)
3059  		return -ENXIO;
3060  
3061  	rcu_read_lock();
3062  	while ((folio = find_get_entry(&xas, max, XA_PRESENT))) {
3063  		loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
3064  		size_t seek_size;
3065  
3066  		if (start < pos) {
3067  			if (!seek_data)
3068  				goto unlock;
3069  			start = pos;
3070  		}
3071  
3072  		seek_size = seek_folio_size(&xas, folio);
3073  		pos = round_up((u64)pos + 1, seek_size);
3074  		start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
3075  				seek_data);
3076  		if (start < pos)
3077  			goto unlock;
3078  		if (start >= end)
3079  			break;
3080  		if (seek_size > PAGE_SIZE)
3081  			xas_set(&xas, pos >> PAGE_SHIFT);
3082  		if (!xa_is_value(folio))
3083  			folio_put(folio);
3084  	}
3085  	if (seek_data)
3086  		start = -ENXIO;
3087  unlock:
3088  	rcu_read_unlock();
3089  	if (folio && !xa_is_value(folio))
3090  		folio_put(folio);
3091  	if (start > end)
3092  		return end;
3093  	return start;
3094  }
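
/*
 * Usage sketch (illustrative only): an llseek implementation for a
 * cache-backed filesystem can answer SEEK_HOLE/SEEK_DATA from the cache;
 * a negative return such as -ENXIO is passed straight back to userspace:
 *
 *	case SEEK_DATA:
 *	case SEEK_HOLE:
 *		offset = mapping_seek_hole_data(mapping, offset,
 *						i_size_read(inode), whence);
 *		break;
 */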
3095  
3096  #ifdef CONFIG_MMU
3097  #define MMAP_LOTSAMISS  (100)
3098  /*
3099   * lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
3100   * @vmf - the vm_fault for this fault.
3101   * @folio - the folio to lock.
3102   * @fpin - the pointer to the file we may pin (or is already pinned).
3103   *
3104   * This works similarly to __folio_lock_or_retry() in that it can drop the
3105   * mmap_lock.  It differs in that it actually returns with the folio locked
3106   * if it returns 1, and 0 if it couldn't lock the folio.  If we did have
3107   * to drop the mmap_lock then fpin will point to the pinned file and
3108   * needs to be fput()'ed at a later point.
3109   */
3110  static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
3111  				     struct file **fpin)
3112  {
3113  	if (folio_trylock(folio))
3114  		return 1;
3115  
3116  	/*
3117  	 * NOTE! This will make us return with VM_FAULT_RETRY, but with
3118  	 * the mmap_lock still held. That's how FAULT_FLAG_RETRY_NOWAIT
3119  	 * is supposed to work. We have way too many special cases..
3120  	 */
3121  	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
3122  		return 0;
3123  
3124  	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
3125  	if (vmf->flags & FAULT_FLAG_KILLABLE) {
3126  		if (__folio_lock_killable(folio)) {
3127  			/*
3128  			 * We didn't have the right flags to drop the mmap_lock,
3129  			 * but all fault_handlers only check for fatal signals
3130  			 * if we return VM_FAULT_RETRY, so we need to drop the
3131  			 * mmap_lock here and return 0 if we don't have a fpin.
3132  			 */
3133  			if (*fpin == NULL)
3134  				mmap_read_unlock(vmf->vma->vm_mm);
3135  			return 0;
3136  		}
3137  	} else
3138  		__folio_lock(folio);
3139  
3140  	return 1;
3141  }
3142  
3143  /*
3144   * Synchronous readahead happens when we don't even find a page in the page
3145   * cache at all.  We don't want to perform IO under the mmap sem, so if we have
3146   * to drop the mmap sem we return the file that was pinned in order for us to do
3147   * that.  If we didn't pin a file then we return NULL.  The file that is
3148   * returned needs to be fput()'ed when we're done with it.
3149   */
3150  static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
3151  {
3152  	struct file *file = vmf->vma->vm_file;
3153  	struct file_ra_state *ra = &file->f_ra;
3154  	struct address_space *mapping = file->f_mapping;
3155  	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
3156  	struct file *fpin = NULL;
3157  	unsigned long vm_flags = vmf->vma->vm_flags;
3158  	unsigned int mmap_miss;
3159  
3160  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3161  	/* Use the readahead code, even if readahead is disabled */
3162  	if (vm_flags & VM_HUGEPAGE) {
3163  		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3164  		ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
3165  		ra->size = HPAGE_PMD_NR;
3166  		/*
3167  		 * Fetch two PMD folios, so we get the chance to actually
3168  		 * readahead, unless we've been told not to.
3169  		 */
3170  		if (!(vm_flags & VM_RAND_READ))
3171  			ra->size *= 2;
3172  		ra->async_size = HPAGE_PMD_NR;
3173  		page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
3174  		return fpin;
3175  	}
3176  #endif
3177  
3178  	/* If we don't want any read-ahead, don't bother */
3179  	if (vm_flags & VM_RAND_READ)
3180  		return fpin;
3181  	if (!ra->ra_pages)
3182  		return fpin;
3183  
3184  	if (vm_flags & VM_SEQ_READ) {
3185  		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3186  		page_cache_sync_ra(&ractl, ra->ra_pages);
3187  		return fpin;
3188  	}
3189  
3190  	/* Avoid banging the cache line if not needed */
3191  	mmap_miss = READ_ONCE(ra->mmap_miss);
3192  	if (mmap_miss < MMAP_LOTSAMISS * 10)
3193  		WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
3194  
3195  	/*
3196  	 * Do we miss much more than hit in this file? If so,
3197  	 * stop bothering with read-ahead. It will only hurt.
3198  	 */
3199  	if (mmap_miss > MMAP_LOTSAMISS)
3200  		return fpin;
3201  
3202  	/*
3203  	 * mmap read-around
3204  	 */
3205  	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3206  	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
3207  	ra->size = ra->ra_pages;
3208  	ra->async_size = ra->ra_pages / 4;
3209  	ractl._index = ra->start;
3210  	page_cache_ra_order(&ractl, ra, 0);
3211  	return fpin;
3212  }
3213  
3214  /*
3215   * Asynchronous readahead happens when we find the page with PG_readahead set,
3216   * so we want to possibly extend the readahead further.  We return the file that
3217   * was pinned if we have to drop the mmap_lock in order to do IO.
3218   */
3219  static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
3220  					    struct folio *folio)
3221  {
3222  	struct file *file = vmf->vma->vm_file;
3223  	struct file_ra_state *ra = &file->f_ra;
3224  	DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
3225  	struct file *fpin = NULL;
3226  	unsigned int mmap_miss;
3227  
3228  	/* If we don't want any read-ahead, don't bother */
3229  	if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
3230  		return fpin;
3231  
3232  	mmap_miss = READ_ONCE(ra->mmap_miss);
3233  	if (mmap_miss)
3234  		WRITE_ONCE(ra->mmap_miss, --mmap_miss);
3235  
3236  	if (folio_test_readahead(folio)) {
3237  		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3238  		page_cache_async_ra(&ractl, folio, ra->ra_pages);
3239  	}
3240  	return fpin;
3241  }
3242  
3243  /**
3244   * filemap_fault - read in file data for page fault handling
3245   * @vmf:	struct vm_fault containing details of the fault
3246   *
3247   * filemap_fault() is invoked via the vma operations vector for a
3248   * mapped memory region to read in file data during a page fault.
3249   *
3250   * The goto's are kind of ugly, but this streamlines the normal case of having
3251   * it in the page cache, and handles the special cases reasonably without
3252   * having a lot of duplicated code.
3253   *
3254   * vma->vm_mm->mmap_lock must be held on entry.
3255   *
3256   * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
3257   * may be dropped before doing I/O or by lock_folio_maybe_drop_mmap().
3258   *
3259   * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
3260   * has not been released.
3261   *
3262   * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
3263   *
3264   * Return: bitwise-OR of %VM_FAULT_ codes.
3265   */
3266  vm_fault_t filemap_fault(struct vm_fault *vmf)
3267  {
3268  	int error;
3269  	struct file *file = vmf->vma->vm_file;
3270  	struct file *fpin = NULL;
3271  	struct address_space *mapping = file->f_mapping;
3272  	struct inode *inode = mapping->host;
3273  	pgoff_t max_idx, index = vmf->pgoff;
3274  	struct folio *folio;
3275  	vm_fault_t ret = 0;
3276  	bool mapping_locked = false;
3277  
3278  	max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3279  	if (unlikely(index >= max_idx))
3280  		return VM_FAULT_SIGBUS;
3281  
3282  	/*
3283  	 * Do we have something in the page cache already?
3284  	 */
3285  	folio = filemap_get_folio(mapping, index);
3286  	if (likely(!IS_ERR(folio))) {
3287  		/*
3288  		 * We found the page, so try async readahead before waiting for
3289  		 * the lock.
3290  		 */
3291  		if (!(vmf->flags & FAULT_FLAG_TRIED))
3292  			fpin = do_async_mmap_readahead(vmf, folio);
3293  		if (unlikely(!folio_test_uptodate(folio))) {
3294  			filemap_invalidate_lock_shared(mapping);
3295  			mapping_locked = true;
3296  		}
3297  	} else {
3298  		/* No page in the page cache at all */
3299  		count_vm_event(PGMAJFAULT);
3300  		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
3301  		ret = VM_FAULT_MAJOR;
3302  		fpin = do_sync_mmap_readahead(vmf);
3303  retry_find:
3304  		/*
3305  		 * See the comment in filemap_create_folio() for why we need
3306  		 * the invalidate_lock here.
3307  		 */
3308  		if (!mapping_locked) {
3309  			filemap_invalidate_lock_shared(mapping);
3310  			mapping_locked = true;
3311  		}
3312  		folio = __filemap_get_folio(mapping, index,
3313  					  FGP_CREAT|FGP_FOR_MMAP,
3314  					  vmf->gfp_mask);
3315  		if (IS_ERR(folio)) {
3316  			if (fpin)
3317  				goto out_retry;
3318  			filemap_invalidate_unlock_shared(mapping);
3319  			return VM_FAULT_OOM;
3320  		}
3321  	}
3322  
3323  	if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
3324  		goto out_retry;
3325  
3326  	/* Did it get truncated? */
3327  	if (unlikely(folio->mapping != mapping)) {
3328  		folio_unlock(folio);
3329  		folio_put(folio);
3330  		goto retry_find;
3331  	}
3332  	VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
3333  
3334  	/*
3335  	 * We have a locked page in the page cache; now we need to check
3336  	 * that it's up-to-date. If not, it is going to be due to an error.
3337  	 */
3338  	if (unlikely(!folio_test_uptodate(folio))) {
3339  		/*
3340  		 * The page was in cache and uptodate and now it is not.
3341  		 * Strange but possible since we didn't hold the page lock all
3342  		 * the time. Let's drop everything, get the invalidate lock and
3343  		 * try again.
3344  		 */
3345  		if (!mapping_locked) {
3346  			folio_unlock(folio);
3347  			folio_put(folio);
3348  			goto retry_find;
3349  		}
3350  		goto page_not_uptodate;
3351  	}
3352  
3353  	/*
3354  	 * We've made it this far and we had to drop our mmap_lock, so now is the
3355  	 * time to return to the upper layer and have it re-find the vma and
3356  	 * redo the fault.
3357  	 */
3358  	if (fpin) {
3359  		folio_unlock(folio);
3360  		goto out_retry;
3361  	}
3362  	if (mapping_locked)
3363  		filemap_invalidate_unlock_shared(mapping);
3364  
3365  	/*
3366  	 * Found the page and have a reference on it.
3367  	 * We must recheck i_size under page lock.
3368  	 */
3369  	max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3370  	if (unlikely(index >= max_idx)) {
3371  		folio_unlock(folio);
3372  		folio_put(folio);
3373  		return VM_FAULT_SIGBUS;
3374  	}
3375  
3376  	vmf->page = folio_file_page(folio, index);
3377  	return ret | VM_FAULT_LOCKED;
3378  
3379  page_not_uptodate:
3380  	/*
3381  	 * Umm, take care of errors if the page isn't up-to-date.
3382  	 * Try to re-read it _once_. We do this synchronously,
3383  	 * because there really aren't any performance issues here
3384  	 * and we need to check for errors.
3385  	 */
3386  	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3387  	error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
3388  	if (fpin)
3389  		goto out_retry;
3390  	folio_put(folio);
3391  
3392  	if (!error || error == AOP_TRUNCATED_PAGE)
3393  		goto retry_find;
3394  	filemap_invalidate_unlock_shared(mapping);
3395  
3396  	return VM_FAULT_SIGBUS;
3397  
3398  out_retry:
3399  	/*
3400  	 * We dropped the mmap_lock, so we need to return to the fault handler to
3401  	 * re-find the vma and come back and find our hopefully still populated
3402  	 * page.
3403  	 */
3404  	if (!IS_ERR(folio))
3405  		folio_put(folio);
3406  	if (mapping_locked)
3407  		filemap_invalidate_unlock_shared(mapping);
3408  	if (fpin)
3409  		fput(fpin);
3410  	return ret | VM_FAULT_RETRY;
3411  }
3412  EXPORT_SYMBOL(filemap_fault);
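/*
 * Illustrative sketch (not part of this file): filesystems that need their
 * own serialisation around faults typically wrap filemap_fault() instead of
 * using it directly.  The names foo_fault/foo_ilock_shared are hypothetical:
 *
 *	static vm_fault_t foo_fault(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		vm_fault_t ret;
 *
 *		foo_ilock_shared(inode);
 *		ret = filemap_fault(vmf);
 *		foo_iunlock_shared(inode);
 *
 *		return ret;
 *	}
 *
 * Per the contract documented above, if VM_FAULT_RETRY is set in the return
 * value the mmap_lock may already have been dropped inside filemap_fault().
 */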
3413  
3414  static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
3415  		pgoff_t start)
3416  {
3417  	struct mm_struct *mm = vmf->vma->vm_mm;
3418  
3419  	/* Huge page is mapped? No need to proceed. */
3420  	if (pmd_trans_huge(*vmf->pmd)) {
3421  		folio_unlock(folio);
3422  		folio_put(folio);
3423  		return true;
3424  	}
3425  
3426  	if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
3427  		struct page *page = folio_file_page(folio, start);
3428  		vm_fault_t ret = do_set_pmd(vmf, page);
3429  		if (!ret) {
3430  			/* The page is mapped successfully, reference consumed. */
3431  			folio_unlock(folio);
3432  			return true;
3433  		}
3434  	}
3435  
3436  	if (pmd_none(*vmf->pmd))
3437  		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
3438  
3439  	return false;
3440  }
3441  
3442  static struct folio *next_uptodate_page(struct folio *folio,
3443  				       struct address_space *mapping,
3444  				       struct xa_state *xas, pgoff_t end_pgoff)
3445  {
3446  	unsigned long max_idx;
3447  
3448  	do {
3449  		if (!folio)
3450  			return NULL;
3451  		if (xas_retry(xas, folio))
3452  			continue;
3453  		if (xa_is_value(folio))
3454  			continue;
3455  		if (folio_test_locked(folio))
3456  			continue;
3457  		if (!folio_try_get_rcu(folio))
3458  			continue;
3459  		/* Has the page moved or been split? */
3460  		if (unlikely(folio != xas_reload(xas)))
3461  			goto skip;
3462  		if (!folio_test_uptodate(folio) || folio_test_readahead(folio))
3463  			goto skip;
3464  		if (!folio_trylock(folio))
3465  			goto skip;
3466  		if (folio->mapping != mapping)
3467  			goto unlock;
3468  		if (!folio_test_uptodate(folio))
3469  			goto unlock;
3470  		max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3471  		if (xas->xa_index >= max_idx)
3472  			goto unlock;
3473  		return folio;
3474  unlock:
3475  		folio_unlock(folio);
3476  skip:
3477  		folio_put(folio);
3478  	} while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);
3479  
3480  	return NULL;
3481  }
3482  
3483  static inline struct folio *first_map_page(struct address_space *mapping,
3484  					  struct xa_state *xas,
3485  					  pgoff_t end_pgoff)
3486  {
3487  	return next_uptodate_page(xas_find(xas, end_pgoff),
3488  				  mapping, xas, end_pgoff);
3489  }
3490  
3491  static inline struct folio *next_map_page(struct address_space *mapping,
3492  					 struct xa_state *xas,
3493  					 pgoff_t end_pgoff)
3494  {
3495  	return next_uptodate_page(xas_next_entry(xas, end_pgoff),
3496  				  mapping, xas, end_pgoff);
3497  }
3498  
3499  vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3500  			     pgoff_t start_pgoff, pgoff_t end_pgoff)
3501  {
3502  	struct vm_area_struct *vma = vmf->vma;
3503  	struct file *file = vma->vm_file;
3504  	struct address_space *mapping = file->f_mapping;
3505  	pgoff_t last_pgoff = start_pgoff;
3506  	unsigned long addr;
3507  	XA_STATE(xas, &mapping->i_pages, start_pgoff);
3508  	struct folio *folio;
3509  	struct page *page;
3510  	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
3511  	vm_fault_t ret = 0;
3512  
3513  	rcu_read_lock();
3514  	folio = first_map_page(mapping, &xas, end_pgoff);
3515  	if (!folio)
3516  		goto out;
3517  
3518  	if (filemap_map_pmd(vmf, folio, start_pgoff)) {
3519  		ret = VM_FAULT_NOPAGE;
3520  		goto out;
3521  	}
3522  
3523  	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
3524  	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
3525  	if (!vmf->pte) {
3526  		folio_unlock(folio);
3527  		folio_put(folio);
3528  		goto out;
3529  	}
3530  	do {
3531  again:
3532  		page = folio_file_page(folio, xas.xa_index);
3533  		if (PageHWPoison(page))
3534  			goto unlock;
3535  
3536  		if (mmap_miss > 0)
3537  			mmap_miss--;
3538  
3539  		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
3540  		vmf->pte += xas.xa_index - last_pgoff;
3541  		last_pgoff = xas.xa_index;
3542  
3543  		/*
3544  		 * NOTE: If there are PTE markers, we'll leave them to be
3545  		 * handled in the specific fault path, and it'll prohibit the
3546  		 * fault-around logic.
3547  		 */
3548  		if (!pte_none(ptep_get(vmf->pte)))
3549  			goto unlock;
3550  
3551  		/* We're about to handle the fault */
3552  		if (vmf->address == addr)
3553  			ret = VM_FAULT_NOPAGE;
3554  
3555  		do_set_pte(vmf, page, addr);
3556  		/* no need to invalidate: a not-present page won't be cached */
3557  		update_mmu_cache(vma, addr, vmf->pte);
3558  		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
3559  			xas.xa_index++;
3560  			folio_ref_inc(folio);
3561  			goto again;
3562  		}
3563  		folio_unlock(folio);
3564  		continue;
3565  unlock:
3566  		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
3567  			xas.xa_index++;
3568  			goto again;
3569  		}
3570  		folio_unlock(folio);
3571  		folio_put(folio);
3572  	} while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
3573  	pte_unmap_unlock(vmf->pte, vmf->ptl);
3574  out:
3575  	rcu_read_unlock();
3576  	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
3577  	return ret;
3578  }
3579  EXPORT_SYMBOL(filemap_map_pages);
3580  
3581  vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3582  {
3583  	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
3584  	struct folio *folio = page_folio(vmf->page);
3585  	vm_fault_t ret = VM_FAULT_LOCKED;
3586  
3587  	sb_start_pagefault(mapping->host->i_sb);
3588  	file_update_time(vmf->vma->vm_file);
3589  	folio_lock(folio);
3590  	if (folio->mapping != mapping) {
3591  		folio_unlock(folio);
3592  		ret = VM_FAULT_NOPAGE;
3593  		goto out;
3594  	}
3595  	/*
3596  	 * We mark the folio dirty already here so that when freeze is in
3597  	 * progress, we are guaranteed that writeback during freezing will
3598  	 * see the dirty folio and writeprotect it again.
3599  	 */
3600  	folio_mark_dirty(folio);
3601  	folio_wait_stable(folio);
3602  out:
3603  	sb_end_pagefault(mapping->host->i_sb);
3604  	return ret;
3605  }
3606  
3607  const struct vm_operations_struct generic_file_vm_ops = {
3608  	.fault		= filemap_fault,
3609  	.map_pages	= filemap_map_pages,
3610  	.page_mkwrite	= filemap_page_mkwrite,
3611  };
3612  
3613  /* This is used for a general mmap of a disk file */
3614  
3615  int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3616  {
3617  	struct address_space *mapping = file->f_mapping;
3618  
3619  	if (!mapping->a_ops->read_folio)
3620  		return -ENOEXEC;
3621  	file_accessed(file);
3622  	vma->vm_ops = &generic_file_vm_ops;
3623  	return 0;
3624  }
3625  
3626  /*
3627   * This is for filesystems which do not implement ->writepage.
3628   */
3629  int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3630  {
3631  	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
3632  		return -EINVAL;
3633  	return generic_file_mmap(file, vma);
3634  }
3635  #else
3636  vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3637  {
3638  	return VM_FAULT_SIGBUS;
3639  }
3640  int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3641  {
3642  	return -ENOSYS;
3643  }
3644  int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3645  {
3646  	return -ENOSYS;
3647  }
3648  #endif /* CONFIG_MMU */
3649  
3650  EXPORT_SYMBOL(filemap_page_mkwrite);
3651  EXPORT_SYMBOL(generic_file_mmap);
3652  EXPORT_SYMBOL(generic_file_readonly_mmap);
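/*
 * Illustrative sketch (not part of this file): a simple filesystem wires the
 * mmap helpers above into its file_operations; foo_file_operations is a
 * hypothetical name:
 *
 *	const struct file_operations foo_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *	};
 *
 * generic_file_mmap() installs generic_file_vm_ops on the vma, so subsequent
 * faults are serviced by filemap_fault()/filemap_map_pages() above.
 */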
3653  
3654  static struct folio *do_read_cache_folio(struct address_space *mapping,
3655  		pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)
3656  {
3657  	struct folio *folio;
3658  	int err;
3659  
3660  	if (!filler)
3661  		filler = mapping->a_ops->read_folio;
3662  repeat:
3663  	folio = filemap_get_folio(mapping, index);
3664  	if (IS_ERR(folio)) {
3665  		folio = filemap_alloc_folio(gfp, 0);
3666  		if (!folio)
3667  			return ERR_PTR(-ENOMEM);
3668  		err = filemap_add_folio(mapping, folio, index, gfp);
3669  		if (unlikely(err)) {
3670  			folio_put(folio);
3671  			if (err == -EEXIST)
3672  				goto repeat;
3673  			/* Presumably ENOMEM for xarray node */
3674  			return ERR_PTR(err);
3675  		}
3676  
3677  		goto filler;
3678  	}
3679  	if (folio_test_uptodate(folio))
3680  		goto out;
3681  
3682  	if (!folio_trylock(folio)) {
3683  		folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
3684  		goto repeat;
3685  	}
3686  
3687  	/* Folio was truncated from mapping */
3688  	if (!folio->mapping) {
3689  		folio_unlock(folio);
3690  		folio_put(folio);
3691  		goto repeat;
3692  	}
3693  
3694  	/* Someone else locked and filled the page in a very small window */
3695  	if (folio_test_uptodate(folio)) {
3696  		folio_unlock(folio);
3697  		goto out;
3698  	}
3699  
3700  filler:
3701  	err = filemap_read_folio(file, filler, folio);
3702  	if (err) {
3703  		folio_put(folio);
3704  		if (err == AOP_TRUNCATED_PAGE)
3705  			goto repeat;
3706  		return ERR_PTR(err);
3707  	}
3708  
3709  out:
3710  	folio_mark_accessed(folio);
3711  	return folio;
3712  }
3713  
3714  /**
3715   * read_cache_folio - Read into page cache, fill it if needed.
3716   * @mapping: The address_space to read from.
3717   * @index: The index to read.
3718   * @filler: Function to perform the read, or NULL to use aops->read_folio().
3719   * @file: Passed to filler function, may be NULL if not required.
3720   *
3721   * Read one page into the page cache.  If it succeeds, the folio returned
3722   * will contain @index, but it may not be the first page of the folio.
3723   *
3724   * If the filler function returns an error, it will be returned to the
3725   * caller.
3726   *
3727   * Context: May sleep.  Expects mapping->invalidate_lock to be held.
3728   * Return: An uptodate folio on success, ERR_PTR() on failure.
3729   */
3730  struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
3731  		filler_t filler, struct file *file)
3732  {
3733  	return do_read_cache_folio(mapping, index, filler, file,
3734  			mapping_gfp_mask(mapping));
3735  }
3736  EXPORT_SYMBOL(read_cache_folio);
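/*
 * Illustrative sketch (not part of this file): typical usage reads one folio
 * and drops the reference when done; error handling is the caller's job:
 *
 *	struct folio *folio;
 *
 *	folio = read_cache_folio(mapping, index, NULL, NULL);
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	... access the data via kmap_local_folio() ...
 *	folio_put(folio);
 *
 * The folio comes back uptodate but unlocked; as documented above, callers
 * that need stability against truncation hold mapping->invalidate_lock.
 */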
3737  
3738  /**
3739   * mapping_read_folio_gfp - Read into page cache, using specified allocation flags.
3740   * @mapping:	The address_space for the folio.
3741   * @index:	The index that the allocated folio will contain.
3742   * @gfp:	The page allocator flags to use if allocating.
3743   *
3744   * This is the same as "read_cache_folio(mapping, index, NULL, NULL)", but with
3745   * any new memory allocations done using the specified allocation flags.
3746   *
3747   * The most likely error from this function is EIO, but ENOMEM is
3748   * possible and so is EINTR.  If ->read_folio returns another error,
3749   * that will be returned to the caller.
3750   *
3751   * The function expects mapping->invalidate_lock to be already held.
3752   *
3753   * Return: Uptodate folio on success, ERR_PTR() on failure.
3754   */
3755  struct folio *mapping_read_folio_gfp(struct address_space *mapping,
3756  		pgoff_t index, gfp_t gfp)
3757  {
3758  	return do_read_cache_folio(mapping, index, NULL, NULL, gfp);
3759  }
3760  EXPORT_SYMBOL(mapping_read_folio_gfp);
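/*
 * Illustrative sketch (not part of this file): a caller that must not recurse
 * into filesystem reclaim restricts the allocation context via @gfp:
 *
 *	folio = mapping_read_folio_gfp(mapping, index, GFP_NOFS);
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 */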
3761  
3762  static struct page *do_read_cache_page(struct address_space *mapping,
3763  		pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)
3764  {
3765  	struct folio *folio;
3766  
3767  	folio = do_read_cache_folio(mapping, index, filler, file, gfp);
3768  	if (IS_ERR(folio))
3769  		return &folio->page;
3770  	return folio_file_page(folio, index);
3771  }
3772  
3773  struct page *read_cache_page(struct address_space *mapping,
3774  			pgoff_t index, filler_t *filler, struct file *file)
3775  {
3776  	return do_read_cache_page(mapping, index, filler, file,
3777  			mapping_gfp_mask(mapping));
3778  }
3779  EXPORT_SYMBOL(read_cache_page);
3780  
3781  /**
3782   * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3783   * @mapping:	the page's address_space
3784   * @index:	the page index
3785   * @gfp:	the page allocator flags to use if allocating
3786   *
3787   * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3788   * any new page allocations done using the specified allocation flags.
3789   *
3790   * If the page does not get brought uptodate, return -EIO.
3791   *
3792   * The function expects mapping->invalidate_lock to be already held.
3793   *
3794   * Return: up to date page on success, ERR_PTR() on failure.
3795   */
3796  struct page *read_cache_page_gfp(struct address_space *mapping,
3797  				pgoff_t index,
3798  				gfp_t gfp)
3799  {
3800  	return do_read_cache_page(mapping, index, NULL, NULL, gfp);
3801  }
3802  EXPORT_SYMBOL(read_cache_page_gfp);
3803  
3804  /*
3805   * Warn about a page cache invalidation failure during a direct I/O write.
3806   */
3807  static void dio_warn_stale_pagecache(struct file *filp)
3808  {
3809  	static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
3810  	char pathname[128];
3811  	char *path;
3812  
3813  	errseq_set(&filp->f_mapping->wb_err, -EIO);
3814  	if (__ratelimit(&_rs)) {
3815  		path = file_path(filp, pathname, sizeof(pathname));
3816  		if (IS_ERR(path))
3817  			path = "(unknown)";
3818  		pr_crit("Page cache invalidation failure on direct I/O.  Possible data corruption due to collision with buffered I/O!\n");
3819  		pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
3820  			current->comm);
3821  	}
3822  }
3823  
3824  void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count)
3825  {
3826  	struct address_space *mapping = iocb->ki_filp->f_mapping;
3827  
3828  	if (mapping->nrpages &&
3829  	    invalidate_inode_pages2_range(mapping,
3830  			iocb->ki_pos >> PAGE_SHIFT,
3831  			(iocb->ki_pos + count - 1) >> PAGE_SHIFT))
3832  		dio_warn_stale_pagecache(iocb->ki_filp);
3833  }
3834  
3835  ssize_t
3836  generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
3837  {
3838  	struct address_space *mapping = iocb->ki_filp->f_mapping;
3839  	size_t write_len = iov_iter_count(from);
3840  	ssize_t written;
3841  
3842  	/*
3843  	 * If a page cannot be invalidated, return 0 to fall back
3844  	 * to a buffered write.
3845  	 */
3846  	written = kiocb_invalidate_pages(iocb, write_len);
3847  	if (written) {
3848  		if (written == -EBUSY)
3849  			return 0;
3850  		return written;
3851  	}
3852  
3853  	written = mapping->a_ops->direct_IO(iocb, from);
3854  
3855  	/*
3856  	 * Finally, try again to invalidate clean pages which might have been
3857  	 * cached by non-direct readahead, or faulted in by get_user_pages()
3858  	 * if the source of the write was an mmap'ed region of the file
3859  	 * we're writing.  Either one is a pretty crazy thing to do,
3860  	 * so we don't support it 100%.  If this invalidation
3861  	 * fails, tough, the write still worked...
3862  	 *
3863  	 * Most of the time we do not need this since dio_complete() will do
3864  	 * the invalidation for us. However there are some file systems that
3865  	 * do not end up with dio_complete() being called, so let's not break
3866  	 * them by removing it completely.
3867  	 *
3868  	 * A notable example is blkdev_direct_IO().
3869  	 *
3870  	 * Skip invalidation for async writes or if mapping has no pages.
3871  	 */
3872  	if (written > 0) {
3873  		struct inode *inode = mapping->host;
3874  		loff_t pos = iocb->ki_pos;
3875  
3876  		kiocb_invalidate_post_direct_write(iocb, written);
3877  		pos += written;
3878  		write_len -= written;
3879  		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
3880  			i_size_write(inode, pos);
3881  			mark_inode_dirty(inode);
3882  		}
3883  		iocb->ki_pos = pos;
3884  	}
3885  	if (written != -EIOCBQUEUED)
3886  		iov_iter_revert(from, write_len - iov_iter_count(from));
3887  	return written;
3888  }
3889  EXPORT_SYMBOL(generic_file_direct_write);
3890  
3891  ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
3892  {
3893  	struct file *file = iocb->ki_filp;
3894  	loff_t pos = iocb->ki_pos;
3895  	struct address_space *mapping = file->f_mapping;
3896  	const struct address_space_operations *a_ops = mapping->a_ops;
3897  	long status = 0;
3898  	ssize_t written = 0;
3899  
3900  	do {
3901  		struct page *page;
3902  		unsigned long offset;	/* Offset into pagecache page */
3903  		unsigned long bytes;	/* Bytes to write to page */
3904  		size_t copied;		/* Bytes copied from user */
3905  		void *fsdata = NULL;
3906  
3907  		offset = (pos & (PAGE_SIZE - 1));
3908  		bytes = min_t(unsigned long, PAGE_SIZE - offset,
3909  						iov_iter_count(i));
3910  
3911  again:
3912  		/*
3913  		 * Bring in the user page that we will copy from _first_.
3914  		 * Otherwise there's a nasty deadlock on copying from the
3915  		 * same page as we're writing to, without it being marked
3916  		 * up-to-date.
3917  		 */
3918  		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
3919  			status = -EFAULT;
3920  			break;
3921  		}
3922  
3923  		if (fatal_signal_pending(current)) {
3924  			status = -EINTR;
3925  			break;
3926  		}
3927  
3928  		status = a_ops->write_begin(file, mapping, pos, bytes,
3929  						&page, &fsdata);
3930  		if (unlikely(status < 0))
3931  			break;
3932  
3933  		if (mapping_writably_mapped(mapping))
3934  			flush_dcache_page(page);
3935  
3936  		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
3937  		flush_dcache_page(page);
3938  
3939  		status = a_ops->write_end(file, mapping, pos, bytes, copied,
3940  						page, fsdata);
3941  		if (unlikely(status != copied)) {
3942  			iov_iter_revert(i, copied - max(status, 0L));
3943  			if (unlikely(status < 0))
3944  				break;
3945  		}
3946  		cond_resched();
3947  
3948  		if (unlikely(status == 0)) {
3949  			/*
3950  			 * A short copy made ->write_end() reject the
3951  			 * thing entirely.  Might be memory poisoning
3952  			 * halfway through, might be a race with munmap,
3953  			 * might be severe memory pressure.
3954  			 */
3955  			if (copied)
3956  				bytes = copied;
3957  			goto again;
3958  		}
3959  		pos += status;
3960  		written += status;
3961  
3962  		balance_dirty_pages_ratelimited(mapping);
3963  	} while (iov_iter_count(i));
3964  
3965  	if (!written)
3966  		return status;
3967  	iocb->ki_pos += written;
3968  	return written;
3969  }
3970  EXPORT_SYMBOL(generic_perform_write);
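/*
 * Illustrative sketch (not part of this file): a minimal buffered ->write_iter
 * built on generic_perform_write(), doing the same checks/locking/syncing that
 * generic_file_write_iter() below performs; foo_write_iter is hypothetical:
 *
 *	static ssize_t foo_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = generic_perform_write(iocb, from);
 *		inode_unlock(inode);
 *
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */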
3971  
3972  /**
3973   * __generic_file_write_iter - write data to a file
3974   * @iocb:	IO state structure (file, offset, etc.)
3975   * @from:	iov_iter with data to write
3976   *
3977   * This function does all the work needed for actually writing data to a
3978   * file. It does all basic checks, removes SUID from the file, updates
3979   * modification times and calls proper subroutines depending on whether we
3980   * do direct IO or a standard buffered write.
3981   *
3982   * It expects i_rwsem to be grabbed unless we work on a block device or similar
3983   * object which does not need locking at all.
3984   *
3985   * This function does *not* take care of syncing data in case of O_SYNC write.
3986   * A caller has to handle it. This is mainly due to the fact that we want to
3987   * avoid syncing under i_rwsem.
3988   *
3989   * Return:
3990   * * number of bytes written, even for truncated writes
3991   * * negative error code if no data has been written at all
3992   */
3993  ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3994  {
3995  	struct file *file = iocb->ki_filp;
3996  	struct address_space *mapping = file->f_mapping;
3997  	struct inode *inode = mapping->host;
3998  	ssize_t ret;
3999  
4000  	ret = file_remove_privs(file);
4001  	if (ret)
4002  		return ret;
4003  
4004  	ret = file_update_time(file);
4005  	if (ret)
4006  		return ret;
4007  
4008  	if (iocb->ki_flags & IOCB_DIRECT) {
4009  		ret = generic_file_direct_write(iocb, from);
4010  		/*
4011  		 * If the write stopped short of completing, fall back to
4012  		 * buffered writes.  Some filesystems do this for writes to
4013  		 * holes, for example.  For DAX files, a buffered write will
4014  		 * not succeed (even if it did, DAX does not handle dirty
4015  		 * page-cache pages correctly).
4016  		 */
4017  		if (ret < 0 || !iov_iter_count(from) || IS_DAX(inode))
4018  			return ret;
4019  		return direct_write_fallback(iocb, from, ret,
4020  				generic_perform_write(iocb, from));
4021  	}
4022  
4023  	return generic_perform_write(iocb, from);
4024  }
4025  EXPORT_SYMBOL(__generic_file_write_iter);
4026  
4027  /**
4028   * generic_file_write_iter - write data to a file
4029   * @iocb:	IO state structure
4030   * @from:	iov_iter with data to write
4031   *
4032   * This is a wrapper around __generic_file_write_iter() to be used by most
4033   * filesystems. It takes care of syncing the file in case of O_SYNC file
4034   * and acquires i_rwsem as needed.
4035   * Return:
4036   * * negative error code if no data has been written at all or
4037   *   vfs_fsync_range() failed for a synchronous write
4038   * * number of bytes written, even for truncated writes
4039   */
4040  ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4041  {
4042  	struct file *file = iocb->ki_filp;
4043  	struct inode *inode = file->f_mapping->host;
4044  	ssize_t ret;
4045  
4046  	inode_lock(inode);
4047  	ret = generic_write_checks(iocb, from);
4048  	if (ret > 0)
4049  		ret = __generic_file_write_iter(iocb, from);
4050  	inode_unlock(inode);
4051  
4052  	if (ret > 0)
4053  		ret = generic_write_sync(iocb, ret);
4054  	return ret;
4055  }
4056  EXPORT_SYMBOL(generic_file_write_iter);
4057  
4058  /**
4059   * filemap_release_folio() - Release fs-specific metadata on a folio.
4060   * @folio: The folio which the kernel is trying to free.
4061   * @gfp: Memory allocation flags (and I/O mode).
4062   *
4063   * The address_space is trying to release any data attached to a folio
4064   * (presumably at folio->private).
4065   *
4066   * This will also be called if the private_2 flag is set on the folio,
4067   * indicating that the folio has other metadata associated with it.
4068   *
4069   * The @gfp argument specifies whether I/O may be performed to release
4070   * this page (__GFP_IO), and whether the call may block
4071   * (__GFP_RECLAIM & __GFP_FS).
4072   *
4073   * Return: %true if the release was successful, otherwise %false.
4074   */
4075  bool filemap_release_folio(struct folio *folio, gfp_t gfp)
4076  {
4077  	struct address_space * const mapping = folio->mapping;
4078  
4079  	BUG_ON(!folio_test_locked(folio));
4080  	if (folio_test_writeback(folio))
4081  		return false;
4082  
4083  	if (mapping && mapping->a_ops->release_folio)
4084  		return mapping->a_ops->release_folio(folio, gfp);
4085  	return try_to_free_buffers(folio);
4086  }
4087  EXPORT_SYMBOL(filemap_release_folio);
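/*
 * Illustrative sketch (not part of this file): a filesystem that attaches
 * private data to folios supplies ->release_folio, which the helper above
 * dispatches to.  Returning false tells the VM the folio cannot be freed yet.
 * foo_release_folio, struct foo_private and foo_private_busy are hypothetical:
 *
 *	static bool foo_release_folio(struct folio *folio, gfp_t gfp)
 *	{
 *		struct foo_private *priv = folio_get_private(folio);
 *
 *		if (!priv)
 *			return true;
 *		if (foo_private_busy(priv))
 *			return false;
 *		folio_detach_private(folio);
 *		kfree(priv);
 *		return true;
 *	}
 */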
4088  
4089  #ifdef CONFIG_CACHESTAT_SYSCALL
4090  /**
4091   * filemap_cachestat() - compute the page cache statistics of a mapping
4092   * @mapping:	The mapping to compute the statistics for.
4093   * @first_index:	The starting page cache index.
4094   * @last_index:	The final page index (inclusive).
4095   * @cs:	the cachestat struct to write the result to.
4096   *
4097   * This will query the page cache statistics of a mapping in the
4098   * page range of [first_index, last_index] (inclusive). The statistics
4099   * queried include: number of dirty pages, number of pages marked for
4100   * writeback, and the number of (recently) evicted pages.
4101   */
4102  static void filemap_cachestat(struct address_space *mapping,
4103  		pgoff_t first_index, pgoff_t last_index, struct cachestat *cs)
4104  {
4105  	XA_STATE(xas, &mapping->i_pages, first_index);
4106  	struct folio *folio;
4107  
4108  	rcu_read_lock();
4109  	xas_for_each(&xas, folio, last_index) {
4110  		unsigned long nr_pages;
4111  		pgoff_t folio_first_index, folio_last_index;
4112  
4113  		if (xas_retry(&xas, folio))
4114  			continue;
4115  
4116  		if (xa_is_value(folio)) {
4117  			/* page is evicted */
4118  			void *shadow = (void *)folio;
4119  			bool workingset; /* not used */
4120  			int order = xa_get_order(xas.xa, xas.xa_index);
4121  
4122  			nr_pages = 1 << order;
4123  			folio_first_index = round_down(xas.xa_index, 1 << order);
4124  			folio_last_index = folio_first_index + nr_pages - 1;
4125  
4126  			/* Folios might straddle the range boundaries, only count covered pages */
4127  			if (folio_first_index < first_index)
4128  				nr_pages -= first_index - folio_first_index;
4129  
4130  			if (folio_last_index > last_index)
4131  				nr_pages -= folio_last_index - last_index;
4132  
4133  			cs->nr_evicted += nr_pages;
4134  
4135  #ifdef CONFIG_SWAP /* implies CONFIG_MMU */
4136  			if (shmem_mapping(mapping)) {
4137  				/* shmem file - in swap cache */
4138  				swp_entry_t swp = radix_to_swp_entry(folio);
4139  
4140  				shadow = get_shadow_from_swap_cache(swp);
4141  			}
4142  #endif
4143  			if (workingset_test_recent(shadow, true, &workingset))
4144  				cs->nr_recently_evicted += nr_pages;
4145  
4146  			goto resched;
4147  		}
4148  
4149  		nr_pages = folio_nr_pages(folio);
4150  		folio_first_index = folio_pgoff(folio);
4151  		folio_last_index = folio_first_index + nr_pages - 1;
4152  
4153  		/* Folios might straddle the range boundaries, only count covered pages */
4154  		if (folio_first_index < first_index)
4155  			nr_pages -= first_index - folio_first_index;
4156  
4157  		if (folio_last_index > last_index)
4158  			nr_pages -= folio_last_index - last_index;
4159  
4160  		/* page is in cache */
4161  		cs->nr_cache += nr_pages;
4162  
4163  		if (folio_test_dirty(folio))
4164  			cs->nr_dirty += nr_pages;
4165  
4166  		if (folio_test_writeback(folio))
4167  			cs->nr_writeback += nr_pages;
4168  
4169  resched:
4170  		if (need_resched()) {
4171  			xas_pause(&xas);
4172  			cond_resched_rcu();
4173  		}
4174  	}
4175  	rcu_read_unlock();
4176  }
4177  
4178  /*
4179   * The cachestat(2) system call.
4180   *
4181   * cachestat() returns the page cache statistics of a file in the
4182   * bytes range specified by `off` and `len`: number of cached pages,
4183   * number of dirty pages, number of pages marked for writeback,
4184   * number of evicted pages, and number of recently evicted pages.
4185   *
4186   * An evicted page is a page that was previously in the page cache
4187   * but has since been evicted. A page is recently evicted if its last
4188   * eviction was recent enough that its reentry to the cache would
4189   * indicate that it is actively being used by the system, and that
4190   * there is memory pressure on the system.
4191   *
4192   * `off` and `len` must be non-negative integers. If `len` > 0,
4193   * the queried range is [`off`, `off` + `len`]. If `len` == 0,
4194   * we will query in the range from `off` to the end of the file.
4195   *
4196   * The `flags` argument is unused for now, but is included for future
4197   * extensibility. Users should pass 0 (i.e. no flag specified).
4198   *
4199   * Currently, hugetlbfs is not supported.
4200   *
4201   * Because the status of a page can change after cachestat() checks it
4202   * but before it returns to the application, the returned values may
4203   * contain stale information.
4204   *
4205   * return values:
4206   *  zero        - success
4207   *  -EFAULT     - cstat or cstat_range points to an illegal address
4208   *  -EINVAL     - invalid flags
4209   *  -EBADF      - invalid file descriptor
4210   *  -EOPNOTSUPP - file descriptor is of a hugetlbfs file
4211   */
4212  SYSCALL_DEFINE4(cachestat, unsigned int, fd,
4213  		struct cachestat_range __user *, cstat_range,
4214  		struct cachestat __user *, cstat, unsigned int, flags)
4215  {
4216  	struct fd f = fdget(fd);
4217  	struct address_space *mapping;
4218  	struct cachestat_range csr;
4219  	struct cachestat cs;
4220  	pgoff_t first_index, last_index;
4221  
4222  	if (!f.file)
4223  		return -EBADF;
4224  
4225  	if (copy_from_user(&csr, cstat_range,
4226  			sizeof(struct cachestat_range))) {
4227  		fdput(f);
4228  		return -EFAULT;
4229  	}
4230  
4231  	/* hugetlbfs is not supported */
4232  	if (is_file_hugepages(f.file)) {
4233  		fdput(f);
4234  		return -EOPNOTSUPP;
4235  	}
4236  
4237  	if (flags != 0) {
4238  		fdput(f);
4239  		return -EINVAL;
4240  	}
4241  
4242  	first_index = csr.off >> PAGE_SHIFT;
4243  	last_index =
4244  		csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;
4245  	memset(&cs, 0, sizeof(struct cachestat));
4246  	mapping = f.file->f_mapping;
4247  	filemap_cachestat(mapping, first_index, last_index, &cs);
4248  	fdput(f);
4249  
4250  	if (copy_to_user(cstat, &cs, sizeof(struct cachestat)))
4251  		return -EFAULT;
4252  
4253  	return 0;
4254  }
4255  #endif /* CONFIG_CACHESTAT_SYSCALL */
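/*
 * Illustrative sketch (not part of this file): userspace usage of the system
 * call above via syscall(2), since a libc wrapper may not exist yet.  The
 * struct definitions come from the uapi header <linux/mman.h>, and
 * __NR_cachestat requires sufficiently new kernel headers:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/mman.h>
 *
 *	struct cachestat_range range = { .off = 0, .len = 0 };
 *	struct cachestat cs;
 *
 *	if (syscall(__NR_cachestat, fd, &range, &cs, 0) == 0)
 *		printf("cached %llu dirty %llu writeback %llu evicted %llu\n",
 *		       (unsigned long long)cs.nr_cache,
 *		       (unsigned long long)cs.nr_dirty,
 *		       (unsigned long long)cs.nr_writeback,
 *		       (unsigned long long)cs.nr_evicted);
 *
 * A len of 0 queries from off to the end of the file, as described above.
 */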
4256