// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/error-injection.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include <linux/delayacct.h>
#include <linux/psi.h>
#include <linux/ramfs.h>
#include <linux/page_idle.h>
#include <linux/migrate.h>
#include <linux/pipe_fs_i.h>
#include <linux/splice.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem		(truncate_pagecache)
 *    ->private_lock		(__free_pte->block_dirty_folio)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->i_pages lock
 *
 *  ->i_rwsem
 *    ->invalidate_lock		(acquired by fs in truncate path)
 *      ->i_mmap_rwsem		(truncate->unmap_mapping_range)
 *
 *  ->mmap_lock
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->i_pages lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_lock
 *    ->invalidate_lock		(filemap_fault)
 *      ->lock_page		(filemap_fault, access_process_vm)
 *
 *  ->i_rwsem			(generic_perform_write)
 *    ->mmap_lock		(fault_in_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->i_pages lock		(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock		(vma_merge)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->i_pages lock		(try_to_unmap_one)
 *    ->lruvec->lru_lock	(follow_page->mark_page_accessed)
 *    ->lruvec->lru_lock	(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->i_pages lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    ->memcg->move_lock	(page_remove_rmap->lock_page_memcg)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->block_dirty_folio)
 *
 * ->i_mmap_rwsem
 *   ->tasklist_lock            (memory_failure, collect_procs_ao)
 */
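
/*
 * Illustrative sketch (not part of this file): a truncate-style path in a
 * filesystem respects the i_rwsem -> invalidate_lock ordering documented
 * above; the inode and size names are assumptions for the example.
 *
 *	inode_lock(inode);
 *	filemap_invalidate_lock(inode->i_mapping);
 *	truncate_pagecache(inode, new_size);
 *	filemap_invalidate_unlock(inode->i_mapping);
 *	inode_unlock(inode);
 */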

static void page_cache_delete(struct address_space *mapping,
				   struct folio *folio, void *shadow)
{
	XA_STATE(xas, &mapping->i_pages, folio->index);
	long nr = 1;

	mapping_set_update(&xas, mapping);

	/* hugetlb pages are represented by a single entry in the xarray */
	if (!folio_test_hugetlb(folio)) {
		xas_set_order(&xas, folio->index, folio_order(folio));
		nr = folio_nr_pages(folio);
	}

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	xas_store(&xas, shadow);
	xas_init_marks(&xas);

	folio->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */
	mapping->nrpages -= nr;
}

static void filemap_unaccount_folio(struct address_space *mapping,
		struct folio *folio)
{
	long nr;

	VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
			 current->comm, folio_pfn(folio));
		dump_page(&folio->page, "still mapped when deleted");
		dump_stack();
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

		if (mapping_exiting(mapping) && !folio_test_large(folio)) {
			int mapcount = page_mapcount(&folio->page);

			if (folio_ref_count(folio) >= mapcount + 2) {
				/*
				 * All vmas have already been torn down, so it's
				 * a good bet that actually the page is unmapped
				 * and we'd rather not leak it: if we're wrong,
				 * another bad page check should catch it later.
				 */
				page_mapcount_reset(&folio->page);
				folio_ref_sub(folio, mapcount);
			}
		}
	}

	/* hugetlb folios do not participate in page cache accounting. */
	if (folio_test_hugetlb(folio))
		return;

	nr = folio_nr_pages(folio);

	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	if (folio_test_swapbacked(folio)) {
		__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
		if (folio_test_pmd_mappable(folio))
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
	} else if (folio_test_pmd_mappable(folio)) {
		__lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
		filemap_nr_thps_dec(mapping);
	}

	/*
	 * At this point the folio must have been either written back or
	 * cleaned by truncate.  A dirty folio here signals a bug and, on
	 * ordinary filesystems, loss of unwritten data.
	 *
	 * But it's harmless on in-memory filesystems like tmpfs, and it can
	 * occur when a driver that did get_user_pages() sets the page dirty
	 * before putting it, while the inode is being finally evicted.
	 *
	 * The code below fixes the dirty accounting after removing the folio
	 * entirely, but leaves the dirty flag set: that has no effect on a
	 * truncated folio, and will in any case be cleared before the folio
	 * is returned to the buddy allocator.
	 */
	if (WARN_ON_ONCE(folio_test_dirty(folio) &&
			 mapping_can_writeback(mapping)))
		folio_account_cleaned(folio, inode_to_wb(mapping->host));
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the i_pages lock.
 */
void __filemap_remove_folio(struct folio *folio, void *shadow)
{
	struct address_space *mapping = folio->mapping;

	trace_mm_filemap_delete_from_page_cache(folio);
	filemap_unaccount_folio(mapping, folio);
	page_cache_delete(mapping, folio, shadow);
}

void filemap_free_folio(struct address_space *mapping, struct folio *folio)
{
	void (*free_folio)(struct folio *);
	int refs = 1;

	free_folio = mapping->a_ops->free_folio;
	if (free_folio)
		free_folio(folio);

	if (folio_test_large(folio) && !folio_test_hugetlb(folio))
		refs = folio_nr_pages(folio);
	folio_put_refs(folio, refs);
}

/**
 * filemap_remove_folio - Remove folio from page cache.
 * @folio: The folio.
 *
 * This must be called only on folios that are locked and have been
 * verified to be in the page cache.  It will never put the folio on
 * the free list because the caller holds a reference on the folio.
 */
void filemap_remove_folio(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;

	BUG_ON(!folio_test_locked(folio));
	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	__filemap_remove_folio(folio, NULL);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	filemap_free_folio(mapping, folio);
}
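
/*
 * Illustrative sketch (not from this file): a typical caller locks the
 * folio and re-checks that it is still in the expected mapping before
 * removing it; the variable names are assumptions for the example.
 *
 *	folio_lock(folio);
 *	if (folio->mapping == mapping)
 *		filemap_remove_folio(folio);
 *	folio_unlock(folio);
 *	folio_put(folio);
 */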

/*
 * page_cache_delete_batch - delete several folios from page cache
 * @mapping: the mapping to which folios belong
 * @fbatch: batch of folios to delete
 *
 * The function walks over mapping->i_pages and removes folios passed in
 * @fbatch from the mapping. The function expects @fbatch to be sorted
 * by page index and is optimised for it to be dense.
 * It tolerates holes in @fbatch (mapping entries at those indices are not
 * modified).
 *
 * The function expects the i_pages lock to be held.
 */
static void page_cache_delete_batch(struct address_space *mapping,
			     struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
	long total_pages = 0;
	int i = 0;
	struct folio *folio;

	mapping_set_update(&xas, mapping);
	xas_for_each(&xas, folio, ULONG_MAX) {
		if (i >= folio_batch_count(fbatch))
			break;

		/* A swap/dax/shadow entry got inserted? Skip it. */
		if (xa_is_value(folio))
			continue;
		/*
		 * A page got inserted in our range? Skip it. We have our
		 * pages locked so they are protected from being removed.
		 * If we see a page whose index is higher than ours, it
		 * means our page has been removed, which shouldn't be
		 * possible because we're holding the page lock.
		 */
		if (folio != fbatch->folios[i]) {
			VM_BUG_ON_FOLIO(folio->index >
					fbatch->folios[i]->index, folio);
			continue;
		}

		WARN_ON_ONCE(!folio_test_locked(folio));

		folio->mapping = NULL;
		/* Leave folio->index set: truncation lookup relies on it */

		i++;
		xas_store(&xas, NULL);
		total_pages += folio_nr_pages(folio);
	}
	mapping->nrpages -= total_pages;
}

void delete_from_page_cache_batch(struct address_space *mapping,
				  struct folio_batch *fbatch)
{
	int i;

	if (!folio_batch_count(fbatch))
		return;

	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		trace_mm_filemap_delete_from_page_cache(folio);
		filemap_unaccount_folio(mapping, folio);
	}
	page_cache_delete_batch(mapping, fbatch);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	for (i = 0; i < folio_batch_count(fbatch); i++)
		filemap_free_folio(mapping, fbatch->folios[i]);
}

int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
EXPORT_SYMBOL(filemap_check_errors);

static int filemap_check_and_keep_errors(struct address_space *mapping)
{
	/* Check for outstanding write errors */
	if (test_bit(AS_EIO, &mapping->flags))
		return -EIO;
	if (test_bit(AS_ENOSPC, &mapping->flags))
		return -ENOSPC;
	return 0;
}

/**
 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @wbc:	the writeback_control controlling the writeout
 *
 * Call writepages on the mapping using the provided wbc to control the
 * writeout.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_fdatawrite_wbc(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	int ret;

	if (!mapping_can_writeback(mapping) ||
	    !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	wbc_attach_fdatawrite_inode(wbc, mapping->host);
	ret = do_writepages(mapping, wbc);
	wbc_detach_inode(wbc);
	return ret;
}
EXPORT_SYMBOL(filemap_fdatawrite_wbc);

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	return filemap_fdatawrite_wbc(mapping, &wbc);
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);
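
/*
 * Illustrative sketch (not from this file): kicking off data-integrity
 * writeback for the bytes an O_DIRECT write is about to overwrite;
 * @pos and @count are assumptions for the example.
 *
 *	err = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
 *	if (err)
 *		return err;
 */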

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_range_has_page - check if a page exists in range.
 * @mapping:           address space within which to check
 * @start_byte:        offset in bytes where the range starts
 * @end_byte:          offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.
 *
 * Return: %true if at least one page exists in the specified range,
 * %false otherwise.
 */
bool filemap_range_has_page(struct address_space *mapping,
			   loff_t start_byte, loff_t end_byte)
{
	struct folio *folio;
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;

	if (end_byte < start_byte)
		return false;

	rcu_read_lock();
	for (;;) {
		folio = xas_find(&xas, max);
		if (xas_retry(&xas, folio))
			continue;
		/* Shadow entries don't count */
		if (xa_is_value(folio))
			continue;
		/*
		 * We don't need to try to pin this page; we're about to
		 * release the RCU lock anyway.  It is enough to know that
		 * there was a page here recently.
		 */
		break;
	}
	rcu_read_unlock();

	return folio != NULL;
}
EXPORT_SYMBOL(filemap_range_has_page);
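
/*
 * Illustrative sketch (not from this file): a direct-I/O path can use
 * this as a cheap hint that flushing is needed before issuing the I/O;
 * @pos and @count are assumptions for the example.
 *
 *	if (filemap_range_has_page(mapping, pos, pos + count - 1)) {
 *		err = filemap_write_and_wait_range(mapping, pos,
 *						   pos + count - 1);
 *		if (err)
 *			return err;
 *	}
 */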

static void __filemap_fdatawait_range(struct address_space *mapping,
				     loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct folio_batch fbatch;
	unsigned nr_folios;

	folio_batch_init(&fbatch);

	while (index <= end) {
		unsigned i;

		nr_folios = filemap_get_folios_tag(mapping, &index, end,
				PAGECACHE_TAG_WRITEBACK, &fbatch);

		if (!nr_folios)
			break;

		for (i = 0; i < nr_folios; i++) {
			struct folio *folio = fbatch.folios[i];

			folio_wait_writeback(folio);
			folio_clear_error(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range);
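
/*
 * Illustrative sketch (not from this file): the usual write-then-wait
 * pairing for a byte range; the variable names are assumptions and the
 * error handling is kept minimal.
 *
 *	err = filemap_fdatawrite_range(mapping, start, end);
 *	if (!err)
 *		err = filemap_fdatawait_range(mapping, start, end);
 *	if (err)
 *		return err;
 */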

/**
 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space in the
 * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
 * this function does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);

/**
 * file_fdatawait_range - wait for writeback to complete
 * @file:		file pointing to address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the address space that file
 * refers to, in the given range and wait for all of them.  Check error
 * status of the address space vs. the file->f_wb_err cursor and return it.
 *
 * Since the error status of the file is advanced by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space vs. the file->f_wb_err cursor.
 */
int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
{
	struct address_space *mapping = file->f_mapping;

	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return file_check_and_advance_wb_err(file);
}
EXPORT_SYMBOL(file_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_keep_errors);

/* Returns true if writeback might be needed or already in progress. */
static bool mapping_needs_writeback(struct address_space *mapping)
{
	return mapping->nrpages;
}

bool filemap_range_has_writeback(struct address_space *mapping,
				 loff_t start_byte, loff_t end_byte)
{
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;
	struct folio *folio;

	if (end_byte < start_byte)
		return false;

	rcu_read_lock();
	xas_for_each(&xas, folio, max) {
		if (xas_retry(&xas, folio))
			continue;
		if (xa_is_value(folio))
			continue;
		if (folio_test_dirty(folio) || folio_test_locked(folio) ||
				folio_test_writeback(folio))
			break;
	}
	rcu_read_unlock();
	return folio != NULL;
}
EXPORT_SYMBOL_GPL(filemap_range_has_writeback);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * Return: error status of the address space.
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0, err2;

	if (lend < lstart)
		return 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/*
		 * Even if the above returned an error, the pages may have
		 * been partially written (e.g. -ENOSPC), so we wait for them.
		 * But -EIO is a special case: it may indicate that something
		 * went badly wrong (e.g. a bug), so we avoid waiting in that
		 * case.
		 */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = filemap_check_errors(mapping);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
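
/*
 * Illustrative sketch (not from this file): a simple ->fsync()
 * implementation typically starts by flushing and waiting on the
 * requested range; the function and variable names are assumptions.
 *
 *	static int example_fsync(struct file *file, loff_t start,
 *				 loff_t end, int datasync)
 *	{
 *		struct inode *inode = file_inode(file);
 *		int err;
 *
 *		err = filemap_write_and_wait_range(file->f_mapping,
 *						   start, end);
 *		if (err)
 *			return err;
 *		inode_lock(inode);
 *		... write out metadata here ...
 *		inode_unlock(inode);
 *		return 0;
 *	}
 */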

void __filemap_set_wb_err(struct address_space *mapping, int err)
{
	errseq_t eseq = errseq_set(&mapping->wb_err, err);

	trace_filemap_set_wb_err(mapping, eseq);
}
EXPORT_SYMBOL(__filemap_set_wb_err);

/**
 * file_check_and_advance_wb_err - report the wb error (if any) that was
 *				   previously reported and advance wb_err to
 *				   the current one
 * @file: struct file on which the error is being reported
 *
 * When userland calls fsync (or something like nfsd does the equivalent), we
 * want to report any writeback errors that occurred since the last fsync (or
 * since the file was opened if there haven't been any).
 *
 * Grab the wb_err from the mapping. If it matches what we have in the file,
 * then just quickly return 0. The file is all caught up.
 *
 * If it doesn't match, then take the mapping value, set the "seen" flag in
 * it and try to swap it into place. If it works, or another task beat us
 * to it with the new value, then update the f_wb_err and return the error
 * portion. The error at this point must be reported via proper channels
 * (à la fsync, or the NFS COMMIT operation, etc.).
 *
 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 * value is protected by the f_lock since we must ensure that it reflects
 * the latest value swapped in for this file descriptor.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_check_and_advance_wb_err(struct file *file)
{
	int err = 0;
	errseq_t old = READ_ONCE(file->f_wb_err);
	struct address_space *mapping = file->f_mapping;

	/* Locklessly handle the common case where nothing has changed */
	if (errseq_check(&mapping->wb_err, old)) {
		/* Something changed, must use slow path */
		spin_lock(&file->f_lock);
		old = file->f_wb_err;
		err = errseq_check_and_advance(&mapping->wb_err,
						&file->f_wb_err);
		trace_file_check_and_advance_wb_err(file, old);
		spin_unlock(&file->f_lock);
	}

	/*
	 * We're mostly using this function as a drop-in replacement for
	 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
	 * that the legacy code would have had on these flags.
	 */
	clear_bit(AS_EIO, &mapping->flags);
	clear_bit(AS_ENOSPC, &mapping->flags);
	return err;
}
EXPORT_SYMBOL(file_check_and_advance_wb_err);
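
/*
 * Illustrative sketch (not from this file): even an fsync implementation
 * that has no data to write should still surface errors that occurred
 * since the last check on this file descriptor.
 *
 *	err = file_check_and_advance_wb_err(file);
 *	if (err)
 *		return err;
 */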

/**
 * file_write_and_wait_range - write out & wait on a file range
 * @file:	file pointing to address_space with pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * After writing out and waiting on the data, we check and advance the
 * f_wb_err cursor to the latest value, and return any errors detected there.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
{
	int err = 0, err2;
	struct address_space *mapping = file->f_mapping;

	if (lend < lstart)
		return 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = file_check_and_advance_wb_err(file);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(file_write_and_wait_range);

/**
 * replace_page_cache_folio - replace a pagecache folio with a new one
 * @old:	folio to be replaced
 * @new:	folio to replace with
 *
 * This function replaces a folio in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new folio and
 * drops it for the old folio.  Both the old and new folios must be
 * locked.  This function does not add the new folio to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  This function cannot fail.
 */
void replace_page_cache_folio(struct folio *old, struct folio *new)
{
	struct address_space *mapping = old->mapping;
	void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
	pgoff_t offset = old->index;
	XA_STATE(xas, &mapping->i_pages, offset);

	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
	VM_BUG_ON_FOLIO(new->mapping, new);

	folio_get(new);
	new->mapping = mapping;
	new->index = offset;

	mem_cgroup_migrate(old, new);

	xas_lock_irq(&xas);
	xas_store(&xas, new);

	old->mapping = NULL;
	/* hugetlb pages do not participate in page cache accounting. */
	if (!folio_test_hugetlb(old))
		__lruvec_stat_sub_folio(old, NR_FILE_PAGES);
	if (!folio_test_hugetlb(new))
		__lruvec_stat_add_folio(new, NR_FILE_PAGES);
	if (folio_test_swapbacked(old))
		__lruvec_stat_sub_folio(old, NR_SHMEM);
	if (folio_test_swapbacked(new))
		__lruvec_stat_add_folio(new, NR_SHMEM);
	xas_unlock_irq(&xas);
	if (free_folio)
		free_folio(old);
	folio_put(old);
}
EXPORT_SYMBOL_GPL(replace_page_cache_folio);
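
/*
 * Illustrative sketch (not from this file): a migration-style caller
 * allocates and locks a replacement folio, swaps it in atomically, then
 * adds the new folio to the LRU as the kernel-doc above requires; all
 * names here are assumptions for the example.
 *
 *	new = filemap_alloc_folio(GFP_KERNEL, folio_order(old));
 *	if (!new)
 *		return -ENOMEM;
 *	folio_lock(old);
 *	folio_lock(new);
 *	replace_page_cache_folio(old, new);
 *	folio_add_lru(new);
 *	folio_unlock(new);
 *	folio_unlock(old);
 */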

noinline int __filemap_add_folio(struct address_space *mapping,
		struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int huge = folio_test_hugetlb(folio);
	bool charged = false;
	long nr = 1;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
	mapping_set_update(&xas, mapping);

	if (!huge) {
		int error = mem_cgroup_charge(folio, NULL, gfp);
		VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
		if (error)
			return error;
		charged = true;
		xas_set_order(&xas, index, folio_order(folio));
		nr = folio_nr_pages(folio);
	}

	gfp &= GFP_RECLAIM_MASK;
	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = xas.xa_index;

	do {
		unsigned int order = xa_get_order(xas.xa, xas.xa_index);
		void *entry, *old = NULL;

		if (order > folio_order(folio))
			xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
					order, gfp);
		xas_lock_irq(&xas);
		xas_for_each_conflict(&xas, entry) {
			old = entry;
			if (!xa_is_value(entry)) {
				xas_set_err(&xas, -EEXIST);
				goto unlock;
			}
		}

		if (old) {
			if (shadowp)
				*shadowp = old;
			/* entry may have been split before we acquired lock */
			order = xa_get_order(xas.xa, xas.xa_index);
			if (order > folio_order(folio)) {
				/* How to handle large swap entries? */
				BUG_ON(shmem_mapping(mapping));
				xas_split(&xas, old, order);
				xas_reset(&xas);
			}
		}

		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;

		mapping->nrpages += nr;

		/* hugetlb pages do not participate in page cache accounting */
		if (!huge) {
			__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
			if (folio_test_pmd_mappable(folio))
				__lruvec_stat_mod_folio(folio,
						NR_FILE_THPS, nr);
		}
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas))
		goto error;

	trace_mm_filemap_add_to_page_cache(folio);
	return 0;
error:
	if (charged)
		mem_cgroup_uncharge(folio);
	folio->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	folio_put_refs(folio, nr);
	return xas_error(&xas);
}
ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);

int filemap_add_folio(struct address_space *mapping, struct folio *folio,
				pgoff_t index, gfp_t gfp)
{
	void *shadow = NULL;
	int ret;

	__folio_set_locked(folio);
	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
	if (unlikely(ret))
		__folio_clear_locked(folio);
	else {
		/*
		 * The folio might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed folio.
		 * The exception is folios getting rewritten; evicting other
		 * data from the working set, only to cache data that will
		 * get overwritten with something else, is a waste of memory.
		 */
		WARN_ON_ONCE(folio_test_active(folio));
		if (!(gfp & __GFP_WRITE) && shadow)
			workingset_refault(folio, shadow);
		folio_add_lru(folio);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(filemap_add_folio);
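
/*
 * Illustrative sketch (not from this file): the common read-path pattern
 * of allocating a folio and inserting it at @index, retrying the lookup
 * when someone else raced us; the names are assumptions for the example.
 *
 *	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
 *	if (!folio)
 *		return -ENOMEM;
 *	err = filemap_add_folio(mapping, folio, index,
 *				mapping_gfp_mask(mapping));
 *	if (err) {
 *		folio_put(folio);
 *		if (err == -EEXIST)
 *			goto retry_lookup;
 *		return err;
 *	}
 */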

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
{
	int n;
	struct folio *folio;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			folio = __folio_alloc_node(gfp, order, n);
		} while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));

		return folio;
	}
	return folio_alloc(gfp, order);
}
EXPORT_SYMBOL(filemap_alloc_folio);
#endif

/*
 * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
 *
 * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
 *
 * @mapping1: the first mapping to lock
 * @mapping2: the second mapping to lock
 */
void filemap_invalidate_lock_two(struct address_space *mapping1,
				 struct address_space *mapping2)
{
	if (mapping1 > mapping2)
		swap(mapping1, mapping2);
	if (mapping1)
		down_write(&mapping1->invalidate_lock);
	if (mapping2 && mapping1 != mapping2)
		down_write_nested(&mapping2->invalidate_lock, 1);
}
EXPORT_SYMBOL(filemap_invalidate_lock_two);

/*
 * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
 *
 * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
 *
 * @mapping1: the first mapping to unlock
 * @mapping2: the second mapping to unlock
 */
void filemap_invalidate_unlock_two(struct address_space *mapping1,
				   struct address_space *mapping2)
{
	if (mapping1)
		up_write(&mapping1->invalidate_lock);
	if (mapping2 && mapping1 != mapping2)
		up_write(&mapping2->invalidate_lock);
}
EXPORT_SYMBOL(filemap_invalidate_unlock_two);
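
/*
 * Illustrative sketch (not from this file): a cross-file operation such
 * as range deduplication can stabilise both mappings against invalidation
 * like this; the file names are assumptions for the example.
 *
 *	filemap_invalidate_lock_two(src->f_mapping, dst->f_mapping);
 *	... operate on both files' page caches ...
 *	filemap_invalidate_unlock_two(src->f_mapping, dst->f_mapping);
 */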

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. We use a hash table of
 * waitqueues: the bucket discipline is to keep all waiters on
 * the same queue and wake them all whenever any of the pages
 * becomes available, with each woken context then checking that
 * the page it was waiting for actually became available. This
 * saves space, at the cost of "thundering herd" phenomena during
 * rare hash collisions.
 */
#define PAGE_WAIT_TABLE_BITS 8
#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;

static wait_queue_head_t *folio_waitqueue(struct folio *folio)
{
	return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
}

void __init pagecache_init(void)
{
	int i;

	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(&folio_wait_table[i]);

	page_writeback_init();
}

/*
 * The page wait code treats the "wait->flags" somewhat unusually, because
 * we have multiple different kinds of waits, not just the usual "exclusive"
 * one.
 *
 * We have:
 *
 *  (a) no special bits set:
 *
 *	We're just waiting for the bit to be released, and when a waker
 *	calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
 *	and remove it from the wait queue.
 *
 *	Simple and straightforward.
 *
 *  (b) WQ_FLAG_EXCLUSIVE:
 *
 *	The waiter is waiting to get the lock, and only one waiter should
 *	be woken up to avoid any thundering herd behavior. We'll set the
 *	WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
 *
 *	This is the traditional exclusive wait.
 *
 *  (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
 *
 *	The waiter is waiting to get the bit, and additionally wants the
 *	lock to be transferred to it for fair lock behavior. If the lock
 *	cannot be taken, we stop walking the wait queue without waking
 *	the waiter.
 *
 *	This is the "fair lock handoff" case, and in addition to setting
 *	WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
 *	that it now has the lock.
 */
static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
{
	unsigned int flags;
	struct wait_page_key *key = arg;
	struct wait_page_queue *wait_page
		= container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wait_page, key))
		return 0;

	/*
	 * If it's a lock handoff wait, we get the bit for it, and
	 * stop walking (and do not wake it up) if we can't.
	 */
	flags = wait->flags;
	if (flags & WQ_FLAG_EXCLUSIVE) {
		if (test_bit(key->bit_nr, &key->folio->flags))
			return -1;
		if (flags & WQ_FLAG_CUSTOM) {
			if (test_and_set_bit(key->bit_nr, &key->folio->flags))
				return -1;
			flags |= WQ_FLAG_DONE;
		}
	}

	/*
	 * We are holding the wait-queue lock, but the waiter that
	 * is waiting for this will be checking the flags without
	 * any locking.
	 *
	 * So update the flags atomically, and wake up the waiter
	 * afterwards to avoid any races. This store-release pairs
	 * with the load-acquire in folio_wait_bit_common().
	 */
	smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
	wake_up_state(wait->private, mode);

	/*
	 * Ok, we have successfully done what we're waiting for,
	 * and we can unconditionally remove the wait entry.
	 *
	 * Note that this pairs with the "finish_wait()" in the
	 * waiter, and has to be the absolute last thing we do.
	 * After this list_del_init(&wait->entry) the wait entry
	 * might be de-allocated and the process might even have
	 * exited.
	 */
	list_del_init_careful(&wait->entry);
	return (flags & WQ_FLAG_EXCLUSIVE) != 0;
}

static void folio_wake_bit(struct folio *folio, int bit_nr)
{
	wait_queue_head_t *q = folio_waitqueue(folio);
	struct wait_page_key key;
	unsigned long flags;
	wait_queue_entry_t bookmark;

	key.folio = folio;
	key.bit_nr = bit_nr;
	key.page_match = 0;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
		/*
		 * Take a breather from holding the lock, allowing
		 * waiters whose wakeups complete asynchronously to
		 * acquire the lock and remove themselves from the
		 * wait queue.
		 */
		spin_unlock_irqrestore(&q->lock, flags);
		cpu_relax();
		spin_lock_irqsave(&q->lock, flags);
		__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
	}

	/*
	 * It's possible to miss clearing waiters here, when we woke our page
	 * waiters, but the hashed waitqueue has waiters for other pages on it.
	 * That's okay, it's a rare case. The next waker will clear it.
	 *
	 * Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE,
	 * other), the flag may be cleared in the course of freeing the page;
	 * but that is not required for correctness.
	 */
	if (!waitqueue_active(q) || !key.page_match)
		folio_clear_waiters(folio);

	spin_unlock_irqrestore(&q->lock, flags);
}

static void folio_wake(struct folio *folio, int bit)
{
	if (!folio_test_waiters(folio))
		return;
	folio_wake_bit(folio, bit);
}

/*
 * A choice of three behaviors for folio_wait_bit_common():
 */
enum behavior {
	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
			 * __folio_lock() waiting on then setting PG_locked.
			 */
	SHARED,		/* Hold ref to page and check the bit when woken, like
			 * folio_wait_writeback() waiting on PG_writeback.
			 */
	DROP,		/* Drop ref to page before wait, no check when woken,
			 * like folio_put_wait_locked() on PG_locked.
			 */
};

/*
 * Attempt to check (or get) the folio flag, and mark us done
 * if successful.
 */
static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
					struct wait_queue_entry *wait)
{
	if (wait->flags & WQ_FLAG_EXCLUSIVE) {
		if (test_and_set_bit(bit_nr, &folio->flags))
			return false;
	} else if (test_bit(bit_nr, &folio->flags))
		return false;

	wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
	return true;
}

/* How many times do we accept lock stealing from under a waiter? */
int sysctl_page_lock_unfairness = 5;

static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
		int state, enum behavior behavior)
{
	wait_queue_head_t *q = folio_waitqueue(folio);
	int unfairness = sysctl_page_lock_unfairness;
	struct wait_page_queue wait_page;
	wait_queue_entry_t *wait = &wait_page.wait;
	bool thrashing = false;
	unsigned long pflags;
	bool in_thrashing;

	if (bit_nr == PG_locked &&
	    !folio_test_uptodate(folio) && folio_test_workingset(folio)) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
		thrashing = true;
	}

	init_wait(wait);
	wait->func = wake_page_function;
	wait_page.folio = folio;
	wait_page.bit_nr = bit_nr;

repeat:
	wait->flags = 0;
	if (behavior == EXCLUSIVE) {
		wait->flags = WQ_FLAG_EXCLUSIVE;
		if (--unfairness < 0)
			wait->flags |= WQ_FLAG_CUSTOM;
	}

	/*
	 * Do one last check whether we can get the
	 * page bit synchronously.
	 *
	 * Do the folio_set_waiters() marking before that
	 * to let any waker we _just_ missed know they
	 * need to wake us up (otherwise they'll never
	 * even go to the slow case that looks at the
	 * page queue), and add ourselves to the wait
	 * queue if we need to sleep.
	 *
	 * This part needs to be done under the queue
	 * lock to avoid races.
	 */
	spin_lock_irq(&q->lock);
	folio_set_waiters(folio);
	if (!folio_trylock_flag(folio, bit_nr, wait))
		__add_wait_queue_entry_tail(q, wait);
	spin_unlock_irq(&q->lock);

	/*
	 * From now on, all the logic will be based on
	 * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
	 * see whether the page bit testing has already
	 * been done by the wake function.
	 *
	 * We can drop our reference to the folio.
	 */
	if (behavior == DROP)
		folio_put(folio);

	/*
	 * Note that until the "finish_wait()", or until
	 * we see the WQ_FLAG_WOKEN flag, we need to
	 * be very careful with the 'wait->flags', because
	 * we may race with a waker that sets them.
	 */
	for (;;) {
		unsigned int flags;

		set_current_state(state);

		/* Loop until we've been woken or interrupted */
		flags = smp_load_acquire(&wait->flags);
		if (!(flags & WQ_FLAG_WOKEN)) {
			if (signal_pending_state(state, current))
				break;

			io_schedule();
			continue;
		}

		/* If we were non-exclusive, we're done */
		if (behavior != EXCLUSIVE)
			break;

		/* If the waker got the lock for us, we're done */
		if (flags & WQ_FLAG_DONE)
			break;

		/*
		 * Otherwise, if we're getting the lock, we need to
		 * try to get it ourselves.
		 *
		 * And if that fails, we'll have to retry this all.
		 */
		if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))
			goto repeat;

		wait->flags |= WQ_FLAG_DONE;
		break;
	}

	/*
	 * If a signal happened, this 'finish_wait()' may remove the last
	 * waiter from the wait-queues, but the folio waiters bit will remain
	 * set. That's ok. The next wakeup will take care of it, and trying
	 * to do it here would be difficult and prone to races.
	 */
	finish_wait(q, wait);

	if (thrashing) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}

	/*
	 * NOTE! The wait->flags weren't stable until we've done the
	 * 'finish_wait()', and we could have exited the loop above due
	 * to a signal, and had a wakeup event happen after the signal
	 * test but before the 'finish_wait()'.
	 *
	 * So only after the finish_wait() can we reliably determine
	 * if we got woken up or not, so we can now figure out the final
	 * return value based on that state without races.
	 *
	 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
	 * waiter, but an exclusive one requires WQ_FLAG_DONE.
	 */
	if (behavior == EXCLUSIVE)
		return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;

	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
}

#ifdef CONFIG_MIGRATION
/**
 * migration_entry_wait_on_locked - Wait for a migration entry to be removed
 * @entry: migration swap entry.
 * @ptep: mapped pte pointer. Will return with the ptep unmapped. Only required
 *        for pte entries, pass NULL for pmd entries.
 * @ptl: already locked ptl. This function will drop the lock.
 *
 * Wait for a migration entry referencing the given page to be removed. This is
 * equivalent to put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE) except
 * this can be called without taking a reference on the page. Instead this
 * should be called while holding the ptl for the migration entry referencing
 * the page.
 *
 * Returns after unmapping and unlocking the pte/ptl with pte_unmap_unlock().
 *
 * This follows the same logic as folio_wait_bit_common() so see the comments
 * there.
 */
void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
				spinlock_t *ptl)
{
	struct wait_page_queue wait_page;
	wait_queue_entry_t *wait = &wait_page.wait;
	bool thrashing = false;
	unsigned long pflags;
	bool in_thrashing;
	wait_queue_head_t *q;
	struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));

	q = folio_waitqueue(folio);
	if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
		thrashing = true;
	}

	init_wait(wait);
	wait->func = wake_page_function;
	wait_page.folio = folio;
	wait_page.bit_nr = PG_locked;
	wait->flags = 0;

	spin_lock_irq(&q->lock);
	folio_set_waiters(folio);
	if (!folio_trylock_flag(folio, PG_locked, wait))
		__add_wait_queue_entry_tail(q, wait);
	spin_unlock_irq(&q->lock);

	/*
	 * If a migration entry exists for the page the migration path must hold
	 * a valid reference to the page, and it must take the ptl to remove the
	 * migration entry. So the page is valid until the ptl is dropped.
	 */
	if (ptep)
		pte_unmap_unlock(ptep, ptl);
	else
		spin_unlock(ptl);

	for (;;) {
		unsigned int flags;

		set_current_state(TASK_UNINTERRUPTIBLE);

		/* Loop until we've been woken or interrupted */
		flags = smp_load_acquire(&wait->flags);
		if (!(flags & WQ_FLAG_WOKEN)) {
			if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))
				break;

			io_schedule();
			continue;
		}
		break;
	}

	finish_wait(q, wait);

	if (thrashing) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
}
#endif

void folio_wait_bit(struct folio *folio, int bit_nr)
{
	folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
}
EXPORT_SYMBOL(folio_wait_bit);

int folio_wait_bit_killable(struct folio *folio, int bit_nr)
{
	return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
}
EXPORT_SYMBOL(folio_wait_bit_killable);

/**
 * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
 * @folio: The folio to wait for.
 * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
 *
 * The caller should hold a reference on @folio.  They expect the page to
 * become unlocked relatively soon, but do not wish to hold up migration
 * (for example) by holding the reference while waiting for the folio to
 * come unlocked.  After this function returns, the caller should not
 * dereference @folio.
 *
 * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
 */
static int folio_put_wait_locked(struct folio *folio, int state)
{
	return folio_wait_bit_common(folio, PG_locked, state, DROP);
}

/**
 * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
 * @folio: Folio defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @folio.
 */
void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
{
	wait_queue_head_t *q = folio_waitqueue(folio);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_entry_tail(q, waiter);
	folio_set_waiters(folio);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(folio_add_wait_queue);

#ifndef clear_bit_unlock_is_negative_byte

/*
 * PG_waiters is the high bit in the same byte as PG_locked.
 *
 * On x86 (and on many other architectures), we can clear PG_locked and
 * test the sign bit at the same time. But if the architecture does
 * not support that special operation, we just do this all by hand
 * instead.
 *
 * The read of PG_waiters has to be after (or concurrently with) PG_locked
 * being cleared, but a memory barrier should be unnecessary since it is
 * in the same byte as PG_locked.
 */
static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
{
	clear_bit_unlock(nr, mem);
	/* smp_mb__after_atomic(); */
	return test_bit(PG_waiters, mem);
}

#endif

/**
 * folio_unlock - Unlock a locked folio.
 * @folio: The folio.
 *
 * Unlocks the folio and wakes up any thread sleeping on the page lock.
 *
 * Context: May be called from interrupt or process context.  May not be
 * called from NMI context.
 */
void folio_unlock(struct folio *folio)
{
	/* Bit 7 allows x86 to check the byte's sign bit */
	BUILD_BUG_ON(PG_waiters != 7);
	BUILD_BUG_ON(PG_locked > 7);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0)))
		folio_wake_bit(folio, PG_locked);
}
EXPORT_SYMBOL(folio_unlock);
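
/*
 * Illustrative sketch (not from this file): the canonical lock/unlock
 * pairing around a short-term folio state change; the check and the
 * operation inside are assumptions for the example.
 *
 *	folio_lock(folio);
 *	if (folio->mapping)
 *		folio_mark_dirty(folio);
 *	folio_unlock(folio);
 */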

/**
 * folio_end_private_2 - Clear PG_private_2 and wake any waiters.
 * @folio: The folio.
 *
 * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for
 * it.  The folio reference held for PG_private_2 being set is released.
 *
 * This is, for example, used when a netfs folio is being written to a local
 * disk cache, thereby allowing writes to the cache for the same folio to be
 * serialised.
 */
void folio_end_private_2(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio);
	clear_bit_unlock(PG_private_2, folio_flags(folio, 0));
	folio_wake_bit(folio, PG_private_2);
	folio_put(folio);
}
EXPORT_SYMBOL(folio_end_private_2);

/**
 * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
 * @folio: The folio to wait on.
 *
 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio.
 */
void folio_wait_private_2(struct folio *folio)
{
	while (folio_test_private_2(folio))
		folio_wait_bit(folio, PG_private_2);
}
EXPORT_SYMBOL(folio_wait_private_2);

/**
 * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
 * @folio: The folio to wait on.
 *
 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio or until a
 * fatal signal is received by the calling task.
 *
 * Return:
 * - 0 if successful.
 * - -EINTR if a fatal signal was encountered.
 */
int folio_wait_private_2_killable(struct folio *folio)
{
	int ret = 0;

	while (folio_test_private_2(folio)) {
		ret = folio_wait_bit_killable(folio, PG_private_2);
		if (ret < 0)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(folio_wait_private_2_killable);
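
/*
 * Illustrative sketch (not from this file) of the PG_private_2 lifecycle
 * a netfs-style user follows: take a reference and set the flag before
 * starting a cache write, then end it from the completion path. The
 * sequencing shown is an assumption for the example.
 *
 *	folio_get(folio);
 *	folio_set_private_2(folio);
 *	... start asynchronous write to the cache ...
 *
 *	(later, from the completion handler:)
 *	folio_end_private_2(folio);
 */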

/**
 * folio_end_writeback - End writeback against a folio.
 * @folio: The folio.
 */
void folio_end_writeback(struct folio *folio)
{
	/*
	 * folio_test_clear_reclaim() could be used here, but it is an
	 * atomic operation and overkill in this particular case.
	 * Occasionally failing to shuffle a folio marked for immediate
	 * reclaim is too mild a loss to justify taking an atomic
	 * operation penalty at the end of every folio writeback.
	 */
1606  	if (folio_test_reclaim(folio)) {
1607  		folio_clear_reclaim(folio);
1608  		folio_rotate_reclaimable(folio);
1609  	}
1610  
1611  	/*
1612  	 * Writeback does not hold a folio reference of its own, relying
1613  	 * on truncation to wait for the clearing of PG_writeback.
1614  	 * But here we must make sure that the folio is not freed and
1615  	 * reused before the folio_wake().
1616  	 */
1617  	folio_get(folio);
1618  	if (!__folio_end_writeback(folio))
1619  		BUG();
1620  
1621  	smp_mb__after_atomic();
1622  	folio_wake(folio, PG_writeback);
1623  	acct_reclaim_writeback(folio);
1624  	folio_put(folio);
1625  }
1626  EXPORT_SYMBOL(folio_end_writeback);
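
/*
 * A sketch of the writeback life cycle this function completes (I/O
 * submission and error handling elided):
 *
 *	folio_start_writeback(folio);	called with the folio locked
 *	folio_unlock(folio);
 *	... submit the I/O; then, from the completion handler ...
 *	folio_end_writeback(folio);	clears PG_writeback, wakes waiters
 */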
1627  
1628  /*
1629   * After completing I/O on a page, call this routine to update the page
1630   * flags appropriately.
1631   */
1632  void page_endio(struct page *page, bool is_write, int err)
1633  {
1634  	struct folio *folio = page_folio(page);
1635  
1636  	if (!is_write) {
1637  		if (!err) {
1638  			folio_mark_uptodate(folio);
1639  		} else {
1640  			folio_clear_uptodate(folio);
1641  			folio_set_error(folio);
1642  		}
1643  		folio_unlock(folio);
1644  	} else {
1645  		if (err) {
1646  			struct address_space *mapping;
1647  
1648  			folio_set_error(folio);
1649  			mapping = folio_mapping(folio);
1650  			if (mapping)
1651  				mapping_set_error(mapping, err);
1652  		}
1653  		folio_end_writeback(folio);
1654  	}
1655  }
1656  EXPORT_SYMBOL_GPL(page_endio);
1657  
1658  /**
1659   * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
1660   * @folio: The folio to lock
1661   */
1662  void __folio_lock(struct folio *folio)
1663  {
1664  	folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
1665  				EXCLUSIVE);
1666  }
1667  EXPORT_SYMBOL(__folio_lock);
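
/*
 * Callers normally use the folio_lock() wrapper in <linux/pagemap.h>,
 * which (roughly) takes the fast path when the lock is uncontended:
 *
 *	might_sleep();
 *	if (!folio_trylock(folio))
 *		__folio_lock(folio);
 */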
1668  
1669  int __folio_lock_killable(struct folio *folio)
1670  {
1671  	return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
1672  					EXCLUSIVE);
1673  }
1674  EXPORT_SYMBOL_GPL(__folio_lock_killable);
1675  
1676  static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
1677  {
1678  	struct wait_queue_head *q = folio_waitqueue(folio);
1679  	int ret = 0;
1680  
1681  	wait->folio = folio;
1682  	wait->bit_nr = PG_locked;
1683  
1684  	spin_lock_irq(&q->lock);
1685  	__add_wait_queue_entry_tail(q, &wait->wait);
1686  	folio_set_waiters(folio);
1687  	ret = !folio_trylock(folio);
1688  	/*
1689  	 * If we were successful now, we know we're still on the
1690  	 * waitqueue as we're still under the lock. This means it's
1691  	 * safe to remove and return success; we know the callback
1692  	 * isn't going to trigger.
1693  	 */
1694  	if (!ret)
1695  		__remove_wait_queue(q, &wait->wait);
1696  	else
1697  		ret = -EIOCBQUEUED;
1698  	spin_unlock_irq(&q->lock);
1699  	return ret;
1700  }
1701  
1702  /*
1703   * Return values:
1704   * true - folio is locked; mmap_lock is still held.
1705   * false - folio is not locked.
1706   *     mmap_lock has been released (mmap_read_unlock()), unless flags had both
1707   *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
1708   *     which case mmap_lock is still held.
1709   *
1710   * If neither ALLOW_RETRY nor KILLABLE is set, this will always return true
1711   * with the folio locked and the mmap_lock unperturbed.
1712   */
1713  bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
1714  			 unsigned int flags)
1715  {
1716  	if (fault_flag_allow_retry_first(flags)) {
1717  		/*
1718  		 * CAUTION! In this case, mmap_lock is not released
1719  		 * even though we return false.
1720  		 */
1721  		if (flags & FAULT_FLAG_RETRY_NOWAIT)
1722  			return false;
1723  
1724  		mmap_read_unlock(mm);
1725  		if (flags & FAULT_FLAG_KILLABLE)
1726  			folio_wait_locked_killable(folio);
1727  		else
1728  			folio_wait_locked(folio);
1729  		return false;
1730  	}
1731  	if (flags & FAULT_FLAG_KILLABLE) {
1732  		bool ret;
1733  
1734  		ret = __folio_lock_killable(folio);
1735  		if (ret) {
1736  			mmap_read_unlock(mm);
1737  			return false;
1738  		}
1739  	} else {
1740  		__folio_lock(folio);
1741  	}
1742  
1743  	return true;
1744  }
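
/*
 * A sketch of the typical fault-path call site (usually via the
 * folio_lock_or_retry() wrapper; error unwinding elided):
 *
 *	if (!__folio_lock_or_retry(folio, vmf->vma->vm_mm, vmf->flags)) {
 *		folio_put(folio);
 *		return VM_FAULT_RETRY;	mmap_lock may already be dropped
 *	}
 *	... folio is locked and the mmap_lock is still held ...
 */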
1745  
1746  /**
1747   * page_cache_next_miss() - Find the next gap in the page cache.
1748   * @mapping: Mapping.
1749   * @index: Index.
1750   * @max_scan: Maximum range to search.
1751   *
1752   * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
1753   * gap with the lowest index.
1754   *
1755   * This function may be called under the rcu_read_lock.  However, this will
1756   * not atomically search a snapshot of the cache at a single point in time.
1757   * For example, if a gap is created at index 5, then subsequently a gap is
1758   * created at index 10, page_cache_next_miss() covering both indices may
1759   * return 10 if called under the rcu_read_lock.
1760   *
1761   * Return: The index of the gap if found, otherwise an index outside the
1762   * range specified (in which case 'return - index >= max_scan' will be true).
1763   * In the rare case of index wrap-around, 0 will be returned.
1764   */
1765  pgoff_t page_cache_next_miss(struct address_space *mapping,
1766  			     pgoff_t index, unsigned long max_scan)
1767  {
1768  	XA_STATE(xas, &mapping->i_pages, index);
1769  
1770  	while (max_scan--) {
1771  		void *entry = xas_next(&xas);
1772  		if (!entry || xa_is_value(entry))
1773  			break;
1774  		if (xas.xa_index == 0)
1775  			break;
1776  	}
1777  
1778  	return xas.xa_index;
1779  }
1780  EXPORT_SYMBOL(page_cache_next_miss);
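
/*
 * A minimal sketch: probe the first 128 pages of a file for a gap,
 * using the Return: convention documented above:
 *
 *	pgoff_t gap = page_cache_next_miss(mapping, 0, 128);
 *
 *	if (gap >= 128)
 *		... every index in [0, 127] is occupied by a folio ...
 *	else
 *		... 'gap' is the lowest index with no folio ...
 */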
1781  
1782  /**
1783   * page_cache_prev_miss() - Find the previous gap in the page cache.
1784   * @mapping: Mapping.
1785   * @index: Index.
1786   * @max_scan: Maximum range to search.
1787   *
1788   * Search the range [max(index - max_scan + 1, 0), index] for the
1789   * gap with the highest index.
1790   *
1791   * This function may be called under the rcu_read_lock.  However, this will
1792   * not atomically search a snapshot of the cache at a single point in time.
1793   * For example, if a gap is created at index 10, then subsequently a gap is
1794   * created at index 5, page_cache_prev_miss() covering both indices may
1795   * return 5 if called under the rcu_read_lock.
1796   *
1797   * Return: The index of the gap if found, otherwise an index outside the
1798   * range specified (in which case 'index - return >= max_scan' will be true).
1799   * In the rare case of wrap-around, ULONG_MAX will be returned.
1800   */
1801  pgoff_t page_cache_prev_miss(struct address_space *mapping,
1802  			     pgoff_t index, unsigned long max_scan)
1803  {
1804  	XA_STATE(xas, &mapping->i_pages, index);
1805  
1806  	while (max_scan--) {
1807  		void *entry = xas_prev(&xas);
1808  		if (!entry || xa_is_value(entry))
1809  			break;
1810  		if (xas.xa_index == ULONG_MAX)
1811  			break;
1812  	}
1813  
1814  	return xas.xa_index;
1815  }
1816  EXPORT_SYMBOL(page_cache_prev_miss);
1817  
1818  /*
1819   * Lockless page cache protocol:
1820   * On the lookup side:
1821   * 1. Load the folio from i_pages
1822   * 2. Increment the refcount if it's not zero
1823   * 3. If the folio is not found by xas_reload(), put the refcount and retry
1824   *
1825   * On the removal side:
1826   * A. Freeze the page (by zeroing the refcount if nobody else has a reference)
1827   * B. Remove the page from i_pages
1828   * C. Return the page to the page allocator
1829   *
1830   * This means that any page may have its reference count temporarily
1831   * increased by a speculative page cache (or fast GUP) lookup as it can
1832   * be allocated by another user before the RCU grace period expires.
1833   * Because the refcount temporarily acquired here may end up being the
1834   * last refcount on the page, any page allocation must be freeable by
1835   * folio_put().
1836   */
1837  
1838  /*
1839   * filemap_get_entry - Get a page cache entry.
1840   * @mapping: the address_space to search
1841   * @index: The page cache index.
1842   *
1843   * Looks up the page cache entry at @mapping & @index.  If it is a folio,
1844   * it is returned with an increased refcount.  If it is a shadow entry
1845   * of a previously evicted folio, or a swap entry from shmem/tmpfs,
1846   * it is returned without further action.
1847   *
1848   * Return: The folio, swap or shadow entry, %NULL if nothing is found.
1849   */
1850  void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
1851  {
1852  	XA_STATE(xas, &mapping->i_pages, index);
1853  	struct folio *folio;
1854  
1855  	rcu_read_lock();
1856  repeat:
1857  	xas_reset(&xas);
1858  	folio = xas_load(&xas);
1859  	if (xas_retry(&xas, folio))
1860  		goto repeat;
1861  	/*
1862  	 * A shadow entry of a recently evicted page, or a swap entry from
1863  	 * shmem/tmpfs.  Return it without attempting to raise page count.
1864  	 */
1865  	if (!folio || xa_is_value(folio))
1866  		goto out;
1867  
1868  	if (!folio_try_get_rcu(folio))
1869  		goto repeat;
1870  
1871  	if (unlikely(folio != xas_reload(&xas))) {
1872  		folio_put(folio);
1873  		goto repeat;
1874  	}
1875  out:
1876  	rcu_read_unlock();
1877  
1878  	return folio;
1879  }
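
/*
 * A minimal caller sketch: value entries must not be treated as folios,
 * and real folios come back with an extra reference to drop:
 *
 *	void *entry = filemap_get_entry(mapping, index);
 *
 *	if (!entry)
 *		... nothing cached at @index ...
 *	else if (xa_is_value(entry))
 *		... shadow/swap entry; inspect with xa_to_value() ...
 *	else
 *		folio_put((struct folio *)entry);	drop our reference
 */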
1880  
1881  /**
1882   * __filemap_get_folio - Find and get a reference to a folio.
1883   * @mapping: The address_space to search.
1884   * @index: The page index.
1885   * @fgp_flags: %FGP flags modify how the folio is returned.
1886   * @gfp: Memory allocation flags to use if %FGP_CREAT is specified.
1887   *
1888   * Looks up the page cache entry at @mapping & @index.
1889   *
1890   * @fgp_flags can be zero or more of these flags:
1891   *
1892   * * %FGP_ACCESSED - The folio will be marked accessed.
1893   * * %FGP_LOCK - The folio is returned locked.
1894   * * %FGP_CREAT - If no page is present then a new page is allocated using
1895   *   @gfp and added to the page cache and the VM's LRU list.
1896   *   The page is returned locked and with an increased refcount.
1897   * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
1898   *   page is already in cache.  If the page was allocated, unlock it before
1899   *   returning so the caller can do the same dance.
1900   * * %FGP_WRITE - The page will be written to by the caller.
1901   * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
1902   * * %FGP_NOWAIT - Don't get blocked by page lock.
1903   * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
1904   *
1905   * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
1906   * if the %GFP flags specified for %FGP_CREAT are atomic.
1907   *
1908   * If there is a page cache page, it is returned with an increased refcount.
1909   *
1910   * Return: The found folio or an ERR_PTR() otherwise.
1911   */
1912  struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
1913  		int fgp_flags, gfp_t gfp)
1914  {
1915  	struct folio *folio;
1916  
1917  repeat:
1918  	folio = filemap_get_entry(mapping, index);
1919  	if (xa_is_value(folio))
1920  		folio = NULL;
1921  	if (!folio)
1922  		goto no_page;
1923  
1924  	if (fgp_flags & FGP_LOCK) {
1925  		if (fgp_flags & FGP_NOWAIT) {
1926  			if (!folio_trylock(folio)) {
1927  				folio_put(folio);
1928  				return ERR_PTR(-EAGAIN);
1929  			}
1930  		} else {
1931  			folio_lock(folio);
1932  		}
1933  
1934  		/* Has the page been truncated? */
1935  		if (unlikely(folio->mapping != mapping)) {
1936  			folio_unlock(folio);
1937  			folio_put(folio);
1938  			goto repeat;
1939  		}
1940  		VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
1941  	}
1942  
1943  	if (fgp_flags & FGP_ACCESSED)
1944  		folio_mark_accessed(folio);
1945  	else if (fgp_flags & FGP_WRITE) {
1946  		/* Clear idle flag for buffer write */
1947  		if (folio_test_idle(folio))
1948  			folio_clear_idle(folio);
1949  	}
1950  
1951  	if (fgp_flags & FGP_STABLE)
1952  		folio_wait_stable(folio);
1953  no_page:
1954  	if (!folio && (fgp_flags & FGP_CREAT)) {
1955  		int err;
1956  		if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
1957  			gfp |= __GFP_WRITE;
1958  		if (fgp_flags & FGP_NOFS)
1959  			gfp &= ~__GFP_FS;
1960  		if (fgp_flags & FGP_NOWAIT) {
1961  			gfp &= ~GFP_KERNEL;
1962  			gfp |= GFP_NOWAIT | __GFP_NOWARN;
1963  		}
1964  
1965  		folio = filemap_alloc_folio(gfp, 0);
1966  		if (!folio)
1967  			return ERR_PTR(-ENOMEM);
1968  
1969  		if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
1970  			fgp_flags |= FGP_LOCK;
1971  
1972  		/* Init accessed so we avoid the atomic mark_page_accessed later */
1973  		if (fgp_flags & FGP_ACCESSED)
1974  			__folio_set_referenced(folio);
1975  
1976  		err = filemap_add_folio(mapping, folio, index, gfp);
1977  		if (unlikely(err)) {
1978  			folio_put(folio);
1979  			folio = NULL;
1980  			if (err == -EEXIST)
1981  				goto repeat;
1982  		}
1983  
1984  		/*
1985  		 * filemap_add_folio locks the page, and for mmap
1986  		 * we expect an unlocked page.
1987  		 */
1988  		if (folio && (fgp_flags & FGP_FOR_MMAP))
1989  			folio_unlock(folio);
1990  	}
1991  
1992  	if (!folio)
1993  		return ERR_PTR(-ENOENT);
1994  	return folio;
1995  }
1996  EXPORT_SYMBOL(__filemap_get_folio);
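
/*
 * A sketch of the common find-or-create pattern (the GFP choice is
 * illustrative):
 *
 *	struct folio *folio;
 *
 *	folio = __filemap_get_folio(mapping, index,
 *			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 *			mapping_gfp_mask(mapping));
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	... folio is locked and referenced ...
 *	folio_unlock(folio);
 *	folio_put(folio);
 */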
1997  
1998  static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
1999  		xa_mark_t mark)
2000  {
2001  	struct folio *folio;
2002  
2003  retry:
2004  	if (mark == XA_PRESENT)
2005  		folio = xas_find(xas, max);
2006  	else
2007  		folio = xas_find_marked(xas, max, mark);
2008  
2009  	if (xas_retry(xas, folio))
2010  		goto retry;
2011  	/*
2012  	 * A shadow entry of a recently evicted page, a swap
2013  	 * entry from shmem/tmpfs or a DAX entry.  Return it
2014  	 * without attempting to raise page count.
2015  	 */
2016  	if (!folio || xa_is_value(folio))
2017  		return folio;
2018  
2019  	if (!folio_try_get_rcu(folio))
2020  		goto reset;
2021  
2022  	if (unlikely(folio != xas_reload(xas))) {
2023  		folio_put(folio);
2024  		goto reset;
2025  	}
2026  
2027  	return folio;
2028  reset:
2029  	xas_reset(xas);
2030  	goto retry;
2031  }
2032  
2033  /**
2034   * find_get_entries - gang pagecache lookup
2035   * @mapping:	The address_space to search
2036   * @start:	The starting page cache index
2037   * @end:	The final page index (inclusive).
2038   * @fbatch:	Where the resulting entries are placed.
2039   * @indices:	The cache indices corresponding to the entries in @fbatch
2040   *
2041   * find_get_entries() will search for and return a batch of entries in
2042   * the mapping.  The entries are placed in @fbatch.  find_get_entries()
2043   * takes a reference on any actual folios it returns.
2044   *
2045   * The entries have ascending indices.  The indices may not be consecutive
2046   * due to not-present entries or large folios.
2047   *
2048   * Any shadow entries of evicted folios, or swap entries from
2049   * shmem/tmpfs, are included in the returned array.
2050   *
2051   * Return: The number of entries which were found.
2052   */
2053  unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
2054  		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2055  {
2056  	XA_STATE(xas, &mapping->i_pages, *start);
2057  	struct folio *folio;
2058  
2059  	rcu_read_lock();
2060  	while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2061  		indices[fbatch->nr] = xas.xa_index;
2062  		if (!folio_batch_add(fbatch, folio))
2063  			break;
2064  	}
2065  	rcu_read_unlock();
2066  
2067  	if (folio_batch_count(fbatch)) {
2068  		unsigned long nr = 1;
2069  		int idx = folio_batch_count(fbatch) - 1;
2070  
2071  		folio = fbatch->folios[idx];
2072  		if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
2073  			nr = folio_nr_pages(folio);
2074  		*start = indices[idx] + nr;
2075  	}
2076  	return folio_batch_count(fbatch);
2077  }
2078  
2079  /**
2080   * find_lock_entries - Find a batch of pagecache entries.
2081   * @mapping:	The address_space to search.
2082   * @start:	The starting page cache index.
2083   * @end:	The final page index (inclusive).
2084   * @fbatch:	Where the resulting entries are placed.
2085   * @indices:	The cache indices of the entries in @fbatch.
2086   *
2087   * find_lock_entries() will return a batch of entries from @mapping.
2088   * Swap, shadow and DAX entries are included.  Folios are returned
2089   * locked and with an incremented refcount.  Folios which are locked
2090   * by somebody else or under writeback are skipped.  Folios which are
2091   * partially outside the range are not returned.
2092   *
2093   * The entries have ascending indices.  The indices may not be consecutive
2094   * due to not-present entries, large folios, folios which could not be
2095   * locked or folios under writeback.
2096   *
2097   * Return: The number of entries which were found.
2098   */
2099  unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
2100  		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2101  {
2102  	XA_STATE(xas, &mapping->i_pages, *start);
2103  	struct folio *folio;
2104  
2105  	rcu_read_lock();
2106  	while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
2107  		if (!xa_is_value(folio)) {
2108  			if (folio->index < *start)
2109  				goto put;
2110  			if (folio->index + folio_nr_pages(folio) - 1 > end)
2111  				goto put;
2112  			if (!folio_trylock(folio))
2113  				goto put;
2114  			if (folio->mapping != mapping ||
2115  			    folio_test_writeback(folio))
2116  				goto unlock;
2117  			VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
2118  					folio);
2119  		}
2120  		indices[fbatch->nr] = xas.xa_index;
2121  		if (!folio_batch_add(fbatch, folio))
2122  			break;
2123  		continue;
2124  unlock:
2125  		folio_unlock(folio);
2126  put:
2127  		folio_put(folio);
2128  	}
2129  	rcu_read_unlock();
2130  
2131  	if (folio_batch_count(fbatch)) {
2132  		unsigned long nr = 1;
2133  		int idx = folio_batch_count(fbatch) - 1;
2134  
2135  		folio = fbatch->folios[idx];
2136  		if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
2137  			nr = folio_nr_pages(folio);
2138  		*start = indices[idx] + nr;
2139  	}
2140  	return folio_batch_count(fbatch);
2141  }
2142  
2143  /**
2144   * filemap_get_folios - Get a batch of folios
2145   * @mapping:	The address_space to search
2146   * @start:	The starting page index
2147   * @end:	The final page index (inclusive)
2148   * @fbatch:	The batch to fill.
2149   *
2150   * Search for and return a batch of folios in the mapping starting at
2151   * index @start and up to index @end (inclusive).  The folios are returned
2152   * in @fbatch with an elevated reference count.
2153   *
2154   * The first folio may start before @start; if it does, it will contain
2155   * @start.  The final folio may extend beyond @end; if it does, it will
2156   * contain @end.  The folios have ascending indices.  There may be gaps
2157   * between the folios if there are indices which have no folio in the
2158   * page cache.  If folios are added to or removed from the page cache
2159   * while this is running, they may or may not be found by this call.
2160   *
2161   * Return: The number of folios which were found.
2162   * We also update @start to index the next folio for the traversal.
2163   */
2164  unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
2165  		pgoff_t end, struct folio_batch *fbatch)
2166  {
2167  	XA_STATE(xas, &mapping->i_pages, *start);
2168  	struct folio *folio;
2169  
2170  	rcu_read_lock();
2171  	while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2172  		/* Skip over shadow, swap and DAX entries */
2173  		if (xa_is_value(folio))
2174  			continue;
2175  		if (!folio_batch_add(fbatch, folio)) {
2176  			unsigned long nr = folio_nr_pages(folio);
2177  
2178  			if (folio_test_hugetlb(folio))
2179  				nr = 1;
2180  			*start = folio->index + nr;
2181  			goto out;
2182  		}
2183  	}
2184  
2185  	/*
2186  	 * We come here when there is no page beyond @end. We take care to not
2187  	 * overflow the index @start as it confuses some of the callers. This
2188  	 * breaks the iteration when there is a page at index -1 but that is
2189  	 * already broken anyway.
2190  	 */
2191  	if (end == (pgoff_t)-1)
2192  		*start = (pgoff_t)-1;
2193  	else
2194  		*start = end + 1;
2195  out:
2196  	rcu_read_unlock();
2197  
2198  	return folio_batch_count(fbatch);
2199  }
2200  EXPORT_SYMBOL(filemap_get_folios);
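
/*
 * A minimal iteration sketch, for some caller-chosen inclusive 'end'
 * (the per-folio work is elided):
 *
 *	struct folio_batch fbatch;
 *	pgoff_t start = 0;
 *	unsigned int i;
 *
 *	folio_batch_init(&fbatch);
 *	while (filemap_get_folios(mapping, &start, end, &fbatch)) {
 *		for (i = 0; i < folio_batch_count(&fbatch); i++)
 *			... examine fbatch.folios[i] ...
 *		folio_batch_release(&fbatch);	drops the references
 *	}
 */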
2201  
2202  static inline
2203  bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
2204  {
2205  	if (!folio_test_large(folio) || folio_test_hugetlb(folio))
2206  		return false;
2207  	if (index >= max)
2208  		return false;
2209  	return index < folio->index + folio_nr_pages(folio) - 1;
2210  }
2211  
2212  /**
2213   * filemap_get_folios_contig - Get a batch of contiguous folios
2214   * @mapping:	The address_space to search
2215   * @start:	The starting page index
2216   * @end:	The final page index (inclusive)
2217   * @fbatch:	The batch to fill
2218   *
2219   * filemap_get_folios_contig() works exactly like filemap_get_folios(),
2220   * except the returned folios are guaranteed to be contiguous. This may
2221   * not return all contiguous folios if the batch gets filled up.
2222   *
2223   * Return: The number of folios found.
2224   * Also update @start to be positioned for traversal of the next folio.
2225   */
2227  unsigned filemap_get_folios_contig(struct address_space *mapping,
2228  		pgoff_t *start, pgoff_t end, struct folio_batch *fbatch)
2229  {
2230  	XA_STATE(xas, &mapping->i_pages, *start);
2231  	unsigned long nr;
2232  	struct folio *folio;
2233  
2234  	rcu_read_lock();
2235  
2236  	for (folio = xas_load(&xas); folio && xas.xa_index <= end;
2237  			folio = xas_next(&xas)) {
2238  		if (xas_retry(&xas, folio))
2239  			continue;
2240  		/*
2241  		 * If the entry has been swapped out, we can stop looking.
2242  		 * No current caller is looking for DAX entries.
2243  		 */
2244  		if (xa_is_value(folio))
2245  			goto update_start;
2246  
2247  		if (!folio_try_get_rcu(folio))
2248  			goto retry;
2249  
2250  		if (unlikely(folio != xas_reload(&xas)))
2251  			goto put_folio;
2252  
2253  		if (!folio_batch_add(fbatch, folio)) {
2254  			nr = folio_nr_pages(folio);
2255  
2256  			if (folio_test_hugetlb(folio))
2257  				nr = 1;
2258  			*start = folio->index + nr;
2259  			goto out;
2260  		}
2261  		continue;
2262  put_folio:
2263  		folio_put(folio);
2264  
2265  retry:
2266  		xas_reset(&xas);
2267  	}
2268  
2269  update_start:
2270  	nr = folio_batch_count(fbatch);
2271  
2272  	if (nr) {
2273  		folio = fbatch->folios[nr - 1];
2274  		if (folio_test_hugetlb(folio))
2275  			*start = folio->index + 1;
2276  		else
2277  			*start = folio->index + folio_nr_pages(folio);
2278  	}
2279  out:
2280  	rcu_read_unlock();
2281  	return folio_batch_count(fbatch);
2282  }
2283  EXPORT_SYMBOL(filemap_get_folios_contig);
2284  
2285  /**
2286   * filemap_get_folios_tag - Get a batch of folios matching @tag
2287   * @mapping:    The address_space to search
2288   * @start:      The starting page index
2289   * @end:        The final page index (inclusive)
2290   * @tag:        The tag index
2291   * @fbatch:     The batch to fill
2292   *
2293   * Same as filemap_get_folios(), but only returning folios tagged with @tag.
2294   *
2295   * Return: The number of folios found.
2296   * Also update @start to index the next folio for traversal.
2297   */
2298  unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
2299  			pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch)
2300  {
2301  	XA_STATE(xas, &mapping->i_pages, *start);
2302  	struct folio *folio;
2303  
2304  	rcu_read_lock();
2305  	while ((folio = find_get_entry(&xas, end, tag)) != NULL) {
2306  		/*
2307  		 * Shadow entries should never be tagged, but this iteration
2308  		 * is lockless so there is a window for page reclaim to evict
2309  		 * a page we saw tagged. Skip over it.
2310  		 */
2311  		if (xa_is_value(folio))
2312  			continue;
2313  		if (!folio_batch_add(fbatch, folio)) {
2314  			unsigned long nr = folio_nr_pages(folio);
2315  
2316  			if (folio_test_hugetlb(folio))
2317  				nr = 1;
2318  			*start = folio->index + nr;
2319  			goto out;
2320  		}
2321  	}
2322  	/*
2323  	 * We come here when there is no page beyond @end. We take care to not
2324  	 * overflow the index @start as it confuses some of the callers. This
2325  	 * breaks the iteration when there is a page at index -1 but that is
2326  	 * already broken anyway.
2327  	 */
2328  	if (end == (pgoff_t)-1)
2329  		*start = (pgoff_t)-1;
2330  	else
2331  		*start = end + 1;
2332  out:
2333  	rcu_read_unlock();
2334  
2335  	return folio_batch_count(fbatch);
2336  }
2337  EXPORT_SYMBOL(filemap_get_folios_tag);
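
/*
 * A sketch of the classic writeback-style walk over dirty folios
 * (locking and the actual writeout are elided):
 *
 *	while (filemap_get_folios_tag(mapping, &start, end,
 *				      PAGECACHE_TAG_DIRTY, &fbatch)) {
 *		for (i = 0; i < folio_batch_count(&fbatch); i++)
 *			... write out fbatch.folios[i] ...
 *		folio_batch_release(&fbatch);
 *		cond_resched();
 *	}
 */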
2338  
2339  /*
2340   * CD/DVDs are error prone. When a medium error occurs, the driver may fail
2341   * a _large_ part of the i/o request. Imagine the worst scenario:
2342   *
2343   *      ---R__________________________________________B__________
2344   *         ^ reading here                             ^ bad block (assume 4k)
2345   *
2346   * read(R) => miss => readahead(R...B) => media error => frustrating retries
2347   * => failing the whole request => read(R) => read(R+1) =>
2348   * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
2349   * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
2350   * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
2351   *
2352   * It is going insane. Fix it by quickly scaling down the readahead size.
2353   */
2354  static void shrink_readahead_size_eio(struct file_ra_state *ra)
2355  {
2356  	ra->ra_pages /= 4;
2357  }
2358  
2359  /*
2360   * filemap_get_read_batch - Get a batch of folios for read
2361   *
2362   * Get a batch of folios which represent a contiguous range of bytes in
2363   * the file.  No exceptional entries will be returned.  If @index is in
2364   * the middle of a folio, the entire folio will be returned.  The last
2365   * folio in the batch may have the readahead flag set or the uptodate flag
2366   * clear so that the caller can take the appropriate action.
2367   */
2368  static void filemap_get_read_batch(struct address_space *mapping,
2369  		pgoff_t index, pgoff_t max, struct folio_batch *fbatch)
2370  {
2371  	XA_STATE(xas, &mapping->i_pages, index);
2372  	struct folio *folio;
2373  
2374  	rcu_read_lock();
2375  	for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
2376  		if (xas_retry(&xas, folio))
2377  			continue;
2378  		if (xas.xa_index > max || xa_is_value(folio))
2379  			break;
2380  		if (xa_is_sibling(folio))
2381  			break;
2382  		if (!folio_try_get_rcu(folio))
2383  			goto retry;
2384  
2385  		if (unlikely(folio != xas_reload(&xas)))
2386  			goto put_folio;
2387  
2388  		if (!folio_batch_add(fbatch, folio))
2389  			break;
2390  		if (!folio_test_uptodate(folio))
2391  			break;
2392  		if (folio_test_readahead(folio))
2393  			break;
2394  		xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1);
2395  		continue;
2396  put_folio:
2397  		folio_put(folio);
2398  retry:
2399  		xas_reset(&xas);
2400  	}
2401  	rcu_read_unlock();
2402  }
2403  
2404  static int filemap_read_folio(struct file *file, filler_t filler,
2405  		struct folio *folio)
2406  {
2407  	bool workingset = folio_test_workingset(folio);
2408  	unsigned long pflags;
2409  	int error;
2410  
2411  	/*
2412  	 * A previous I/O error may have been due to temporary failures,
2413  	 * eg. multipath errors.  PG_error will be set again if read_folio
2414  	 * fails.
2415  	 */
2416  	folio_clear_error(folio);
2417  
2418  	/* Start the actual read. The read will unlock the page. */
2419  	if (unlikely(workingset))
2420  		psi_memstall_enter(&pflags);
2421  	error = filler(file, folio);
2422  	if (unlikely(workingset))
2423  		psi_memstall_leave(&pflags);
2424  	if (error)
2425  		return error;
2426  
2427  	error = folio_wait_locked_killable(folio);
2428  	if (error)
2429  		return error;
2430  	if (folio_test_uptodate(folio))
2431  		return 0;
2432  	if (file)
2433  		shrink_readahead_size_eio(&file->f_ra);
2434  	return -EIO;
2435  }
2436  
2437  static bool filemap_range_uptodate(struct address_space *mapping,
2438  		loff_t pos, size_t count, struct folio *folio,
2439  		bool need_uptodate)
2440  {
2441  	if (folio_test_uptodate(folio))
2442  		return true;
2443  	/* pipes can't handle partially uptodate pages */
2444  	if (need_uptodate)
2445  		return false;
2446  	if (!mapping->a_ops->is_partially_uptodate)
2447  		return false;
2448  	if (mapping->host->i_blkbits >= folio_shift(folio))
2449  		return false;
2450  
2451  	if (folio_pos(folio) > pos) {
2452  		count -= folio_pos(folio) - pos;
2453  		pos = 0;
2454  	} else {
2455  		pos -= folio_pos(folio);
2456  	}
2457  
2458  	return mapping->a_ops->is_partially_uptodate(folio, pos, count);
2459  }
2460  
2461  static int filemap_update_page(struct kiocb *iocb,
2462  		struct address_space *mapping, size_t count,
2463  		struct folio *folio, bool need_uptodate)
2464  {
2465  	int error;
2466  
2467  	if (iocb->ki_flags & IOCB_NOWAIT) {
2468  		if (!filemap_invalidate_trylock_shared(mapping))
2469  			return -EAGAIN;
2470  	} else {
2471  		filemap_invalidate_lock_shared(mapping);
2472  	}
2473  
2474  	if (!folio_trylock(folio)) {
2475  		error = -EAGAIN;
2476  		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
2477  			goto unlock_mapping;
2478  		if (!(iocb->ki_flags & IOCB_WAITQ)) {
2479  			filemap_invalidate_unlock_shared(mapping);
2480  			/*
2481  			 * This is where we usually end up waiting for a
2482  			 * previously submitted readahead to finish.
2483  			 */
2484  			folio_put_wait_locked(folio, TASK_KILLABLE);
2485  			return AOP_TRUNCATED_PAGE;
2486  		}
2487  		error = __folio_lock_async(folio, iocb->ki_waitq);
2488  		if (error)
2489  			goto unlock_mapping;
2490  	}
2491  
2492  	error = AOP_TRUNCATED_PAGE;
2493  	if (!folio->mapping)
2494  		goto unlock;
2495  
2496  	error = 0;
2497  	if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio,
2498  				   need_uptodate))
2499  		goto unlock;
2500  
2501  	error = -EAGAIN;
2502  	if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
2503  		goto unlock;
2504  
2505  	error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
2506  			folio);
2507  	goto unlock_mapping;
2508  unlock:
2509  	folio_unlock(folio);
2510  unlock_mapping:
2511  	filemap_invalidate_unlock_shared(mapping);
2512  	if (error == AOP_TRUNCATED_PAGE)
2513  		folio_put(folio);
2514  	return error;
2515  }
2516  
2517  static int filemap_create_folio(struct file *file,
2518  		struct address_space *mapping, pgoff_t index,
2519  		struct folio_batch *fbatch)
2520  {
2521  	struct folio *folio;
2522  	int error;
2523  
2524  	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
2525  	if (!folio)
2526  		return -ENOMEM;
2527  
2528  	/*
2529  	 * Protect against truncate / hole punch. Grabbing invalidate_lock
2530  	 * here assures we cannot instantiate and bring uptodate new
2531  	 * pagecache folios after evicting page cache during truncate
2532  	 * and before actually freeing blocks.  Note that we could
2533  	 * release invalidate_lock after inserting the folio into
2534  	 * the page cache as the locked folio would then be enough to
2535  	 * synchronize with hole punching. But there are code paths
2536  	 * such as filemap_update_page() filling in partially uptodate
2537  	 * pages or ->readahead() that need to hold invalidate_lock
2538  	 * while mapping blocks for IO so let's hold the lock here as
2539  	 * well to keep locking rules simple.
2540  	 */
2541  	filemap_invalidate_lock_shared(mapping);
2542  	error = filemap_add_folio(mapping, folio, index,
2543  			mapping_gfp_constraint(mapping, GFP_KERNEL));
2544  	if (error == -EEXIST)
2545  		error = AOP_TRUNCATED_PAGE;
2546  	if (error)
2547  		goto error;
2548  
2549  	error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
2550  	if (error)
2551  		goto error;
2552  
2553  	filemap_invalidate_unlock_shared(mapping);
2554  	folio_batch_add(fbatch, folio);
2555  	return 0;
2556  error:
2557  	filemap_invalidate_unlock_shared(mapping);
2558  	folio_put(folio);
2559  	return error;
2560  }
2561  
2562  static int filemap_readahead(struct kiocb *iocb, struct file *file,
2563  		struct address_space *mapping, struct folio *folio,
2564  		pgoff_t last_index)
2565  {
2566  	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);
2567  
2568  	if (iocb->ki_flags & IOCB_NOIO)
2569  		return -EAGAIN;
2570  	page_cache_async_ra(&ractl, folio, last_index - folio->index);
2571  	return 0;
2572  }
2573  
2574  static int filemap_get_pages(struct kiocb *iocb, size_t count,
2575  		struct folio_batch *fbatch, bool need_uptodate)
2576  {
2577  	struct file *filp = iocb->ki_filp;
2578  	struct address_space *mapping = filp->f_mapping;
2579  	struct file_ra_state *ra = &filp->f_ra;
2580  	pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
2581  	pgoff_t last_index;
2582  	struct folio *folio;
2583  	int err = 0;
2584  
2585  	/* "last_index" is the index of the page beyond the end of the read */
2586  	last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE);
2587  retry:
2588  	if (fatal_signal_pending(current))
2589  		return -EINTR;
2590  
2591  	filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
2592  	if (!folio_batch_count(fbatch)) {
2593  		if (iocb->ki_flags & IOCB_NOIO)
2594  			return -EAGAIN;
2595  		page_cache_sync_readahead(mapping, ra, filp, index,
2596  				last_index - index);
2597  		filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
2598  	}
2599  	if (!folio_batch_count(fbatch)) {
2600  		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
2601  			return -EAGAIN;
2602  		err = filemap_create_folio(filp, mapping,
2603  				iocb->ki_pos >> PAGE_SHIFT, fbatch);
2604  		if (err == AOP_TRUNCATED_PAGE)
2605  			goto retry;
2606  		return err;
2607  	}
2608  
2609  	folio = fbatch->folios[folio_batch_count(fbatch) - 1];
2610  	if (folio_test_readahead(folio)) {
2611  		err = filemap_readahead(iocb, filp, mapping, folio, last_index);
2612  		if (err)
2613  			goto err;
2614  	}
2615  	if (!folio_test_uptodate(folio)) {
2616  		if ((iocb->ki_flags & IOCB_WAITQ) &&
2617  		    folio_batch_count(fbatch) > 1)
2618  			iocb->ki_flags |= IOCB_NOWAIT;
2619  		err = filemap_update_page(iocb, mapping, count, folio,
2620  					  need_uptodate);
2621  		if (err)
2622  			goto err;
2623  	}
2624  
2625  	return 0;
2626  err:
2627  	if (err < 0)
2628  		folio_put(folio);
2629  	if (likely(--fbatch->nr))
2630  		return 0;
2631  	if (err == AOP_TRUNCATED_PAGE)
2632  		goto retry;
2633  	return err;
2634  }
2635  
2636  static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
2637  {
2638  	unsigned int shift = folio_shift(folio);
2639  
2640  	return (pos1 >> shift == pos2 >> shift);
2641  }
2642  
2643  /**
2644   * filemap_read - Read data from the page cache.
2645   * @iocb: The iocb to read.
2646   * @iter: Destination for the data.
2647   * @already_read: Number of bytes already read by the caller.
2648   *
2649   * Copies data from the page cache.  If the data is not currently present,
2650   * uses the readahead and read_folio address_space operations to fetch it.
2651   *
2652   * Return: Total number of bytes copied, including those already read by
2653   * the caller.  If an error happens before any bytes are copied, returns
2654   * a negative error number.
2655   */
2656  ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
2657  		ssize_t already_read)
2658  {
2659  	struct file *filp = iocb->ki_filp;
2660  	struct file_ra_state *ra = &filp->f_ra;
2661  	struct address_space *mapping = filp->f_mapping;
2662  	struct inode *inode = mapping->host;
2663  	struct folio_batch fbatch;
2664  	int i, error = 0;
2665  	bool writably_mapped;
2666  	loff_t isize, end_offset;
2667  
2668  	if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
2669  		return 0;
2670  	if (unlikely(!iov_iter_count(iter)))
2671  		return 0;
2672  
2673  	iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
2674  	folio_batch_init(&fbatch);
2675  
2676  	do {
2677  		cond_resched();
2678  
2679  		/*
2680  		 * If we've already successfully copied some data, then we
2681  		 * can no longer safely return -EIOCBQUEUED. Hence mark
2682  		 * an async read NOWAIT at that point.
2683  		 */
2684  		if ((iocb->ki_flags & IOCB_WAITQ) && already_read)
2685  			iocb->ki_flags |= IOCB_NOWAIT;
2686  
2687  		if (unlikely(iocb->ki_pos >= i_size_read(inode)))
2688  			break;
2689  
2690  		error = filemap_get_pages(iocb, iter->count, &fbatch,
2691  					  iov_iter_is_pipe(iter));
2692  		if (error < 0)
2693  			break;
2694  
2695  		/*
2696  		 * i_size must be checked after we know the pages are Uptodate.
2697  		 *
2698  		 * Checking i_size after the uptodate check allows us to calculate
2699  		 * the correct value for "nr", which means the zero-filled
2700  		 * part of the page is not copied back to userspace (unless
2701  		 * another truncate extends the file - this is desired though).
2702  		 */
2703  		isize = i_size_read(inode);
2704  		if (unlikely(iocb->ki_pos >= isize))
2705  			goto put_folios;
2706  		end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
2707  
2708  		/*
2709  		 * Once we start copying data, we don't want to be touching any
2710  		 * cachelines that might be contended:
2711  		 */
2712  		writably_mapped = mapping_writably_mapped(mapping);
2713  
2714  		/*
2715  		 * When a read accesses the same folio several times, only
2716  		 * mark it as accessed the first time.
2717  		 */
2718  		if (!pos_same_folio(iocb->ki_pos, ra->prev_pos - 1,
2719  							fbatch.folios[0]))
2720  			folio_mark_accessed(fbatch.folios[0]);
2721  
2722  		for (i = 0; i < folio_batch_count(&fbatch); i++) {
2723  			struct folio *folio = fbatch.folios[i];
2724  			size_t fsize = folio_size(folio);
2725  			size_t offset = iocb->ki_pos & (fsize - 1);
2726  			size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
2727  					     fsize - offset);
2728  			size_t copied;
2729  
2730  			if (end_offset < folio_pos(folio))
2731  				break;
2732  			if (i > 0)
2733  				folio_mark_accessed(folio);
2734  			/*
2735  			 * If users can be writing to this folio using arbitrary
2736  			 * virtual addresses, take care of potential aliasing
2737  			 * before reading the folio on the kernel side.
2738  			 */
2739  			if (writably_mapped)
2740  				flush_dcache_folio(folio);
2741  
2742  			copied = copy_folio_to_iter(folio, offset, bytes, iter);
2743  
2744  			already_read += copied;
2745  			iocb->ki_pos += copied;
2746  			ra->prev_pos = iocb->ki_pos;
2747  
2748  			if (copied < bytes) {
2749  				error = -EFAULT;
2750  				break;
2751  			}
2752  		}
2753  put_folios:
2754  		for (i = 0; i < folio_batch_count(&fbatch); i++)
2755  			folio_put(fbatch.folios[i]);
2756  		folio_batch_init(&fbatch);
2757  	} while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);
2758  
2759  	file_accessed(filp);
2760  
2761  	return already_read ? already_read : error;
2762  }
2763  EXPORT_SYMBOL_GPL(filemap_read);
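
/*
 * A sketch of a filesystem calling this directly from its ->read_iter()
 * after doing its own checks (myfs_read_iter() is made up):
 *
 *	static ssize_t myfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		... filesystem-specific validation ...
 *		return filemap_read(iocb, to, 0);
 *	}
 */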
2764  
2765  /**
2766   * generic_file_read_iter - generic filesystem read routine
2767   * @iocb:	kernel I/O control block
2768   * @iter:	destination for the data read
2769   *
2770   * This is the "read_iter()" routine for all filesystems
2771   * that can use the page cache directly.
2772   *
2773   * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
2774   * be returned when no data can be read without waiting for I/O requests
2775   * to complete; it doesn't prevent readahead.
2776   *
2777   * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
2778   * requests shall be made for the read or for readahead.  When no data
2779   * can be read, -EAGAIN shall be returned.  When readahead would be
2780   * triggered, a partial, possibly empty read shall be returned.
2781   *
2782   * Return:
2783   * * number of bytes copied, even for partial reads
2784   * * negative error code (or 0 if IOCB_NOIO) if nothing was read
2785   */
2786  ssize_t
2787  generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2788  {
2789  	size_t count = iov_iter_count(iter);
2790  	ssize_t retval = 0;
2791  
2792  	if (!count)
2793  		return 0; /* skip atime */
2794  
2795  	if (iocb->ki_flags & IOCB_DIRECT) {
2796  		struct file *file = iocb->ki_filp;
2797  		struct address_space *mapping = file->f_mapping;
2798  		struct inode *inode = mapping->host;
2799  
2800  		if (iocb->ki_flags & IOCB_NOWAIT) {
2801  			if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
2802  						iocb->ki_pos + count - 1))
2803  				return -EAGAIN;
2804  		} else {
2805  			retval = filemap_write_and_wait_range(mapping,
2806  						iocb->ki_pos,
2807  					        iocb->ki_pos + count - 1);
2808  			if (retval < 0)
2809  				return retval;
2810  		}
2811  
2812  		file_accessed(file);
2813  
2814  		retval = mapping->a_ops->direct_IO(iocb, iter);
2815  		if (retval >= 0) {
2816  			iocb->ki_pos += retval;
2817  			count -= retval;
2818  		}
2819  		if (retval != -EIOCBQUEUED)
2820  			iov_iter_revert(iter, count - iov_iter_count(iter));
2821  
2822  		/*
2823  		 * Btrfs can have a short DIO read if we encounter
2824  		 * compressed extents, so if there was an error, or if
2825  		 * we've already read everything we wanted to, or if
2826  		 * there was a short read because we hit EOF, go ahead
2827  		 * and return.  Otherwise fallthrough to buffered io for
2828  		 * the rest of the read.  Buffered reads will not work for
2829  		 * DAX files, so don't bother trying.
2830  		 */
2831  		if (retval < 0 || !count || IS_DAX(inode))
2832  			return retval;
2833  		if (iocb->ki_pos >= i_size_read(inode))
2834  			return retval;
2835  	}
2836  
2837  	return filemap_read(iocb, iter, retval);
2838  }
2839  EXPORT_SYMBOL(generic_file_read_iter);
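
/*
 * Filesystems that need nothing extra typically wire this straight into
 * their file_operations (myfs_file_ops is illustrative):
 *
 *	const struct file_operations myfs_file_ops = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		...
 *	};
 */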
2840  
2841  /*
2842   * Splice subpages from a folio into a pipe.
2843   */
2844  size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
2845  			      struct folio *folio, loff_t fpos, size_t size)
2846  {
2847  	struct page *page;
2848  	size_t spliced = 0, offset = offset_in_folio(folio, fpos);
2849  
2850  	page = folio_page(folio, offset / PAGE_SIZE);
2851  	size = min(size, folio_size(folio) - offset);
2852  	offset %= PAGE_SIZE;
2853  
2854  	while (spliced < size &&
2855  	       !pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
2856  		struct pipe_buffer *buf = pipe_head_buf(pipe);
2857  		size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced);
2858  
2859  		*buf = (struct pipe_buffer) {
2860  			.ops	= &page_cache_pipe_buf_ops,
2861  			.page	= page,
2862  			.offset	= offset,
2863  			.len	= part,
2864  		};
2865  		folio_get(folio);
2866  		pipe->head++;
2867  		page++;
2868  		spliced += part;
2869  		offset = 0;
2870  	}
2871  
2872  	return spliced;
2873  }
2874  
2875  /*
2876   * Splice folios from the pagecache of a buffered (ie. non-O_DIRECT) file into
2877   * a pipe.
2878   */
2879  ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
2880  			    struct pipe_inode_info *pipe,
2881  			    size_t len, unsigned int flags)
2882  {
2883  	struct folio_batch fbatch;
2884  	struct kiocb iocb;
2885  	size_t total_spliced = 0, used, npages;
2886  	loff_t isize, end_offset;
2887  	bool writably_mapped;
2888  	int i, error = 0;
2889  
2890  	init_sync_kiocb(&iocb, in);
2891  	iocb.ki_pos = *ppos;
2892  
2893  	/* Work out how much data we can actually add into the pipe */
2894  	used = pipe_occupancy(pipe->head, pipe->tail);
2895  	npages = max_t(ssize_t, pipe->max_usage - used, 0);
2896  	len = min_t(size_t, len, npages * PAGE_SIZE);
2897  
2898  	folio_batch_init(&fbatch);
2899  
2900  	do {
2901  		cond_resched();
2902  
2903  		if (*ppos >= i_size_read(file_inode(in)))
2904  			break;
2905  
2906  		iocb.ki_pos = *ppos;
2907  		error = filemap_get_pages(&iocb, len, &fbatch, true);
2908  		if (error < 0)
2909  			break;
2910  
2911  		/*
2912  		 * i_size must be checked after we know the pages are Uptodate.
2913  		 *
2914  		 * Checking i_size after the uptodate check allows us to calculate
2915  		 * the correct value for "nr", which means the zero-filled
2916  		 * part of the page is not copied back to userspace (unless
2917  		 * another truncate extends the file - this is desired though).
2918  		 */
2919  		isize = i_size_read(file_inode(in));
2920  		if (unlikely(*ppos >= isize))
2921  			break;
2922  		end_offset = min_t(loff_t, isize, *ppos + len);
2923  
2924  		/*
2925  		 * Once we start copying data, we don't want to be touching any
2926  		 * cachelines that might be contended:
2927  		 */
2928  		writably_mapped = mapping_writably_mapped(in->f_mapping);
2929  
2930  		for (i = 0; i < folio_batch_count(&fbatch); i++) {
2931  			struct folio *folio = fbatch.folios[i];
2932  			size_t n;
2933  
2934  			if (folio_pos(folio) >= end_offset)
2935  				goto out;
2936  			folio_mark_accessed(folio);
2937  
2938  			/*
2939  			 * If users can be writing to this folio using arbitrary
2940  			 * virtual addresses, take care of potential aliasing
2941  			 * before reading the folio on the kernel side.
2942  			 */
2943  			if (writably_mapped)
2944  				flush_dcache_folio(folio);
2945  
2946  			n = min_t(loff_t, len, isize - *ppos);
2947  			n = splice_folio_into_pipe(pipe, folio, *ppos, n);
2948  			if (!n)
2949  				goto out;
2950  			len -= n;
2951  			total_spliced += n;
2952  			*ppos += n;
2953  			in->f_ra.prev_pos = *ppos;
2954  			if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
2955  				goto out;
2956  		}
2957  
2958  		folio_batch_release(&fbatch);
2959  	} while (len);
2960  
2961  out:
2962  	folio_batch_release(&fbatch);
2963  	file_accessed(in);
2964  
2965  	return total_spliced ? total_spliced : error;
2966  }
2967  EXPORT_SYMBOL(filemap_splice_read);
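
/*
 * Like generic_file_read_iter(), this is normally plugged straight into
 * file_operations, e.g.:
 *
 *	.splice_read	= filemap_splice_read,
 */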
2968  
2969  static inline loff_t folio_seek_hole_data(struct xa_state *xas,
2970  		struct address_space *mapping, struct folio *folio,
2971  		loff_t start, loff_t end, bool seek_data)
2972  {
2973  	const struct address_space_operations *ops = mapping->a_ops;
2974  	size_t offset, bsz = i_blocksize(mapping->host);
2975  
2976  	if (xa_is_value(folio) || folio_test_uptodate(folio))
2977  		return seek_data ? start : end;
2978  	if (!ops->is_partially_uptodate)
2979  		return seek_data ? end : start;
2980  
2981  	xas_pause(xas);
2982  	rcu_read_unlock();
2983  	folio_lock(folio);
2984  	if (unlikely(folio->mapping != mapping))
2985  		goto unlock;
2986  
2987  	offset = offset_in_folio(folio, start) & ~(bsz - 1);
2988  
2989  	do {
2990  		if (ops->is_partially_uptodate(folio, offset, bsz) ==
2991  							seek_data)
2992  			break;
2993  		start = (start + bsz) & ~(bsz - 1);
2994  		offset += bsz;
2995  	} while (offset < folio_size(folio));
2996  unlock:
2997  	folio_unlock(folio);
2998  	rcu_read_lock();
2999  	return start;
3000  }
3001  
3002  static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
3003  {
3004  	if (xa_is_value(folio))
3005  		return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
3006  	return folio_size(folio);
3007  }
3008  
3009  /**
3010   * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.
3011   * @mapping: Address space to search.
3012   * @start: First byte to consider.
3013   * @end: Limit of search (exclusive).
3014   * @whence: Either SEEK_HOLE or SEEK_DATA.
3015   *
3016   * If the page cache knows which blocks contain holes and which blocks
3017   * contain data, your filesystem can use this function to implement
3018   * SEEK_HOLE and SEEK_DATA.  This is useful for filesystems which are
3019   * entirely memory-based such as tmpfs, and filesystems which support
3020   * unwritten extents.
3021   *
3022   * Return: The requested offset on success, or -ENXIO if @whence specifies
3023   * SEEK_DATA and there is no data after @start.  There is an implicit hole
3024   * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
3025   * and @end contain data.
3026   */
3027  loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
3028  		loff_t end, int whence)
3029  {
3030  	XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
3031  	pgoff_t max = (end - 1) >> PAGE_SHIFT;
3032  	bool seek_data = (whence == SEEK_DATA);
3033  	struct folio *folio;
3034  
3035  	if (end <= start)
3036  		return -ENXIO;
3037  
3038  	rcu_read_lock();
3039  	while ((folio = find_get_entry(&xas, max, XA_PRESENT))) {
3040  		loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
3041  		size_t seek_size;
3042  
3043  		if (start < pos) {
3044  			if (!seek_data)
3045  				goto unlock;
3046  			start = pos;
3047  		}
3048  
3049  		seek_size = seek_folio_size(&xas, folio);
3050  		pos = round_up((u64)pos + 1, seek_size);
3051  		start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
3052  				seek_data);
3053  		if (start < pos)
3054  			goto unlock;
3055  		if (start >= end)
3056  			break;
3057  		if (seek_size > PAGE_SIZE)
3058  			xas_set(&xas, pos >> PAGE_SHIFT);
3059  		if (!xa_is_value(folio))
3060  			folio_put(folio);
3061  	}
3062  	if (seek_data)
3063  		start = -ENXIO;
3064  unlock:
3065  	rcu_read_unlock();
3066  	if (folio && !xa_is_value(folio))
3067  		folio_put(folio);
3068  	if (start > end)
3069  		return end;
3070  	return start;
3071  }
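
/*
 * A sketch of an llseek implementation built on this helper
 * (myfs_llseek() is made up; tmpfs follows a similar pattern):
 *
 *	static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		struct inode *inode = file_inode(file);
 *
 *		if (whence != SEEK_HOLE && whence != SEEK_DATA)
 *			return generic_file_llseek(file, offset, whence);
 *		offset = mapping_seek_hole_data(file->f_mapping, offset,
 *						i_size_read(inode), whence);
 *		if (offset >= 0)
 *			offset = vfs_setpos(file, offset,
 *					    inode->i_sb->s_maxbytes);
 *		return offset;
 *	}
 */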
3072  
3073  #ifdef CONFIG_MMU
3074  #define MMAP_LOTSAMISS  (100)
3075  /*
3076   * lock_folio_maybe_drop_mmap - lock the folio, possibly dropping the mmap_lock
3077   * @vmf: the vm_fault for this fault.
3078   * @folio: the folio to lock.
3079   * @fpin: the pointer to the file we may pin (or which is already pinned).
3080   *
3081   * This works similarly to __folio_lock_or_retry() in that it can drop the
3082   * mmap_lock.  It differs in that it actually returns with the folio locked
3083   * if it returns 1, and 0 if it couldn't lock the folio.  If we did have
3084   * to drop the mmap_lock then @fpin will point to the pinned file and
3085   * needs to be fput()'ed at a later point.
3086   */
3087  static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
3088  				     struct file **fpin)
3089  {
3090  	if (folio_trylock(folio))
3091  		return 1;
3092  
3093  	/*
3094  	 * NOTE! This will make us return with VM_FAULT_RETRY, but with
3095  	 * the mmap_lock still held. That's how FAULT_FLAG_RETRY_NOWAIT
3096  	 * is supposed to work. We have way too many special cases.
3097  	 */
3098  	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
3099  		return 0;
3100  
3101  	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
3102  	if (vmf->flags & FAULT_FLAG_KILLABLE) {
3103  		if (__folio_lock_killable(folio)) {
3104  			/*
3105  			 * We didn't have the right flags to drop the mmap_lock,
3106  			 * but all fault_handlers only check for fatal signals
3107  			 * if we return VM_FAULT_RETRY, so we need to drop the
3108  			 * mmap_lock here and return 0 if we don't have a fpin.
3109  			 */
3110  			if (*fpin == NULL)
3111  				mmap_read_unlock(vmf->vma->vm_mm);
3112  			return 0;
3113  		}
3114  	} else
3115  		__folio_lock(folio);
3116  
3117  	return 1;
3118  }
3119  
3120  /*
3121   * Synchronous readahead happens when we don't even find a page in the page
3122   * cache at all.  We don't want to perform IO under the mmap_lock, so if we
3123   * have to drop it, we return the file that was pinned in order to do the IO.
3124   * If we didn't pin a file then we return NULL.  The file that is
3125   * returned needs to be fput()'ed when we're done with it.
3126   */
3127  static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
3128  {
3129  	struct file *file = vmf->vma->vm_file;
3130  	struct file_ra_state *ra = &file->f_ra;
3131  	struct address_space *mapping = file->f_mapping;
3132  	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
3133  	struct file *fpin = NULL;
3134  	unsigned long vm_flags = vmf->vma->vm_flags;
3135  	unsigned int mmap_miss;
3136  
3137  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3138  	/* Use the readahead code, even if readahead is disabled */
3139  	if (vm_flags & VM_HUGEPAGE) {
3140  		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3141  		ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
3142  		ra->size = HPAGE_PMD_NR;
3143  		/*
3144  		 * Fetch two PMD folios, so we get the chance to actually
3145  		 * readahead, unless we've been told not to.
3146  		 */
3147  		if (!(vm_flags & VM_RAND_READ))
3148  			ra->size *= 2;
3149  		ra->async_size = HPAGE_PMD_NR;
3150  		page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
3151  		return fpin;
3152  	}
3153  #endif
3154  
3155  	/* If we don't want any read-ahead, don't bother */
3156  	if (vm_flags & VM_RAND_READ)
3157  		return fpin;
3158  	if (!ra->ra_pages)
3159  		return fpin;
3160  
3161  	if (vm_flags & VM_SEQ_READ) {
3162  		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3163  		page_cache_sync_ra(&ractl, ra->ra_pages);
3164  		return fpin;
3165  	}
3166  
3167  	/* Avoid banging the cache line if not needed */
3168  	mmap_miss = READ_ONCE(ra->mmap_miss);
3169  	if (mmap_miss < MMAP_LOTSAMISS * 10)
3170  		WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
3171  
3172  	/*
3173  	 * Do we miss much more than hit in this file? If so,
3174  	 * stop bothering with read-ahead. It will only hurt.
3175  	 */
3176  	if (mmap_miss > MMAP_LOTSAMISS)
3177  		return fpin;
3178  
3179  	/*
3180  	 * mmap read-around
3181  	 */
3182  	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3183  	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
3184  	ra->size = ra->ra_pages;
3185  	ra->async_size = ra->ra_pages / 4;
3186  	ractl._index = ra->start;
3187  	page_cache_ra_order(&ractl, ra, 0);
3188  	return fpin;
3189  }
3190  
3191  /*
3192   * Asynchronous readahead happens when we find the page with PG_readahead set,
3193   * so we may want to extend the readahead further.  We return the file that
3194   * was pinned if we have to drop the mmap_lock in order to do IO.
3195   */
3196  static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
3197  					    struct folio *folio)
3198  {
3199  	struct file *file = vmf->vma->vm_file;
3200  	struct file_ra_state *ra = &file->f_ra;
3201  	DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
3202  	struct file *fpin = NULL;
3203  	unsigned int mmap_miss;
3204  
3205  	/* If we don't want any read-ahead, don't bother */
3206  	if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
3207  		return fpin;
3208  
3209  	mmap_miss = READ_ONCE(ra->mmap_miss);
3210  	if (mmap_miss)
3211  		WRITE_ONCE(ra->mmap_miss, --mmap_miss);
3212  
3213  	if (folio_test_readahead(folio)) {
3214  		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3215  		page_cache_async_ra(&ractl, folio, ra->ra_pages);
3216  	}
3217  	return fpin;
3218  }
3219  
3220  /**
3221   * filemap_fault - read in file data for page fault handling
3222   * @vmf:	struct vm_fault containing details of the fault
3223   *
3224   * filemap_fault() is invoked via the vma operations vector for a
3225   * mapped memory region to read in file data during a page fault.
3226   *
3227   * The goto's are kind of ugly, but this streamlines the normal case of having
3228   * it in the page cache, and handles the special cases reasonably without
3229   * having a lot of duplicated code.
3230   *
3231   * vma->vm_mm->mmap_lock must be held on entry.
3232   *
3233   * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
3234   * may be dropped before doing I/O or by lock_folio_maybe_drop_mmap().
3235   *
3236   * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
3237   * has not been released.
3238   *
3239   * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
3240   *
3241   * Return: bitwise-OR of %VM_FAULT_ codes.
3242   */
3243  vm_fault_t filemap_fault(struct vm_fault *vmf)
3244  {
3245  	int error;
3246  	struct file *file = vmf->vma->vm_file;
3247  	struct file *fpin = NULL;
3248  	struct address_space *mapping = file->f_mapping;
3249  	struct inode *inode = mapping->host;
3250  	pgoff_t max_idx, index = vmf->pgoff;
3251  	struct folio *folio;
3252  	vm_fault_t ret = 0;
3253  	bool mapping_locked = false;
3254  
3255  	max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3256  	if (unlikely(index >= max_idx))
3257  		return VM_FAULT_SIGBUS;
3258  
3259  	/*
3260  	 * Do we have something in the page cache already?
3261  	 */
3262  	folio = filemap_get_folio(mapping, index);
3263  	if (likely(!IS_ERR(folio))) {
3264  		/*
3265  		 * We found the page, so try async readahead before waiting for
3266  		 * the lock.
3267  		 */
3268  		if (!(vmf->flags & FAULT_FLAG_TRIED))
3269  			fpin = do_async_mmap_readahead(vmf, folio);
3270  		if (unlikely(!folio_test_uptodate(folio))) {
3271  			filemap_invalidate_lock_shared(mapping);
3272  			mapping_locked = true;
3273  		}
3274  	} else {
3275  		/* No page in the page cache at all */
3276  		count_vm_event(PGMAJFAULT);
3277  		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
3278  		ret = VM_FAULT_MAJOR;
3279  		fpin = do_sync_mmap_readahead(vmf);
3280  retry_find:
3281  		/*
3282  		 * See comment in filemap_create_folio() for why we need the
3283  		 * invalidate_lock
3284  		 */
3285  		if (!mapping_locked) {
3286  			filemap_invalidate_lock_shared(mapping);
3287  			mapping_locked = true;
3288  		}
3289  		folio = __filemap_get_folio(mapping, index,
3290  					  FGP_CREAT|FGP_FOR_MMAP,
3291  					  vmf->gfp_mask);
3292  		if (IS_ERR(folio)) {
3293  			if (fpin)
3294  				goto out_retry;
3295  			filemap_invalidate_unlock_shared(mapping);
3296  			return VM_FAULT_OOM;
3297  		}
3298  	}
3299  
3300  	if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
3301  		goto out_retry;
3302  
3303  	/* Did it get truncated? */
3304  	if (unlikely(folio->mapping != mapping)) {
3305  		folio_unlock(folio);
3306  		folio_put(folio);
3307  		goto retry_find;
3308  	}
3309  	VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
3310  
3311  	/*
3312  	 * We have a locked folio in the page cache; now we need to check
3313  	 * that it's up-to-date. If not, it is going to be due to an error.
3314  	 */
3315  	if (unlikely(!folio_test_uptodate(folio))) {
3316  		/*
3317  		 * The page was in cache and uptodate and now it is not.
3318  		 * Strange but possible since we didn't hold the page lock all
3319  		 * the time. Let's drop everything get the invalidate lock and
3320  		 * the time. Let's drop everything, get the invalidate lock and
3321  		 */
3322  		if (!mapping_locked) {
3323  			folio_unlock(folio);
3324  			folio_put(folio);
3325  			goto retry_find;
3326  		}
3327  		goto page_not_uptodate;
3328  	}
3329  
3330  	/*
3331  	 * We've made it this far and we had to drop our mmap_lock, now is the
3332  	 * time to return to the upper layer and have it re-find the vma and
3333  	 * redo the fault.
3334  	 */
3335  	if (fpin) {
3336  		folio_unlock(folio);
3337  		goto out_retry;
3338  	}
3339  	if (mapping_locked)
3340  		filemap_invalidate_unlock_shared(mapping);
3341  
3342  	/*
3343  	 * Found the page and have a reference on it.
3344  	 * We must recheck i_size under page lock.
3345  	 */
3346  	max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3347  	if (unlikely(index >= max_idx)) {
3348  		folio_unlock(folio);
3349  		folio_put(folio);
3350  		return VM_FAULT_SIGBUS;
3351  	}
3352  
3353  	vmf->page = folio_file_page(folio, index);
3354  	return ret | VM_FAULT_LOCKED;
3355  
3356  page_not_uptodate:
3357  	/*
3358  	 * Umm, take care of errors if the page isn't up-to-date.
3359  	 * Try to re-read it _once_. We do this synchronously,
3360  	 * because there really aren't any performance issues here
3361  	 * and we need to check for errors.
3362  	 */
3363  	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3364  	error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
3365  	if (fpin)
3366  		goto out_retry;
3367  	folio_put(folio);
3368  
3369  	if (!error || error == AOP_TRUNCATED_PAGE)
3370  		goto retry_find;
3371  	filemap_invalidate_unlock_shared(mapping);
3372  
3373  	return VM_FAULT_SIGBUS;
3374  
3375  out_retry:
3376  	/*
3377  	 * We dropped the mmap_lock, so we need to return to the fault handler
3378  	 * to re-find the vma and come back to find our hopefully still
3379  	 * populated page.
3380  	 */
3381  	if (!IS_ERR(folio))
3382  		folio_put(folio);
3383  	if (mapping_locked)
3384  		filemap_invalidate_unlock_shared(mapping);
3385  	if (fpin)
3386  		fput(fpin);
3387  	return ret | VM_FAULT_RETRY;
3388  }
3389  EXPORT_SYMBOL(filemap_fault);
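
/*
 * Hypothetical sketch (not part of this file): filesystems commonly reuse
 * filemap_fault() and filemap_map_pages() while supplying their own
 * ->page_mkwrite to reserve blocks before a shared mapping is written to.
 * All "myfs" identifiers below are illustrative.
 */
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	/* fs-specific block reservation would go here ... */
	return filemap_page_mkwrite(vmf);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= myfs_page_mkwrite,
};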
3390  
3391  static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
3392  		pgoff_t start)
3393  {
3394  	struct mm_struct *mm = vmf->vma->vm_mm;
3395  
3396  	/* Huge page is mapped? No need to proceed. */
3397  	if (pmd_trans_huge(*vmf->pmd)) {
3398  		folio_unlock(folio);
3399  		folio_put(folio);
3400  		return true;
3401  	}
3402  
3403  	if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
3404  		struct page *page = folio_file_page(folio, start);
3405  		vm_fault_t ret = do_set_pmd(vmf, page);
3406  		if (!ret) {
3407  			/* The page is mapped successfully, reference consumed. */
3408  			folio_unlock(folio);
3409  			return true;
3410  		}
3411  	}
3412  
3413  	if (pmd_none(*vmf->pmd))
3414  		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
3415  
3416  	/* See comment in handle_pte_fault() */
3417  	if (pmd_devmap_trans_unstable(vmf->pmd)) {
3418  		folio_unlock(folio);
3419  		folio_put(folio);
3420  		return true;
3421  	}
3422  
3423  	return false;
3424  }
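
/*
 * Illustrative note (not from this file): on x86-64 with 4K pages a
 * PMD-mappable folio has order HPAGE_PMD_ORDER == 9, i.e. 512 pages
 * (2MB), which is what the folio_test_pmd_mappable() check above is
 * gating on.
 */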
3425  
3426  static struct folio *next_uptodate_page(struct folio *folio,
3427  				       struct address_space *mapping,
3428  				       struct xa_state *xas, pgoff_t end_pgoff)
3429  {
3430  	unsigned long max_idx;
3431  
3432  	do {
3433  		if (!folio)
3434  			return NULL;
3435  		if (xas_retry(xas, folio))
3436  			continue;
3437  		if (xa_is_value(folio))
3438  			continue;
3439  		if (folio_test_locked(folio))
3440  			continue;
3441  		if (!folio_try_get_rcu(folio))
3442  			continue;
3443  		/* Has the page moved or been split? */
3444  		if (unlikely(folio != xas_reload(xas)))
3445  			goto skip;
3446  		if (!folio_test_uptodate(folio) || folio_test_readahead(folio))
3447  			goto skip;
3448  		if (!folio_trylock(folio))
3449  			goto skip;
3450  		if (folio->mapping != mapping)
3451  			goto unlock;
3452  		if (!folio_test_uptodate(folio))
3453  			goto unlock;
3454  		max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3455  		if (xas->xa_index >= max_idx)
3456  			goto unlock;
3457  		return folio;
3458  unlock:
3459  		folio_unlock(folio);
3460  skip:
3461  		folio_put(folio);
3462  	} while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);
3463  
3464  	return NULL;
3465  }
3466  
3467  static inline struct folio *first_map_page(struct address_space *mapping,
3468  					  struct xa_state *xas,
3469  					  pgoff_t end_pgoff)
3470  {
3471  	return next_uptodate_page(xas_find(xas, end_pgoff),
3472  				  mapping, xas, end_pgoff);
3473  }
3474  
3475  static inline struct folio *next_map_page(struct address_space *mapping,
3476  					 struct xa_state *xas,
3477  					 pgoff_t end_pgoff)
3478  {
3479  	return next_uptodate_page(xas_next_entry(xas, end_pgoff),
3480  				  mapping, xas, end_pgoff);
3481  }
3482  
3483  vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3484  			     pgoff_t start_pgoff, pgoff_t end_pgoff)
3485  {
3486  	struct vm_area_struct *vma = vmf->vma;
3487  	struct file *file = vma->vm_file;
3488  	struct address_space *mapping = file->f_mapping;
3489  	pgoff_t last_pgoff = start_pgoff;
3490  	unsigned long addr;
3491  	XA_STATE(xas, &mapping->i_pages, start_pgoff);
3492  	struct folio *folio;
3493  	struct page *page;
3494  	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
3495  	vm_fault_t ret = 0;
3496  
3497  	rcu_read_lock();
3498  	folio = first_map_page(mapping, &xas, end_pgoff);
3499  	if (!folio)
3500  		goto out;
3501  
3502  	if (filemap_map_pmd(vmf, folio, start_pgoff)) {
3503  		ret = VM_FAULT_NOPAGE;
3504  		goto out;
3505  	}
3506  
3507  	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
3508  	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
3509  	do {
3510  again:
3511  		page = folio_file_page(folio, xas.xa_index);
3512  		if (PageHWPoison(page))
3513  			goto unlock;
3514  
3515  		if (mmap_miss > 0)
3516  			mmap_miss--;
3517  
3518  		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
3519  		vmf->pte += xas.xa_index - last_pgoff;
3520  		last_pgoff = xas.xa_index;
3521  
3522  		/*
3523  		 * NOTE: If there are PTE markers, we'll leave them to be
3524  		 * handled in the specific fault path; this prohibits the
3525  		 * fault-around logic.
3526  		 */
3527  		if (!pte_none(*vmf->pte))
3528  			goto unlock;
3529  
3530  		/* We're about to handle the fault */
3531  		if (vmf->address == addr)
3532  			ret = VM_FAULT_NOPAGE;
3533  
3534  		do_set_pte(vmf, page, addr);
3535  		/* no need to invalidate: a not-present page won't be cached */
3536  		update_mmu_cache(vma, addr, vmf->pte);
3537  		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
3538  			xas.xa_index++;
3539  			folio_ref_inc(folio);
3540  			goto again;
3541  		}
3542  		folio_unlock(folio);
3543  		continue;
3544  unlock:
3545  		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
3546  			xas.xa_index++;
3547  			goto again;
3548  		}
3549  		folio_unlock(folio);
3550  		folio_put(folio);
3551  	} while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
3552  	pte_unmap_unlock(vmf->pte, vmf->ptl);
3553  out:
3554  	rcu_read_unlock();
3555  	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
3556  	return ret;
3557  }
3558  EXPORT_SYMBOL(filemap_map_pages);
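
/*
 * Illustrative context (not from this file): the fault path calls
 * filemap_map_pages() with a window derived from fault_around_bytes
 * (64KB by default, i.e. a 16-page range on 4K-page systems) around the
 * faulting address; only folios already uptodate in the cache get
 * mapped, everything else is left to filemap_fault().
 */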
3559  
3560  vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3561  {
3562  	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
3563  	struct folio *folio = page_folio(vmf->page);
3564  	vm_fault_t ret = VM_FAULT_LOCKED;
3565  
3566  	sb_start_pagefault(mapping->host->i_sb);
3567  	file_update_time(vmf->vma->vm_file);
3568  	folio_lock(folio);
3569  	if (folio->mapping != mapping) {
3570  		folio_unlock(folio);
3571  		ret = VM_FAULT_NOPAGE;
3572  		goto out;
3573  	}
3574  	/*
3575  	 * We mark the folio dirty here, before the write happens, so that
3576  	 * when a freeze is in progress writeback during freezing is
3577  	 * guaranteed to see the dirty folio and write-protect it again.
3578  	 */
3579  	folio_mark_dirty(folio);
3580  	folio_wait_stable(folio);
3581  out:
3582  	sb_end_pagefault(mapping->host->i_sb);
3583  	return ret;
3584  }
3585  
3586  const struct vm_operations_struct generic_file_vm_ops = {
3587  	.fault		= filemap_fault,
3588  	.map_pages	= filemap_map_pages,
3589  	.page_mkwrite	= filemap_page_mkwrite,
3590  };
3591  
3592  /* This is used for a general mmap of a disk file */
3593  
3594  int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3595  {
3596  	struct address_space *mapping = file->f_mapping;
3597  
3598  	if (!mapping->a_ops->read_folio)
3599  		return -ENOEXEC;
3600  	file_accessed(file);
3601  	vma->vm_ops = &generic_file_vm_ops;
3602  	return 0;
3603  }
3604  
3605  /*
3606   * This is for filesystems which do not implement ->writepage.
3607   */
3608  int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3609  {
3610  	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
3611  		return -EINVAL;
3612  	return generic_file_mmap(file, vma);
3613  }
3614  #else
3615  vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3616  {
3617  	return VM_FAULT_SIGBUS;
3618  }
3619  int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3620  {
3621  	return -ENOSYS;
3622  }
3623  int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3624  {
3625  	return -ENOSYS;
3626  }
3627  #endif /* CONFIG_MMU */
3628  
3629  EXPORT_SYMBOL(filemap_page_mkwrite);
3630  EXPORT_SYMBOL(generic_file_mmap);
3631  EXPORT_SYMBOL(generic_file_readonly_mmap);
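
/*
 * Hypothetical usage sketch (not part of this file): a simple filesystem
 * wires up the generic mmap path through its file_operations; the "myfs"
 * name is illustrative.
 */
static const struct file_operations myfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
};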
3632  
3633  static struct folio *do_read_cache_folio(struct address_space *mapping,
3634  		pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)
3635  {
3636  	struct folio *folio;
3637  	int err;
3638  
3639  	if (!filler)
3640  		filler = mapping->a_ops->read_folio;
3641  repeat:
3642  	folio = filemap_get_folio(mapping, index);
3643  	if (IS_ERR(folio)) {
3644  		folio = filemap_alloc_folio(gfp, 0);
3645  		if (!folio)
3646  			return ERR_PTR(-ENOMEM);
3647  		err = filemap_add_folio(mapping, folio, index, gfp);
3648  		if (unlikely(err)) {
3649  			folio_put(folio);
3650  			if (err == -EEXIST)
3651  				goto repeat;
3652  			/* Presumably ENOMEM for xarray node */
3653  			return ERR_PTR(err);
3654  		}
3655  
3656  		goto filler;
3657  	}
3658  	if (folio_test_uptodate(folio))
3659  		goto out;
3660  
3661  	if (!folio_trylock(folio)) {
3662  		folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
3663  		goto repeat;
3664  	}
3665  
3666  	/* Folio was truncated from mapping */
3667  	if (!folio->mapping) {
3668  		folio_unlock(folio);
3669  		folio_put(folio);
3670  		goto repeat;
3671  	}
3672  
3673  	/* Someone else locked and filled the page in a very small window */
3674  	if (folio_test_uptodate(folio)) {
3675  		folio_unlock(folio);
3676  		goto out;
3677  	}
3678  
3679  filler:
3680  	err = filemap_read_folio(file, filler, folio);
3681  	if (err) {
3682  		folio_put(folio);
3683  		if (err == AOP_TRUNCATED_PAGE)
3684  			goto repeat;
3685  		return ERR_PTR(err);
3686  	}
3687  
3688  out:
3689  	folio_mark_accessed(folio);
3690  	return folio;
3691  }
3692  
3693  /**
3694   * read_cache_folio - Read into page cache, fill it if needed.
3695   * @mapping: The address_space to read from.
3696   * @index: The index to read.
3697   * @filler: Function to perform the read, or NULL to use aops->read_folio().
3698   * @file: Passed to filler function, may be NULL if not required.
3699   *
3700   * Read one page into the page cache.  If it succeeds, the folio returned
3701   * will contain @index, but it may not be the first page of the folio.
3702   *
3703   * If the filler function returns an error, it will be returned to the
3704   * caller.
3705   *
3706   * Context: May sleep.  Expects mapping->invalidate_lock to be held.
3707   * Return: An uptodate folio on success, ERR_PTR() on failure.
3708   */
3709  struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
3710  		filler_t filler, struct file *file)
3711  {
3712  	return do_read_cache_folio(mapping, index, filler, file,
3713  			mapping_gfp_mask(mapping));
3714  }
3715  EXPORT_SYMBOL(read_cache_folio);
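
/*
 * Hypothetical usage sketch (not part of this file): reading the first
 * folio of an inode with the mapping's own ->read_folio, honouring the
 * invalidate_lock requirement and the ERR_PTR() convention documented
 * above.  "myfs_peek_first_byte" is illustrative.
 */
static int myfs_peek_first_byte(struct inode *inode, u8 *out)
{
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio;
	u8 *kaddr;

	filemap_invalidate_lock_shared(mapping);
	folio = read_cache_folio(mapping, 0, NULL, NULL);
	filemap_invalidate_unlock_shared(mapping);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* returned folio is uptodate, unlocked, with a reference held */
	kaddr = kmap_local_folio(folio, 0);
	*out = *kaddr;
	kunmap_local(kaddr);
	folio_put(folio);
	return 0;
}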
3716  
3717  /**
3718   * mapping_read_folio_gfp - Read into page cache, using specified allocation flags.
3719   * @mapping:	The address_space for the folio.
3720   * @index:	The index that the allocated folio will contain.
3721   * @gfp:	The page allocator flags to use if allocating.
3722   *
3723   * This is the same as "read_cache_folio(mapping, index, NULL, NULL)", but with
3724   * any new memory allocations done using the specified allocation flags.
3725   *
3726   * The most likely error from this function is EIO, but ENOMEM is
3727   * possible and so is EINTR.  If ->read_folio returns another error,
3728   * that will be returned to the caller.
3729   *
3730   * The function expects mapping->invalidate_lock to be already held.
3731   *
3732   * Return: Uptodate folio on success, ERR_PTR() on failure.
3733   */
3734  struct folio *mapping_read_folio_gfp(struct address_space *mapping,
3735  		pgoff_t index, gfp_t gfp)
3736  {
3737  	return do_read_cache_folio(mapping, index, NULL, NULL, gfp);
3738  }
3739  EXPORT_SYMBOL(mapping_read_folio_gfp);
3740  
3741  static struct page *do_read_cache_page(struct address_space *mapping,
3742  		pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)
3743  {
3744  	struct folio *folio;
3745  
3746  	folio = do_read_cache_folio(mapping, index, filler, file, gfp);
3747  	if (IS_ERR(folio))
3748  		return &folio->page;
3749  	return folio_file_page(folio, index);
3750  }
3751  
3752  struct page *read_cache_page(struct address_space *mapping,
3753  			pgoff_t index, filler_t *filler, struct file *file)
3754  {
3755  	return do_read_cache_page(mapping, index, filler, file,
3756  			mapping_gfp_mask(mapping));
3757  }
3758  EXPORT_SYMBOL(read_cache_page);
3759  
3760  /**
3761   * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3762   * @mapping:	the page's address_space
3763   * @index:	the page index
3764   * @gfp:	the page allocator flags to use if allocating
3765   *
3766   * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3767   * any new page allocations done using the specified allocation flags.
3768   *
3769   * If the page does not get brought uptodate, return -EIO.
3770   *
3771   * The function expects mapping->invalidate_lock to be already held.
3772   *
3773   * Return: up to date page on success, ERR_PTR() on failure.
3774   */
3775  struct page *read_cache_page_gfp(struct address_space *mapping,
3776  				pgoff_t index,
3777  				gfp_t gfp)
3778  {
3779  	return do_read_cache_page(mapping, index, NULL, NULL, gfp);
3780  }
3781  EXPORT_SYMBOL(read_cache_page_gfp);
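
/*
 * Hypothetical usage sketch (not part of this file): the _gfp variant is
 * typically reached for to constrain the allocation, e.g. to forbid fs
 * recursion during reclaim.  "myfs_read_page_nofs" is illustrative.
 */
static struct page *myfs_read_page_nofs(struct address_space *mapping,
					pgoff_t index)
{
	return read_cache_page_gfp(mapping, index,
			mapping_gfp_constraint(mapping, ~__GFP_FS));
}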
3782  
3783  /*
3784   * Warn about a page cache invalidation failure during a direct I/O write.
3785   */
3786  void dio_warn_stale_pagecache(struct file *filp)
3787  {
3788  	static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
3789  	char pathname[128];
3790  	char *path;
3791  
3792  	errseq_set(&filp->f_mapping->wb_err, -EIO);
3793  	if (__ratelimit(&_rs)) {
3794  		path = file_path(filp, pathname, sizeof(pathname));
3795  		if (IS_ERR(path))
3796  			path = "(unknown)";
3797  		pr_crit("Page cache invalidation failure on direct I/O.  Possible data corruption due to collision with buffered I/O!\n");
3798  		pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
3799  			current->comm);
3800  	}
3801  }
3802  
3803  ssize_t
3804  generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
3805  {
3806  	struct file	*file = iocb->ki_filp;
3807  	struct address_space *mapping = file->f_mapping;
3808  	struct inode	*inode = mapping->host;
3809  	loff_t		pos = iocb->ki_pos;
3810  	ssize_t		written;
3811  	size_t		write_len;
3812  	pgoff_t		end;
3813  
3814  	write_len = iov_iter_count(from);
3815  	end = (pos + write_len - 1) >> PAGE_SHIFT;
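	/*
	 * Illustrative numbers (not from the source): with 4K pages,
	 * pos == 8192 and write_len == 5000 give end == 13191 >> 12 == 3,
	 * so the invalidation calls below cover page indices 2..3.
	 */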
3816  
3817  	if (iocb->ki_flags & IOCB_NOWAIT) {
3818  		/* If there are pages to write back, return -EAGAIN */
3819  		if (filemap_range_has_page(file->f_mapping, pos,
3820  					   pos + write_len - 1))
3821  			return -EAGAIN;
3822  	} else {
3823  		written = filemap_write_and_wait_range(mapping, pos,
3824  							pos + write_len - 1);
3825  		if (written)
3826  			goto out;
3827  	}
3828  
3829  	/*
3830  	 * After a write we want buffered reads to be sure to go to disk to get
3831  	 * the new data.  We invalidate clean cached pages from the region we're
3832  	 * about to write.  We do this *before* the write so that we can return
3833  	 * without clobbering -EIOCBQUEUED from ->direct_IO().
3834  	 */
3835  	written = invalidate_inode_pages2_range(mapping,
3836  					pos >> PAGE_SHIFT, end);
3837  	/*
3838  	 * If a page cannot be invalidated, return 0 to fall back
3839  	 * to buffered write.
3840  	 */
3841  	if (written) {
3842  		if (written == -EBUSY)
3843  			return 0;
3844  		goto out;
3845  	}
3846  
3847  	written = mapping->a_ops->direct_IO(iocb, from);
3848  
3849  	/*
3850  	 * Finally, try again to invalidate clean pages which might have been
3851  	 * cached by non-direct readahead, or faulted in by get_user_pages()
3852  	 * if the source of the write was an mmap'ed region of the file
3853  	 * we're writing.  Either one is a pretty crazy thing to do,
3854  	 * so we don't support it 100%.  If this invalidation
3855  	 * fails, tough, the write still worked...
3856  	 *
3857  	 * Most of the time we do not need this since dio_complete() will do
3858  	 * the invalidation for us. However there are some file systems that
3859  	 * do not end up with dio_complete() being called, so let's not break
3860  	 * them by removing it completely.
3861  	 *
3862  	 * A notable example is blkdev_direct_IO().
3863  	 *
3864  	 * Skip invalidation for async writes or if mapping has no pages.
3865  	 */
3866  	if (written > 0 && mapping->nrpages &&
3867  	    invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end))
3868  		dio_warn_stale_pagecache(file);
3869  
3870  	if (written > 0) {
3871  		pos += written;
3872  		write_len -= written;
3873  		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
3874  			i_size_write(inode, pos);
3875  			mark_inode_dirty(inode);
3876  		}
3877  		iocb->ki_pos = pos;
3878  	}
3879  	if (written != -EIOCBQUEUED)
3880  		iov_iter_revert(from, write_len - iov_iter_count(from));
3881  out:
3882  	return written;
3883  }
3884  EXPORT_SYMBOL(generic_file_direct_write);
3885  
3886  ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
3887  {
3888  	struct file *file = iocb->ki_filp;
3889  	loff_t pos = iocb->ki_pos;
3890  	struct address_space *mapping = file->f_mapping;
3891  	const struct address_space_operations *a_ops = mapping->a_ops;
3892  	long status = 0;
3893  	ssize_t written = 0;
3894  
3895  	do {
3896  		struct page *page;
3897  		unsigned long offset;	/* Offset into pagecache page */
3898  		unsigned long bytes;	/* Bytes to write to page */
3899  		size_t copied;		/* Bytes copied from user */
3900  		void *fsdata = NULL;
3901  
3902  		offset = (pos & (PAGE_SIZE - 1));
3903  		bytes = min_t(unsigned long, PAGE_SIZE - offset,
3904  						iov_iter_count(i));
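		/*
		 * Illustrative numbers (not from the source): with 4K pages,
		 * pos == 5000 gives offset == 904, so at most
		 * 4096 - 904 == 3192 bytes go into this page; any remainder
		 * is handled by the next loop iteration.
		 */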
3905  
3906  again:
3907  		/*
3908  		 * Bring in the user page that we will copy from _first_.
3909  		 * Otherwise there's a nasty deadlock on copying from the
3910  		 * same page as we're writing to, without it being marked
3911  		 * up-to-date.
3912  		 */
3913  		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
3914  			status = -EFAULT;
3915  			break;
3916  		}
3917  
3918  		if (fatal_signal_pending(current)) {
3919  			status = -EINTR;
3920  			break;
3921  		}
3922  
3923  		status = a_ops->write_begin(file, mapping, pos, bytes,
3924  						&page, &fsdata);
3925  		if (unlikely(status < 0))
3926  			break;
3927  
3928  		if (mapping_writably_mapped(mapping))
3929  			flush_dcache_page(page);
3930  
3931  		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
3932  		flush_dcache_page(page);
3933  
3934  		status = a_ops->write_end(file, mapping, pos, bytes, copied,
3935  						page, fsdata);
3936  		if (unlikely(status != copied)) {
3937  			iov_iter_revert(i, copied - max(status, 0L));
3938  			if (unlikely(status < 0))
3939  				break;
3940  		}
3941  		cond_resched();
3942  
3943  		if (unlikely(status == 0)) {
3944  			/*
3945  			 * A short copy made ->write_end() reject the
3946  			 * thing entirely.  Might be memory poisoning
3947  			 * halfway through, might be a race with munmap,
3948  			 * might be severe memory pressure.
3949  			 */
3950  			if (copied)
3951  				bytes = copied;
3952  			goto again;
3953  		}
3954  		pos += status;
3955  		written += status;
3956  
3957  		balance_dirty_pages_ratelimited(mapping);
3958  	} while (iov_iter_count(i));
3959  
3960  	return written ? written : status;
3961  }
3962  EXPORT_SYMBOL(generic_perform_write);
3963  
3964  /**
3965   * __generic_file_write_iter - write data to a file
3966   * @iocb:	IO state structure (file, offset, etc.)
3967   * @from:	iov_iter with data to write
3968   *
3969   * This function does all the work needed for actually writing data to a
3970   * file. It does all basic checks, removes SUID from the file, updates
3971   * modification times and calls proper subroutines depending on whether we
3972   * do direct IO or a standard buffered write.
3973   *
3974   * It expects i_rwsem to be grabbed unless we work on a block device or similar
3975   * object which does not need locking at all.
3976   *
3977   * This function does *not* take care of syncing data in case of O_SYNC write.
3978   * A caller has to handle it. This is mainly due to the fact that we want to
3979   * avoid syncing under i_rwsem.
3980   *
3981   * Return:
3982   * * number of bytes written, even for truncated writes
3983   * * negative error code if no data has been written at all
3984   */
3985  ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3986  {
3987  	struct file *file = iocb->ki_filp;
3988  	struct address_space *mapping = file->f_mapping;
3989  	struct inode 	*inode = mapping->host;
3990  	ssize_t		written = 0;
3991  	ssize_t		err;
3992  	ssize_t		status;
3993  
3994  	/* We can write back this queue in page reclaim */
3995  	current->backing_dev_info = inode_to_bdi(inode);
3996  	err = file_remove_privs(file);
3997  	if (err)
3998  		goto out;
3999  
4000  	err = file_update_time(file);
4001  	if (err)
4002  		goto out;
4003  
4004  	if (iocb->ki_flags & IOCB_DIRECT) {
4005  		loff_t pos, endbyte;
4006  
4007  		written = generic_file_direct_write(iocb, from);
4008  		/*
4009  		 * If the write stopped short of completing, fall back to
4010  		 * buffered writes.  Some filesystems do this for writes to
4011  		 * holes, for example.  For DAX files, a buffered write will
4012  		 * not succeed (even if it did, DAX does not handle dirty
4013  		 * page-cache pages correctly).
4014  		 */
4015  		if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
4016  			goto out;
4017  
4018  		pos = iocb->ki_pos;
4019  		status = generic_perform_write(iocb, from);
4020  		/*
4021  		 * If generic_perform_write() returned a synchronous error
4022  		 * then we want to return the number of bytes which were
4023  		 * direct-written, or the error code if that was zero.  Note
4024  		 * that this differs from normal direct-io semantics, which
4025  		 * will return -EFOO even if some bytes were written.
4026  		 */
4027  		if (unlikely(status < 0)) {
4028  			err = status;
4029  			goto out;
4030  		}
4031  		/*
4032  		 * We need to ensure that the page cache pages are written to
4033  		 * disk and invalidated to preserve the expected O_DIRECT
4034  		 * semantics.
4035  		 */
4036  		endbyte = pos + status - 1;
4037  		err = filemap_write_and_wait_range(mapping, pos, endbyte);
4038  		if (err == 0) {
4039  			iocb->ki_pos = endbyte + 1;
4040  			written += status;
4041  			invalidate_mapping_pages(mapping,
4042  						 pos >> PAGE_SHIFT,
4043  						 endbyte >> PAGE_SHIFT);
4044  		} else {
4045  			/*
4046  			 * We don't know how much we wrote, so just return
4047  			 * the number of bytes which were direct-written
4048  			 */
4049  		}
4050  	} else {
4051  		written = generic_perform_write(iocb, from);
4052  		if (likely(written > 0))
4053  			iocb->ki_pos += written;
4054  	}
4055  out:
4056  	current->backing_dev_info = NULL;
4057  	return written ? written : err;
4058  }
4059  EXPORT_SYMBOL(__generic_file_write_iter);
4060  
4061  /**
4062   * generic_file_write_iter - write data to a file
4063   * @iocb:	IO state structure
4064   * @from:	iov_iter with data to write
4065   *
4066   * This is a wrapper around __generic_file_write_iter() to be used by most
4067   * filesystems. It takes care of syncing the file for O_SYNC writes
4068   * and acquires i_rwsem as needed.
4069   * Return:
4070   * * negative error code if no data has been written at all or
4071   *   vfs_fsync_range() failed for a synchronous write
4072   * * number of bytes written, even for truncated writes
4073   */
4074  ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4075  {
4076  	struct file *file = iocb->ki_filp;
4077  	struct inode *inode = file->f_mapping->host;
4078  	ssize_t ret;
4079  
4080  	inode_lock(inode);
4081  	ret = generic_write_checks(iocb, from);
4082  	if (ret > 0)
4083  		ret = __generic_file_write_iter(iocb, from);
4084  	inode_unlock(inode);
4085  
4086  	if (ret > 0)
4087  		ret = generic_write_sync(iocb, ret);
4088  	return ret;
4089  }
4090  EXPORT_SYMBOL(generic_file_write_iter);
4091  
4092  /**
4093   * filemap_release_folio() - Release fs-specific metadata on a folio.
4094   * @folio: The folio which the kernel is trying to free.
4095   * @gfp: Memory allocation flags (and I/O mode).
4096   *
4097   * The address_space is trying to release any data attached to a folio
4098   * (presumably at folio->private).
4099   *
4100   * This will also be called if the private_2 flag is set on a folio,
4101   * indicating that the folio has other metadata associated with it.
4102   *
4103   * The @gfp argument specifies whether I/O may be performed to release
4104   * this folio (__GFP_IO), and whether the call may block
4105   * (__GFP_RECLAIM & __GFP_FS).
4106   *
4107   * Return: %true if the release was successful, otherwise %false.
4108   */
4109  bool filemap_release_folio(struct folio *folio, gfp_t gfp)
4110  {
4111  	struct address_space * const mapping = folio->mapping;
4112  
4113  	BUG_ON(!folio_test_locked(folio));
4114  	if (folio_test_writeback(folio))
4115  		return false;
4116  
4117  	if (mapping && mapping->a_ops->release_folio)
4118  		return mapping->a_ops->release_folio(folio, gfp);
4119  	return try_to_free_buffers(folio);
4120  }
4121  EXPORT_SYMBOL(filemap_release_folio);
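
/*
 * Hypothetical sketch (not part of this file): a block-based filesystem
 * with no private folio metadata can route ->release_folio straight to
 * the buffer layer, matching the fallback taken above when no
 * ->release_folio is provided.  "myfs" is illustrative.
 */
static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
{
	return try_to_free_buffers(folio);
}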
4122