/* xref: /openbmc/linux/fs/ntfs/aops.h (revision e75d5ae8) */
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /**
3  * aops.h - Defines for NTFS kernel address space operations and page cache
4  *	    handling.  Part of the Linux-NTFS project.
5  *
6  * Copyright (c) 2001-2004 Anton Altaparmakov
7  * Copyright (c) 2002 Richard Russon
8  */
9 
10 #ifndef _LINUX_NTFS_AOPS_H
11 #define _LINUX_NTFS_AOPS_H
12 
13 #include <linux/mm.h>
14 #include <linux/highmem.h>
15 #include <linux/pagemap.h>
16 #include <linux/fs.h>
17 
18 #include "inode.h"
19 
/**
 * ntfs_unmap_page - undo the effects of a prior ntfs_map_page()
 * @page:	the page to release
 *
 * Remove the kernel virtual mapping established by ntfs_map_page() and drop
 * the page cache reference that was pinning the page, in that order: the
 * mapping must be torn down before the pin is released.
 */
static inline void ntfs_unmap_page(struct page *page)
{
	kunmap(page);
	put_page(page);
}
31 
32 /**
33  * ntfs_map_page - map a page into accessible memory, reading it if necessary
34  * @mapping:	address space for which to obtain the page
35  * @index:	index into the page cache for @mapping of the page to map
36  *
37  * Read a page from the page cache of the address space @mapping at position
38  * @index, where @index is in units of PAGE_SIZE, and not in bytes.
39  *
40  * If the page is not in memory it is loaded from disk first using the
41  * read_folio method defined in the address space operations of @mapping
42  * and the page is added to the page cache of @mapping in the process.
43  *
44  * If the page belongs to an mst protected attribute and it is marked as such
45  * in its ntfs inode (NInoMstProtected()) the mst fixups are applied but no
46  * error checking is performed.  This means the caller has to verify whether
47  * the ntfs record(s) contained in the page are valid or not using one of the
48  * ntfs_is_XXXX_record{,p}() macros, where XXXX is the record type you are
49  * expecting to see.  (For details of the macros, see fs/ntfs/layout.h.)
50  *
51  * If the page is in high memory it is mapped into memory directly addressible
52  * by the kernel.
53  *
54  * Finally the page count is incremented, thus pinning the page into place.
55  *
56  * The above means that page_address(page) can be used on all pages obtained
57  * with ntfs_map_page() to get the kernel virtual address of the page.
58  *
59  * When finished with the page, the caller has to call ntfs_unmap_page() to
60  * unpin, unmap and release the page.
61  *
62  * Note this does not grant exclusive access. If such is desired, the caller
63  * must provide it independently of the ntfs_{un}map_page() calls by using
64  * a {rw_}semaphore or other means of serialization. A spin lock cannot be
65  * used as ntfs_map_page() can block.
66  *
67  * The unlocked and uptodate page is returned on success or an encoded error
68  * on failure. Caller has to test for error using the IS_ERR() macro on the
69  * return value. If that evaluates to 'true', the negative error code can be
70  * obtained using PTR_ERR() on the return value of ntfs_map_page().
71  */
72 static inline struct page *ntfs_map_page(struct address_space *mapping,
73 		unsigned long index)
74 {
75 	struct page *page = read_mapping_page(mapping, index, NULL);
76 
77 	if (!IS_ERR(page)) {
78 		kmap(page);
79 		if (!PageError(page))
80 			return page;
81 		ntfs_unmap_page(page);
82 		return ERR_PTR(-EIO);
83 	}
84 	return page;
85 }
86 
87 #ifdef NTFS_RW
88 
89 extern void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs);
90 
91 #endif /* NTFS_RW */
92 
93 #endif /* _LINUX_NTFS_AOPS_H */
94