1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2199a31c6SChristoph Hellwig #ifndef LINUX_IOMAP_H
3199a31c6SChristoph Hellwig #define LINUX_IOMAP_H 1
4199a31c6SChristoph Hellwig
59dc55f13SChristoph Hellwig #include <linux/atomic.h>
69dc55f13SChristoph Hellwig #include <linux/bitmap.h>
7598ecfbaSChristoph Hellwig #include <linux/blk_types.h>
89dc55f13SChristoph Hellwig #include <linux/mm.h>
9199a31c6SChristoph Hellwig #include <linux/types.h>
105780a02fSSouptick Joarder #include <linux/mm_types.h>
11db074436SDarrick J. Wong #include <linux/blkdev.h>
12199a31c6SChristoph Hellwig
1389eb1906SChristoph Hellwig struct address_space;
148be9f564SChristoph Hellwig struct fiemap_extent_info;
15ae259a9cSChristoph Hellwig struct inode;
169060bc4dSAndreas Gruenbacher struct iomap_iter;
17c3d4ed1aSChristoph Hellwig struct iomap_dio;
18598ecfbaSChristoph Hellwig struct iomap_writepage_ctx;
19ae259a9cSChristoph Hellwig struct iov_iter;
20ae259a9cSChristoph Hellwig struct kiocb;
2163899c6fSChristoph Hellwig struct page;
22ae259a9cSChristoph Hellwig struct vm_area_struct;
23ae259a9cSChristoph Hellwig struct vm_fault;
24ae259a9cSChristoph Hellwig
25ae259a9cSChristoph Hellwig /*
26ae259a9cSChristoph Hellwig * Types of block ranges for iomap mappings:
27ae259a9cSChristoph Hellwig */
28eb81cf9dSChristoph Hellwig #define IOMAP_HOLE 0 /* no blocks allocated, need allocation */
29eb81cf9dSChristoph Hellwig #define IOMAP_DELALLOC 1 /* delayed allocation blocks */
30eb81cf9dSChristoph Hellwig #define IOMAP_MAPPED 2 /* blocks allocated at @addr */
31eb81cf9dSChristoph Hellwig #define IOMAP_UNWRITTEN 3 /* blocks allocated at @addr in unwritten state */
32eb81cf9dSChristoph Hellwig #define IOMAP_INLINE 4 /* data inline in the inode */
33199a31c6SChristoph Hellwig
34ae259a9cSChristoph Hellwig /*
3565a60e86SChristoph Hellwig * Flags reported by the file system from iomap_begin:
3665a60e86SChristoph Hellwig *
3765a60e86SChristoph Hellwig * IOMAP_F_NEW indicates that the blocks have been newly allocated and need
3865a60e86SChristoph Hellwig * zeroing for areas that no data is copied to.
39a3841f94SLinus Torvalds *
40caa51d26SJan Kara * IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access
41caa51d26SJan Kara * written data and requires fdatasync to commit them to persistent storage.
427684e2c4SDave Chinner * This needs to take into account metadata changes that *may* be made at IO
437684e2c4SDave Chinner * completion, such as file size updates from direct IO.
4465a60e86SChristoph Hellwig *
 * IOMAP_F_SHARED indicates that the blocks are shared, and will need to be
 * unshared as part of a write.
4765a60e86SChristoph Hellwig *
4865a60e86SChristoph Hellwig * IOMAP_F_MERGED indicates that the iomap contains the merge of multiple block
4965a60e86SChristoph Hellwig * mappings.
5065a60e86SChristoph Hellwig *
5165a60e86SChristoph Hellwig * IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of
5265a60e86SChristoph Hellwig * buffer heads for this mapping.
53d7b64041SDave Chinner *
54d7b64041SDave Chinner * IOMAP_F_XATTR indicates that the iomap is for an extended attribute extent
55d7b64041SDave Chinner * rather than a file data extent.
5617de0a9fSChristoph Hellwig */
57d7b64041SDave Chinner #define IOMAP_F_NEW (1U << 0)
58d7b64041SDave Chinner #define IOMAP_F_DIRTY (1U << 1)
59d7b64041SDave Chinner #define IOMAP_F_SHARED (1U << 2)
60d7b64041SDave Chinner #define IOMAP_F_MERGED (1U << 3)
61*925c86a1SChristoph Hellwig #ifdef CONFIG_BUFFER_HEAD
62d7b64041SDave Chinner #define IOMAP_F_BUFFER_HEAD (1U << 4)
63*925c86a1SChristoph Hellwig #else
64*925c86a1SChristoph Hellwig #define IOMAP_F_BUFFER_HEAD 0
65*925c86a1SChristoph Hellwig #endif /* CONFIG_BUFFER_HEAD */
668e81aa16SChristoph Hellwig #define IOMAP_F_XATTR (1U << 5)
67d33fd776SChristoph Hellwig
68d33fd776SChristoph Hellwig /*
6965a60e86SChristoph Hellwig * Flags set by the core iomap code during operations:
7065a60e86SChristoph Hellwig *
7165a60e86SChristoph Hellwig * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
7265a60e86SChristoph Hellwig * has changed as the result of this write operation.
73d7b64041SDave Chinner *
74d7b64041SDave Chinner * IOMAP_F_STALE indicates that the iomap is not valid any longer and the file
75d7b64041SDave Chinner * range it covers needs to be remapped by the high level before the operation
76d7b64041SDave Chinner * can proceed.
77d33fd776SChristoph Hellwig */
78d7b64041SDave Chinner #define IOMAP_F_SIZE_CHANGED (1U << 8)
79d7b64041SDave Chinner #define IOMAP_F_STALE (1U << 9)
8017de0a9fSChristoph Hellwig
8117de0a9fSChristoph Hellwig /*
827ee66c03SChristoph Hellwig * Flags from 0x1000 up are for file system specific usage:
837ee66c03SChristoph Hellwig */
84d7b64041SDave Chinner #define IOMAP_F_PRIVATE (1U << 12)
857ee66c03SChristoph Hellwig
867ee66c03SChristoph Hellwig
877ee66c03SChristoph Hellwig /*
8819fe5f64SAndreas Gruenbacher * Magic value for addr:
89ae259a9cSChristoph Hellwig */
9019fe5f64SAndreas Gruenbacher #define IOMAP_NULL_ADDR -1ULL /* addr is not valid */
91199a31c6SChristoph Hellwig
92471859f5SAndreas Gruenbacher struct iomap_folio_ops;
93df0db3ecSAndreas Gruenbacher
struct iomap {
	u64			addr;	/* disk offset of mapping, bytes; IOMAP_NULL_ADDR if not valid */
	loff_t			offset;	/* file offset of mapping, bytes */
	u64			length;	/* length of mapping, bytes */
	u16			type;	/* type of mapping (IOMAP_HOLE, IOMAP_MAPPED, ...) */
	u16			flags;	/* flags for mapping (IOMAP_F_*) */
	struct block_device	*bdev;	/* block device for I/O */
	struct dax_device	*dax_dev; /* dax_dev for dax operations */
	void			*inline_data;	/* in-page data for IOMAP_INLINE mappings, see iomap_inline_data() */
	void			*private; /* filesystem private */
	const struct iomap_folio_ops *folio_ops; /* buffered-write folio hooks, see struct iomap_folio_ops */
	u64			validity_cookie; /* used with .iomap_valid() */
};
10763899c6fSChristoph Hellwig
iomap_sector(const struct iomap * iomap,loff_t pos)10866b8165eSChristoph Hellwig static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
109db074436SDarrick J. Wong {
110db074436SDarrick J. Wong return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
111db074436SDarrick J. Wong }
112db074436SDarrick J. Wong
11363899c6fSChristoph Hellwig /*
11469f4a26cSGao Xiang * Returns the inline data pointer for logical offset @pos.
11569f4a26cSGao Xiang */
/*
 * Returns the inline data pointer for logical offset @pos.
 */
static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos)
{
	loff_t offset_in_map = pos - iomap->offset;

	return iomap->inline_data + offset_in_map;
}
12069f4a26cSGao Xiang
12169f4a26cSGao Xiang /*
12269f4a26cSGao Xiang * Check if the mapping's length is within the valid range for inline data.
12369f4a26cSGao Xiang * This is used to guard against accessing data beyond the page inline_data
12469f4a26cSGao Xiang * points at.
12569f4a26cSGao Xiang */
iomap_inline_data_valid(const struct iomap * iomap)126e3c4ffb0SChristoph Hellwig static inline bool iomap_inline_data_valid(const struct iomap *iomap)
12769f4a26cSGao Xiang {
12869f4a26cSGao Xiang return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data);
12969f4a26cSGao Xiang }
13069f4a26cSGao Xiang
13169f4a26cSGao Xiang /*
132471859f5SAndreas Gruenbacher * When a filesystem sets folio_ops in an iomap mapping it returns, get_folio
13340405dddSAndreas Gruenbacher * and put_folio will be called for each folio written to. This only applies
13440405dddSAndreas Gruenbacher * to buffered writes as unbuffered writes will not typically have folios
135df0db3ecSAndreas Gruenbacher * associated with them.
136df0db3ecSAndreas Gruenbacher *
137c82abc23SAndreas Gruenbacher * When get_folio succeeds, put_folio will always be called to do any
1389060bc4dSAndreas Gruenbacher * cleanup work necessary. put_folio is responsible for unlocking and putting
1399060bc4dSAndreas Gruenbacher * @folio.
14063899c6fSChristoph Hellwig */
struct iomap_folio_ops {
	/*
	 * Obtain the folio that a buffered write at @pos for @len bytes will
	 * modify.  See the comment above this struct for how it pairs with
	 * ->put_folio.
	 */
	struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
			unsigned len);

	/*
	 * Called after @copied bytes were written to @folio.  Responsible for
	 * unlocking and putting @folio (see the comment above this struct).
	 */
	void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
			struct folio *folio);

	/*
	 * Check that the cached iomap still maps correctly to the filesystem's
	 * internal extent map. FS internal extent maps can change while iomap
	 * is iterating a cached iomap, so this hook allows iomap to detect that
	 * the iomap needs to be refreshed during a long running write
	 * operation.
	 *
	 * The filesystem can store internal state (e.g. a sequence number) in
	 * iomap->validity_cookie when the iomap is first mapped to be able to
	 * detect changes between mapping time and whenever .iomap_valid() is
	 * called.
	 *
	 * This is called with the folio over the specified file position held
	 * locked by the iomap code.
	 */
	bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap);
};
164199a31c6SChristoph Hellwig
165ae259a9cSChristoph Hellwig /*
166ae259a9cSChristoph Hellwig * Flags for iomap_begin / iomap_end. No flag implies a read.
167ae259a9cSChristoph Hellwig */
168d33fd776SChristoph Hellwig #define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */
169d33fd776SChristoph Hellwig #define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */
170d33fd776SChristoph Hellwig #define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */
1719484ab1bSJan Kara #define IOMAP_FAULT (1 << 3) /* mapping for page fault */
172ff6a9292SChristoph Hellwig #define IOMAP_DIRECT (1 << 4) /* direct I/O */
1739ecac0efSChristoph Hellwig #define IOMAP_NOWAIT (1 << 5) /* do not block */
174213f6271SChristoph Hellwig #define IOMAP_OVERWRITE_ONLY (1 << 6) /* only pure overwrites allowed */
175b74b1293SChristoph Hellwig #define IOMAP_UNSHARE (1 << 7) /* unshare_file_range */
176952da063SChristoph Hellwig #ifdef CONFIG_FS_DAX
177952da063SChristoph Hellwig #define IOMAP_DAX (1 << 8) /* DAX mapping */
178952da063SChristoph Hellwig #else
179952da063SChristoph Hellwig #define IOMAP_DAX 0
180952da063SChristoph Hellwig #endif /* CONFIG_FS_DAX */
181ae259a9cSChristoph Hellwig
struct iomap_ops {
	/*
	 * Return the existing mapping at pos, or reserve space starting at
	 * pos for up to length, as long as we can do it as a single mapping.
	 * The actual length is returned in iomap->length.
	 */
	int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
			unsigned flags, struct iomap *iomap,
			struct iomap *srcmap);

	/*
	 * Commit and/or unreserve space previously allocated using
	 * iomap_begin. Written indicates the length of the successful write
	 * operation which needs to be committed, while the rest needs to be
	 * unreserved. Written might be zero if no data was written.
	 */
	int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length,
			ssize_t written, unsigned flags, struct iomap *iomap);
};
201ae259a9cSChristoph Hellwig
/**
 * struct iomap_iter - Iterate through a range of a file
 * @inode: Set at the start of the iteration and should not change.
 * @pos: The current file position we are operating on.  It is updated by
 *	calls to iomap_iter().  Treat as read-only in the body.
 * @len: The remaining length of the file segment we're operating on.
 *	It is updated at the same time as @pos.
 * @processed: The number of bytes processed by the body in the most recent
 *	iteration, or a negative errno. 0 causes the iteration to stop.
 * @flags: Zero or more of the iomap_begin flags above.
 * @iomap: Map describing the I/O iteration
 * @srcmap: Source map for COW operations
 * @private: Caller-supplied private data, e.g. passed in through
 *	iomap_dio_rw()'s @private argument.
 */
struct iomap_iter {
	struct inode *inode;
	loff_t pos;
	u64 len;
	s64 processed;
	unsigned flags;
	struct iomap iomap;
	struct iomap srcmap;
	void *private;
};
225f4b896c2SChristoph Hellwig
226f4b896c2SChristoph Hellwig int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);
227f4b896c2SChristoph Hellwig
228f4b896c2SChristoph Hellwig /**
229f4b896c2SChristoph Hellwig * iomap_length - length of the current iomap iteration
230f4b896c2SChristoph Hellwig * @iter: iteration structure
231f4b896c2SChristoph Hellwig *
232f4b896c2SChristoph Hellwig * Returns the length that the operation applies to for the current iteration.
233f4b896c2SChristoph Hellwig */
iomap_length(const struct iomap_iter * iter)234f4b896c2SChristoph Hellwig static inline u64 iomap_length(const struct iomap_iter *iter)
235f4b896c2SChristoph Hellwig {
236f4b896c2SChristoph Hellwig u64 end = iter->iomap.offset + iter->iomap.length;
237f4b896c2SChristoph Hellwig
238f4b896c2SChristoph Hellwig if (iter->srcmap.type != IOMAP_HOLE)
239f4b896c2SChristoph Hellwig end = min(end, iter->srcmap.offset + iter->srcmap.length);
240f4b896c2SChristoph Hellwig return min(iter->len, end - iter->pos);
241f4b896c2SChristoph Hellwig }
242f4b896c2SChristoph Hellwig
243f4b896c2SChristoph Hellwig /**
244f4b896c2SChristoph Hellwig * iomap_iter_srcmap - return the source map for the current iomap iteration
245f4b896c2SChristoph Hellwig * @i: iteration structure
246f4b896c2SChristoph Hellwig *
247f4b896c2SChristoph Hellwig * Write operations on file systems with reflink support might require a
248f4b896c2SChristoph Hellwig * source and a destination map. This function retourns the source map
249f4b896c2SChristoph Hellwig * for a given operation, which may or may no be identical to the destination
250f4b896c2SChristoph Hellwig * map in &i->iomap.
251f4b896c2SChristoph Hellwig */
iomap_iter_srcmap(const struct iomap_iter * i)252fad0a1abSChristoph Hellwig static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
253f4b896c2SChristoph Hellwig {
254f4b896c2SChristoph Hellwig if (i->srcmap.type != IOMAP_HOLE)
255f4b896c2SChristoph Hellwig return &i->srcmap;
256f4b896c2SChristoph Hellwig return &i->iomap;
257f4b896c2SChristoph Hellwig }
258f4b896c2SChristoph Hellwig
259ae259a9cSChristoph Hellwig ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
2608ff6daa1SChristoph Hellwig const struct iomap_ops *ops);
2619c7babf9SDave Chinner int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
2629c7babf9SDave Chinner struct iomap *iomap, loff_t pos, loff_t length, ssize_t written,
2639c7babf9SDave Chinner int (*punch)(struct inode *inode, loff_t pos, loff_t length));
2649c7babf9SDave Chinner
2657479c505SMatthew Wilcox (Oracle) int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
2669d24a13aSMatthew Wilcox (Oracle) void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
2672e7e80f7SMatthew Wilcox (Oracle) bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
26898321b51SAndreas Gruenbacher struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
2698597447dSMatthew Wilcox (Oracle) bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
2708306a5f5SMatthew Wilcox (Oracle) void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
2713590c4d8SChristoph Hellwig bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
2728ff6daa1SChristoph Hellwig int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
273ae259a9cSChristoph Hellwig const struct iomap_ops *ops);
2748ff6daa1SChristoph Hellwig int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
275ae259a9cSChristoph Hellwig bool *did_zero, const struct iomap_ops *ops);
2768ff6daa1SChristoph Hellwig int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
2775780a02fSSouptick Joarder const struct iomap_ops *ops);
2785780a02fSSouptick Joarder vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
2798be9f564SChristoph Hellwig const struct iomap_ops *ops);
28027328818SChristoph Hellwig int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2810ed3b0d4SAndreas Gruenbacher u64 start, u64 len, const struct iomap_ops *ops);
2820ed3b0d4SAndreas Gruenbacher loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
2830ed3b0d4SAndreas Gruenbacher const struct iomap_ops *ops);
2840ed3b0d4SAndreas Gruenbacher loff_t iomap_seek_data(struct inode *inode, loff_t offset,
28589eb1906SChristoph Hellwig const struct iomap_ops *ops);
28689eb1906SChristoph Hellwig sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
287ae259a9cSChristoph Hellwig const struct iomap_ops *ops);
288ff6a9292SChristoph Hellwig
289598ecfbaSChristoph Hellwig /*
290598ecfbaSChristoph Hellwig * Structure for writeback I/O completions.
291598ecfbaSChristoph Hellwig */
/*
 * Structure for writeback I/O completions.
 */
struct iomap_ioend {
	struct list_head	io_list;	/* next ioend in chain */
	u16			io_type;	/* IOMAP_* mapping type (see struct iomap.type) */
	u16			io_flags;	/* IOMAP_F_* */
	u32			io_folios;	/* folios added to ioend */
	struct inode		*io_inode;	/* file being written to */
	size_t			io_size;	/* size of the extent */
	loff_t			io_offset;	/* offset in the file */
	sector_t		io_sector;	/* start sector of ioend */
	struct bio		*io_bio;	/* bio being built */
	struct bio		io_inline_bio;	/* MUST BE LAST! */
};
304598ecfbaSChristoph Hellwig
struct iomap_writeback_ops {
	/*
	 * Required, maps the blocks so that writeback can be performed on
	 * the range starting at offset.
	 */
	int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
				loff_t offset);

	/*
	 * Optional, allows the file systems to perform actions just before
	 * submitting the bio and/or override the bio end_io handler for complex
	 * operations like copy on write extent manipulation or unwritten extent
	 * conversions.
	 */
	int (*prepare_ioend)(struct iomap_ioend *ioend, int status);

	/*
	 * Optional, allows the file system to discard state on a folio where
	 * we failed to submit any I/O.
	 */
	void (*discard_folio)(struct folio *folio, loff_t pos);
};
327598ecfbaSChristoph Hellwig
/*
 * Per-writeback context passed through iomap_writepages(): holds the
 * current mapping, the ioend being assembled, and the filesystem's
 * writeback callbacks.
 */
struct iomap_writepage_ctx {
	struct iomap		iomap;	/* current mapping for writeback */
	struct iomap_ioend	*ioend;	/* ioend being assembled, if any */
	const struct iomap_writeback_ops *ops;	/* filesystem callbacks */
};
333598ecfbaSChristoph Hellwig
334598ecfbaSChristoph Hellwig void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
3356e552494SBrian Foster void iomap_ioend_try_merge(struct iomap_ioend *ioend,
336598ecfbaSChristoph Hellwig struct list_head *more_ioends);
337598ecfbaSChristoph Hellwig void iomap_sort_ioends(struct list_head *ioend_list);
338598ecfbaSChristoph Hellwig int iomap_writepages(struct address_space *mapping,
339598ecfbaSChristoph Hellwig struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
340598ecfbaSChristoph Hellwig const struct iomap_writeback_ops *ops);
341598ecfbaSChristoph Hellwig
342ff6a9292SChristoph Hellwig /*
343ff6a9292SChristoph Hellwig * Flags for direct I/O ->end_io:
344ff6a9292SChristoph Hellwig */
345ff6a9292SChristoph Hellwig #define IOMAP_DIO_UNWRITTEN (1 << 0) /* covers unwritten extent(s) */
346838c4f3dSChristoph Hellwig #define IOMAP_DIO_COW (1 << 1) /* covers COW extent(s) */
347838c4f3dSChristoph Hellwig
struct iomap_dio_ops {
	/*
	 * Completion callback for direct I/O; @flags is a mask of the
	 * IOMAP_DIO_* values defined above.
	 */
	int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
			unsigned flags);
	void (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
			loff_t file_offset);

	/*
	 * Filesystems wishing to attach private information to a direct io bio
	 * must provide a ->submit_io method that attaches the additional
	 * information to the bio and changes the ->bi_end_io callback to a
	 * custom function. This function should, at a minimum, perform any
	 * relevant post-processing of the bio and end with a call to
	 * iomap_dio_bio_end_io.
	 */
	struct bio_set *bio_set;
};
3642f632965SChristoph Hellwig
3652f632965SChristoph Hellwig /*
3662f632965SChristoph Hellwig * Wait for the I/O to complete in iomap_dio_rw even if the kiocb is not
3672f632965SChristoph Hellwig * synchronous.
3682f632965SChristoph Hellwig */
3692f632965SChristoph Hellwig #define IOMAP_DIO_FORCE_WAIT (1 << 0)
370213f6271SChristoph Hellwig
371213f6271SChristoph Hellwig /*
372213f6271SChristoph Hellwig * Do not allocate blocks or zero partial blocks, but instead fall back to
373213f6271SChristoph Hellwig * the caller by returning -EAGAIN. Used to optimize direct I/O writes that
374213f6271SChristoph Hellwig * are not aligned to the file system block size.
375213f6271SChristoph Hellwig */
376213f6271SChristoph Hellwig #define IOMAP_DIO_OVERWRITE_ONLY (1 << 1)
37797308f8bSAndreas Gruenbacher
37897308f8bSAndreas Gruenbacher /*
37997308f8bSAndreas Gruenbacher * When a page fault occurs, return a partial synchronous result and allow
38097308f8bSAndreas Gruenbacher * the caller to retry the rest of the operation after dealing with the page
38197308f8bSAndreas Gruenbacher * fault.
38297308f8bSAndreas Gruenbacher */
38397308f8bSAndreas Gruenbacher #define IOMAP_DIO_PARTIAL (1 << 2)
384ff6a9292SChristoph Hellwig
38513ef9544SJan Kara ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
386786f847fSChristoph Hellwig const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
387c3d4ed1aSChristoph Hellwig unsigned int dio_flags, void *private, size_t done_before);
388c3d4ed1aSChristoph Hellwig struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
389786f847fSChristoph Hellwig const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
390c3d4ed1aSChristoph Hellwig unsigned int dio_flags, void *private, size_t done_before);
391908c5490SChristoph Hellwig ssize_t iomap_dio_complete(struct iomap_dio *dio);
392ff6a9292SChristoph Hellwig void iomap_dio_bio_end_io(struct bio *bio);
39367482129SDarrick J. Wong
39467482129SDarrick J. Wong #ifdef CONFIG_SWAP
39567482129SDarrick J. Wong struct file;
39667482129SDarrick J. Wong struct swap_info_struct;
39767482129SDarrick J. Wong
39867482129SDarrick J. Wong int iomap_swapfile_activate(struct swap_info_struct *sis,
39967482129SDarrick J. Wong struct file *swap_file, sector_t *pagespan,
40067482129SDarrick J. Wong const struct iomap_ops *ops);
40167482129SDarrick J. Wong #else
40267482129SDarrick J. Wong # define iomap_swapfile_activate(sis, swapfile, pagespan, ops) (-EIO)
40367482129SDarrick J. Wong #endif /* CONFIG_SWAP */
404199a31c6SChristoph Hellwig
405 #endif /* LINUX_IOMAP_H */
406