// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;
static struct kmem_cache *io_end_vec_cachep;

int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;

	io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
	if (io_end_vec_cachep == NULL) {
		kmem_cache_destroy(io_end_cachep);
		return -ENOMEM;
	}
	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_end_vec_cachep);
}

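/*
 * Allocate a new io_end_vec and append it to the io_end's list of vectors.
 * The vectors are consumed by ext4_convert_unwritten_io_end_vec() when the
 * io_end is processed.
 */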
struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec;

	io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
	if (!io_end_vec)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&io_end_vec->list);
	list_add_tail(&io_end_vec->list, &io_end->list_vec);
	return io_end_vec;
}

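/* Free all io_end_vecs attached to the io_end. */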
static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec, *tmp;

	if (list_empty(&io_end->list_vec))
		return;
	list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
		list_del(&io_end_vec->list);
		kmem_cache_free(io_end_vec_cachep, io_end_vec);
	}
}

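/* Return the most recently added io_end_vec. The list must not be empty. */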
struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
{
	BUG_ON(list_empty(&io_end->list_vec));
	return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
}

/*
 * Print a buffer I/O error compatible with the one from fs/buffer.c.  This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message.  We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
			   bh->b_bdev,
			   (unsigned long long)bh->b_blocknr);
}

static void ext4_finish_bio(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;
		struct folio *io_folio = NULL;
		struct buffer_head *bh, *head;
		size_t bio_start = fi.offset;
		size_t bio_end = bio_start + fi.length;
		unsigned under_io = 0;
		unsigned long flags;

		if (fscrypt_is_bounce_folio(folio)) {
			io_folio = folio;
			folio = fscrypt_pagecache_folio(folio);
		}

		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);

			mapping_set_error(folio->mapping, err);
		}
		bh = head = folio_buffers(folio);
		/*
		 * We check all buffers in the folio under b_uptodate_lock
		 * to avoid races with other end io clearing async_write flags
		 */
		spin_lock_irqsave(&head->b_uptodate_lock, flags);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_status) {
				set_buffer_write_io_error(bh);
				buffer_io_error(bh);
			}
		} while ((bh = bh->b_this_page) != head);
		spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
		if (!under_io) {
			fscrypt_free_bounce_page(&io_folio->page);
			folio_end_writeback(folio);
		}
	}
}

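/*
 * Finish and free all bios chained off the io_end via bi_private, release
 * any io_end_vecs and free the io_end itself. Called once the last
 * reference to the io_end has been dropped.
 */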
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	ext4_free_io_end_vec(io_end);
	kmem_cache_free(io_end_cachep, io_end);
}

/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching the same part of the extent tree
 * by the fact that the truncate code waits for all DIO to finish (thus
 * exclusion from direct IO is achieved) and also waits for PageWriteback
 * bits. Thus we cannot get to ext4_ext_truncate() before all IOs overlapping
 * that range are completed (happens from ext4_free_ioend()).
 */
static int ext4_end_io_end(ext4_io_end_t *io_end)
{
	struct inode *inode = io_end->inode;
	handle_t *handle = io_end->handle;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io_end 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io_end, inode->i_ino, io_end->list.next, io_end->list.prev);

	io_end->handle = NULL;	/* Following call will use up the handle */
	ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
	if (ret < 0 && !ext4_forced_shutdown(inode->i_sb)) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss! "
			 "(inode %lu, error %d)", inode->i_ino, ret);
	}
	ext4_clear_io_unwritten_flag(io_end);
	ext4_release_io_end(io_end);
	return ret;
}

static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef	EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io_end, *io_end0, *io_end1;

	if (list_empty(head))
		return;

	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
	list_for_each_entry(io_end, head, list) {
		cur = &io_end->list;
		before = cur->prev;
		io_end0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io_end1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			   io_end, inode->i_ino, io_end0, io_end1);
	}
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
	struct workqueue_struct *wq;
	unsigned long flags;

	/* Only reserved conversions from writeback should enter here */
	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
	WARN_ON(!io_end->handle && sbi->s_journal);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	wq = sbi->rsv_conversion_wq;
	if (list_empty(&ei->i_rsv_conversion_list))
		queue_work(wq, &ei->i_rsv_conversion_work);
	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

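/*
 * Take all io_ends off the given list under i_completed_io_lock and process
 * them one by one with ext4_end_io_end(). Returns the first error
 * encountered, if any.
 */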
static int ext4_do_flush_completed_IO(struct inode *inode,
				      struct list_head *head)
{
	ext4_io_end_t *io_end;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode, head);
	list_replace_init(head, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io_end = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
		list_del_init(&io_end->list);

		err = ext4_end_io_end(io_end);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * Work on completed IO, converting unwritten extents to written extents.
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_rsv_conversion_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

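/* Allocate a zeroed io_end for @inode with a single reference held. */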
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);

	if (io_end) {
		io_end->inode = inode;
		INIT_LIST_HEAD(&io_end->list);
		INIT_LIST_HEAD(&io_end->list_vec);
		refcount_set(&io_end->count, 1);
	}
	return io_end;
}

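/*
 * Drop a reference to the io_end. If it was the last one and unwritten
 * extent conversion is still pending, queue the io_end to the reserved
 * conversion workqueue via ext4_add_complete_io(); otherwise release it
 * right away.
 */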
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (refcount_dec_and_test(&io_end->count)) {
		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
		    list_empty(&io_end->list_vec)) {
			ext4_release_io_end(io_end);
			return;
		}
		ext4_add_complete_io(io_end);
	}
}

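/*
 * Drop a reference to the io_end and, if it was the last one, perform any
 * pending unwritten extent conversion synchronously before releasing it.
 */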
int ext4_put_io_end(ext4_io_end_t *io_end)
{
	int err = 0;

	if (refcount_dec_and_test(&io_end->count)) {
		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
			err = ext4_convert_unwritten_io_end_vec(io_end->handle,
								io_end);
			io_end->handle = NULL;
			ext4_clear_io_unwritten_flag(io_end);
		}
		ext4_release_io_end(io_end);
	}
	return err;
}

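/* Take an extra reference to the io_end. */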
ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	refcount_inc(&io_end->count);
	return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
	ext4_io_end_t *io_end = bio->bi_private;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	if (WARN_ONCE(!io_end, "io_end is NULL: %pg: sector %Lu len %u err %d\n",
		      bio->bi_bdev,
		      (long long) bio->bi_iter.bi_sector,
		      (unsigned) bio_sectors(bio),
		      bio->bi_status)) {
		ext4_finish_bio(bio);
		bio_put(bio);
		return;
	}
	bio->bi_end_io = NULL;

	if (bio->bi_status) {
		struct inode *inode = io_end->inode;

		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "(starting block %llu)",
			     bio->bi_status, inode->i_ino,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
		mapping_set_error(inode->i_mapping,
				  blk_status_to_errno(bio->bi_status));
	}

	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
		/*
		 * Link bio into list hanging from io_end. We have to do it
		 * atomically as bio completions can be racing against each
		 * other.
		 */
		bio->bi_private = xchg(&io_end->bio, bio);
		ext4_put_io_end_defer(io_end);
	} else {
		/*
		 * Drop io_end reference early. Inode can get freed once
		 * we finish the bio.
		 */
		ext4_put_io_end_defer(io_end);
		ext4_finish_bio(bio);
		bio_put(bio);
	}
}

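/*
 * Submit the bio accumulated in the io_submit context (marking it REQ_SYNC
 * for WB_SYNC_ALL writeback) and reset io_bio so that a new bio can be
 * started for the next buffer.
 */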
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		if (io->io_wbc->sync_mode == WB_SYNC_ALL)
			io->io_bio->bi_opf |= REQ_SYNC;
		submit_bio(io->io_bio);
	}
	io->io_bio = NULL;
}

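/* Initialize an io_submit context for one round of writeback. */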
void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_wbc = wbc;
	io->io_bio = NULL;
	io->io_end = NULL;
}

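/*
 * Start a new bio for the given buffer: set its starting sector, completion
 * handler and fscrypt context, and stash a reference to the current io_end
 * in bi_private for ext4_end_bio().
 */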
static void io_submit_init_bio(struct ext4_io_submit *io,
			       struct buffer_head *bh)
{
	struct bio *bio;

	/*
	 * bio_alloc will _always_ be able to allocate a bio if
	 * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
	 */
	bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	wbc_init_bio(io->io_wbc, bio);
}

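/*
 * Add a buffer to the bio being built. If the buffer is not contiguous with
 * that bio (or cannot share its crypto context), the current bio is
 * submitted first and a fresh one is started.
 */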
static void io_submit_add_bh(struct ext4_io_submit *io,
			     struct inode *inode,
			     struct folio *folio,
			     struct folio *io_folio,
			     struct buffer_head *bh)
{
	if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
			   !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL)
		io_submit_init_bio(io, bh);
	if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
		goto submit_and_retry;
	wbc_account_cgroup_owner(io->io_wbc, &folio->page, bh->b_size);
	io->io_next_block++;
}

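/*
 * Write out the first @len bytes of @folio: mark the buffers that need I/O
 * with the async_write flag, encrypt the data into a bounce page when
 * fs-layer crypto is in use, start writeback and hand each marked buffer to
 * the io_submit context.
 */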
int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
			 size_t len)
{
	struct folio *io_folio = folio;
	struct inode *inode = folio->mapping->host;
	unsigned block_start;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_to_submit = 0;
	struct writeback_control *wbc = io->io_wbc;
	bool keep_towrite = false;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(folio_test_writeback(folio));

	/*
	 * Comments copied from block_write_full_page:
	 *
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < folio_size(folio))
		folio_zero_segment(folio, len, folio_size(folio));
	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the folio before submitting so that
	 * folio_end_writeback() cannot be called from ext4_end_bio() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = folio_buffers(folio);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			/*
			 * Keeping dirty some buffer we cannot write? Make sure
			 * to redirty the folio and keep TOWRITE tag so that
			 * racing WB_SYNC_ALL writeback does not skip the folio.
			 * This happens e.g. when doing writeout for
			 * transaction commit or when journalled data is not
			 * yet committed.
			 */
			if (buffer_dirty(bh) ||
			    (buffer_jbd(bh) && buffer_jbddirty(bh))) {
				if (!folio_test_dirty(folio))
					folio_redirty_for_writepage(wbc, folio);
				keep_towrite = true;
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		set_buffer_async_write(bh);
		clear_buffer_dirty(bh);
		nr_to_submit++;
	} while ((bh = bh->b_this_page) != head);

	/* Nothing to submit? Just unlock the folio... */
	if (!nr_to_submit)
		return 0;

	bh = head = folio_buffers(folio);

	/*
	 * If any blocks are being written to an encrypted file, encrypt them
	 * into a bounce page.  For simplicity, just encrypt until the last
	 * block which might be needed.  This may cause some unneeded blocks
	 * (e.g. holes) to be unnecessarily encrypted, but this is rare and
	 * can't happen in the common case of blocksize == PAGE_SIZE.
	 */
	if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		gfp_t gfp_flags = GFP_NOFS;
		unsigned int enc_bytes = round_up(len, i_blocksize(inode));
		struct page *bounce_page;

		/*
		 * Since bounce page allocation uses a mempool, we can only use
		 * a waiting mask (i.e. request guaranteed allocation) on the
		 * first page of the bio.  Otherwise it can deadlock.
		 */
		if (io->io_bio)
			gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
	retry_encrypt:
		bounce_page = fscrypt_encrypt_pagecache_blocks(&folio->page,
							enc_bytes, 0, gfp_flags);
		if (IS_ERR(bounce_page)) {
			ret = PTR_ERR(bounce_page);
			if (ret == -ENOMEM &&
			    (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
				gfp_t new_gfp_flags = GFP_NOFS;

				if (io->io_bio)
					ext4_io_submit(io);
				else
					new_gfp_flags |= __GFP_NOFAIL;
				memalloc_retry_wait(gfp_flags);
				gfp_flags = new_gfp_flags;
				goto retry_encrypt;
			}

			printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
			folio_redirty_for_writepage(wbc, folio);
			do {
				if (buffer_async_write(bh)) {
					clear_buffer_async_write(bh);
					set_buffer_dirty(bh);
				}
				bh = bh->b_this_page;
			} while (bh != head);

			return ret;
		}
		io_folio = page_folio(bounce_page);
	}

	__folio_start_writeback(folio, keep_towrite);

	/* Now submit buffers to write */
	do {
		if (!buffer_async_write(bh))
			continue;
		io_submit_add_bh(io, inode, folio, io_folio, bh);
	} while ((bh = bh->b_this_page) != head);

	return 0;
}