// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;
static struct kmem_cache *io_end_vec_cachep;

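/* Allocate the slab caches for io_end and io_end_vec objects at module init. */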
int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;

	io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
	if (io_end_vec_cachep == NULL) {
		kmem_cache_destroy(io_end_cachep);
		return -ENOMEM;
	}
	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_end_vec_cachep);
}

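/*
 * Allocate a new io_end_vec, queue it at the tail of the io_end's vector
 * list, and return it (or an ERR_PTR on allocation failure).
 */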
struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec;

	io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
	if (!io_end_vec)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&io_end_vec->list);
	list_add_tail(&io_end_vec->list, &io_end->list_vec);
	return io_end_vec;
}

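/* Free all io_end_vec structures still attached to this io_end. */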
static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec, *tmp;

	if (list_empty(&io_end->list_vec))
		return;
	list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
		list_del(&io_end_vec->list);
		kmem_cache_free(io_end_vec_cachep, io_end_vec);
	}
}

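/* Return the most recently added io_end_vec; the list must not be empty. */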
struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
{
	BUG_ON(list_empty(&io_end->list_vec));
	return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
}

/*
 * Print a buffer I/O error compatible with the one in fs/buffer.c. This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message. We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
			   bh->b_bdev,
			   (unsigned long long)bh->b_blocknr);
}

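/*
 * Complete all buffers covered by this bio: clear their async_write flag,
 * record any I/O error, and end folio writeback once no buffer in the
 * folio is still under I/O.
 */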
static void ext4_finish_bio(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;
		struct folio *io_folio = NULL;
		struct buffer_head *bh, *head;
		size_t bio_start = fi.offset;
		size_t bio_end = bio_start + fi.length;
		unsigned under_io = 0;
		unsigned long flags;

		if (fscrypt_is_bounce_folio(folio)) {
			io_folio = folio;
			folio = fscrypt_pagecache_folio(folio);
		}

		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);
			folio_set_error(folio);
			mapping_set_error(folio->mapping, err);
		}
		bh = head = folio_buffers(folio);
		/*
		 * We check all buffers in the folio under b_uptodate_lock
		 * to avoid races with other end io clearing async_write flags
		 */
		spin_lock_irqsave(&head->b_uptodate_lock, flags);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_status) {
				set_buffer_write_io_error(bh);
				buffer_io_error(bh);
			}
		} while ((bh = bh->b_this_page) != head);
		spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
		if (!under_io) {
			fscrypt_free_bounce_page(&io_folio->page);
			folio_end_writeback(folio);
		}
	}
}

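/*
 * Free an io_end structure: finish and release the bios chained off it,
 * free any io_end_vecs, and return the io_end to its slab cache.
 */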
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	ext4_free_io_end_vec(io_end);
	kmem_cache_free(io_end_cachep, io_end);
}

/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching the same part of the extent tree by
 * the fact that truncate code waits for all DIO to finish (thus exclusion from
 * direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (happens from ext4_release_io_end()).
 */
static int ext4_end_io_end(ext4_io_end_t *io_end)
{
	struct inode *inode = io_end->inode;
	handle_t *handle = io_end->handle;
	int ret = 0;

	ext4_debug("ext4_end_io_end: io_end 0x%p from inode %lu, list->next 0x%p, "
		   "list->prev 0x%p\n",
		   io_end, inode->i_ino, io_end->list.next, io_end->list.prev);

	io_end->handle = NULL;	/* Following call will use up the handle */
	ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
	if (ret < 0 && !ext4_forced_shutdown(inode->i_sb)) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss! "
			 "(inode %lu, error %d)", inode->i_ino, ret);
	}
	ext4_clear_io_unwritten_flag(io_end);
	ext4_release_io_end(io_end);
	return ret;
}

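/* Debug helper: dump an inode's list of completed io_ends (EXT4FS_DEBUG only). */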
static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef	EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io_end, *io_end0, *io_end1;

	if (list_empty(head))
		return;

	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
	list_for_each_entry(io_end, head, list) {
		cur = &io_end->list;
		before = cur->prev;
		io_end0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io_end1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu, prev 0x%p, next 0x%p\n",
			   io_end, inode->i_ino, io_end0, io_end1);
	}
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
	struct workqueue_struct *wq;
	unsigned long flags;

	/* Only reserved conversions from writeback should enter here */
	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
	WARN_ON(!io_end->handle && sbi->s_journal);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	wq = sbi->rsv_conversion_wq;
	if (list_empty(&ei->i_rsv_conversion_list))
		queue_work(wq, &ei->i_rsv_conversion_work);
	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

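/*
 * Splice the inode's completed-io list onto a private list and process each
 * io_end, converting its unwritten extents; returns the first error seen.
 */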
static int ext4_do_flush_completed_IO(struct inode *inode,
				      struct list_head *head)
{
	ext4_io_end_t *io_end;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode, head);
	list_replace_init(head, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io_end = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
		list_del_init(&io_end->list);

		err = ext4_end_io_end(io_end);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * work on completed IO, to convert unwritten extents to extents
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_rsv_conversion_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

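/* Allocate and initialize a new io_end with a single reference held. */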
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);

	if (io_end) {
		io_end->inode = inode;
		INIT_LIST_HEAD(&io_end->list);
		INIT_LIST_HEAD(&io_end->list_vec);
		refcount_set(&io_end->count, 1);
	}
	return io_end;
}

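/*
 * Drop a reference to the io_end. When the last reference is dropped,
 * either release it immediately, or, if unwritten extents still need
 * conversion, defer that work to the per-inode conversion workqueue.
 */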
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (refcount_dec_and_test(&io_end->count)) {
		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
				list_empty(&io_end->list_vec)) {
			ext4_release_io_end(io_end);
			return;
		}
		ext4_add_complete_io(io_end);
	}
}

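/*
 * Drop a reference to the io_end, performing any needed unwritten-extent
 * conversion synchronously in this context when the last reference goes.
 */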
int ext4_put_io_end(ext4_io_end_t *io_end)
{
	int err = 0;

	if (refcount_dec_and_test(&io_end->count)) {
		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
			err = ext4_convert_unwritten_io_end_vec(io_end->handle,
								io_end);
			io_end->handle = NULL;
			ext4_clear_io_unwritten_flag(io_end);
		}
		ext4_release_io_end(io_end);
	}
	return err;
}

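/* Take an extra reference to the io_end and return it. */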
ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	refcount_inc(&io_end->count);
	return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
	ext4_io_end_t *io_end = bio->bi_private;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	if (WARN_ONCE(!io_end, "io_end is NULL: %pg: sector %Lu len %u err %d\n",
		      bio->bi_bdev,
		      (long long) bio->bi_iter.bi_sector,
		      (unsigned) bio_sectors(bio),
		      bio->bi_status)) {
		ext4_finish_bio(bio);
		bio_put(bio);
		return;
	}
	bio->bi_end_io = NULL;

	if (bio->bi_status) {
		struct inode *inode = io_end->inode;

		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "(starting block %llu)",
			     bio->bi_status, inode->i_ino,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
		mapping_set_error(inode->i_mapping,
				  blk_status_to_errno(bio->bi_status));
	}

	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
		/*
		 * Link bio into list hanging from io_end. We have to do it
		 * atomically as bio completions can be racing against each
		 * other.
		 */
		bio->bi_private = xchg(&io_end->bio, bio);
		ext4_put_io_end_defer(io_end);
	} else {
		/*
		 * Drop io_end reference early. Inode can get freed once
		 * we finish the bio.
		 */
		ext4_put_io_end_defer(io_end);
		ext4_finish_bio(bio);
		bio_put(bio);
	}
}

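/* Submit any bio built up in the ext4_io_submit context and forget it. */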
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		if (io->io_wbc->sync_mode == WB_SYNC_ALL)
			io->io_bio->bi_opf |= REQ_SYNC;
		submit_bio(io->io_bio);
	}
	io->io_bio = NULL;
}

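/* Initialize an ext4_io_submit context for a writeback pass. */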
void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_wbc = wbc;
	io->io_bio = NULL;
	io->io_end = NULL;
}

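/*
 * Allocate a new bio for this buffer's device and block, wire it to
 * ext4_end_bio() with a reference to the current io_end, and stash it in
 * the submit context.
 */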
static void io_submit_init_bio(struct ext4_io_submit *io,
			       struct buffer_head *bh)
{
	struct bio *bio;

	/*
	 * bio_alloc will _always_ be able to allocate a bio if
	 * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
	 */
	bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	wbc_init_bio(io->io_wbc, bio);
}

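/*
 * Add one buffer to the in-flight bio, submitting the current bio and
 * starting a new one if the buffer is discontiguous or cannot share the
 * bio's encryption context.
 */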
static void io_submit_add_bh(struct ext4_io_submit *io,
			     struct inode *inode,
			     struct folio *folio,
			     struct folio *io_folio,
			     struct buffer_head *bh)
{
	if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
			   !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL)
		io_submit_init_bio(io, bh);
	if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
		goto submit_and_retry;
	wbc_account_cgroup_owner(io->io_wbc, &folio->page, bh->b_size);
	io->io_next_block++;
}

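/*
 * Write out the dirty buffers of a locked folio: mark the buffers to be
 * submitted, encrypt them into a bounce page if needed, and queue them
 * for write via the ext4_io_submit context.
 */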
int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
			 size_t len)
{
	struct folio *io_folio = folio;
	struct inode *inode = folio->mapping->host;
	unsigned block_start;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_to_submit = 0;
	struct writeback_control *wbc = io->io_wbc;
	bool keep_towrite = false;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(folio_test_writeback(folio));

	folio_clear_error(folio);

	/*
	 * Comments copied from block_write_full_page:
	 *
	 * The folio straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < folio_size(folio))
		folio_zero_segment(folio, len, folio_size(folio));
	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the folio before submitting so that
	 * folio_end_writeback() cannot be called from ext4_end_bio() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = folio_buffers(folio);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			/*
			 * Keeping dirty some buffer we cannot write? Make sure
			 * to redirty the folio and keep TOWRITE tag so that
			 * racing WB_SYNC_ALL writeback does not skip the folio.
			 * This happens e.g. when doing writeout for
			 * transaction commit or when journalled data is not
			 * yet committed.
			 */
			if (buffer_dirty(bh) ||
			    (buffer_jbd(bh) && buffer_jbddirty(bh))) {
				if (!folio_test_dirty(folio))
					folio_redirty_for_writepage(wbc, folio);
				keep_towrite = true;
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		set_buffer_async_write(bh);
		clear_buffer_dirty(bh);
		nr_to_submit++;
	} while ((bh = bh->b_this_page) != head);

	/* Nothing to submit? Just unlock the folio... */
	if (!nr_to_submit)
		return 0;

	bh = head = folio_buffers(folio);

	/*
	 * If any blocks are being written to an encrypted file, encrypt them
	 * into a bounce page. For simplicity, just encrypt until the last
	 * block which might be needed. This may cause some unneeded blocks
	 * (e.g. holes) to be unnecessarily encrypted, but this is rare and
	 * can't happen in the common case of blocksize == PAGE_SIZE.
	 */
	if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		gfp_t gfp_flags = GFP_NOFS;
		unsigned int enc_bytes = round_up(len, i_blocksize(inode));
		struct page *bounce_page;

		/*
		 * Since bounce page allocation uses a mempool, we can only use
		 * a waiting mask (i.e. request guaranteed allocation) on the
		 * first page of the bio. Otherwise it can deadlock.
		 */
		if (io->io_bio)
			gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
	retry_encrypt:
		bounce_page = fscrypt_encrypt_pagecache_blocks(&folio->page,
					enc_bytes, 0, gfp_flags);
		if (IS_ERR(bounce_page)) {
			ret = PTR_ERR(bounce_page);
			if (ret == -ENOMEM &&
			    (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
				gfp_t new_gfp_flags = GFP_NOFS;
				if (io->io_bio)
					ext4_io_submit(io);
				else
					new_gfp_flags |= __GFP_NOFAIL;
				memalloc_retry_wait(gfp_flags);
				gfp_flags = new_gfp_flags;
				goto retry_encrypt;
			}

			printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
			folio_redirty_for_writepage(wbc, folio);
			do {
				if (buffer_async_write(bh)) {
					clear_buffer_async_write(bh);
					set_buffer_dirty(bh);
				}
				bh = bh->b_this_page;
			} while (bh != head);

			return ret;
		}
		io_folio = page_folio(bounce_page);
	}

	__folio_start_writeback(folio, keep_towrite);

	/* Now submit buffers to write */
	do {
		if (!buffer_async_write(bh))
			continue;
		io_submit_add_bh(io, inode, folio, io_folio, bh);
	} while ((bh = bh->b_this_page) != head);

	return 0;
}