/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

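/*
 * Overview of the writeback path implemented in this file:
 *
 *   ext4_bio_write_page()      puts a page under writeback and feeds
 *     io_submit_add_bh()       each of its dirty buffers into a bio,
 *       ext4_io_submit()       submitting the bio when it fills up or
 *                              becomes discontiguous;
 *   ext4_end_bio()             runs at bio completion, cleans up the
 *                              buffer and page state, and queues the
 *                              io_end on the inode's completed-io list;
 *   ext4_end_io_work()         then runs from the dio_unwritten_wq
 *     ext4_end_io_nolock()     workqueue and converts any unwritten
 *                              extents covered by the I/O to written.
 */
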
static struct kmem_cache *io_page_cachep, *io_end_cachep;

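/*
 * A small hash of wait queues, keyed by the inode pointer, used by
 * ext4_ioend_wait() to sleep until an inode's count of outstanding
 * io_ends (i_ioend_count) drops to zero.
 */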
#define WQ_HASH_SZ		37
#define to_ioend_wq(v)	(&ioend_wq[((unsigned long)(v)) % WQ_HASH_SZ])
static wait_queue_head_t ioend_wq[WQ_HASH_SZ];

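/*
 * Create the slab caches for the ext4_io_page and ext4_io_end
 * structures and initialize the ioend wait queues.
 */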
int __init ext4_init_pageio(void)
{
	int i;

	io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
	if (io_page_cachep == NULL)
		return -ENOMEM;
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL) {
		kmem_cache_destroy(io_page_cachep);
		return -ENOMEM;
	}
	for (i = 0; i < WQ_HASH_SZ; i++)
		init_waitqueue_head(&ioend_wq[i]);

	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_page_cachep);
}

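/*
 * Wait until every outstanding io_end for this inode has been freed;
 * ext4_free_io_end() wakes this queue once i_ioend_count reaches zero.
 */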
void ext4_ioend_wait(struct inode *inode)
{
	wait_queue_head_t *wq = to_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
}

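/*
 * Drop a reference to an ext4_io_page.  The final put ends writeback
 * on the page and releases both the page and the structure itself.
 */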
static void put_io_page(struct ext4_io_page *io_page)
{
	if (atomic_dec_and_test(&io_page->p_count)) {
		end_page_writeback(io_page->p_page);
		put_page(io_page->p_page);
		kmem_cache_free(io_page_cachep, io_page);
	}
}

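/*
 * Free an io_end: drop its page references, decrement the inode's
 * i_ioend_count, and wake up anyone sleeping in ext4_ioend_wait().
 */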
void ext4_free_io_end(ext4_io_end_t *io)
{
	int i;
	wait_queue_head_t *wq;

	BUG_ON(!io);
	if (io->page)
		put_page(io->page);
	for (i = 0; i < io->num_io_pages; i++)
		put_io_page(io->pages[i]);
	io->num_io_pages = 0;
	wq = to_ioend_wq(io->inode);
	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) &&
	    waitqueue_active(wq))
		wake_up_all(wq);
	kmem_cache_free(io_end_cachep, io);
}

/*
 * Check a range of space and convert unwritten extents to written.
 */
int ext4_end_io_nolock(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	if (list_empty(&io->list))
		return ret;

	if (!(io->flag & EXT4_IO_END_UNWRITTEN))
		return ret;

	ret = ext4_convert_unwritten_extents(inode, offset, size);
	if (ret < 0) {
		printk(KERN_EMERG "%s: failed to convert unwritten "
			"extents to written extents, error is %d "
			"io is still on inode %lu aio dio list\n",
		       __func__, ret, inode->i_ino);
		return ret;
	}

	if (io->iocb)
		aio_complete(io->iocb, io->result, 0);
	/* clear the DIO AIO unwritten flag */
	io->flag &= ~EXT4_IO_END_UNWRITTEN;
	return ret;
}

/*
 * Work on a completed aio/dio io_end: convert its unwritten extents
 * to written extents.
 */
static void ext4_end_io_work(struct work_struct *work)
{
	ext4_io_end_t		*io = container_of(work, ext4_io_end_t, work);
	struct inode		*inode = io->inode;
	struct ext4_inode_info	*ei = EXT4_I(inode);
	unsigned long		flags;
	int			ret;

	mutex_lock(&inode->i_mutex);
	ret = ext4_end_io_nolock(io);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		return;
	}

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	if (!list_empty(&io->list))
		list_del_init(&io->list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
	mutex_unlock(&inode->i_mutex);
	ext4_free_io_end(io);
}

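/*
 * Allocate and initialize an io_end for this inode, taking a reference
 * on the inode's count of outstanding io_ends.
 */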
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
	if (io) {
		atomic_inc(&EXT4_I(inode)->i_ioend_count);
		io->inode = inode;
		INIT_WORK(&io->work, ext4_end_io_work);
		INIT_LIST_HEAD(&io->list);
	}
	return io;
}

/*
 * Print a buffer I/O error message compatible with the one in
 * fs/buffer.c.  This provides compatibility with dmesg scrapers that
 * look for a specific buffer I/O error message.  We really need a
 * unified error reporting structure to userspace a la Digital Unix's
 * uerf system, but it's probably not going to happen in my lifetime,
 * due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

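/*
 * bio completion callback: clear the buffer and page writeback state
 * for every page attached to the io_end, then queue the io_end on the
 * inode's completed-io list so that the unwritten-extent conversion
 * can run later from the dio_unwritten_wq workqueue.
 */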
static void ext4_end_bio(struct bio *bio, int error)
{
	ext4_io_end_t *io_end = bio->bi_private;
	struct workqueue_struct *wq;
	struct inode *inode;
	unsigned long flags;
	int i;
	/* save bi_sector now: the error path needs it after bio_put() */
	sector_t bi_sector = bio->bi_sector;

	BUG_ON(!io_end);
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = 0;
	bio_put(bio);

	for (i = 0; i < io_end->num_io_pages; i++) {
		struct page *page = io_end->pages[i]->p_page;
		struct buffer_head *bh, *head;
		int partial_write = 0;

		head = page_buffers(page);
		if (error)
			SetPageError(page);
		BUG_ON(!head);
		if (head->b_size == PAGE_CACHE_SIZE)
			clear_buffer_dirty(head);
		else {
			loff_t offset;
			loff_t io_end_offset = io_end->offset + io_end->size;

			offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
			bh = head;
			do {
				if ((offset >= io_end->offset) &&
				    (offset+bh->b_size <= io_end_offset)) {
					if (error)
						buffer_io_error(bh);

					clear_buffer_dirty(bh);
				}
				if (buffer_delay(bh))
					partial_write = 1;
				else if (!buffer_mapped(bh))
					clear_buffer_dirty(bh);
				else if (buffer_dirty(bh))
					partial_write = 1;
				offset += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);
		}

		/*
		 * If this is a partial write which happened to make
		 * all buffers uptodate then we can optimize away a
		 * bogus readpage() for the next read(). Here we
		 * 'discover' whether the page went uptodate as a
		 * result of this (potentially partial) write.
		 */
		if (!partial_write)
			SetPageUptodate(page);

		put_io_page(io_end->pages[i]);
	}
	io_end->num_io_pages = 0;
	inode = io_end->inode;

	if (error) {
		io_end->flag |= EXT4_IO_END_ERROR;
		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
	}

	/* Add the io_end to the per-inode completed io list */
	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);

	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);
}

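/*
 * Submit the bio accumulated in the submit state, if any, and reset
 * that state.  The bio_get()/bio_put() pair around submit_bio() keeps
 * the bio alive long enough for the BIO_EOPNOTSUPP check.
 */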
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		bio_get(io->io_bio);
		submit_bio(io->io_op, io->io_bio);
		BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
		bio_put(io->io_bio);
	}
	io->io_bio = NULL;
	io->io_op = 0;
	io->io_end = NULL;
}

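/*
 * Start a new bio for this buffer: allocate an io_end and a bio sized
 * to what the device can take, and record the write mode requested by
 * the writeback_control.
 */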
static int io_submit_init(struct ext4_io_submit *io,
			  struct inode *inode,
			  struct writeback_control *wbc,
			  struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	struct page *page = bh->b_page;
	int nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio *bio;

	io_end = ext4_init_io_end(inode, GFP_NOFS);
	if (!io_end)
		return -ENOMEM;
	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (bio == NULL);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_private = io->io_end = io_end;
	bio->bi_end_io = ext4_end_bio;

	/* cast before shifting so the byte offset can't overflow on 32-bit */
	io_end->offset = ((loff_t)page->index << PAGE_CACHE_SHIFT) +
			 bh_offset(bh);

	io->io_bio = bio;
	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?
			WRITE_SYNC_PLUG : WRITE);
	io->io_next_block = bh->b_blocknr;
	return 0;
}

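/*
 * Add a buffer to the bio being assembled in the submit state.
 * Unmapped or delayed buffers force any pending bio out and are
 * otherwise skipped; a full or discontiguous bio is submitted and a
 * fresh one started before the buffer is added.
 */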
static int io_submit_add_bh(struct ext4_io_submit *io,
			    struct ext4_io_page *io_page,
			    struct inode *inode,
			    struct writeback_control *wbc,
			    struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	int ret;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
	}

	if (!buffer_mapped(bh) || buffer_delay(bh)) {
		if (!buffer_mapped(bh))
			clear_buffer_dirty(bh);
		if (io->io_bio)
			ext4_io_submit(io);
		return 0;
	}

	/* submit the current bio if this block is not contiguous with it */
	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		ret = io_submit_init(io, inode, wbc, bh);
		if (ret)
			return ret;
	}
	io_end = io->io_end;
	/* out of page slots and this bh starts a new page: flush first */
	if ((io_end->num_io_pages >= MAX_IO_PAGES) &&
	    (io_end->pages[io_end->num_io_pages-1] != io_page))
		goto submit_and_retry;
	if (buffer_uninit(bh))
		io->io_end->flag |= EXT4_IO_END_UNWRITTEN;
	io->io_end->size += bh->b_size;
	io->io_next_block++;
	ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	if ((io_end->num_io_pages == 0) ||
	    (io_end->pages[io_end->num_io_pages-1] != io_page)) {
		io_end->pages[io_end->num_io_pages++] = io_page;
		atomic_inc(&io_page->p_count);
	}
	return 0;
}

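/*
 * Write out up to len bytes of the page: put the page under writeback,
 * wrap it in an ext4_io_page, and feed each buffer within len to
 * io_submit_add_bh().  Buffers past len (i.e. beyond EOF) are cleaned
 * and skipped.
 */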
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	ClearPageError(page);

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O.  In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	put_io_page(io_page);
	return ret;
}
429