/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_page_cachep, *io_end_cachep;

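/*
 * Set up the slab caches for the ext4_io_page and ext4_io_end
 * structures used to track pages under writeback I/O.  Called once at
 * module init; ext4_exit_pageio() below tears them down again.
 */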
int __init ext4_init_pageio(void)
{
	io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
	if (io_page_cachep == NULL)
		return -ENOMEM;
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL) {
		kmem_cache_destroy(io_page_cachep);
		return -ENOMEM;
	}
	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_page_cachep);
}

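/*
 * Wait until the inode has no io_end structures left outstanding.
 * i_ioend_count is raised in ext4_init_io_end() and dropped in
 * ext4_free_io_end(), which wakes this queue when the count hits zero.
 */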
void ext4_ioend_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
}

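/*
 * Drop a reference to an ext4_io_page.  The last reference ends
 * writeback on the page, releases the page cache reference taken in
 * ext4_bio_write_page(), and frees the structure.
 */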
static void put_io_page(struct ext4_io_page *io_page)
{
	if (atomic_dec_and_test(&io_page->p_count)) {
		end_page_writeback(io_page->p_page);
		put_page(io_page->p_page);
		kmem_cache_free(io_page_cachep, io_page);
	}
}

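/*
 * Free an io_end.  The caller must already have taken it off the
 * completed-I/O list and finished any unwritten extent conversion.
 * Drops the page references held by the io_end and wakes anyone in
 * ext4_ioend_wait() once the inode's io_end count reaches zero.
 */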
void ext4_free_io_end(ext4_io_end_t *io)
{
	int i;

	BUG_ON(!io);
	BUG_ON(!list_empty(&io->list));
	BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);

	if (io->page)
		put_page(io->page);
	for (i = 0; i < io->num_io_pages; i++)
		put_io_page(io->pages[i]);
	io->num_io_pages = 0;
	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
		wake_up_all(ext4_ioend_wq(io->inode));
	kmem_cache_free(io_end_cachep, io);
}

/* Check a range of space and convert unwritten extents to written. */
static int ext4_end_io(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	int ret = 0;

	ext4_debug("ext4_end_io: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	ret = ext4_convert_unwritten_extents(inode, offset, size);
	if (ret < 0) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss!  "
			 "(inode %lu, offset %llu, size %zd, error %d)",
			 inode->i_ino, offset, size, ret);
	}
	if (io->iocb)
		aio_complete(io->iocb, io->result, 0);

	if (io->flag & EXT4_IO_END_DIRECT)
		inode_dio_done(inode);
	/* Wake up anyone waiting on unwritten extent conversion */
	if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
		wake_up_all(ext4_ioend_wq(inode));
	return ret;
}

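/*
 * Debugging helper: print each io_end on the inode's completed-I/O
 * list together with its list neighbours.  Compiles away to nothing
 * unless EXT4FS_DEBUG is defined.
 */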
static void dump_completed_IO(struct inode *inode)
{
#ifdef	EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io, *io0, *io1;

	if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
		ext4_debug("inode %lu completed_io list is empty\n",
			   inode->i_ino);
		return;
	}

	ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino);
	list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) {
		cur = &io->list;
		before = cur->prev;
		io0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			    io, inode->i_ino, io0, io1);
	}
#endif
}

/*
 * Add the io_end to the inode's list of completed I/Os.  If the list
 * was empty, also queue work to convert its unwritten extents; later
 * additions are picked up by the already-queued worker.
 */
void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct workqueue_struct *wq;
	unsigned long flags;

	BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
	wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	if (list_empty(&ei->i_completed_io_list)) {
		io_end->flag |= EXT4_IO_END_QUEUED;
		queue_work(wq, &io_end->work);
	}
	list_add_tail(&io_end->list, &ei->i_completed_io_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

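/*
 * Drain the inode's completed-I/O list: splice the list off under the
 * lock, convert the unwritten extents of every io_end on it, and then
 * free the io_ends that are no longer in use.  An io_end still marked
 * EXT4_IO_END_QUEUED belongs to a worker that has not run yet, so it
 * is left for that worker (which passes itself in as @work_io) to
 * destroy.  Returns the first conversion error, if any.
 */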
static int ext4_do_flush_completed_IO(struct inode *inode,
				      ext4_io_end_t *work_io)
{
	ext4_io_end_t *io;
	struct list_head unwritten, complete, to_free;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	INIT_LIST_HEAD(&complete);
	INIT_LIST_HEAD(&to_free);

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode);
	list_replace_init(&ei->i_completed_io_list, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
		list_del_init(&io->list);

		err = ext4_end_io(io);
		if (unlikely(!ret && err))
			ret = err;

		list_add_tail(&io->list, &complete);
	}
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	while (!list_empty(&complete)) {
		io = list_entry(complete.next, ext4_io_end_t, list);
		io->flag &= ~EXT4_IO_END_UNWRITTEN;
		/* The end_io context cannot be destroyed now because it is
		 * still used by a queued worker; the worker thread will
		 * destroy it later. */
		if (io->flag & EXT4_IO_END_QUEUED)
			list_del_init(&io->list);
		else
			list_move(&io->list, &to_free);
	}
	/* If we were called from the worker context, it is time to clear
	 * the queued flag and destroy the end_io if it has already been
	 * converted. */
	if (work_io) {
		work_io->flag &= ~EXT4_IO_END_QUEUED;
		if (!(work_io->flag & EXT4_IO_END_UNWRITTEN))
			list_add_tail(&work_io->list, &to_free);
	}
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&to_free)) {
		io = list_entry(to_free.next, ext4_io_end_t, list);
		list_del_init(&io->list);
		ext4_free_io_end(io);
	}
	return ret;
}

/*
 * Work on completed aio/dio I/O, to convert unwritten extents to
 * written extents.
 */
static void ext4_end_io_work(struct work_struct *work)
{
	ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
	ext4_do_flush_completed_IO(io->inode, io);
}

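/*
 * Synchronously flush the completed-I/O list and wait until all
 * unwritten extent conversions for this inode have finished.  The
 * caller must hold i_mutex unless the inode is being freed, so that
 * no new unwritten io_ends can be added behind us.
 */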
int ext4_flush_unwritten_io(struct inode *inode)
{
	int ret;

	WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) &&
		     !(inode->i_state & I_FREEING));
	ret = ext4_do_flush_completed_IO(inode, NULL);
	ext4_unwritten_wait(inode);
	return ret;
}

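/*
 * Allocate and initialize an io_end, charging it against the inode's
 * i_ioend_count.  Returns NULL if the allocation fails.
 */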
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);

	if (io) {
		atomic_inc(&EXT4_I(inode)->i_ioend_count);
		io->inode = inode;
		INIT_WORK(&io->work, ext4_end_io_work);
		INIT_LIST_HEAD(&io->list);
	}
	return io;
}

/*
 * Print a buffer I/O error compatible with fs/buffer.c.  This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message.  We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

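/*
 * Completion callback for the writeback bios built in io_submit_init()
 * below.  On error it reports the failure against each buffer covered
 * by the io_end, then drops the page references; the io_end itself is
 * either freed here or, if unwritten extents still need conversion,
 * handed off to ext4_add_complete_io().
 */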
static void ext4_end_bio(struct bio *bio, int error)
{
	ext4_io_end_t *io_end = bio->bi_private;
	struct inode *inode;
	int i;
	sector_t bi_sector = bio->bi_sector;

	BUG_ON(!io_end);
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = 0;
	bio_put(bio);

	for (i = 0; i < io_end->num_io_pages; i++) {
		struct page *page = io_end->pages[i]->p_page;
		struct buffer_head *bh, *head;
		loff_t offset;
		loff_t io_end_offset;

		if (error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			head = page_buffers(page);
			BUG_ON(!head);

			io_end_offset = io_end->offset + io_end->size;

			offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
			bh = head;
			do {
				if ((offset >= io_end->offset) &&
				    (offset+bh->b_size <= io_end_offset))
					buffer_io_error(bh);

				offset += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);
		}

		put_io_page(io_end->pages[i]);
	}
	io_end->num_io_pages = 0;
	inode = io_end->inode;

	if (error) {
		io_end->flag |= EXT4_IO_END_ERROR;
		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
	}

	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
		ext4_free_io_end(io_end);
		return;
	}

	ext4_add_complete_io(io_end);
}

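/*
 * Submit the bio accumulated in @io, if any, and reset the submission
 * state.  The bio_get()/bio_put() pair keeps the bio alive across
 * submit_bio() so that BIO_EOPNOTSUPP can still be tested afterwards.
 */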
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		bio_get(io->io_bio);
		submit_bio(io->io_op, io->io_bio);
		BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
		bio_put(io->io_bio);
	}
	io->io_bio = NULL;
	io->io_op = 0;
	io->io_end = NULL;
}

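/*
 * Start a new bio for the block underlying @bh: allocate an io_end
 * and a bio sized to the device's limits, point the bio's completion
 * handler at ext4_end_bio(), and record both in @io.  Returns -ENOMEM
 * if the io_end cannot be allocated.
 */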
static int io_submit_init(struct ext4_io_submit *io,
			  struct inode *inode,
			  struct writeback_control *wbc,
			  struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	struct page *page = bh->b_page;
	int nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio *bio;

	io_end = ext4_init_io_end(inode, GFP_NOFS);
	if (!io_end)
		return -ENOMEM;
	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_private = io->io_end = io_end;
	bio->bi_end_io = ext4_end_bio;

	/* Cast before shifting so the offset cannot overflow on 32-bit */
	io_end->offset = ((loff_t)page->index << PAGE_CACHE_SHIFT) +
			 bh_offset(bh);

	io->io_bio = bio;
	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
	io->io_next_block = bh->b_blocknr;
	return 0;
}

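/*
 * Add the data under @bh to the bio currently being built up in @io.
 * Unmapped or delayed buffers are skipped (after flushing any pending
 * bio, since the run of blocks is interrupted).  If the buffer does
 * not directly follow the previous block, or the bio is full, the
 * pending bio is submitted and a fresh one is started for this buffer.
 */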
static int io_submit_add_bh(struct ext4_io_submit *io,
			    struct ext4_io_page *io_page,
			    struct inode *inode,
			    struct writeback_control *wbc,
			    struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	int ret;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
	}

	if (!buffer_mapped(bh) || buffer_delay(bh)) {
		if (!buffer_mapped(bh))
			clear_buffer_dirty(bh);
		if (io->io_bio)
			ext4_io_submit(io);
		return 0;
	}

	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		ret = io_submit_init(io, inode, wbc, bh);
		if (ret)
			return ret;
	}
	io_end = io->io_end;
	if ((io_end->num_io_pages >= MAX_IO_PAGES) &&
	    (io_end->pages[io_end->num_io_pages-1] != io_page))
		goto submit_and_retry;
	if (buffer_uninit(bh))
		ext4_set_io_unwritten_flag(inode, io_end);
	io->io_end->size += bh->b_size;
	io->io_next_block++;
	ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	if ((io_end->num_io_pages == 0) ||
	    (io_end->pages[io_end->num_io_pages-1] != io_page)) {
		io_end->pages[io_end->num_io_pages++] = io_page;
		atomic_inc(&io_page->p_count);
	}
	return 0;
}

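/*
 * Write out the dirty buffers of a locked page through the bio layer.
 * The caller supplies an ext4_io_submit so that physically contiguous
 * blocks can be batched into one bio across successive calls; a
 * writepages-style caller would look roughly like this (a sketch, not
 * a verbatim copy of any caller):
 *
 *	struct ext4_io_submit io;
 *
 *	memset(&io, 0, sizeof(io));
 *	for each locked, dirty page:
 *		ext4_bio_write_page(&io, page, len, wbc);
 *	ext4_io_submit(&io);	(flushes the final partial bio)
 *
 * Buffers beyond @len (a page straddling i_size) are zeroed rather
 * than written, and on -ENOMEM the page is redirtied for a later pass.
 */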
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		block_end = block_start + blocksize;
		if (block_start >= len) {
			/*
			 * Comments copied from block_write_full_page_endio:
			 *
			 * The page straddles i_size.  It must be zeroed out on
			 * each and every writepage invocation because it may
			 * be mmapped.  "A file is mapped in multiples of the
			 * page size.  For a file that is not a multiple of
			 * the page size, the remaining memory is zeroed when
			 * mapped, and writes to that region are not written
			 * out to the file."
			 */
			zero_user_segment(page, block_start, block_end);
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O.  In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	put_io_page(io_page);
	return ret;
}