/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

static struct kmem_cache *io_page_cachep, *io_end_cachep;

int __init ext4_init_pageio(void)
{
	io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
	if (io_page_cachep == NULL)
		return -ENOMEM;
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL) {
		kmem_cache_destroy(io_page_cachep);
		return -ENOMEM;
	}

	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_page_cachep);
}

void ext4_free_io_end(ext4_io_end_t *io)
{
	int i;

	BUG_ON(!io);
	if (io->page)
		put_page(io->page);
	for (i = 0; i < io->num_io_pages; i++) {
		if (--io->pages[i]->p_count == 0) {
			struct page *page = io->pages[i]->p_page;

			end_page_writeback(page);
			put_page(page);
			kmem_cache_free(io_page_cachep, io->pages[i]);
		}
	}
	io->num_io_pages = 0;
	iput(io->inode);
	kmem_cache_free(io_end_cachep, io);
}
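/*
 * Note on reference counting: each ext4_io_page's p_count tracks how
 * many io_end structures still reference that page.  Dropping the last
 * reference (here and in ext4_end_bio) is what finally calls
 * end_page_writeback() and releases the page reference taken by
 * get_page() in ext4_bio_write_page().
 */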
/*
 * check a range of space and convert unwritten extents to written.
 */
int ext4_end_io_nolock(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	if (list_empty(&io->list))
		return ret;

	if (!(io->flag & EXT4_IO_END_UNWRITTEN))
		return ret;

	ret = ext4_convert_unwritten_extents(inode, offset, size);
	if (ret < 0) {
		printk(KERN_EMERG "%s: failed to convert unwritten "
			"extents to written extents, error is %d "
			"io is still on inode %lu aio dio list\n",
			__func__, ret, inode->i_ino);
		return ret;
	}

	if (io->iocb)
		aio_complete(io->iocb, io->result, 0);
	/* clear the DIO AIO unwritten flag */
	io->flag &= ~EXT4_IO_END_UNWRITTEN;
	return ret;
}

/*
 * work on completed aio dio IO, to convert unwritten extents to written
 */
static void ext4_end_io_work(struct work_struct *work)
{
	ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
	struct inode *inode = io->inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned long flags;
	int ret;

	mutex_lock(&inode->i_mutex);
	ret = ext4_end_io_nolock(io);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		return;
	}

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	if (!list_empty(&io->list))
		list_del_init(&io->list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
	mutex_unlock(&inode->i_mutex);
	ext4_free_io_end(io);
}

ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io;

	io = kmem_cache_alloc(io_end_cachep, flags);
	if (io) {
		memset(io, 0, sizeof(*io));
		io->inode = igrab(inode);
		BUG_ON(!io->inode);
		INIT_WORK(&io->work, ext4_end_io_work);
		INIT_LIST_HEAD(&io->list);
	}
	return io;
}
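/*
 * Note: ext4_init_io_end() takes its own inode reference via igrab();
 * the matching iput() is in ext4_free_io_end().  This keeps the inode
 * pinned while the io_end sits on the i_completed_io_list waiting for
 * the workqueue to convert its unwritten extents.
 */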
/*
 * Print a buffer I/O error compatible with fs/buffer.c.  This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message.  We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

static void ext4_end_bio(struct bio *bio, int error)
{
	ext4_io_end_t *io_end = bio->bi_private;
	struct workqueue_struct *wq;
	struct inode *inode;
	unsigned long flags;
	ext4_fsblk_t err_block;
	int i;

	BUG_ON(!io_end);
	inode = io_end->inode;
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = 0;
	err_block = bio->bi_sector >> (inode->i_blkbits - 9);
	bio_put(bio);

	if (!(inode->i_sb->s_flags & MS_ACTIVE)) {
		pr_err("sb umounted, discard end_io request for inode %lu\n",
			io_end->inode->i_ino);
		ext4_free_io_end(io_end);
		return;
	}

	if (error) {
		io_end->flag |= EXT4_IO_END_ERROR;
		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long) err_block);
	}

	for (i = 0; i < io_end->num_io_pages; i++) {
		struct page *page = io_end->pages[i]->p_page;
		struct buffer_head *bh, *head;
		int partial_write = 0;

		head = page_buffers(page);
		if (error)
			SetPageError(page);
		BUG_ON(!head);
		if (head->b_size == PAGE_CACHE_SIZE)
			clear_buffer_dirty(head);
		else {
			loff_t offset;
			loff_t io_end_offset = io_end->offset + io_end->size;

			offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
			bh = head;
			do {
				if ((offset >= io_end->offset) &&
				    (offset + bh->b_size <= io_end_offset)) {
					if (error)
						buffer_io_error(bh);

					clear_buffer_dirty(bh);
				}
				if (buffer_delay(bh))
					partial_write = 1;
				else if (!buffer_mapped(bh))
					clear_buffer_dirty(bh);
				else if (buffer_dirty(bh))
					partial_write = 1;
				offset += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);
		}

		if (--io_end->pages[i]->p_count == 0) {
			end_page_writeback(page);
			put_page(page);
			kmem_cache_free(io_page_cachep, io_end->pages[i]);
		}

		/*
		 * If this is a partial write which happened to make
		 * all buffers uptodate then we can optimize away a
		 * bogus readpage() for the next read().  Here we
		 * 'discover' whether the page went uptodate as a
		 * result of this (potentially partial) write.
		 */
		if (!partial_write)
			SetPageUptodate(page);
	}

	io_end->num_io_pages = 0;

	/* Add the io_end to the per-inode completed io list */
	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);

	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);
}
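/*
 * ext4_end_bio() runs in bio completion (typically softirq) context,
 * which is why the unwritten-extent conversion is only queued here:
 * ext4_convert_unwritten_extents() needs i_mutex and a journal handle,
 * neither of which is safe to take from interrupt context.
 */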
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		bio_get(io->io_bio);
		submit_bio(io->io_op, io->io_bio);
		BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
		bio_put(io->io_bio);
	}
	io->io_bio = NULL;
	io->io_op = 0;
	io->io_end = NULL;
}

static int io_submit_init(struct ext4_io_submit *io,
			  struct inode *inode,
			  struct writeback_control *wbc,
			  struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	struct page *page = bh->b_page;
	int nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio *bio;

	io_end = ext4_init_io_end(inode, GFP_NOFS);
	if (!io_end)
		return -ENOMEM;
	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (bio == NULL);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_private = io->io_end = io_end;
	bio->bi_end_io = ext4_end_bio;

	io_end->inode = inode;
	io_end->offset = ((loff_t)page->index << PAGE_CACHE_SHIFT) +
			 bh_offset(bh);

	io->io_bio = bio;
	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?
			WRITE_SYNC_PLUG : WRITE);
	io->io_next_block = bh->b_blocknr;
	return 0;
}

static int io_submit_add_bh(struct ext4_io_submit *io,
			    struct ext4_io_page *io_page,
			    struct inode *inode,
			    struct writeback_control *wbc,
			    struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	int ret;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
	}

	if (!buffer_mapped(bh) || buffer_delay(bh)) {
		if (!buffer_mapped(bh))
			clear_buffer_dirty(bh);
		if (io->io_bio)
			ext4_io_submit(io);
		return 0;
	}

	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		ret = io_submit_init(io, inode, wbc, bh);
		if (ret)
			return ret;
	}
	io_end = io->io_end;
	if ((io_end->num_io_pages >= MAX_IO_PAGES) &&
	    (io_end->pages[io_end->num_io_pages-1] != io_page))
		goto submit_and_retry;
	if (buffer_uninit(bh))
		io->io_end->flag |= EXT4_IO_END_UNWRITTEN;
	io->io_end->size += bh->b_size;
	io->io_next_block++;
	ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	if ((io_end->num_io_pages == 0) ||
	    (io_end->pages[io_end->num_io_pages-1] != io_page)) {
		io_end->pages[io_end->num_io_pages++] = io_page;
		io_page->p_count++;
	}
	return 0;
}
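/*
 * The submit_and_retry label above implements a simple "flush and
 * start over" policy: whenever the current bio cannot absorb this
 * buffer (non-contiguous block, io_end page array full, or
 * bio_add_page() refusing the page), the pending bio is submitted and
 * io_submit_init() builds a fresh one on the next pass.
 */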
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	ClearPageError(page);

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	io_page->p_count = 0;
	get_page(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O.  In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	if (io_page->p_count == 0) {
		put_page(page);
		end_page_writeback(page);
		kmem_cache_free(io_page_cachep, io_page);
	}
	return ret;
}
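/*
 * Typical usage (a sketch, not taken from this file): a writepages
 * path is expected to zero a struct ext4_io_submit, call
 * ext4_bio_write_page() for each locked dirty page it wants written,
 * and finish with one ext4_io_submit() to flush the last partially
 * filled bio:
 *
 *	struct ext4_io_submit io;
 *
 *	memset(&io, 0, sizeof(io));
 *	// for each page selected for writeback:
 *	//	ext4_bio_write_page(&io, page, len, wbc);
 *	ext4_io_submit(&io);
 */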