/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/fscrypto.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;

int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
}

/*
 * Print a buffer I/O error message compatible with the one in
 * fs/buffer.c. This provides compatibility with dmesg scrapers that
 * look for a specific buffer I/O error message. We really need a
 * unified error reporting structure to userspace ala Digital Unix's
 * uerf system, but it's probably not going to happen in my lifetime,
 * due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
			   bh->b_bdev,
			   (unsigned long long)bh->b_blocknr);
}

static void ext4_finish_bio(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		struct page *data_page = NULL;
#endif
		struct buffer_head *bh, *head;
		unsigned bio_start = bvec->bv_offset;
		unsigned bio_end = bio_start + bvec->bv_len;
		unsigned under_io = 0;
		unsigned long flags;

		if (!page)
			continue;

#ifdef CONFIG_EXT4_FS_ENCRYPTION
		if (!page->mapping) {
			/* The bounce data pages are unmapped. */
			data_page = page;
			fscrypt_pullback_bio_page(&page, false);
		}
#endif

		if (bio->bi_error) {
			SetPageError(page);
			mapping_set_error(page->mapping, -EIO);
		}
		bh = head = page_buffers(page);
		/*
		 * We check all buffers in the page under BH_Uptodate_Lock
		 * to avoid races with other end io clearing async_write flags
		 */
		local_irq_save(flags);
		bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_error)
				buffer_io_error(bh);
		} while ((bh = bh->b_this_page) != head);
		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
		local_irq_restore(flags);
		if (!under_io) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
			if (data_page)
				fscrypt_restore_control_page(data_page);
#endif
			end_page_writeback(page);
		}
	}
}
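/*
 * Release an io_end structure once its reference count has dropped to
 * zero: finish and put every bio chained off io_end->bio (the chain is
 * built in ext4_end_bio() below), then free the structure back to the
 * slab cache.
 */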
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	kmem_cache_free(io_end_cachep, io_end);
}

/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching the same part of the extent tree
 * by the fact that truncate code waits for all DIO to finish (thus exclusion
 * from direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (happens from ext4_release_io_end()).
 */
static int ext4_end_io(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	handle_t *handle = io->handle;
	int ret = 0;

	ext4_debug("ext4_end_io: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	io->handle = NULL;	/* Following call will use up the handle */
	ret = ext4_convert_unwritten_extents(handle, inode, offset, size);
	if (ret < 0) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss! "
			 "(inode %lu, offset %llu, size %zd, error %d)",
			 inode->i_ino, offset, size, ret);
	}
	ext4_clear_io_unwritten_flag(io);
	ext4_release_io_end(io);
	return ret;
}

static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef	EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io, *io0, *io1;

	if (list_empty(head))
		return;

	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
	list_for_each_entry(io, head, list) {
		cur = &io->list;
		before = cur->prev;
		io0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			   io, inode->i_ino, io0, io1);
	}
#endif
}
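/*
 * Unwritten extent conversion cannot run in bio completion context
 * (typically interrupt context), where starting a journal handle and
 * sleeping are not allowed.  Completed io_ends that still carry
 * EXT4_IO_END_UNWRITTEN are therefore parked on the per-inode
 * i_rsv_conversion_list and converted later from the rsv_conversion_wq
 * workqueue via ext4_end_io_rsv_work().
 */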
/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
	struct workqueue_struct *wq;
	unsigned long flags;

	/* Only reserved conversions from writeback should enter here */
	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
	WARN_ON(!io_end->handle && sbi->s_journal);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	wq = sbi->rsv_conversion_wq;
	if (list_empty(&ei->i_rsv_conversion_list))
		queue_work(wq, &ei->i_rsv_conversion_work);
	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

static int ext4_do_flush_completed_IO(struct inode *inode,
				      struct list_head *head)
{
	ext4_io_end_t *io;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode, head);
	list_replace_init(head, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
		list_del_init(&io->list);

		err = ext4_end_io(io);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * Work on completed IO, to convert unwritten extents to written extents.
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_rsv_conversion_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
	if (io) {
		io->inode = inode;
		INIT_LIST_HEAD(&io->list);
		atomic_set(&io->count, 1);
	}
	return io;
}

void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (atomic_dec_and_test(&io_end->count)) {
		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
			ext4_release_io_end(io_end);
			return;
		}
		ext4_add_complete_io(io_end);
	}
}

int ext4_put_io_end(ext4_io_end_t *io_end)
{
	int err = 0;

	if (atomic_dec_and_test(&io_end->count)) {
		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
			err = ext4_convert_unwritten_extents(io_end->handle,
						io_end->inode, io_end->offset,
						io_end->size);
			io_end->handle = NULL;
			ext4_clear_io_unwritten_flag(io_end);
		}
		ext4_release_io_end(io_end);
	}
	return err;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	atomic_inc(&io_end->count);
	return io_end;
}
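/*
 * A rough sketch of the io_end reference counting protocol as seen
 * from a caller (names outside this file are illustrative, error
 * handling omitted):
 *
 *	io_end = ext4_init_io_end(inode, GFP_NOFS);	// count == 1
 *	...
 *	io_submit_init_bio()	// each new bio takes a reference
 *				// via ext4_get_io_end()
 *	...
 *	ext4_put_io_end(io_end);	// drop the submitter's reference
 *
 * Each bio drops its own reference from ext4_end_bio() through
 * ext4_put_io_end_defer(), so the io_end (and any deferred extent
 * conversion) stays alive until both the submitter and all in-flight
 * bios are done with it.
 */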
/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
	ext4_io_end_t *io_end = bio->bi_private;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	BUG_ON(!io_end);
	bio->bi_end_io = NULL;

	if (bio->bi_error) {
		struct inode *inode = io_end->inode;

		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     bio->bi_error, inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
		mapping_set_error(inode->i_mapping, bio->bi_error);
	}

	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
		/*
		 * Link bio into list hanging from io_end. We have to do it
		 * atomically as bio completions can be racing against each
		 * other.
		 */
		bio->bi_private = xchg(&io_end->bio, bio);
		ext4_put_io_end_defer(io_end);
	} else {
		/*
		 * Drop io_end reference early. Inode can get freed once
		 * we finish the bio.
		 */
		ext4_put_io_end_defer(io_end);
		ext4_finish_bio(bio);
		bio_put(bio);
	}
}

void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
				  WRITE_SYNC : 0;
		bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
		submit_bio(io->io_bio);
	}
	io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_wbc = wbc;
	io->io_bio = NULL;
	io->io_end = NULL;
}

static int io_submit_init_bio(struct ext4_io_submit *io,
			      struct buffer_head *bh)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
	if (!bio)
		return -ENOMEM;
	wbc_init_bio(io->io_wbc, bio);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	return 0;
}

static int io_submit_add_bh(struct ext4_io_submit *io,
			    struct inode *inode,
			    struct page *page,
			    struct buffer_head *bh)
{
	int ret;

	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		ret = io_submit_init_bio(io, bh);
		if (ret)
			return ret;
	}
	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	wbc_account_io(io->io_wbc, page, bh->b_size);
	io->io_next_block++;
	return 0;
}
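/*
 * Write out up to 'len' bytes of a locked page.  The caller must hold
 * the page lock and guarantee that the page is not already under
 * writeback; the tail of the page beyond 'len' (i.e. past EOF) is
 * zeroed rather than written.  For encrypted regular files the data is
 * first encrypted into a bounce page, and it is the bounce page that
 * is submitted for IO.
 */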
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc,
			bool keep_towrite)
{
	struct page *data_page = NULL;
	struct inode *inode = page->mapping->host;
	unsigned block_start;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_submitted = 0;
	int nr_to_submit = 0;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	if (keep_towrite)
		set_page_writeback_keepwrite(page);
	else
		set_page_writeback(page);
	ClearPageError(page);

	/*
	 * Comments copied from block_write_full_page:
	 *
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the page before submitting so that
	 * end_page_writeback() cannot be called from ext4_end_bio() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = page_buffers(page);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			if (io->io_bio)
				ext4_io_submit(io);
			continue;
		}
		if (buffer_new(bh)) {
			clear_buffer_new(bh);
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		}
		set_buffer_async_write(bh);
		nr_to_submit++;
	} while ((bh = bh->b_this_page) != head);

	bh = head = page_buffers(page);

	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
	    nr_to_submit) {
		gfp_t gfp_flags = GFP_NOFS;

	retry_encrypt:
		data_page = fscrypt_encrypt_page(inode, page, gfp_flags);
		if (IS_ERR(data_page)) {
			ret = PTR_ERR(data_page);
			if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
				if (io->io_bio) {
					ext4_io_submit(io);
					congestion_wait(BLK_RW_ASYNC, HZ/50);
				}
				gfp_flags |= __GFP_NOFAIL;
				goto retry_encrypt;
			}
			data_page = NULL;
			goto out;
		}
	}

	/* Now submit buffers to write */
	do {
		if (!buffer_async_write(bh))
			continue;
		ret = io_submit_add_bh(io, inode,
				       data_page ? data_page : page, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM. Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			break;
		}
		nr_submitted++;
		clear_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);

	/* Error stopped previous loop? Clean up buffers... */
	if (ret) {
	out:
		if (data_page)
			fscrypt_restore_control_page(data_page);
		printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
		redirty_page_for_writepage(wbc, page);
		do {
			clear_buffer_async_write(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	unlock_page(page);
	/* Nothing submitted - we have to end page writeback */
	if (!nr_submitted)
		end_page_writeback(page);
	return ret;
}
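/*
 * A minimal sketch of how the pieces above fit together, loosely
 * modelled on the ext4 writeback path (surrounding names are
 * illustrative and error handling is omitted):
 *
 *	struct ext4_io_submit io;
 *
 *	ext4_io_submit_init(&io, wbc);
 *	io.io_end = ext4_init_io_end(inode, GFP_NOFS);
 *	ext4_bio_write_page(&io, page, len, wbc, false);
 *	ext4_io_submit(&io);
 *	ext4_put_io_end(io.io_end);
 */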