/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

static struct kmem_cache *io_page_cachep, *io_end_cachep;

int __init ext4_init_pageio(void)
{
        io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
        if (io_page_cachep == NULL)
                return -ENOMEM;
        io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
        if (io_end_cachep == NULL) {
                kmem_cache_destroy(io_page_cachep);
                return -ENOMEM;
        }
        return 0;
}

void ext4_exit_pageio(void)
{
        kmem_cache_destroy(io_end_cachep);
        kmem_cache_destroy(io_page_cachep);
}

/*
 * Wait until all io_end structures pending against this inode have been
 * freed (i_ioend_count drops to zero).
 */
void ext4_ioend_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
}

static void put_io_page(struct ext4_io_page *io_page)
{
        if (atomic_dec_and_test(&io_page->p_count)) {
                end_page_writeback(io_page->p_page);
                put_page(io_page->p_page);
                kmem_cache_free(io_page_cachep, io_page);
        }
}

void ext4_free_io_end(ext4_io_end_t *io)
{
        int i;

        BUG_ON(!io);
        if (io->page)
                put_page(io->page);
        for (i = 0; i < io->num_io_pages; i++)
                put_io_page(io->pages[i]);
        io->num_io_pages = 0;
        if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
                wake_up_all(ext4_ioend_wq(io->inode));
        kmem_cache_free(io_end_cachep, io);
}

/*
 * Check a range of space and convert unwritten extents to written.
 *
 * Called with inode->i_mutex; we depend on this when we manipulate
 * io->flag, since we could otherwise race with ext4_flush_completed_IO()
 */
int ext4_end_io_nolock(ext4_io_end_t *io)
{
        struct inode *inode = io->inode;
        loff_t offset = io->offset;
        ssize_t size = io->size;
        int ret = 0;

        ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
                   "list->prev 0x%p\n",
                   io, inode->i_ino, io->list.next, io->list.prev);

        ret = ext4_convert_unwritten_extents(inode, offset, size);
        if (ret < 0) {
                ext4_msg(inode->i_sb, KERN_EMERG,
                         "failed to convert unwritten extents to written "
                         "extents -- potential data loss! "
                         "(inode %lu, offset %llu, size %zd, error %d)",
                         inode->i_ino, offset, size, ret);
        }

        if (io->iocb)
                aio_complete(io->iocb, io->result, 0);

        if (io->flag & EXT4_IO_END_DIRECT)
                inode_dio_done(inode);
        /* Wake up anyone waiting on unwritten extent conversion */
        if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten))
                wake_up_all(ext4_ioend_wq(io->inode));
        return ret;
}

/*
 * Work on completed aio dio IO, to convert unwritten extents to written
 * extents.
 */
static void ext4_end_io_work(struct work_struct *work)
{
        ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
        struct inode *inode = io->inode;
        struct ext4_inode_info *ei = EXT4_I(inode);
        unsigned long flags;

        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        if (io->flag & EXT4_IO_END_IN_FSYNC)
                goto requeue;
        if (list_empty(&io->list)) {
                spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
                goto free;
        }

        if (!mutex_trylock(&inode->i_mutex)) {
                bool was_queued;
requeue:
                was_queued = !!(io->flag & EXT4_IO_END_QUEUED);
                io->flag |= EXT4_IO_END_QUEUED;
                spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
                /*
                 * Requeue the work instead of waiting so that the work
                 * items queued after this can be processed.
                 */
                queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io->work);
                /*
                 * To prevent the ext4-dio-unwritten thread from keeping
                 * requeueing end_io requests and occupying cpu for too long,
                 * yield the cpu if it sees an end_io request that has already
                 * been requeued.
                 */
                if (was_queued)
                        yield();
                return;
        }
        list_del_init(&io->list);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
        (void) ext4_end_io_nolock(io);
        mutex_unlock(&inode->i_mutex);
free:
        ext4_free_io_end(io);
}

ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
        ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
        if (io) {
                atomic_inc(&EXT4_I(inode)->i_ioend_count);
                io->inode = inode;
                INIT_WORK(&io->work, ext4_end_io_work);
                INIT_LIST_HEAD(&io->list);
        }
        return io;
}
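
/*
 * A rough sketch of the io_end lifecycle implemented in this file.  It is
 * distilled from the helpers above and below rather than taken from any
 * design document, so treat it as illustrative only:
 *
 *	io_end = ext4_init_io_end(inode, GFP_NOFS);
 *	bio->bi_private = io_end;
 *	bio->bi_end_io = ext4_end_bio;
 *	submit_bio(WRITE, bio);
 *
 * ext4_init_io_end() bumps i_ioend_count.  On completion, ext4_end_bio()
 * either frees the io_end right away or, if unwritten extents still need
 * converting, puts it on the inode's i_completed_io_list and queues
 * ext4_end_io_work().  Both paths end in ext4_free_io_end(), which drops
 * i_ioend_count and wakes anyone blocked in ext4_ioend_wait().
 */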

/*
 * Print a buffer I/O error message compatible with the one in fs/buffer.c.
 * This provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message.  We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];
        printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
               bdevname(bh->b_bdev, b),
               (unsigned long long)bh->b_blocknr);
}

static void ext4_end_bio(struct bio *bio, int error)
{
        ext4_io_end_t *io_end = bio->bi_private;
        struct workqueue_struct *wq;
        struct inode *inode;
        unsigned long flags;
        int i;
        sector_t bi_sector = bio->bi_sector;

        BUG_ON(!io_end);
        bio->bi_private = NULL;
        bio->bi_end_io = NULL;
        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = 0;
        bio_put(bio);

        for (i = 0; i < io_end->num_io_pages; i++) {
                struct page *page = io_end->pages[i]->p_page;
                struct buffer_head *bh, *head;
                loff_t offset;
                loff_t io_end_offset;

                if (error) {
                        SetPageError(page);
                        set_bit(AS_EIO, &page->mapping->flags);
                        head = page_buffers(page);
                        BUG_ON(!head);

                        io_end_offset = io_end->offset + io_end->size;

                        offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
                        bh = head;
                        do {
                                if ((offset >= io_end->offset) &&
                                    (offset+bh->b_size <= io_end_offset))
                                        buffer_io_error(bh);

                                offset += bh->b_size;
                                bh = bh->b_this_page;
                        } while (bh != head);
                }

                put_io_page(io_end->pages[i]);
        }
        io_end->num_io_pages = 0;
        inode = io_end->inode;

        if (error) {
                io_end->flag |= EXT4_IO_END_ERROR;
                ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
                             "(offset %llu size %ld starting block %llu)",
                             inode->i_ino,
                             (unsigned long long) io_end->offset,
                             (long) io_end->size,
                             (unsigned long long)
                             bi_sector >> (inode->i_blkbits - 9));
        }

        if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
                ext4_free_io_end(io_end);
                return;
        }

        /* Add the io_end to per-inode completed io list */
        spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
        list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
        spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);

        wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
        /* queue the work to convert unwritten extents to written */
        queue_work(wq, &io_end->work);
}

void ext4_io_submit(struct ext4_io_submit *io)
{
        struct bio *bio = io->io_bio;

        if (bio) {
                bio_get(io->io_bio);
                submit_bio(io->io_op, io->io_bio);
                BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
                bio_put(io->io_bio);
        }
        io->io_bio = NULL;
        io->io_op = 0;
        io->io_end = NULL;
}

static int io_submit_init(struct ext4_io_submit *io,
                          struct inode *inode,
                          struct writeback_control *wbc,
                          struct buffer_head *bh)
{
        ext4_io_end_t *io_end;
        struct page *page = bh->b_page;
        int nvecs = bio_get_nr_vecs(bh->b_bdev);
        struct bio *bio;

        io_end = ext4_init_io_end(inode, GFP_NOFS);
        if (!io_end)
                return -ENOMEM;
        bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio->bi_private = io->io_end = io_end;
        bio->bi_end_io = ext4_end_bio;

        io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);

        io->io_bio = bio;
        io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
        io->io_next_block = bh->b_blocknr;
        return 0;
}

static int io_submit_add_bh(struct ext4_io_submit *io,
                            struct ext4_io_page *io_page,
                            struct inode *inode,
                            struct writeback_control *wbc,
                            struct buffer_head *bh)
{
        ext4_io_end_t *io_end;
        int ret;

        if (buffer_new(bh)) {
                clear_buffer_new(bh);
                unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
        }

        if (!buffer_mapped(bh) || buffer_delay(bh)) {
                if (!buffer_mapped(bh))
                        clear_buffer_dirty(bh);
                if (io->io_bio)
                        ext4_io_submit(io);
                return 0;
        }

        if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
                ext4_io_submit(io);
        }
        if (io->io_bio == NULL) {
                ret = io_submit_init(io, inode, wbc, bh);
                if (ret)
                        return ret;
        }
        io_end = io->io_end;
        if ((io_end->num_io_pages >= MAX_IO_PAGES) &&
            (io_end->pages[io_end->num_io_pages-1] != io_page))
                goto submit_and_retry;
        if (buffer_uninit(bh))
                ext4_set_io_unwritten_flag(inode, io_end);
        io->io_end->size += bh->b_size;
        io->io_next_block++;
        ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
        if (ret != bh->b_size)
                goto submit_and_retry;
        if ((io_end->num_io_pages == 0) ||
            (io_end->pages[io_end->num_io_pages-1] != io_page)) {
                io_end->pages[io_end->num_io_pages++] = io_page;
                atomic_inc(&io_page->p_count);
        }
        return 0;
}
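
/*
 * Summary of the per-buffer decisions made by io_submit_add_bh() above;
 * this is a restatement of the code, not an independent specification:
 *
 *	buffer unmapped or delayed		-> flush the current bio and
 *						   skip the buffer
 *	block not contiguous with the bio	-> submit and start over
 *	no bio/io_end allocated yet		-> io_submit_init()
 *	io_end already tracks MAX_IO_PAGES
 *	pages and this page is a new one	-> submit and start over
 *	bio_add_page() can't take the buffer	-> submit and start over
 *	otherwise				-> grow io_end->size and take
 *						   one io_page reference per
 *						   io_end
 */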

int ext4_bio_write_page(struct ext4_io_submit *io,
                        struct page *page,
                        int len,
                        struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        unsigned block_start, block_end, blocksize;
        struct ext4_io_page *io_page;
        struct buffer_head *bh, *head;
        int ret = 0;

        blocksize = 1 << inode->i_blkbits;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));

        io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
        if (!io_page) {
                set_page_dirty(page);
                unlock_page(page);
                return -ENOMEM;
        }
        io_page->p_page = page;
        atomic_set(&io_page->p_count, 1);
        get_page(page);
        set_page_writeback(page);
        ClearPageError(page);

        for (bh = head = page_buffers(page), block_start = 0;
             bh != head || !block_start;
             block_start = block_end, bh = bh->b_this_page) {

                block_end = block_start + blocksize;
                if (block_start >= len) {
                        /*
                         * Comments copied from block_write_full_page_endio:
                         *
                         * The page straddles i_size.  It must be zeroed out on
                         * each and every writepage invocation because it may
                         * be mmapped.  "A file is mapped in multiples of the
                         * page size.  For a file that is not a multiple of
                         * the page size, the remaining memory is zeroed when
                         * mapped, and writes to that region are not written
                         * out to the file."
                         */
                        zero_user_segment(page, block_start, block_end);
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }
                clear_buffer_dirty(bh);
                ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
                if (ret) {
                        /*
                         * We only get here on ENOMEM.  Not much else
                         * we can do but mark the page as dirty, and
                         * better luck next time.
                         */
                        set_page_dirty(page);
                        break;
                }
        }
        unlock_page(page);
        /*
         * If the page was truncated before we could do the writeback,
         * or we had a memory allocation error while trying to write
         * the first buffer head, we won't have submitted any pages for
         * I/O.  In that case we need to make sure we've cleared the
         * PageWriteback bit from the page to prevent the system from
         * wedging later on.
         */
        put_io_page(io_page);
        return ret;
}
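
/*
 * A minimal sketch of how ext4_bio_write_page() and ext4_io_submit() are
 * meant to be driven by the writeback path.  The loop below is an
 * assumption about the caller (the writepages code in fs/ext4/inode.c),
 * not a copy of it:
 *
 *	struct ext4_io_submit io;
 *
 *	memset(&io, 0, sizeof(io));
 *	for each locked, dirty page selected for writeback
 *		ret = ext4_bio_write_page(&io, page, len, wbc);
 *	ext4_io_submit(&io);
 *
 * ext4_bio_write_page() calls ext4_io_submit() itself whenever it has to
 * start a new bio (discontiguous blocks, a full bio, a fresh io_end), so
 * the final call only flushes whatever is still pending.
 */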