/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

static struct kmem_cache *io_page_cachep, *io_end_cachep;

int __init ext4_init_pageio(void)
{
	io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
	if (io_page_cachep == NULL)
		return -ENOMEM;
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL) {
		kmem_cache_destroy(io_page_cachep);
		return -ENOMEM;
	}
	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_page_cachep);
}

void ext4_ioend_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
}

static void put_io_page(struct ext4_io_page *io_page)
{
	if (atomic_dec_and_test(&io_page->p_count)) {
		end_page_writeback(io_page->p_page);
		put_page(io_page->p_page);
		kmem_cache_free(io_page_cachep, io_page);
	}
}

void ext4_free_io_end(ext4_io_end_t *io)
{
	int i;
	wait_queue_head_t *wq;

	BUG_ON(!io);
	if (io->page)
		put_page(io->page);
	for (i = 0; i < io->num_io_pages; i++)
		put_io_page(io->pages[i]);
	io->num_io_pages = 0;
	wq = ext4_ioend_wq(io->inode);
	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) &&
	    waitqueue_active(wq))
		wake_up_all(wq);
	kmem_cache_free(io_end_cachep, io);
}
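/*
 * Lifecycle of the objects above, as a minimal sketch of the expected
 * pairing (the caller shown here is hypothetical; the real producers
 * and consumers are io_submit_init() and ext4_end_bio() below):
 *
 *	ext4_io_end_t *io = ext4_init_io_end(inode, GFP_NOFS);
 *	if (!io)
 *		return -ENOMEM;
 *	// ... attach io_pages, each pinned via p_count ...
 *	ext4_free_io_end(io);	// drops every p_count reference, then
 *				// drops i_ioend_count and wakes anyone
 *				// blocked in ext4_ioend_wait()
 */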
/*
 * Check a range of space and convert unwritten extents to written.
 * Despite the name, the caller is expected to hold i_mutex (see
 * ext4_end_io_work() below); "nolock" means only that this function
 * does not take the lock itself.
 */
int ext4_end_io_nolock(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	wait_queue_head_t *wq;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu, "
		   "list->next 0x%p, list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	if (list_empty(&io->list))
		return ret;

	if (!(io->flag & EXT4_IO_END_UNWRITTEN))
		return ret;

	ret = ext4_convert_unwritten_extents(inode, offset, size);
	if (ret < 0) {
		printk(KERN_EMERG "%s: failed to convert unwritten "
		       "extents to written extents, error is %d; "
		       "io is still on inode %lu aio dio list\n",
		       __func__, ret, inode->i_ino);
		return ret;
	}

	if (io->iocb)
		aio_complete(io->iocb, io->result, 0);
	/* clear the DIO AIO unwritten flag */
	if (io->flag & EXT4_IO_END_UNWRITTEN) {
		io->flag &= ~EXT4_IO_END_UNWRITTEN;
		/* Wake up anyone waiting on unwritten extent conversion */
		wq = ext4_ioend_wq(io->inode);
		if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten) &&
		    waitqueue_active(wq)) {
			wake_up_all(wq);
		}
	}

	return ret;
}

/*
 * Work on completed aio dio IO, to convert unwritten extents to
 * written extents.
 */
static void ext4_end_io_work(struct work_struct *work)
{
	ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
	struct inode *inode = io->inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned long flags;
	int ret;

	mutex_lock(&inode->i_mutex);
	ret = ext4_end_io_nolock(io);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		return;
	}

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	if (!list_empty(&io->list))
		list_del_init(&io->list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
	mutex_unlock(&inode->i_mutex);
	ext4_free_io_end(io);
}

ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
	if (io) {
		atomic_inc(&EXT4_I(inode)->i_ioend_count);
		io->inode = inode;
		INIT_WORK(&io->work, ext4_end_io_work);
		INIT_LIST_HEAD(&io->list);
	}
	return io;
}
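/*
 * A minimal sketch of how a caller typically fills in and queues an
 * io_end for the conversion work above (simplified and hypothetical;
 * the real setup lives in the direct-IO completion path in
 * fs/ext4/inode.c):
 *
 *	ext4_io_end_t *io = ext4_init_io_end(inode, GFP_NOFS);
 *	if (!io)
 *		return -ENOMEM;
 *	io->offset = offset;
 *	io->size = size;
 *	io->flag = EXT4_IO_END_UNWRITTEN;
 *	queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io->work);
 */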
/*
 * Print a buffer I/O error compatible with the one in fs/buffer.c.
 * This provides compatibility with dmesg scrapers that look for a
 * specific buffer I/O error message.  We really need a unified error
 * reporting structure to userspace ala Digital Unix's uerf system,
 * but it's probably not going to happen in my lifetime, due to LKML
 * politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
	       bdevname(bh->b_bdev, b),
	       (unsigned long long)bh->b_blocknr);
}

static void ext4_end_bio(struct bio *bio, int error)
{
	ext4_io_end_t *io_end = bio->bi_private;
	struct workqueue_struct *wq;
	struct inode *inode;
	unsigned long flags;
	int i;
	sector_t bi_sector = bio->bi_sector;

	BUG_ON(!io_end);
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = 0;
	bio_put(bio);

	for (i = 0; i < io_end->num_io_pages; i++) {
		struct page *page = io_end->pages[i]->p_page;
		struct buffer_head *bh, *head;
		loff_t offset;
		loff_t io_end_offset;

		if (error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			head = page_buffers(page);
			BUG_ON(!head);

			io_end_offset = io_end->offset + io_end->size;

			offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
			bh = head;
			do {
				if ((offset >= io_end->offset) &&
				    (offset + bh->b_size <= io_end_offset))
					buffer_io_error(bh);

				offset += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);
		}

		put_io_page(io_end->pages[i]);
	}
	io_end->num_io_pages = 0;
	inode = io_end->inode;

	if (error) {
		io_end->flag |= EXT4_IO_END_ERROR;
		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
	}

	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
		ext4_free_io_end(io_end);
		return;
	}

	/* Add the io_end to the per-inode completed io list */
	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);

	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);
}

void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		/*
		 * Take an extra reference so the bio is still valid
		 * for the BIO_EOPNOTSUPP check after submit_bio(),
		 * even if it completes (and would otherwise be freed)
		 * before submit_bio() returns.
		 */
		bio_get(io->io_bio);
		submit_bio(io->io_op, io->io_bio);
		BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
		bio_put(io->io_bio);
	}
	io->io_bio = NULL;
	io->io_op = 0;
	io->io_end = NULL;
}

static int io_submit_init(struct ext4_io_submit *io,
			  struct inode *inode,
			  struct writeback_control *wbc,
			  struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	struct page *page = bh->b_page;
	int nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio *bio;

	io_end = ext4_init_io_end(inode, GFP_NOFS);
	if (!io_end)
		return -ENOMEM;
	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_private = io->io_end = io_end;
	bio->bi_end_io = ext4_end_bio;

	io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);

	io->io_bio = bio;
	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
	io->io_next_block = bh->b_blocknr;
	return 0;
}
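/*
 * Worked example for the block-to-sector arithmetic above, assuming
 * 4K blocks and 512-byte sectors (values are illustrative): with
 * bh->b_size == 4096, bh->b_size >> 9 == 8, so logical block N lands
 * at bi_sector == N * 8; io_end->offset is the byte offset of the
 * buffer within the file, i.e. page->index * PAGE_CACHE_SIZE plus
 * the buffer's offset inside the page.
 */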
static int io_submit_add_bh(struct ext4_io_submit *io,
			    struct ext4_io_page *io_page,
			    struct inode *inode,
			    struct writeback_control *wbc,
			    struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	int ret;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
	}

	if (!buffer_mapped(bh) || buffer_delay(bh)) {
		if (!buffer_mapped(bh))
			clear_buffer_dirty(bh);
		if (io->io_bio)
			ext4_io_submit(io);
		return 0;
	}

	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		ret = io_submit_init(io, inode, wbc, bh);
		if (ret)
			return ret;
	}
	io_end = io->io_end;
	if ((io_end->num_io_pages >= MAX_IO_PAGES) &&
	    (io_end->pages[io_end->num_io_pages - 1] != io_page))
		goto submit_and_retry;
	if (buffer_uninit(bh))
		io->io_end->flag |= EXT4_IO_END_UNWRITTEN;
	io->io_end->size += bh->b_size;
	io->io_next_block++;
	ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	if ((io_end->num_io_pages == 0) ||
	    (io_end->pages[io_end->num_io_pages - 1] != io_page)) {
		io_end->pages[io_end->num_io_pages++] = io_page;
		atomic_inc(&io_page->p_count);
	}
	return 0;
}

int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		block_end = block_start + blocksize;
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O.  In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	put_io_page(io_page);
	return ret;
}
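/*
 * A minimal sketch of the intended calling sequence for this file's
 * submission API (the loop below is simplified and hypothetical; the
 * real driver of this path is the delalloc writeback code in
 * fs/ext4/inode.c):
 *
 *	struct ext4_io_submit io;
 *
 *	memset(&io, 0, sizeof(io));
 *	for_each_dirty_page(page) {		// hypothetical helper
 *		lock_page(page);
 *		ext4_bio_write_page(&io, page, PAGE_CACHE_SIZE, wbc);
 *	}
 *	ext4_io_submit(&io);	// flush any bio still being built
 */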