11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * fs/direct-io.c 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright (C) 2002, Linus Torvalds. 51da177e4SLinus Torvalds * 61da177e4SLinus Torvalds * O_DIRECT 71da177e4SLinus Torvalds * 8e1f8e874SFrancois Cami * 04Jul2002 Andrew Morton 91da177e4SLinus Torvalds * Initial version 101da177e4SLinus Torvalds * 11Sep2002 janetinc@us.ibm.com 111da177e4SLinus Torvalds * added readv/writev support. 12e1f8e874SFrancois Cami * 29Oct2002 Andrew Morton 131da177e4SLinus Torvalds * rewrote bio_add_page() support. 141da177e4SLinus Torvalds * 30Oct2002 pbadari@us.ibm.com 151da177e4SLinus Torvalds * added support for non-aligned IO. 161da177e4SLinus Torvalds * 06Nov2002 pbadari@us.ibm.com 171da177e4SLinus Torvalds * added asynchronous IO support. 181da177e4SLinus Torvalds * 21Jul2003 nathans@sgi.com 191da177e4SLinus Torvalds * added IO completion notifier. 201da177e4SLinus Torvalds */ 211da177e4SLinus Torvalds 221da177e4SLinus Torvalds #include <linux/kernel.h> 231da177e4SLinus Torvalds #include <linux/module.h> 241da177e4SLinus Torvalds #include <linux/types.h> 251da177e4SLinus Torvalds #include <linux/fs.h> 261da177e4SLinus Torvalds #include <linux/mm.h> 271da177e4SLinus Torvalds #include <linux/slab.h> 281da177e4SLinus Torvalds #include <linux/highmem.h> 291da177e4SLinus Torvalds #include <linux/pagemap.h> 3098c4d57dSAndrew Morton #include <linux/task_io_accounting_ops.h> 311da177e4SLinus Torvalds #include <linux/bio.h> 321da177e4SLinus Torvalds #include <linux/wait.h> 331da177e4SLinus Torvalds #include <linux/err.h> 341da177e4SLinus Torvalds #include <linux/blkdev.h> 351da177e4SLinus Torvalds #include <linux/buffer_head.h> 361da177e4SLinus Torvalds #include <linux/rwsem.h> 371da177e4SLinus Torvalds #include <linux/uio.h> 3860063497SArun Sharma #include <linux/atomic.h> 3965dd2aa9SAndi Kleen #include <linux/prefetch.h> 401da177e4SLinus Torvalds 411da177e4SLinus Torvalds /* 421da177e4SLinus Torvalds * How many user pages to map in one call to get_user_pages(). This determines 43cde1ecb3SAndi Kleen * the size of a structure in the slab cache 441da177e4SLinus Torvalds */ 451da177e4SLinus Torvalds #define DIO_PAGES 64 461da177e4SLinus Torvalds 471da177e4SLinus Torvalds /* 481da177e4SLinus Torvalds * This code generally works in units of "dio_blocks". A dio_block is 491da177e4SLinus Torvalds * somewhere between the hard sector size and the filesystem block size. it 501da177e4SLinus Torvalds * is determined on a per-invocation basis. When talking to the filesystem 511da177e4SLinus Torvalds * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity 521da177e4SLinus Torvalds * down by dio->blkfactor. Similarly, fs-blocksize quantities are converted 531da177e4SLinus Torvalds * to bio_block quantities by shifting left by blkfactor. 541da177e4SLinus Torvalds * 551da177e4SLinus Torvalds * If blkfactor is zero then the user's request was aligned to the filesystem's 561da177e4SLinus Torvalds * blocksize. 571da177e4SLinus Torvalds */ 581da177e4SLinus Torvalds 59eb28be2bSAndi Kleen /* dio_state only used in the submission path */ 60eb28be2bSAndi Kleen 61eb28be2bSAndi Kleen struct dio_submit { 621da177e4SLinus Torvalds struct bio *bio; /* bio under assembly */ 631da177e4SLinus Torvalds unsigned blkbits; /* doesn't change */ 641da177e4SLinus Torvalds unsigned blkfactor; /* When we're using an alignment which 651da177e4SLinus Torvalds is finer than the filesystem's soft 661da177e4SLinus Torvalds blocksize, this specifies how much 671da177e4SLinus Torvalds finer. blkfactor=2 means 1/4-block 681da177e4SLinus Torvalds alignment. Does not change */ 691da177e4SLinus Torvalds unsigned start_zero_done; /* flag: sub-blocksize zeroing has 701da177e4SLinus Torvalds been performed at the start of a 711da177e4SLinus Torvalds write */ 721da177e4SLinus Torvalds int pages_in_io; /* approximate total IO pages */ 731da177e4SLinus Torvalds sector_t block_in_file; /* Current offset into the underlying 741da177e4SLinus Torvalds file in dio_block units. */ 751da177e4SLinus Torvalds unsigned blocks_available; /* At block_in_file. changes */ 760dc2bc49SAndi Kleen int reap_counter; /* rate limit reaping */ 771da177e4SLinus Torvalds sector_t final_block_in_request;/* doesn't change */ 781da177e4SLinus Torvalds int boundary; /* prev block is at a boundary */ 791d8fa7a2SBadari Pulavarty get_block_t *get_block; /* block mapping function */ 80facd07b0SJosef Bacik dio_submit_t *submit_io; /* IO submition function */ 81eb28be2bSAndi Kleen 82facd07b0SJosef Bacik loff_t logical_offset_in_bio; /* current first logical block in bio */ 831da177e4SLinus Torvalds sector_t final_block_in_bio; /* current final block in bio + 1 */ 841da177e4SLinus Torvalds sector_t next_block_for_io; /* next block to be put under IO, 851da177e4SLinus Torvalds in dio_blocks units */ 861da177e4SLinus Torvalds 871da177e4SLinus Torvalds /* 881da177e4SLinus Torvalds * Deferred addition of a page to the dio. These variables are 891da177e4SLinus Torvalds * private to dio_send_cur_page(), submit_page_section() and 901da177e4SLinus Torvalds * dio_bio_add_page(). 911da177e4SLinus Torvalds */ 921da177e4SLinus Torvalds struct page *cur_page; /* The page */ 931da177e4SLinus Torvalds unsigned cur_page_offset; /* Offset into it, in bytes */ 941da177e4SLinus Torvalds unsigned cur_page_len; /* Nr of bytes at cur_page_offset */ 951da177e4SLinus Torvalds sector_t cur_page_block; /* Where it starts */ 96facd07b0SJosef Bacik loff_t cur_page_fs_offset; /* Offset in file */ 971da177e4SLinus Torvalds 987b2c99d1SAl Viro struct iov_iter *iter; 9923aee091SJeff Moyer /* 10023aee091SJeff Moyer * Page queue. These variables belong to dio_refill_pages() and 10123aee091SJeff Moyer * dio_get_page(). 10223aee091SJeff Moyer */ 10323aee091SJeff Moyer unsigned head; /* next page to process */ 10423aee091SJeff Moyer unsigned tail; /* last valid page + 1 */ 1057b2c99d1SAl Viro size_t from, to; 106eb28be2bSAndi Kleen }; 107eb28be2bSAndi Kleen 108eb28be2bSAndi Kleen /* dio_state communicated between submission path and end_io */ 109eb28be2bSAndi Kleen struct dio { 110eb28be2bSAndi Kleen int flags; /* doesn't change */ 111*8a4c1e42SMike Christie int op; 112*8a4c1e42SMike Christie int op_flags; 11315c4f638SJens Axboe blk_qc_t bio_cookie; 11415c4f638SJens Axboe struct block_device *bio_bdev; 1150dc2bc49SAndi Kleen struct inode *inode; 116eb28be2bSAndi Kleen loff_t i_size; /* i_size when submitted */ 117eb28be2bSAndi Kleen dio_iodone_t *end_io; /* IO completion function */ 118eb28be2bSAndi Kleen 11918772641SAndi Kleen void *private; /* copy from map_bh.b_private */ 120eb28be2bSAndi Kleen 121eb28be2bSAndi Kleen /* BIO completion state */ 122eb28be2bSAndi Kleen spinlock_t bio_lock; /* protects BIO fields below */ 1230dc2bc49SAndi Kleen int page_errors; /* errno from get_user_pages() */ 1240dc2bc49SAndi Kleen int is_async; /* is IO async ? */ 1257b7a8665SChristoph Hellwig bool defer_completion; /* defer AIO completion to workqueue? */ 12653cbf3b1SMing Lei bool should_dirty; /* if pages should be dirtied */ 1270dc2bc49SAndi Kleen int io_error; /* IO error in completion path */ 128eb28be2bSAndi Kleen unsigned long refcount; /* direct_io_worker() and bios */ 129eb28be2bSAndi Kleen struct bio *bio_list; /* singly linked via bi_private */ 130eb28be2bSAndi Kleen struct task_struct *waiter; /* waiting task (NULL if none) */ 131eb28be2bSAndi Kleen 132eb28be2bSAndi Kleen /* AIO related stuff */ 133eb28be2bSAndi Kleen struct kiocb *iocb; /* kiocb */ 134eb28be2bSAndi Kleen ssize_t result; /* IO result */ 135eb28be2bSAndi Kleen 13623aee091SJeff Moyer /* 13723aee091SJeff Moyer * pages[] (and any fields placed after it) are not zeroed out at 13823aee091SJeff Moyer * allocation time. Don't add new fields after pages[] unless you 13923aee091SJeff Moyer * wish that they not be zeroed. 14023aee091SJeff Moyer */ 1417b7a8665SChristoph Hellwig union { 14223aee091SJeff Moyer struct page *pages[DIO_PAGES]; /* page buffer */ 1437b7a8665SChristoph Hellwig struct work_struct complete_work;/* deferred AIO completion */ 1447b7a8665SChristoph Hellwig }; 1456e8267f5SAndi Kleen } ____cacheline_aligned_in_smp; 1466e8267f5SAndi Kleen 1476e8267f5SAndi Kleen static struct kmem_cache *dio_cache __read_mostly; 1481da177e4SLinus Torvalds 1491da177e4SLinus Torvalds /* 1501da177e4SLinus Torvalds * How many pages are in the queue? 1511da177e4SLinus Torvalds */ 152eb28be2bSAndi Kleen static inline unsigned dio_pages_present(struct dio_submit *sdio) 1531da177e4SLinus Torvalds { 154eb28be2bSAndi Kleen return sdio->tail - sdio->head; 1551da177e4SLinus Torvalds } 1561da177e4SLinus Torvalds 1571da177e4SLinus Torvalds /* 1581da177e4SLinus Torvalds * Go grab and pin some userspace pages. Typically we'll get 64 at a time. 1591da177e4SLinus Torvalds */ 160ba253fbfSAndi Kleen static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio) 1611da177e4SLinus Torvalds { 1627b2c99d1SAl Viro ssize_t ret; 1631da177e4SLinus Torvalds 1642c80929cSMiklos Szeredi ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES, 1657b2c99d1SAl Viro &sdio->from); 1661da177e4SLinus Torvalds 167*8a4c1e42SMike Christie if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) { 168557ed1faSNick Piggin struct page *page = ZERO_PAGE(0); 1691da177e4SLinus Torvalds /* 1701da177e4SLinus Torvalds * A memory fault, but the filesystem has some outstanding 1711da177e4SLinus Torvalds * mapped blocks. We need to use those blocks up to avoid 1721da177e4SLinus Torvalds * leaking stale data in the file. 1731da177e4SLinus Torvalds */ 1741da177e4SLinus Torvalds if (dio->page_errors == 0) 1751da177e4SLinus Torvalds dio->page_errors = ret; 17609cbfeafSKirill A. Shutemov get_page(page); 177b5810039SNick Piggin dio->pages[0] = page; 178eb28be2bSAndi Kleen sdio->head = 0; 179eb28be2bSAndi Kleen sdio->tail = 1; 1807b2c99d1SAl Viro sdio->from = 0; 1817b2c99d1SAl Viro sdio->to = PAGE_SIZE; 1827b2c99d1SAl Viro return 0; 1831da177e4SLinus Torvalds } 1841da177e4SLinus Torvalds 1851da177e4SLinus Torvalds if (ret >= 0) { 1867b2c99d1SAl Viro iov_iter_advance(sdio->iter, ret); 1877b2c99d1SAl Viro ret += sdio->from; 188eb28be2bSAndi Kleen sdio->head = 0; 1897b2c99d1SAl Viro sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE; 1907b2c99d1SAl Viro sdio->to = ((ret - 1) & (PAGE_SIZE - 1)) + 1; 1917b2c99d1SAl Viro return 0; 1921da177e4SLinus Torvalds } 1931da177e4SLinus Torvalds return ret; 1941da177e4SLinus Torvalds } 1951da177e4SLinus Torvalds 1961da177e4SLinus Torvalds /* 1971da177e4SLinus Torvalds * Get another userspace page. Returns an ERR_PTR on error. Pages are 1981da177e4SLinus Torvalds * buffered inside the dio so that we can call get_user_pages() against a 1991da177e4SLinus Torvalds * decent number of pages, less frequently. To provide nicer use of the 2001da177e4SLinus Torvalds * L1 cache. 2011da177e4SLinus Torvalds */ 202ba253fbfSAndi Kleen static inline struct page *dio_get_page(struct dio *dio, 2036fcc5420SBoaz Harrosh struct dio_submit *sdio) 2041da177e4SLinus Torvalds { 205eb28be2bSAndi Kleen if (dio_pages_present(sdio) == 0) { 2061da177e4SLinus Torvalds int ret; 2071da177e4SLinus Torvalds 208eb28be2bSAndi Kleen ret = dio_refill_pages(dio, sdio); 2091da177e4SLinus Torvalds if (ret) 2101da177e4SLinus Torvalds return ERR_PTR(ret); 211eb28be2bSAndi Kleen BUG_ON(dio_pages_present(sdio) == 0); 2121da177e4SLinus Torvalds } 2136fcc5420SBoaz Harrosh return dio->pages[sdio->head]; 2141da177e4SLinus Torvalds } 2151da177e4SLinus Torvalds 2166d544bb4SZach Brown /** 2176d544bb4SZach Brown * dio_complete() - called when all DIO BIO I/O has been completed 2186d544bb4SZach Brown * @offset: the byte offset in the file of the completed operation 2196d544bb4SZach Brown * 2207b7a8665SChristoph Hellwig * This drops i_dio_count, lets interested parties know that a DIO operation 2217b7a8665SChristoph Hellwig * has completed, and calculates the resulting return code for the operation. 2226d544bb4SZach Brown * 2236d544bb4SZach Brown * It lets the filesystem know if it registered an interest earlier via 2246d544bb4SZach Brown * get_block. Pass the private field of the map buffer_head so that 2256d544bb4SZach Brown * filesystems can use it to hold additional state between get_block calls and 2266d544bb4SZach Brown * dio_complete. 2271da177e4SLinus Torvalds */ 228716b9bc0SChristoph Hellwig static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async) 2291da177e4SLinus Torvalds { 230716b9bc0SChristoph Hellwig loff_t offset = dio->iocb->ki_pos; 2316d544bb4SZach Brown ssize_t transferred = 0; 2326d544bb4SZach Brown 2338459d86aSZach Brown /* 2348459d86aSZach Brown * AIO submission can race with bio completion to get here while 2358459d86aSZach Brown * expecting to have the last io completed by bio completion. 2368459d86aSZach Brown * In that case -EIOCBQUEUED is in fact not an error we want 2378459d86aSZach Brown * to preserve through this call. 2388459d86aSZach Brown */ 2398459d86aSZach Brown if (ret == -EIOCBQUEUED) 2408459d86aSZach Brown ret = 0; 2418459d86aSZach Brown 2426d544bb4SZach Brown if (dio->result) { 2436d544bb4SZach Brown transferred = dio->result; 2446d544bb4SZach Brown 2456d544bb4SZach Brown /* Check for short read case */ 246*8a4c1e42SMike Christie if ((dio->op == REQ_OP_READ) && 247*8a4c1e42SMike Christie ((offset + transferred) > dio->i_size)) 2486d544bb4SZach Brown transferred = dio->i_size - offset; 2496d544bb4SZach Brown } 2506d544bb4SZach Brown 2516d544bb4SZach Brown if (ret == 0) 2526d544bb4SZach Brown ret = dio->page_errors; 2536d544bb4SZach Brown if (ret == 0) 2546d544bb4SZach Brown ret = dio->io_error; 2556d544bb4SZach Brown if (ret == 0) 2566d544bb4SZach Brown ret = transferred; 2576d544bb4SZach Brown 258187372a3SChristoph Hellwig if (dio->end_io) { 259187372a3SChristoph Hellwig int err; 260187372a3SChristoph Hellwig 261e2592217SChristoph Hellwig // XXX: ki_pos?? 262187372a3SChristoph Hellwig err = dio->end_io(dio->iocb, offset, ret, dio->private); 263187372a3SChristoph Hellwig if (err) 264187372a3SChristoph Hellwig ret = err; 265187372a3SChristoph Hellwig } 2667b7a8665SChristoph Hellwig 267fe0f07d0SJens Axboe if (!(dio->flags & DIO_SKIP_DIO_COUNT)) 268fe0f07d0SJens Axboe inode_dio_end(dio->inode); 269fe0f07d0SJens Axboe 27002afc27fSChristoph Hellwig if (is_async) { 271e2592217SChristoph Hellwig /* 272e2592217SChristoph Hellwig * generic_write_sync expects ki_pos to have been updated 273e2592217SChristoph Hellwig * already, but the submission path only does this for 274e2592217SChristoph Hellwig * synchronous I/O. 275e2592217SChristoph Hellwig */ 276e2592217SChristoph Hellwig dio->iocb->ki_pos += transferred; 27702afc27fSChristoph Hellwig 278*8a4c1e42SMike Christie if (dio->op == REQ_OP_WRITE) 279e2592217SChristoph Hellwig ret = generic_write_sync(dio->iocb, transferred); 28004b2fa9fSChristoph Hellwig dio->iocb->ki_complete(dio->iocb, ret, 0); 28102afc27fSChristoph Hellwig } 28240e2e973SChristoph Hellwig 2837b7a8665SChristoph Hellwig kmem_cache_free(dio_cache, dio); 2846d544bb4SZach Brown return ret; 2851da177e4SLinus Torvalds } 2861da177e4SLinus Torvalds 2877b7a8665SChristoph Hellwig static void dio_aio_complete_work(struct work_struct *work) 2887b7a8665SChristoph Hellwig { 2897b7a8665SChristoph Hellwig struct dio *dio = container_of(work, struct dio, complete_work); 2907b7a8665SChristoph Hellwig 291716b9bc0SChristoph Hellwig dio_complete(dio, 0, true); 2927b7a8665SChristoph Hellwig } 2937b7a8665SChristoph Hellwig 2941da177e4SLinus Torvalds static int dio_bio_complete(struct dio *dio, struct bio *bio); 2957b7a8665SChristoph Hellwig 2961da177e4SLinus Torvalds /* 2971da177e4SLinus Torvalds * Asynchronous IO callback. 2981da177e4SLinus Torvalds */ 2994246a0b6SChristoph Hellwig static void dio_bio_end_aio(struct bio *bio) 3001da177e4SLinus Torvalds { 3011da177e4SLinus Torvalds struct dio *dio = bio->bi_private; 3025eb6c7a2SZach Brown unsigned long remaining; 3035eb6c7a2SZach Brown unsigned long flags; 3041da177e4SLinus Torvalds 3051da177e4SLinus Torvalds /* cleanup the bio */ 3061da177e4SLinus Torvalds dio_bio_complete(dio, bio); 3070273201eSZach Brown 3085eb6c7a2SZach Brown spin_lock_irqsave(&dio->bio_lock, flags); 3095eb6c7a2SZach Brown remaining = --dio->refcount; 3105eb6c7a2SZach Brown if (remaining == 1 && dio->waiter) 31120258b2bSZach Brown wake_up_process(dio->waiter); 3125eb6c7a2SZach Brown spin_unlock_irqrestore(&dio->bio_lock, flags); 31320258b2bSZach Brown 3148459d86aSZach Brown if (remaining == 0) { 3157b7a8665SChristoph Hellwig if (dio->result && dio->defer_completion) { 3167b7a8665SChristoph Hellwig INIT_WORK(&dio->complete_work, dio_aio_complete_work); 3177b7a8665SChristoph Hellwig queue_work(dio->inode->i_sb->s_dio_done_wq, 3187b7a8665SChristoph Hellwig &dio->complete_work); 3197b7a8665SChristoph Hellwig } else { 320716b9bc0SChristoph Hellwig dio_complete(dio, 0, true); 3217b7a8665SChristoph Hellwig } 3228459d86aSZach Brown } 3231da177e4SLinus Torvalds } 3241da177e4SLinus Torvalds 3251da177e4SLinus Torvalds /* 3261da177e4SLinus Torvalds * The BIO completion handler simply queues the BIO up for the process-context 3271da177e4SLinus Torvalds * handler. 3281da177e4SLinus Torvalds * 3291da177e4SLinus Torvalds * During I/O bi_private points at the dio. After I/O, bi_private is used to 3301da177e4SLinus Torvalds * implement a singly-linked list of completed BIOs, at dio->bio_list. 3311da177e4SLinus Torvalds */ 3324246a0b6SChristoph Hellwig static void dio_bio_end_io(struct bio *bio) 3331da177e4SLinus Torvalds { 3341da177e4SLinus Torvalds struct dio *dio = bio->bi_private; 3351da177e4SLinus Torvalds unsigned long flags; 3361da177e4SLinus Torvalds 3371da177e4SLinus Torvalds spin_lock_irqsave(&dio->bio_lock, flags); 3381da177e4SLinus Torvalds bio->bi_private = dio->bio_list; 3391da177e4SLinus Torvalds dio->bio_list = bio; 3405eb6c7a2SZach Brown if (--dio->refcount == 1 && dio->waiter) 3411da177e4SLinus Torvalds wake_up_process(dio->waiter); 3421da177e4SLinus Torvalds spin_unlock_irqrestore(&dio->bio_lock, flags); 3431da177e4SLinus Torvalds } 3441da177e4SLinus Torvalds 345facd07b0SJosef Bacik /** 346facd07b0SJosef Bacik * dio_end_io - handle the end io action for the given bio 347facd07b0SJosef Bacik * @bio: The direct io bio thats being completed 348facd07b0SJosef Bacik * @error: Error if there was one 349facd07b0SJosef Bacik * 350facd07b0SJosef Bacik * This is meant to be called by any filesystem that uses their own dio_submit_t 351facd07b0SJosef Bacik * so that the DIO specific endio actions are dealt with after the filesystem 352facd07b0SJosef Bacik * has done it's completion work. 353facd07b0SJosef Bacik */ 354facd07b0SJosef Bacik void dio_end_io(struct bio *bio, int error) 355facd07b0SJosef Bacik { 356facd07b0SJosef Bacik struct dio *dio = bio->bi_private; 357facd07b0SJosef Bacik 358facd07b0SJosef Bacik if (dio->is_async) 3594246a0b6SChristoph Hellwig dio_bio_end_aio(bio); 360facd07b0SJosef Bacik else 3614246a0b6SChristoph Hellwig dio_bio_end_io(bio); 362facd07b0SJosef Bacik } 363facd07b0SJosef Bacik EXPORT_SYMBOL_GPL(dio_end_io); 364facd07b0SJosef Bacik 365ba253fbfSAndi Kleen static inline void 366eb28be2bSAndi Kleen dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, 367eb28be2bSAndi Kleen struct block_device *bdev, 3681da177e4SLinus Torvalds sector_t first_sector, int nr_vecs) 3691da177e4SLinus Torvalds { 3701da177e4SLinus Torvalds struct bio *bio; 3711da177e4SLinus Torvalds 37220d9600cSDavid Dillow /* 37320d9600cSDavid Dillow * bio_alloc() is guaranteed to return a bio when called with 37471baba4bSMel Gorman * __GFP_RECLAIM and we request a valid number of vectors. 37520d9600cSDavid Dillow */ 3761da177e4SLinus Torvalds bio = bio_alloc(GFP_KERNEL, nr_vecs); 3771da177e4SLinus Torvalds 3781da177e4SLinus Torvalds bio->bi_bdev = bdev; 3794f024f37SKent Overstreet bio->bi_iter.bi_sector = first_sector; 380*8a4c1e42SMike Christie bio_set_op_attrs(bio, dio->op, dio->op_flags); 3811da177e4SLinus Torvalds if (dio->is_async) 3821da177e4SLinus Torvalds bio->bi_end_io = dio_bio_end_aio; 3831da177e4SLinus Torvalds else 3841da177e4SLinus Torvalds bio->bi_end_io = dio_bio_end_io; 3851da177e4SLinus Torvalds 386eb28be2bSAndi Kleen sdio->bio = bio; 387eb28be2bSAndi Kleen sdio->logical_offset_in_bio = sdio->cur_page_fs_offset; 3881da177e4SLinus Torvalds } 3891da177e4SLinus Torvalds 3901da177e4SLinus Torvalds /* 3911da177e4SLinus Torvalds * In the AIO read case we speculatively dirty the pages before starting IO. 3921da177e4SLinus Torvalds * During IO completion, any of these pages which happen to have been written 3931da177e4SLinus Torvalds * back will be redirtied by bio_check_pages_dirty(). 3940273201eSZach Brown * 3950273201eSZach Brown * bios hold a dio reference between submit_bio and ->end_io. 3961da177e4SLinus Torvalds */ 397ba253fbfSAndi Kleen static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) 3981da177e4SLinus Torvalds { 399eb28be2bSAndi Kleen struct bio *bio = sdio->bio; 4005eb6c7a2SZach Brown unsigned long flags; 4011da177e4SLinus Torvalds 4021da177e4SLinus Torvalds bio->bi_private = dio; 4035eb6c7a2SZach Brown 4045eb6c7a2SZach Brown spin_lock_irqsave(&dio->bio_lock, flags); 4055eb6c7a2SZach Brown dio->refcount++; 4065eb6c7a2SZach Brown spin_unlock_irqrestore(&dio->bio_lock, flags); 4075eb6c7a2SZach Brown 408*8a4c1e42SMike Christie if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) 4091da177e4SLinus Torvalds bio_set_pages_dirty(bio); 4105eb6c7a2SZach Brown 411c1c53460SJens Axboe dio->bio_bdev = bio->bi_bdev; 412c1c53460SJens Axboe 41315c4f638SJens Axboe if (sdio->submit_io) { 414*8a4c1e42SMike Christie sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio); 41515c4f638SJens Axboe dio->bio_cookie = BLK_QC_T_NONE; 416c1c53460SJens Axboe } else 4174e49ea4aSMike Christie dio->bio_cookie = submit_bio(bio); 4181da177e4SLinus Torvalds 419eb28be2bSAndi Kleen sdio->bio = NULL; 420eb28be2bSAndi Kleen sdio->boundary = 0; 421eb28be2bSAndi Kleen sdio->logical_offset_in_bio = 0; 4221da177e4SLinus Torvalds } 4231da177e4SLinus Torvalds 4241da177e4SLinus Torvalds /* 4251da177e4SLinus Torvalds * Release any resources in case of a failure 4261da177e4SLinus Torvalds */ 427ba253fbfSAndi Kleen static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio) 4281da177e4SLinus Torvalds { 4297b2c99d1SAl Viro while (sdio->head < sdio->tail) 43009cbfeafSKirill A. Shutemov put_page(dio->pages[sdio->head++]); 4311da177e4SLinus Torvalds } 4321da177e4SLinus Torvalds 4331da177e4SLinus Torvalds /* 4340273201eSZach Brown * Wait for the next BIO to complete. Remove it and return it. NULL is 4350273201eSZach Brown * returned once all BIOs have been completed. This must only be called once 4360273201eSZach Brown * all bios have been issued so that dio->refcount can only decrease. This 4370273201eSZach Brown * requires that that the caller hold a reference on the dio. 4381da177e4SLinus Torvalds */ 4391da177e4SLinus Torvalds static struct bio *dio_await_one(struct dio *dio) 4401da177e4SLinus Torvalds { 4411da177e4SLinus Torvalds unsigned long flags; 4420273201eSZach Brown struct bio *bio = NULL; 4431da177e4SLinus Torvalds 4441da177e4SLinus Torvalds spin_lock_irqsave(&dio->bio_lock, flags); 4455eb6c7a2SZach Brown 4465eb6c7a2SZach Brown /* 4475eb6c7a2SZach Brown * Wait as long as the list is empty and there are bios in flight. bio 4485eb6c7a2SZach Brown * completion drops the count, maybe adds to the list, and wakes while 4495eb6c7a2SZach Brown * holding the bio_lock so we don't need set_current_state()'s barrier 4505eb6c7a2SZach Brown * and can call it after testing our condition. 4515eb6c7a2SZach Brown */ 4525eb6c7a2SZach Brown while (dio->refcount > 1 && dio->bio_list == NULL) { 4535eb6c7a2SZach Brown __set_current_state(TASK_UNINTERRUPTIBLE); 4541da177e4SLinus Torvalds dio->waiter = current; 4551da177e4SLinus Torvalds spin_unlock_irqrestore(&dio->bio_lock, flags); 456c43c83a2SChristoph Hellwig if (!(dio->iocb->ki_flags & IOCB_HIPRI) || 457c43c83a2SChristoph Hellwig !blk_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie)) 4581da177e4SLinus Torvalds io_schedule(); 4595eb6c7a2SZach Brown /* wake up sets us TASK_RUNNING */ 4601da177e4SLinus Torvalds spin_lock_irqsave(&dio->bio_lock, flags); 4611da177e4SLinus Torvalds dio->waiter = NULL; 4621da177e4SLinus Torvalds } 4630273201eSZach Brown if (dio->bio_list) { 4641da177e4SLinus Torvalds bio = dio->bio_list; 4651da177e4SLinus Torvalds dio->bio_list = bio->bi_private; 4660273201eSZach Brown } 4671da177e4SLinus Torvalds spin_unlock_irqrestore(&dio->bio_lock, flags); 4681da177e4SLinus Torvalds return bio; 4691da177e4SLinus Torvalds } 4701da177e4SLinus Torvalds 4711da177e4SLinus Torvalds /* 4721da177e4SLinus Torvalds * Process one completed BIO. No locks are held. 4731da177e4SLinus Torvalds */ 4741da177e4SLinus Torvalds static int dio_bio_complete(struct dio *dio, struct bio *bio) 4751da177e4SLinus Torvalds { 476cb34e057SKent Overstreet struct bio_vec *bvec; 477cb34e057SKent Overstreet unsigned i; 4789b81c842SSasha Levin int err; 4791da177e4SLinus Torvalds 4804246a0b6SChristoph Hellwig if (bio->bi_error) 481174e27c6SChen, Kenneth W dio->io_error = -EIO; 4821da177e4SLinus Torvalds 483*8a4c1e42SMike Christie if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) { 4849b81c842SSasha Levin err = bio->bi_error; 4857ddc971fSMike Krinkin bio_check_pages_dirty(bio); /* transfers ownership */ 4861da177e4SLinus Torvalds } else { 487cb34e057SKent Overstreet bio_for_each_segment_all(bvec, bio, i) { 488cb34e057SKent Overstreet struct page *page = bvec->bv_page; 4891da177e4SLinus Torvalds 490*8a4c1e42SMike Christie if (dio->op == REQ_OP_READ && !PageCompound(page) && 49153cbf3b1SMing Lei dio->should_dirty) 4921da177e4SLinus Torvalds set_page_dirty_lock(page); 49309cbfeafSKirill A. Shutemov put_page(page); 4941da177e4SLinus Torvalds } 4959b81c842SSasha Levin err = bio->bi_error; 4961da177e4SLinus Torvalds bio_put(bio); 4971da177e4SLinus Torvalds } 4989b81c842SSasha Levin return err; 4991da177e4SLinus Torvalds } 5001da177e4SLinus Torvalds 5011da177e4SLinus Torvalds /* 5020273201eSZach Brown * Wait on and process all in-flight BIOs. This must only be called once 5030273201eSZach Brown * all bios have been issued so that the refcount can only decrease. 5040273201eSZach Brown * This just waits for all bios to make it through dio_bio_complete. IO 505beb7dd86SRobert P. J. Day * errors are propagated through dio->io_error and should be propagated via 5060273201eSZach Brown * dio_complete(). 5071da177e4SLinus Torvalds */ 5086d544bb4SZach Brown static void dio_await_completion(struct dio *dio) 5091da177e4SLinus Torvalds { 5100273201eSZach Brown struct bio *bio; 5110273201eSZach Brown do { 5120273201eSZach Brown bio = dio_await_one(dio); 5130273201eSZach Brown if (bio) 5146d544bb4SZach Brown dio_bio_complete(dio, bio); 5150273201eSZach Brown } while (bio); 5161da177e4SLinus Torvalds } 5171da177e4SLinus Torvalds 5181da177e4SLinus Torvalds /* 5191da177e4SLinus Torvalds * A really large O_DIRECT read or write can generate a lot of BIOs. So 5201da177e4SLinus Torvalds * to keep the memory consumption sane we periodically reap any completed BIOs 5211da177e4SLinus Torvalds * during the BIO generation phase. 5221da177e4SLinus Torvalds * 5231da177e4SLinus Torvalds * This also helps to limit the peak amount of pinned userspace memory. 5241da177e4SLinus Torvalds */ 525ba253fbfSAndi Kleen static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio) 5261da177e4SLinus Torvalds { 5271da177e4SLinus Torvalds int ret = 0; 5281da177e4SLinus Torvalds 529eb28be2bSAndi Kleen if (sdio->reap_counter++ >= 64) { 5301da177e4SLinus Torvalds while (dio->bio_list) { 5311da177e4SLinus Torvalds unsigned long flags; 5321da177e4SLinus Torvalds struct bio *bio; 5331da177e4SLinus Torvalds int ret2; 5341da177e4SLinus Torvalds 5351da177e4SLinus Torvalds spin_lock_irqsave(&dio->bio_lock, flags); 5361da177e4SLinus Torvalds bio = dio->bio_list; 5371da177e4SLinus Torvalds dio->bio_list = bio->bi_private; 5381da177e4SLinus Torvalds spin_unlock_irqrestore(&dio->bio_lock, flags); 5391da177e4SLinus Torvalds ret2 = dio_bio_complete(dio, bio); 5401da177e4SLinus Torvalds if (ret == 0) 5411da177e4SLinus Torvalds ret = ret2; 5421da177e4SLinus Torvalds } 543eb28be2bSAndi Kleen sdio->reap_counter = 0; 5441da177e4SLinus Torvalds } 5451da177e4SLinus Torvalds return ret; 5461da177e4SLinus Torvalds } 5471da177e4SLinus Torvalds 5481da177e4SLinus Torvalds /* 5497b7a8665SChristoph Hellwig * Create workqueue for deferred direct IO completions. We allocate the 5507b7a8665SChristoph Hellwig * workqueue when it's first needed. This avoids creating workqueue for 5517b7a8665SChristoph Hellwig * filesystems that don't need it and also allows us to create the workqueue 5527b7a8665SChristoph Hellwig * late enough so the we can include s_id in the name of the workqueue. 5537b7a8665SChristoph Hellwig */ 5547b7a8665SChristoph Hellwig static int sb_init_dio_done_wq(struct super_block *sb) 5557b7a8665SChristoph Hellwig { 55645150c43SOlof Johansson struct workqueue_struct *old; 5577b7a8665SChristoph Hellwig struct workqueue_struct *wq = alloc_workqueue("dio/%s", 5587b7a8665SChristoph Hellwig WQ_MEM_RECLAIM, 0, 5597b7a8665SChristoph Hellwig sb->s_id); 5607b7a8665SChristoph Hellwig if (!wq) 5617b7a8665SChristoph Hellwig return -ENOMEM; 5627b7a8665SChristoph Hellwig /* 5637b7a8665SChristoph Hellwig * This has to be atomic as more DIOs can race to create the workqueue 5647b7a8665SChristoph Hellwig */ 56545150c43SOlof Johansson old = cmpxchg(&sb->s_dio_done_wq, NULL, wq); 5667b7a8665SChristoph Hellwig /* Someone created workqueue before us? Free ours... */ 56745150c43SOlof Johansson if (old) 5687b7a8665SChristoph Hellwig destroy_workqueue(wq); 5697b7a8665SChristoph Hellwig return 0; 5707b7a8665SChristoph Hellwig } 5717b7a8665SChristoph Hellwig 5727b7a8665SChristoph Hellwig static int dio_set_defer_completion(struct dio *dio) 5737b7a8665SChristoph Hellwig { 5747b7a8665SChristoph Hellwig struct super_block *sb = dio->inode->i_sb; 5757b7a8665SChristoph Hellwig 5767b7a8665SChristoph Hellwig if (dio->defer_completion) 5777b7a8665SChristoph Hellwig return 0; 5787b7a8665SChristoph Hellwig dio->defer_completion = true; 5797b7a8665SChristoph Hellwig if (!sb->s_dio_done_wq) 5807b7a8665SChristoph Hellwig return sb_init_dio_done_wq(sb); 5817b7a8665SChristoph Hellwig return 0; 5827b7a8665SChristoph Hellwig } 5837b7a8665SChristoph Hellwig 5847b7a8665SChristoph Hellwig /* 5851da177e4SLinus Torvalds * Call into the fs to map some more disk blocks. We record the current number 586eb28be2bSAndi Kleen * of available blocks at sdio->blocks_available. These are in units of the 5871da177e4SLinus Torvalds * fs blocksize, (1 << inode->i_blkbits). 5881da177e4SLinus Torvalds * 5891da177e4SLinus Torvalds * The fs is allowed to map lots of blocks at once. If it wants to do that, 5901da177e4SLinus Torvalds * it uses the passed inode-relative block number as the file offset, as usual. 5911da177e4SLinus Torvalds * 5921d8fa7a2SBadari Pulavarty * get_block() is passed the number of i_blkbits-sized blocks which direct_io 5931da177e4SLinus Torvalds * has remaining to do. The fs should not map more than this number of blocks. 5941da177e4SLinus Torvalds * 5951da177e4SLinus Torvalds * If the fs has mapped a lot of blocks, it should populate bh->b_size to 5961da177e4SLinus Torvalds * indicate how much contiguous disk space has been made available at 5971da177e4SLinus Torvalds * bh->b_blocknr. 5981da177e4SLinus Torvalds * 5991da177e4SLinus Torvalds * If *any* of the mapped blocks are new, then the fs must set buffer_new(). 6001da177e4SLinus Torvalds * This isn't very efficient... 6011da177e4SLinus Torvalds * 6021da177e4SLinus Torvalds * In the case of filesystem holes: the fs may return an arbitrarily-large 6031da177e4SLinus Torvalds * hole by returning an appropriate value in b_size and by clearing 6041da177e4SLinus Torvalds * buffer_mapped(). However the direct-io code will only process holes one 6051d8fa7a2SBadari Pulavarty * block at a time - it will repeatedly call get_block() as it walks the hole. 6061da177e4SLinus Torvalds */ 60718772641SAndi Kleen static int get_more_blocks(struct dio *dio, struct dio_submit *sdio, 60818772641SAndi Kleen struct buffer_head *map_bh) 6091da177e4SLinus Torvalds { 6101da177e4SLinus Torvalds int ret; 6111da177e4SLinus Torvalds sector_t fs_startblk; /* Into file, in filesystem-sized blocks */ 612ae55e1aaSTao Ma sector_t fs_endblk; /* Into file, in filesystem-sized blocks */ 6131da177e4SLinus Torvalds unsigned long fs_count; /* Number of filesystem-sized blocks */ 6141da177e4SLinus Torvalds int create; 615ab73857eSLinus Torvalds unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor; 6161da177e4SLinus Torvalds 6171da177e4SLinus Torvalds /* 6181da177e4SLinus Torvalds * If there was a memory error and we've overwritten all the 6191da177e4SLinus Torvalds * mapped blocks then we can now return that memory error 6201da177e4SLinus Torvalds */ 6211da177e4SLinus Torvalds ret = dio->page_errors; 6221da177e4SLinus Torvalds if (ret == 0) { 623eb28be2bSAndi Kleen BUG_ON(sdio->block_in_file >= sdio->final_block_in_request); 624eb28be2bSAndi Kleen fs_startblk = sdio->block_in_file >> sdio->blkfactor; 625ae55e1aaSTao Ma fs_endblk = (sdio->final_block_in_request - 1) >> 626ae55e1aaSTao Ma sdio->blkfactor; 627ae55e1aaSTao Ma fs_count = fs_endblk - fs_startblk + 1; 6281da177e4SLinus Torvalds 6293c674e74SNathan Scott map_bh->b_state = 0; 630ab73857eSLinus Torvalds map_bh->b_size = fs_count << i_blkbits; 6313c674e74SNathan Scott 6325fe878aeSChristoph Hellwig /* 6339ecd10b7SEryu Guan * For writes that could fill holes inside i_size on a 6349ecd10b7SEryu Guan * DIO_SKIP_HOLES filesystem we forbid block creations: only 6359ecd10b7SEryu Guan * overwrites are permitted. We will return early to the caller 6369ecd10b7SEryu Guan * once we see an unmapped buffer head returned, and the caller 6379ecd10b7SEryu Guan * will fall back to buffered I/O. 6385fe878aeSChristoph Hellwig * 6395fe878aeSChristoph Hellwig * Otherwise the decision is left to the get_blocks method, 6405fe878aeSChristoph Hellwig * which may decide to handle it or also return an unmapped 6415fe878aeSChristoph Hellwig * buffer head. 6425fe878aeSChristoph Hellwig */ 643*8a4c1e42SMike Christie create = dio->op == REQ_OP_WRITE; 6445fe878aeSChristoph Hellwig if (dio->flags & DIO_SKIP_HOLES) { 6459ecd10b7SEryu Guan if (fs_startblk <= ((i_size_read(dio->inode) - 1) >> 6469ecd10b7SEryu Guan i_blkbits)) 6471da177e4SLinus Torvalds create = 0; 6481da177e4SLinus Torvalds } 6493c674e74SNathan Scott 650eb28be2bSAndi Kleen ret = (*sdio->get_block)(dio->inode, fs_startblk, 6511da177e4SLinus Torvalds map_bh, create); 65218772641SAndi Kleen 65318772641SAndi Kleen /* Store for completion */ 65418772641SAndi Kleen dio->private = map_bh->b_private; 6557b7a8665SChristoph Hellwig 6567b7a8665SChristoph Hellwig if (ret == 0 && buffer_defer_completion(map_bh)) 6577b7a8665SChristoph Hellwig ret = dio_set_defer_completion(dio); 6581da177e4SLinus Torvalds } 6591da177e4SLinus Torvalds return ret; 6601da177e4SLinus Torvalds } 6611da177e4SLinus Torvalds 6621da177e4SLinus Torvalds /* 6631da177e4SLinus Torvalds * There is no bio. Make one now. 6641da177e4SLinus Torvalds */ 665ba253fbfSAndi Kleen static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio, 66618772641SAndi Kleen sector_t start_sector, struct buffer_head *map_bh) 6671da177e4SLinus Torvalds { 6681da177e4SLinus Torvalds sector_t sector; 6691da177e4SLinus Torvalds int ret, nr_pages; 6701da177e4SLinus Torvalds 671eb28be2bSAndi Kleen ret = dio_bio_reap(dio, sdio); 6721da177e4SLinus Torvalds if (ret) 6731da177e4SLinus Torvalds goto out; 674eb28be2bSAndi Kleen sector = start_sector << (sdio->blkbits - 9); 675b54ffb73SKent Overstreet nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES); 6761da177e4SLinus Torvalds BUG_ON(nr_pages <= 0); 67718772641SAndi Kleen dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages); 678eb28be2bSAndi Kleen sdio->boundary = 0; 6791da177e4SLinus Torvalds out: 6801da177e4SLinus Torvalds return ret; 6811da177e4SLinus Torvalds } 6821da177e4SLinus Torvalds 6831da177e4SLinus Torvalds /* 6841da177e4SLinus Torvalds * Attempt to put the current chunk of 'cur_page' into the current BIO. If 6851da177e4SLinus Torvalds * that was successful then update final_block_in_bio and take a ref against 6861da177e4SLinus Torvalds * the just-added page. 6871da177e4SLinus Torvalds * 6881da177e4SLinus Torvalds * Return zero on success. Non-zero means the caller needs to start a new BIO. 6891da177e4SLinus Torvalds */ 690ba253fbfSAndi Kleen static inline int dio_bio_add_page(struct dio_submit *sdio) 6911da177e4SLinus Torvalds { 6921da177e4SLinus Torvalds int ret; 6931da177e4SLinus Torvalds 694eb28be2bSAndi Kleen ret = bio_add_page(sdio->bio, sdio->cur_page, 695eb28be2bSAndi Kleen sdio->cur_page_len, sdio->cur_page_offset); 696eb28be2bSAndi Kleen if (ret == sdio->cur_page_len) { 6971da177e4SLinus Torvalds /* 6981da177e4SLinus Torvalds * Decrement count only, if we are done with this page 6991da177e4SLinus Torvalds */ 700eb28be2bSAndi Kleen if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE) 701eb28be2bSAndi Kleen sdio->pages_in_io--; 70209cbfeafSKirill A. Shutemov get_page(sdio->cur_page); 703eb28be2bSAndi Kleen sdio->final_block_in_bio = sdio->cur_page_block + 704eb28be2bSAndi Kleen (sdio->cur_page_len >> sdio->blkbits); 7051da177e4SLinus Torvalds ret = 0; 7061da177e4SLinus Torvalds } else { 7071da177e4SLinus Torvalds ret = 1; 7081da177e4SLinus Torvalds } 7091da177e4SLinus Torvalds return ret; 7101da177e4SLinus Torvalds } 7111da177e4SLinus Torvalds 7121da177e4SLinus Torvalds /* 7131da177e4SLinus Torvalds * Put cur_page under IO. The section of cur_page which is described by 7141da177e4SLinus Torvalds * cur_page_offset,cur_page_len is put into a BIO. The section of cur_page 7151da177e4SLinus Torvalds * starts on-disk at cur_page_block. 7161da177e4SLinus Torvalds * 7171da177e4SLinus Torvalds * We take a ref against the page here (on behalf of its presence in the bio). 7181da177e4SLinus Torvalds * 7191da177e4SLinus Torvalds * The caller of this function is responsible for removing cur_page from the 7201da177e4SLinus Torvalds * dio, and for dropping the refcount which came from that presence. 7211da177e4SLinus Torvalds */ 722ba253fbfSAndi Kleen static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio, 72318772641SAndi Kleen struct buffer_head *map_bh) 7241da177e4SLinus Torvalds { 7251da177e4SLinus Torvalds int ret = 0; 7261da177e4SLinus Torvalds 727eb28be2bSAndi Kleen if (sdio->bio) { 728eb28be2bSAndi Kleen loff_t cur_offset = sdio->cur_page_fs_offset; 729eb28be2bSAndi Kleen loff_t bio_next_offset = sdio->logical_offset_in_bio + 7304f024f37SKent Overstreet sdio->bio->bi_iter.bi_size; 731c2c6ca41SJosef Bacik 7321da177e4SLinus Torvalds /* 733c2c6ca41SJosef Bacik * See whether this new request is contiguous with the old. 734c2c6ca41SJosef Bacik * 735f0940ceeSNamhyung Kim * Btrfs cannot handle having logically non-contiguous requests 736f0940ceeSNamhyung Kim * submitted. For example if you have 737c2c6ca41SJosef Bacik * 738c2c6ca41SJosef Bacik * Logical: [0-4095][HOLE][8192-12287] 739f0940ceeSNamhyung Kim * Physical: [0-4095] [4096-8191] 740c2c6ca41SJosef Bacik * 741c2c6ca41SJosef Bacik * We cannot submit those pages together as one BIO. So if our 742c2c6ca41SJosef Bacik * current logical offset in the file does not equal what would 743c2c6ca41SJosef Bacik * be the next logical offset in the bio, submit the bio we 744c2c6ca41SJosef Bacik * have. 7451da177e4SLinus Torvalds */ 746eb28be2bSAndi Kleen if (sdio->final_block_in_bio != sdio->cur_page_block || 747c2c6ca41SJosef Bacik cur_offset != bio_next_offset) 748eb28be2bSAndi Kleen dio_bio_submit(dio, sdio); 7491da177e4SLinus Torvalds } 7501da177e4SLinus Torvalds 751eb28be2bSAndi Kleen if (sdio->bio == NULL) { 75218772641SAndi Kleen ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh); 7531da177e4SLinus Torvalds if (ret) 7541da177e4SLinus Torvalds goto out; 7551da177e4SLinus Torvalds } 7561da177e4SLinus Torvalds 757eb28be2bSAndi Kleen if (dio_bio_add_page(sdio) != 0) { 758eb28be2bSAndi Kleen dio_bio_submit(dio, sdio); 75918772641SAndi Kleen ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh); 7601da177e4SLinus Torvalds if (ret == 0) { 761eb28be2bSAndi Kleen ret = dio_bio_add_page(sdio); 7621da177e4SLinus Torvalds BUG_ON(ret != 0); 7631da177e4SLinus Torvalds } 7641da177e4SLinus Torvalds } 7651da177e4SLinus Torvalds out: 7661da177e4SLinus Torvalds return ret; 7671da177e4SLinus Torvalds } 7681da177e4SLinus Torvalds 7691da177e4SLinus Torvalds /* 7701da177e4SLinus Torvalds * An autonomous function to put a chunk of a page under deferred IO. 7711da177e4SLinus Torvalds * 7721da177e4SLinus Torvalds * The caller doesn't actually know (or care) whether this piece of page is in 7731da177e4SLinus Torvalds * a BIO, or is under IO or whatever. We just take care of all possible 7741da177e4SLinus Torvalds * situations here. The separation between the logic of do_direct_IO() and 7751da177e4SLinus Torvalds * that of submit_page_section() is important for clarity. Please don't break. 7761da177e4SLinus Torvalds * 7771da177e4SLinus Torvalds * The chunk of page starts on-disk at blocknr. 7781da177e4SLinus Torvalds * 7791da177e4SLinus Torvalds * We perform deferred IO, by recording the last-submitted page inside our 7801da177e4SLinus Torvalds * private part of the dio structure. If possible, we just expand the IO 7811da177e4SLinus Torvalds * across that page here. 7821da177e4SLinus Torvalds * 7831da177e4SLinus Torvalds * If that doesn't work out then we put the old page into the bio and add this 7841da177e4SLinus Torvalds * page to the dio instead. 7851da177e4SLinus Torvalds */ 786ba253fbfSAndi Kleen static inline int 787eb28be2bSAndi Kleen submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, 78818772641SAndi Kleen unsigned offset, unsigned len, sector_t blocknr, 78918772641SAndi Kleen struct buffer_head *map_bh) 7901da177e4SLinus Torvalds { 7911da177e4SLinus Torvalds int ret = 0; 7921da177e4SLinus Torvalds 793*8a4c1e42SMike Christie if (dio->op == REQ_OP_WRITE) { 79498c4d57dSAndrew Morton /* 79598c4d57dSAndrew Morton * Read accounting is performed in submit_bio() 79698c4d57dSAndrew Morton */ 79798c4d57dSAndrew Morton task_io_account_write(len); 79898c4d57dSAndrew Morton } 79998c4d57dSAndrew Morton 8001da177e4SLinus Torvalds /* 8011da177e4SLinus Torvalds * Can we just grow the current page's presence in the dio? 8021da177e4SLinus Torvalds */ 803eb28be2bSAndi Kleen if (sdio->cur_page == page && 804eb28be2bSAndi Kleen sdio->cur_page_offset + sdio->cur_page_len == offset && 805eb28be2bSAndi Kleen sdio->cur_page_block + 806eb28be2bSAndi Kleen (sdio->cur_page_len >> sdio->blkbits) == blocknr) { 807eb28be2bSAndi Kleen sdio->cur_page_len += len; 8081da177e4SLinus Torvalds goto out; 8091da177e4SLinus Torvalds } 8101da177e4SLinus Torvalds 8111da177e4SLinus Torvalds /* 8121da177e4SLinus Torvalds * If there's a deferred page already there then send it. 8131da177e4SLinus Torvalds */ 814eb28be2bSAndi Kleen if (sdio->cur_page) { 81518772641SAndi Kleen ret = dio_send_cur_page(dio, sdio, map_bh); 81609cbfeafSKirill A. Shutemov put_page(sdio->cur_page); 817eb28be2bSAndi Kleen sdio->cur_page = NULL; 8181da177e4SLinus Torvalds if (ret) 819b1058b98SJan Kara return ret; 8201da177e4SLinus Torvalds } 8211da177e4SLinus Torvalds 82209cbfeafSKirill A. Shutemov get_page(page); /* It is in dio */ 823eb28be2bSAndi Kleen sdio->cur_page = page; 824eb28be2bSAndi Kleen sdio->cur_page_offset = offset; 825eb28be2bSAndi Kleen sdio->cur_page_len = len; 826eb28be2bSAndi Kleen sdio->cur_page_block = blocknr; 827eb28be2bSAndi Kleen sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits; 8281da177e4SLinus Torvalds out: 829b1058b98SJan Kara /* 830b1058b98SJan Kara * If sdio->boundary then we want to schedule the IO now to 831b1058b98SJan Kara * avoid metadata seeks. 832b1058b98SJan Kara */ 833b1058b98SJan Kara if (sdio->boundary) { 834b1058b98SJan Kara ret = dio_send_cur_page(dio, sdio, map_bh); 835b1058b98SJan Kara dio_bio_submit(dio, sdio); 83609cbfeafSKirill A. Shutemov put_page(sdio->cur_page); 837b1058b98SJan Kara sdio->cur_page = NULL; 838b1058b98SJan Kara } 8391da177e4SLinus Torvalds return ret; 8401da177e4SLinus Torvalds } 8411da177e4SLinus Torvalds 8421da177e4SLinus Torvalds /* 8431da177e4SLinus Torvalds * Clean any dirty buffers in the blockdev mapping which alias newly-created 8441da177e4SLinus Torvalds * file blocks. Only called for S_ISREG files - blockdevs do not set 8451da177e4SLinus Torvalds * buffer_new 8461da177e4SLinus Torvalds */ 84718772641SAndi Kleen static void clean_blockdev_aliases(struct dio *dio, struct buffer_head *map_bh) 8481da177e4SLinus Torvalds { 8491da177e4SLinus Torvalds unsigned i; 8501da177e4SLinus Torvalds unsigned nblocks; 8511da177e4SLinus Torvalds 85218772641SAndi Kleen nblocks = map_bh->b_size >> dio->inode->i_blkbits; 8531da177e4SLinus Torvalds 8541da177e4SLinus Torvalds for (i = 0; i < nblocks; i++) { 85518772641SAndi Kleen unmap_underlying_metadata(map_bh->b_bdev, 85618772641SAndi Kleen map_bh->b_blocknr + i); 8571da177e4SLinus Torvalds } 8581da177e4SLinus Torvalds } 8591da177e4SLinus Torvalds 8601da177e4SLinus Torvalds /* 8611da177e4SLinus Torvalds * If we are not writing the entire block and get_block() allocated 8621da177e4SLinus Torvalds * the block for us, we need to fill-in the unused portion of the 8631da177e4SLinus Torvalds * block with zeros. This happens only if user-buffer, fileoffset or 8641da177e4SLinus Torvalds * io length is not filesystem block-size multiple. 8651da177e4SLinus Torvalds * 8661da177e4SLinus Torvalds * `end' is zero if we're doing the start of the IO, 1 at the end of the 8671da177e4SLinus Torvalds * IO. 8681da177e4SLinus Torvalds */ 869ba253fbfSAndi Kleen static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio, 870ba253fbfSAndi Kleen int end, struct buffer_head *map_bh) 8711da177e4SLinus Torvalds { 8721da177e4SLinus Torvalds unsigned dio_blocks_per_fs_block; 8731da177e4SLinus Torvalds unsigned this_chunk_blocks; /* In dio_blocks */ 8741da177e4SLinus Torvalds unsigned this_chunk_bytes; 8751da177e4SLinus Torvalds struct page *page; 8761da177e4SLinus Torvalds 877eb28be2bSAndi Kleen sdio->start_zero_done = 1; 87818772641SAndi Kleen if (!sdio->blkfactor || !buffer_new(map_bh)) 8791da177e4SLinus Torvalds return; 8801da177e4SLinus Torvalds 881eb28be2bSAndi Kleen dio_blocks_per_fs_block = 1 << sdio->blkfactor; 882eb28be2bSAndi Kleen this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1); 8831da177e4SLinus Torvalds 8841da177e4SLinus Torvalds if (!this_chunk_blocks) 8851da177e4SLinus Torvalds return; 8861da177e4SLinus Torvalds 8871da177e4SLinus Torvalds /* 8881da177e4SLinus Torvalds * We need to zero out part of an fs block. It is either at the 8891da177e4SLinus Torvalds * beginning or the end of the fs block. 8901da177e4SLinus Torvalds */ 8911da177e4SLinus Torvalds if (end) 8921da177e4SLinus Torvalds this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks; 8931da177e4SLinus Torvalds 894eb28be2bSAndi Kleen this_chunk_bytes = this_chunk_blocks << sdio->blkbits; 8951da177e4SLinus Torvalds 896557ed1faSNick Piggin page = ZERO_PAGE(0); 897eb28be2bSAndi Kleen if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes, 89818772641SAndi Kleen sdio->next_block_for_io, map_bh)) 8991da177e4SLinus Torvalds return; 9001da177e4SLinus Torvalds 901eb28be2bSAndi Kleen sdio->next_block_for_io += this_chunk_blocks; 9021da177e4SLinus Torvalds } 9031da177e4SLinus Torvalds 9041da177e4SLinus Torvalds /* 9051da177e4SLinus Torvalds * Walk the user pages, and the file, mapping blocks to disk and generating 9061da177e4SLinus Torvalds * a sequence of (page,offset,len,block) mappings. These mappings are injected 9071da177e4SLinus Torvalds * into submit_page_section(), which takes care of the next stage of submission 9081da177e4SLinus Torvalds * 9091da177e4SLinus Torvalds * Direct IO against a blockdev is different from a file. Because we can 9101da177e4SLinus Torvalds * happily perform page-sized but 512-byte aligned IOs. It is important that 9111da177e4SLinus Torvalds * blockdev IO be able to have fine alignment and large sizes. 9121da177e4SLinus Torvalds * 9131d8fa7a2SBadari Pulavarty * So what we do is to permit the ->get_block function to populate bh.b_size 9141da177e4SLinus Torvalds * with the size of IO which is permitted at this offset and this i_blkbits. 9151da177e4SLinus Torvalds * 9161da177e4SLinus Torvalds * For best results, the blockdev should be set up with 512-byte i_blkbits and 9171d8fa7a2SBadari Pulavarty * it should set b_size to PAGE_SIZE or more inside get_block(). This gives 9181da177e4SLinus Torvalds * fine alignment but still allows this function to work in PAGE_SIZE units. 9191da177e4SLinus Torvalds */ 92018772641SAndi Kleen static int do_direct_IO(struct dio *dio, struct dio_submit *sdio, 92118772641SAndi Kleen struct buffer_head *map_bh) 9221da177e4SLinus Torvalds { 923eb28be2bSAndi Kleen const unsigned blkbits = sdio->blkbits; 9241da177e4SLinus Torvalds int ret = 0; 9251da177e4SLinus Torvalds 926eb28be2bSAndi Kleen while (sdio->block_in_file < sdio->final_block_in_request) { 9277b2c99d1SAl Viro struct page *page; 9287b2c99d1SAl Viro size_t from, to; 9296fcc5420SBoaz Harrosh 9306fcc5420SBoaz Harrosh page = dio_get_page(dio, sdio); 9311da177e4SLinus Torvalds if (IS_ERR(page)) { 9321da177e4SLinus Torvalds ret = PTR_ERR(page); 9331da177e4SLinus Torvalds goto out; 9341da177e4SLinus Torvalds } 9356fcc5420SBoaz Harrosh from = sdio->head ? 0 : sdio->from; 9366fcc5420SBoaz Harrosh to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE; 9376fcc5420SBoaz Harrosh sdio->head++; 9381da177e4SLinus Torvalds 9397b2c99d1SAl Viro while (from < to) { 9401da177e4SLinus Torvalds unsigned this_chunk_bytes; /* # of bytes mapped */ 9411da177e4SLinus Torvalds unsigned this_chunk_blocks; /* # of blocks */ 9421da177e4SLinus Torvalds unsigned u; 9431da177e4SLinus Torvalds 944eb28be2bSAndi Kleen if (sdio->blocks_available == 0) { 9451da177e4SLinus Torvalds /* 9461da177e4SLinus Torvalds * Need to go and map some more disk 9471da177e4SLinus Torvalds */ 9481da177e4SLinus Torvalds unsigned long blkmask; 9491da177e4SLinus Torvalds unsigned long dio_remainder; 9501da177e4SLinus Torvalds 95118772641SAndi Kleen ret = get_more_blocks(dio, sdio, map_bh); 9521da177e4SLinus Torvalds if (ret) { 95309cbfeafSKirill A. Shutemov put_page(page); 9541da177e4SLinus Torvalds goto out; 9551da177e4SLinus Torvalds } 9561da177e4SLinus Torvalds if (!buffer_mapped(map_bh)) 9571da177e4SLinus Torvalds goto do_holes; 9581da177e4SLinus Torvalds 959eb28be2bSAndi Kleen sdio->blocks_available = 960eb28be2bSAndi Kleen map_bh->b_size >> sdio->blkbits; 961eb28be2bSAndi Kleen sdio->next_block_for_io = 962eb28be2bSAndi Kleen map_bh->b_blocknr << sdio->blkfactor; 9631da177e4SLinus Torvalds if (buffer_new(map_bh)) 96418772641SAndi Kleen clean_blockdev_aliases(dio, map_bh); 9651da177e4SLinus Torvalds 966eb28be2bSAndi Kleen if (!sdio->blkfactor) 9671da177e4SLinus Torvalds goto do_holes; 9681da177e4SLinus Torvalds 969eb28be2bSAndi Kleen blkmask = (1 << sdio->blkfactor) - 1; 970eb28be2bSAndi Kleen dio_remainder = (sdio->block_in_file & blkmask); 9711da177e4SLinus Torvalds 9721da177e4SLinus Torvalds /* 9731da177e4SLinus Torvalds * If we are at the start of IO and that IO 9741da177e4SLinus Torvalds * starts partway into a fs-block, 9751da177e4SLinus Torvalds * dio_remainder will be non-zero. If the IO 9761da177e4SLinus Torvalds * is a read then we can simply advance the IO 9771da177e4SLinus Torvalds * cursor to the first block which is to be 9781da177e4SLinus Torvalds * read. But if the IO is a write and the 9791da177e4SLinus Torvalds * block was newly allocated we cannot do that; 9801da177e4SLinus Torvalds * the start of the fs block must be zeroed out 9811da177e4SLinus Torvalds * on-disk 9821da177e4SLinus Torvalds */ 9831da177e4SLinus Torvalds if (!buffer_new(map_bh)) 984eb28be2bSAndi Kleen sdio->next_block_for_io += dio_remainder; 985eb28be2bSAndi Kleen sdio->blocks_available -= dio_remainder; 9861da177e4SLinus Torvalds } 9871da177e4SLinus Torvalds do_holes: 9881da177e4SLinus Torvalds /* Handle holes */ 9891da177e4SLinus Torvalds if (!buffer_mapped(map_bh)) { 99035dc8161SJeff Moyer loff_t i_size_aligned; 9911da177e4SLinus Torvalds 9921da177e4SLinus Torvalds /* AKPM: eargh, -ENOTBLK is a hack */ 993*8a4c1e42SMike Christie if (dio->op == REQ_OP_WRITE) { 99409cbfeafSKirill A. Shutemov put_page(page); 9951da177e4SLinus Torvalds return -ENOTBLK; 9961da177e4SLinus Torvalds } 9971da177e4SLinus Torvalds 99835dc8161SJeff Moyer /* 99935dc8161SJeff Moyer * Be sure to account for a partial block as the 100035dc8161SJeff Moyer * last block in the file 100135dc8161SJeff Moyer */ 100235dc8161SJeff Moyer i_size_aligned = ALIGN(i_size_read(dio->inode), 100335dc8161SJeff Moyer 1 << blkbits); 1004eb28be2bSAndi Kleen if (sdio->block_in_file >= 100535dc8161SJeff Moyer i_size_aligned >> blkbits) { 10061da177e4SLinus Torvalds /* We hit eof */ 100709cbfeafSKirill A. Shutemov put_page(page); 10081da177e4SLinus Torvalds goto out; 10091da177e4SLinus Torvalds } 10107b2c99d1SAl Viro zero_user(page, from, 1 << blkbits); 1011eb28be2bSAndi Kleen sdio->block_in_file++; 10127b2c99d1SAl Viro from += 1 << blkbits; 10133320c60bSAl Viro dio->result += 1 << blkbits; 10141da177e4SLinus Torvalds goto next_block; 10151da177e4SLinus Torvalds } 10161da177e4SLinus Torvalds 10171da177e4SLinus Torvalds /* 10181da177e4SLinus Torvalds * If we're performing IO which has an alignment which 10191da177e4SLinus Torvalds * is finer than the underlying fs, go check to see if 10201da177e4SLinus Torvalds * we must zero out the start of this block. 10211da177e4SLinus Torvalds */ 1022eb28be2bSAndi Kleen if (unlikely(sdio->blkfactor && !sdio->start_zero_done)) 102318772641SAndi Kleen dio_zero_block(dio, sdio, 0, map_bh); 10241da177e4SLinus Torvalds 10251da177e4SLinus Torvalds /* 10261da177e4SLinus Torvalds * Work out, in this_chunk_blocks, how much disk we 10271da177e4SLinus Torvalds * can add to this page 10281da177e4SLinus Torvalds */ 1029eb28be2bSAndi Kleen this_chunk_blocks = sdio->blocks_available; 10307b2c99d1SAl Viro u = (to - from) >> blkbits; 10311da177e4SLinus Torvalds if (this_chunk_blocks > u) 10321da177e4SLinus Torvalds this_chunk_blocks = u; 1033eb28be2bSAndi Kleen u = sdio->final_block_in_request - sdio->block_in_file; 10341da177e4SLinus Torvalds if (this_chunk_blocks > u) 10351da177e4SLinus Torvalds this_chunk_blocks = u; 10361da177e4SLinus Torvalds this_chunk_bytes = this_chunk_blocks << blkbits; 10371da177e4SLinus Torvalds BUG_ON(this_chunk_bytes == 0); 10381da177e4SLinus Torvalds 1039092c8d46SJan Kara if (this_chunk_blocks == sdio->blocks_available) 1040eb28be2bSAndi Kleen sdio->boundary = buffer_boundary(map_bh); 1041eb28be2bSAndi Kleen ret = submit_page_section(dio, sdio, page, 10427b2c99d1SAl Viro from, 1043eb28be2bSAndi Kleen this_chunk_bytes, 104418772641SAndi Kleen sdio->next_block_for_io, 104518772641SAndi Kleen map_bh); 10461da177e4SLinus Torvalds if (ret) { 104709cbfeafSKirill A. Shutemov put_page(page); 10481da177e4SLinus Torvalds goto out; 10491da177e4SLinus Torvalds } 1050eb28be2bSAndi Kleen sdio->next_block_for_io += this_chunk_blocks; 10511da177e4SLinus Torvalds 1052eb28be2bSAndi Kleen sdio->block_in_file += this_chunk_blocks; 10537b2c99d1SAl Viro from += this_chunk_bytes; 10547b2c99d1SAl Viro dio->result += this_chunk_bytes; 1055eb28be2bSAndi Kleen sdio->blocks_available -= this_chunk_blocks; 10561da177e4SLinus Torvalds next_block: 1057eb28be2bSAndi Kleen BUG_ON(sdio->block_in_file > sdio->final_block_in_request); 1058eb28be2bSAndi Kleen if (sdio->block_in_file == sdio->final_block_in_request) 10591da177e4SLinus Torvalds break; 10601da177e4SLinus Torvalds } 10611da177e4SLinus Torvalds 10621da177e4SLinus Torvalds /* Drop the ref which was taken in get_user_pages() */ 106309cbfeafSKirill A. Shutemov put_page(page); 10641da177e4SLinus Torvalds } 10651da177e4SLinus Torvalds out: 10661da177e4SLinus Torvalds return ret; 10671da177e4SLinus Torvalds } 10681da177e4SLinus Torvalds 1069847cc637SAndi Kleen static inline int drop_refcount(struct dio *dio) 10701da177e4SLinus Torvalds { 1071847cc637SAndi Kleen int ret2; 10725eb6c7a2SZach Brown unsigned long flags; 107320258b2bSZach Brown 10741da177e4SLinus Torvalds /* 10758459d86aSZach Brown * Sync will always be dropping the final ref and completing the 10765eb6c7a2SZach Brown * operation. AIO can if it was a broken operation described above or 10775eb6c7a2SZach Brown * in fact if all the bios race to complete before we get here. In 10785eb6c7a2SZach Brown * that case dio_complete() translates the EIOCBQUEUED into the proper 107904b2fa9fSChristoph Hellwig * return code that the caller will hand to ->complete(). 10805eb6c7a2SZach Brown * 10815eb6c7a2SZach Brown * This is managed by the bio_lock instead of being an atomic_t so that 10825eb6c7a2SZach Brown * completion paths can drop their ref and use the remaining count to 10835eb6c7a2SZach Brown * decide to wake the submission path atomically. 10841da177e4SLinus Torvalds */ 10855eb6c7a2SZach Brown spin_lock_irqsave(&dio->bio_lock, flags); 10865eb6c7a2SZach Brown ret2 = --dio->refcount; 10875eb6c7a2SZach Brown spin_unlock_irqrestore(&dio->bio_lock, flags); 1088847cc637SAndi Kleen return ret2; 10891da177e4SLinus Torvalds } 10901da177e4SLinus Torvalds 1091eafdc7d1SChristoph Hellwig /* 1092eafdc7d1SChristoph Hellwig * This is a library function for use by filesystem drivers. 1093eafdc7d1SChristoph Hellwig * 1094eafdc7d1SChristoph Hellwig * The locking rules are governed by the flags parameter: 1095eafdc7d1SChristoph Hellwig * - if the flags value contains DIO_LOCKING we use a fancy locking 1096eafdc7d1SChristoph Hellwig * scheme for dumb filesystems. 1097eafdc7d1SChristoph Hellwig * For writes this function is called under i_mutex and returns with 1098eafdc7d1SChristoph Hellwig * i_mutex held, for reads, i_mutex is not held on entry, but it is 1099eafdc7d1SChristoph Hellwig * taken and dropped again before returning. 1100eafdc7d1SChristoph Hellwig * - if the flags value does NOT contain DIO_LOCKING we don't use any 1101eafdc7d1SChristoph Hellwig * internal locking but rather rely on the filesystem to synchronize 1102eafdc7d1SChristoph Hellwig * direct I/O reads/writes versus each other and truncate. 1103df2d6f26SChristoph Hellwig * 1104df2d6f26SChristoph Hellwig * To help with locking against truncate we incremented the i_dio_count 1105df2d6f26SChristoph Hellwig * counter before starting direct I/O, and decrement it once we are done. 1106df2d6f26SChristoph Hellwig * Truncate can wait for it to reach zero to provide exclusion. It is 1107df2d6f26SChristoph Hellwig * expected that filesystem provide exclusion between new direct I/O 1108df2d6f26SChristoph Hellwig * and truncates. For DIO_LOCKING filesystems this is done by i_mutex, 1109df2d6f26SChristoph Hellwig * but other filesystems need to take care of this on their own. 1110ba253fbfSAndi Kleen * 1111ba253fbfSAndi Kleen * NOTE: if you pass "sdio" to anything by pointer make sure that function 1112ba253fbfSAndi Kleen * is always inlined. Otherwise gcc is unable to split the structure into 1113ba253fbfSAndi Kleen * individual fields and will generate much worse code. This is important 1114ba253fbfSAndi Kleen * for the whole file. 1115eafdc7d1SChristoph Hellwig */ 111665dd2aa9SAndi Kleen static inline ssize_t 111717f8c842SOmar Sandoval do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, 111817f8c842SOmar Sandoval struct block_device *bdev, struct iov_iter *iter, 1119c8b8e32dSChristoph Hellwig get_block_t get_block, dio_iodone_t end_io, 1120facd07b0SJosef Bacik dio_submit_t submit_io, int flags) 11211da177e4SLinus Torvalds { 1122ab73857eSLinus Torvalds unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits); 1123ab73857eSLinus Torvalds unsigned blkbits = i_blkbits; 11241da177e4SLinus Torvalds unsigned blocksize_mask = (1 << blkbits) - 1; 11251da177e4SLinus Torvalds ssize_t retval = -EINVAL; 1126af436472SChristoph Hellwig size_t count = iov_iter_count(iter); 1127c8b8e32dSChristoph Hellwig loff_t offset = iocb->ki_pos; 1128af436472SChristoph Hellwig loff_t end = offset + count; 11291da177e4SLinus Torvalds struct dio *dio; 1130eb28be2bSAndi Kleen struct dio_submit sdio = { 0, }; 1131847cc637SAndi Kleen struct buffer_head map_bh = { 0, }; 1132647d1e4cSFengguang Wu struct blk_plug plug; 1133886a3911SAl Viro unsigned long align = offset | iov_iter_alignment(iter); 11341da177e4SLinus Torvalds 113565dd2aa9SAndi Kleen /* 113665dd2aa9SAndi Kleen * Avoid references to bdev if not absolutely needed to give 113765dd2aa9SAndi Kleen * the early prefetch in the caller enough time. 113865dd2aa9SAndi Kleen */ 11391da177e4SLinus Torvalds 1140886a3911SAl Viro if (align & blocksize_mask) { 11411da177e4SLinus Torvalds if (bdev) 114265dd2aa9SAndi Kleen blkbits = blksize_bits(bdev_logical_block_size(bdev)); 11431da177e4SLinus Torvalds blocksize_mask = (1 << blkbits) - 1; 1144886a3911SAl Viro if (align & blocksize_mask) 11451da177e4SLinus Torvalds goto out; 11461da177e4SLinus Torvalds } 11471da177e4SLinus Torvalds 1148f9b5570dSChristoph Hellwig /* watch out for a 0 len io from a tricksy fs */ 114917f8c842SOmar Sandoval if (iov_iter_rw(iter) == READ && !iov_iter_count(iter)) 1150f9b5570dSChristoph Hellwig return 0; 1151f9b5570dSChristoph Hellwig 11526e8267f5SAndi Kleen dio = kmem_cache_alloc(dio_cache, GFP_KERNEL); 11531da177e4SLinus Torvalds retval = -ENOMEM; 11541da177e4SLinus Torvalds if (!dio) 11551da177e4SLinus Torvalds goto out; 115623aee091SJeff Moyer /* 115723aee091SJeff Moyer * Believe it or not, zeroing out the page array caused a .5% 115823aee091SJeff Moyer * performance regression in a database benchmark. So, we take 115923aee091SJeff Moyer * care to only zero out what's needed. 116023aee091SJeff Moyer */ 116123aee091SJeff Moyer memset(dio, 0, offsetof(struct dio, pages)); 11621da177e4SLinus Torvalds 11635fe878aeSChristoph Hellwig dio->flags = flags; 11645fe878aeSChristoph Hellwig if (dio->flags & DIO_LOCKING) { 116517f8c842SOmar Sandoval if (iov_iter_rw(iter) == READ) { 11665fe878aeSChristoph Hellwig struct address_space *mapping = 11675fe878aeSChristoph Hellwig iocb->ki_filp->f_mapping; 11681da177e4SLinus Torvalds 11695fe878aeSChristoph Hellwig /* will be released by direct_io_worker */ 11705955102cSAl Viro inode_lock(inode); 11711da177e4SLinus Torvalds 11721da177e4SLinus Torvalds retval = filemap_write_and_wait_range(mapping, offset, 11731da177e4SLinus Torvalds end - 1); 11741da177e4SLinus Torvalds if (retval) { 11755955102cSAl Viro inode_unlock(inode); 11766e8267f5SAndi Kleen kmem_cache_free(dio_cache, dio); 11771da177e4SLinus Torvalds goto out; 11781da177e4SLinus Torvalds } 11791da177e4SLinus Torvalds } 1180df2d6f26SChristoph Hellwig } 11811da177e4SLinus Torvalds 118274cedf9bSJan Kara /* Once we sampled i_size check for reads beyond EOF */ 118374cedf9bSJan Kara dio->i_size = i_size_read(inode); 118474cedf9bSJan Kara if (iov_iter_rw(iter) == READ && offset >= dio->i_size) { 118574cedf9bSJan Kara if (dio->flags & DIO_LOCKING) 11865955102cSAl Viro inode_unlock(inode); 118774cedf9bSJan Kara kmem_cache_free(dio_cache, dio); 11882d4594acSAl Viro retval = 0; 118974cedf9bSJan Kara goto out; 119074cedf9bSJan Kara } 119174cedf9bSJan Kara 11925fe878aeSChristoph Hellwig /* 119360392573SChristoph Hellwig * For file extending writes updating i_size before data writeouts 119460392573SChristoph Hellwig * complete can expose uninitialized blocks in dumb filesystems. 119560392573SChristoph Hellwig * In that case we need to wait for I/O completion even if asked 119660392573SChristoph Hellwig * for an asynchronous write. 11971da177e4SLinus Torvalds */ 119860392573SChristoph Hellwig if (is_sync_kiocb(iocb)) 119960392573SChristoph Hellwig dio->is_async = false; 120060392573SChristoph Hellwig else if (!(dio->flags & DIO_ASYNC_EXTEND) && 120117f8c842SOmar Sandoval iov_iter_rw(iter) == WRITE && end > i_size_read(inode)) 120260392573SChristoph Hellwig dio->is_async = false; 120360392573SChristoph Hellwig else 120460392573SChristoph Hellwig dio->is_async = true; 120560392573SChristoph Hellwig 1206847cc637SAndi Kleen dio->inode = inode; 1207*8a4c1e42SMike Christie if (iov_iter_rw(iter) == WRITE) { 1208*8a4c1e42SMike Christie dio->op = REQ_OP_WRITE; 1209*8a4c1e42SMike Christie dio->op_flags = WRITE_ODIRECT; 1210*8a4c1e42SMike Christie } else { 1211*8a4c1e42SMike Christie dio->op = REQ_OP_READ; 1212*8a4c1e42SMike Christie } 121302afc27fSChristoph Hellwig 121402afc27fSChristoph Hellwig /* 121502afc27fSChristoph Hellwig * For AIO O_(D)SYNC writes we need to defer completions to a workqueue 121602afc27fSChristoph Hellwig * so that we can call ->fsync. 121702afc27fSChristoph Hellwig */ 121817f8c842SOmar Sandoval if (dio->is_async && iov_iter_rw(iter) == WRITE && 121902afc27fSChristoph Hellwig ((iocb->ki_filp->f_flags & O_DSYNC) || 122002afc27fSChristoph Hellwig IS_SYNC(iocb->ki_filp->f_mapping->host))) { 122102afc27fSChristoph Hellwig retval = dio_set_defer_completion(dio); 122202afc27fSChristoph Hellwig if (retval) { 122302afc27fSChristoph Hellwig /* 122402afc27fSChristoph Hellwig * We grab i_mutex only for reads so we don't have 122502afc27fSChristoph Hellwig * to release it here 122602afc27fSChristoph Hellwig */ 122702afc27fSChristoph Hellwig kmem_cache_free(dio_cache, dio); 122802afc27fSChristoph Hellwig goto out; 122902afc27fSChristoph Hellwig } 123002afc27fSChristoph Hellwig } 123102afc27fSChristoph Hellwig 123202afc27fSChristoph Hellwig /* 123302afc27fSChristoph Hellwig * Will be decremented at I/O completion time. 123402afc27fSChristoph Hellwig */ 1235fe0f07d0SJens Axboe if (!(dio->flags & DIO_SKIP_DIO_COUNT)) 1236fe0f07d0SJens Axboe inode_dio_begin(inode); 123702afc27fSChristoph Hellwig 123802afc27fSChristoph Hellwig retval = 0; 1239847cc637SAndi Kleen sdio.blkbits = blkbits; 1240ab73857eSLinus Torvalds sdio.blkfactor = i_blkbits - blkbits; 1241847cc637SAndi Kleen sdio.block_in_file = offset >> blkbits; 1242847cc637SAndi Kleen 1243847cc637SAndi Kleen sdio.get_block = get_block; 1244847cc637SAndi Kleen dio->end_io = end_io; 1245847cc637SAndi Kleen sdio.submit_io = submit_io; 1246847cc637SAndi Kleen sdio.final_block_in_bio = -1; 1247847cc637SAndi Kleen sdio.next_block_for_io = -1; 1248847cc637SAndi Kleen 1249847cc637SAndi Kleen dio->iocb = iocb; 1250847cc637SAndi Kleen 1251847cc637SAndi Kleen spin_lock_init(&dio->bio_lock); 1252847cc637SAndi Kleen dio->refcount = 1; 1253847cc637SAndi Kleen 125453cbf3b1SMing Lei dio->should_dirty = (iter->type == ITER_IOVEC); 12557b2c99d1SAl Viro sdio.iter = iter; 12567b2c99d1SAl Viro sdio.final_block_in_request = 12577b2c99d1SAl Viro (offset + iov_iter_count(iter)) >> blkbits; 12587b2c99d1SAl Viro 1259847cc637SAndi Kleen /* 1260847cc637SAndi Kleen * In case of non-aligned buffers, we may need 2 more 1261847cc637SAndi Kleen * pages since we need to zero out first and last block. 1262847cc637SAndi Kleen */ 1263847cc637SAndi Kleen if (unlikely(sdio.blkfactor)) 1264847cc637SAndi Kleen sdio.pages_in_io = 2; 1265847cc637SAndi Kleen 1266f67da30cSAl Viro sdio.pages_in_io += iov_iter_npages(iter, INT_MAX); 1267847cc637SAndi Kleen 1268647d1e4cSFengguang Wu blk_start_plug(&plug); 1269647d1e4cSFengguang Wu 1270847cc637SAndi Kleen retval = do_direct_IO(dio, &sdio, &map_bh); 12717b2c99d1SAl Viro if (retval) 1272847cc637SAndi Kleen dio_cleanup(dio, &sdio); 1273847cc637SAndi Kleen 1274847cc637SAndi Kleen if (retval == -ENOTBLK) { 1275847cc637SAndi Kleen /* 1276847cc637SAndi Kleen * The remaining part of the request will be 1277847cc637SAndi Kleen * be handled by buffered I/O when we return 1278847cc637SAndi Kleen */ 1279847cc637SAndi Kleen retval = 0; 1280847cc637SAndi Kleen } 1281847cc637SAndi Kleen /* 1282847cc637SAndi Kleen * There may be some unwritten disk at the end of a part-written 1283847cc637SAndi Kleen * fs-block-sized block. Go zero that now. 1284847cc637SAndi Kleen */ 1285847cc637SAndi Kleen dio_zero_block(dio, &sdio, 1, &map_bh); 1286847cc637SAndi Kleen 1287847cc637SAndi Kleen if (sdio.cur_page) { 1288847cc637SAndi Kleen ssize_t ret2; 1289847cc637SAndi Kleen 1290847cc637SAndi Kleen ret2 = dio_send_cur_page(dio, &sdio, &map_bh); 1291847cc637SAndi Kleen if (retval == 0) 1292847cc637SAndi Kleen retval = ret2; 129309cbfeafSKirill A. Shutemov put_page(sdio.cur_page); 1294847cc637SAndi Kleen sdio.cur_page = NULL; 1295847cc637SAndi Kleen } 1296847cc637SAndi Kleen if (sdio.bio) 1297847cc637SAndi Kleen dio_bio_submit(dio, &sdio); 1298847cc637SAndi Kleen 1299647d1e4cSFengguang Wu blk_finish_plug(&plug); 1300647d1e4cSFengguang Wu 1301847cc637SAndi Kleen /* 1302847cc637SAndi Kleen * It is possible that, we return short IO due to end of file. 1303847cc637SAndi Kleen * In that case, we need to release all the pages we got hold on. 1304847cc637SAndi Kleen */ 1305847cc637SAndi Kleen dio_cleanup(dio, &sdio); 1306847cc637SAndi Kleen 1307847cc637SAndi Kleen /* 1308847cc637SAndi Kleen * All block lookups have been performed. For READ requests 1309847cc637SAndi Kleen * we can let i_mutex go now that its achieved its purpose 1310847cc637SAndi Kleen * of protecting us from looking up uninitialized blocks. 1311847cc637SAndi Kleen */ 131217f8c842SOmar Sandoval if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING)) 13135955102cSAl Viro inode_unlock(dio->inode); 1314847cc637SAndi Kleen 1315847cc637SAndi Kleen /* 1316847cc637SAndi Kleen * The only time we want to leave bios in flight is when a successful 1317847cc637SAndi Kleen * partial aio read or full aio write have been setup. In that case 1318847cc637SAndi Kleen * bio completion will call aio_complete. The only time it's safe to 1319847cc637SAndi Kleen * call aio_complete is when we return -EIOCBQUEUED, so we key on that. 1320847cc637SAndi Kleen * This had *better* be the only place that raises -EIOCBQUEUED. 1321847cc637SAndi Kleen */ 1322847cc637SAndi Kleen BUG_ON(retval == -EIOCBQUEUED); 1323847cc637SAndi Kleen if (dio->is_async && retval == 0 && dio->result && 132417f8c842SOmar Sandoval (iov_iter_rw(iter) == READ || dio->result == count)) 1325847cc637SAndi Kleen retval = -EIOCBQUEUED; 1326af436472SChristoph Hellwig else 1327847cc637SAndi Kleen dio_await_completion(dio); 1328847cc637SAndi Kleen 1329847cc637SAndi Kleen if (drop_refcount(dio) == 0) { 1330716b9bc0SChristoph Hellwig retval = dio_complete(dio, retval, false); 1331847cc637SAndi Kleen } else 1332847cc637SAndi Kleen BUG_ON(retval != -EIOCBQUEUED); 13331da177e4SLinus Torvalds 13347bb46a67Snpiggin@suse.de out: 13357bb46a67Snpiggin@suse.de return retval; 13367bb46a67Snpiggin@suse.de } 133765dd2aa9SAndi Kleen 133817f8c842SOmar Sandoval ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, 133917f8c842SOmar Sandoval struct block_device *bdev, struct iov_iter *iter, 1340c8b8e32dSChristoph Hellwig get_block_t get_block, 134117f8c842SOmar Sandoval dio_iodone_t end_io, dio_submit_t submit_io, 134217f8c842SOmar Sandoval int flags) 134365dd2aa9SAndi Kleen { 134465dd2aa9SAndi Kleen /* 134565dd2aa9SAndi Kleen * The block device state is needed in the end to finally 134665dd2aa9SAndi Kleen * submit everything. Since it's likely to be cache cold 134765dd2aa9SAndi Kleen * prefetch it here as first thing to hide some of the 134865dd2aa9SAndi Kleen * latency. 134965dd2aa9SAndi Kleen * 135065dd2aa9SAndi Kleen * Attempt to prefetch the pieces we likely need later. 135165dd2aa9SAndi Kleen */ 135265dd2aa9SAndi Kleen prefetch(&bdev->bd_disk->part_tbl); 135365dd2aa9SAndi Kleen prefetch(bdev->bd_queue); 135465dd2aa9SAndi Kleen prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES); 135565dd2aa9SAndi Kleen 1356c8b8e32dSChristoph Hellwig return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block, 135717f8c842SOmar Sandoval end_io, submit_io, flags); 135865dd2aa9SAndi Kleen } 135965dd2aa9SAndi Kleen 13601da177e4SLinus Torvalds EXPORT_SYMBOL(__blockdev_direct_IO); 13616e8267f5SAndi Kleen 13626e8267f5SAndi Kleen static __init int dio_init(void) 13636e8267f5SAndi Kleen { 13646e8267f5SAndi Kleen dio_cache = KMEM_CACHE(dio, SLAB_PANIC); 13656e8267f5SAndi Kleen return 0; 13666e8267f5SAndi Kleen } 13676e8267f5SAndi Kleen module_init(dio_init) 1368