// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/direct-io.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * O_DIRECT
 *
 * 04Jul2002	Andrew Morton
 *		Initial version
 * 11Sep2002	janetinc@us.ibm.com
 *		added readv/writev support.
 * 29Oct2002	Andrew Morton
 *		rewrote bio_add_page() support.
 * 30Oct2002	pbadari@us.ibm.com
 *		added support for non-aligned IO.
 * 06Nov2002	pbadari@us.ibm.com
 *		added asynchronous IO support.
 * 21Jul2003	nathans@sgi.com
 *		added IO completion notifier.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>

#include "internal.h"

/*
 * How many user pages to map in one call to get_user_pages().  This determines
 * the size of a structure in the slab cache
 */
#define DIO_PAGES	64

/*
 * Flags for dio_complete()
 */
#define DIO_COMPLETE_ASYNC		0x01	/* This is async IO */
#define DIO_COMPLETE_INVALIDATE		0x02	/* Can invalidate pages */

/*
 * This code generally works in units of "dio_blocks".  A dio_block is
 * somewhere between the hard sector size and the filesystem block size.  it
 * is determined on a per-invocation basis.  When talking to the filesystem
 * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
 * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
 * to bio_block quantities by shifting left by blkfactor.
 *
 * If blkfactor is zero then the user's request was aligned to the filesystem's
 * blocksize.
 */
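
/*
 * Worked example of the scaling above (illustrative values, not from the
 * original source): with a 4096-byte filesystem block and 512-byte dio
 * blocks, blkfactor is 3, so
 *
 *	fs_block  = dio_block >> blkfactor;	(divide by 8)
 *	dio_block = fs_block  << blkfactor;	(multiply by 8)
 */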

/* dio_state only used in the submission path */

struct dio_submit {
	struct bio *bio;		/* bio under assembly */
	unsigned blkbits;		/* doesn't change */
	unsigned blkfactor;		/* When we're using an alignment which
					   is finer than the filesystem's soft
					   blocksize, this specifies how much
					   finer.  blkfactor=2 means 1/4-block
					   alignment.  Does not change */
	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
					   been performed at the start of a
					   write */
	int pages_in_io;		/* approximate total IO pages */
	sector_t block_in_file;		/* Current offset into the underlying
					   file in dio_block units. */
	unsigned blocks_available;	/* At block_in_file.  changes */
	int reap_counter;		/* rate limit reaping */
	sector_t final_block_in_request;/* doesn't change */
	int boundary;			/* prev block is at a boundary */
	get_block_t *get_block;		/* block mapping function */

	loff_t logical_offset_in_bio;	/* current first logical block in bio */
	sector_t final_block_in_bio;	/* current final block in bio + 1 */
	sector_t next_block_for_io;	/* next block to be put under IO,
					   in dio_blocks units */

	/*
	 * Deferred addition of a page to the dio.  These variables are
	 * private to dio_send_cur_page(), submit_page_section() and
	 * dio_bio_add_page().
	 */
	struct page *cur_page;		/* The page */
	unsigned cur_page_offset;	/* Offset into it, in bytes */
	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
	sector_t cur_page_block;	/* Where it starts */
	loff_t cur_page_fs_offset;	/* Offset in file */

	struct iov_iter *iter;
	/*
	 * Page queue.  These variables belong to dio_refill_pages() and
	 * dio_get_page().
	 */
	unsigned head;			/* next page to process */
	unsigned tail;			/* last valid page + 1 */
	size_t from, to;
};
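
/*
 * Illustrative sketch of the page-queue fields above (values are made up):
 * after one dio_refill_pages() call that mapped three user pages for a
 * request starting 100 bytes into the first page and ending 200 bytes into
 * the last, head == 0, tail == 3, from == 100 and to == 200.
 * do_direct_IO() then walks pages[head..tail-1], applying 'from' only to
 * the first page and 'to' only to the last.
 */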

/* dio_state communicated between submission path and end_io */
struct dio {
	int flags;			/* doesn't change */
	blk_opf_t opf;			/* request operation type and flags */
	struct gendisk *bio_disk;
	struct inode *inode;
	loff_t i_size;			/* i_size when submitted */
	dio_iodone_t *end_io;		/* IO completion function */

	void *private;			/* copy from map_bh.b_private */

	/* BIO completion state */
	spinlock_t bio_lock;		/* protects BIO fields below */
	int page_errors;		/* errno from get_user_pages() */
	int is_async;			/* is IO async ? */
	bool defer_completion;		/* defer AIO completion to workqueue? */
	bool should_dirty;		/* if pages should be dirtied */
	int io_error;			/* IO error in completion path */
	unsigned long refcount;		/* direct_io_worker() and bios */
	struct bio *bio_list;		/* singly linked via bi_private */
	struct task_struct *waiter;	/* waiting task (NULL if none) */

	/* AIO related stuff */
	struct kiocb *iocb;		/* kiocb */
	ssize_t result;			/* IO result */

	/*
	 * pages[] (and any fields placed after it) are not zeroed out at
	 * allocation time.  Don't add new fields after pages[] unless you
	 * wish that they not be zeroed.
	 */
	union {
		struct page *pages[DIO_PAGES];	/* page buffer */
		struct work_struct complete_work;/* deferred AIO completion */
	};
} ____cacheline_aligned_in_smp;

static struct kmem_cache *dio_cache __read_mostly;

/*
 * How many pages are in the queue?
 */
static inline unsigned dio_pages_present(struct dio_submit *sdio)
{
	return sdio->tail - sdio->head;
}

/*
 * Go grab and pin some userspace pages.   Typically we'll get 64 at a time.
 */
static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
{
	const enum req_op dio_op = dio->opf & REQ_OP_MASK;
	ssize_t ret;

	ret = iov_iter_get_pages2(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
				&sdio->from);

	if (ret < 0 && sdio->blocks_available && dio_op == REQ_OP_WRITE) {
		struct page *page = ZERO_PAGE(0);
		/*
		 * A memory fault, but the filesystem has some outstanding
		 * mapped blocks.  We need to use those blocks up to avoid
		 * leaking stale data in the file.
		 */
		if (dio->page_errors == 0)
			dio->page_errors = ret;
		get_page(page);
		dio->pages[0] = page;
		sdio->head = 0;
		sdio->tail = 1;
		sdio->from = 0;
		sdio->to = PAGE_SIZE;
		return 0;
	}

	if (ret >= 0) {
		ret += sdio->from;
		sdio->head = 0;
		sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
		sdio->to = ((ret - 1) & (PAGE_SIZE - 1)) + 1;
		return 0;
	}
	return ret;
}

/*
 * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
 * buffered inside the dio so that we can call get_user_pages() against a
 * decent number of pages, less frequently.  To provide nicer use of the
 * L1 cache.
 */
static inline struct page *dio_get_page(struct dio *dio,
					struct dio_submit *sdio)
{
	if (dio_pages_present(sdio) == 0) {
		int ret;

		ret = dio_refill_pages(dio, sdio);
		if (ret)
			return ERR_PTR(ret);
		BUG_ON(dio_pages_present(sdio) == 0);
	}
	return dio->pages[sdio->head];
}

/*
 * dio_complete() - called when all DIO BIO I/O has been completed
 *
 * This drops i_dio_count, lets interested parties know that a DIO operation
 * has completed, and calculates the resulting return code for the operation.
 *
 * It lets the filesystem know if it registered an interest earlier via
 * get_block.  Pass the private field of the map buffer_head so that
 * filesystems can use it to hold additional state between get_block calls and
 * dio_complete.
 */
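
/*
 * Example of the short-read clamp performed below (illustrative numbers,
 * not from the original source): for a read of 8192 bytes at offset 4096
 * into a 6000-byte file, dio->result may come back as 8192, but i_size is
 * 6000, so transferred is clamped to 6000 - 4096 == 1904 and that is what
 * the caller ultimately sees as the return count.
 */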
static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
{
	const enum req_op dio_op = dio->opf & REQ_OP_MASK;
	loff_t offset = dio->iocb->ki_pos;
	ssize_t transferred = 0;
	int err;

	/*
	 * AIO submission can race with bio completion to get here while
	 * expecting to have the last io completed by bio completion.
	 * In that case -EIOCBQUEUED is in fact not an error we want
	 * to preserve through this call.
	 */
	if (ret == -EIOCBQUEUED)
		ret = 0;

	if (dio->result) {
		transferred = dio->result;

		/* Check for short read case */
		if (dio_op == REQ_OP_READ &&
		    ((offset + transferred) > dio->i_size))
			transferred = dio->i_size - offset;
		/* ignore EFAULT if some IO has been done */
		if (unlikely(ret == -EFAULT) && transferred)
			ret = 0;
	}

	if (ret == 0)
		ret = dio->page_errors;
	if (ret == 0)
		ret = dio->io_error;
	if (ret == 0)
		ret = transferred;

	if (dio->end_io) {
		// XXX: ki_pos??
		err = dio->end_io(dio->iocb, offset, ret, dio->private);
		if (err)
			ret = err;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (flags & DIO_COMPLETE_INVALIDATE &&
	    ret > 0 && dio_op == REQ_OP_WRITE &&
	    dio->inode->i_mapping->nrpages) {
		err = invalidate_inode_pages2_range(dio->inode->i_mapping,
					offset >> PAGE_SHIFT,
					(offset + ret - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(dio->iocb->ki_filp);
	}

	inode_dio_end(dio->inode);

	if (flags & DIO_COMPLETE_ASYNC) {
		/*
		 * generic_write_sync expects ki_pos to have been updated
		 * already, but the submission path only does this for
		 * synchronous I/O.
		 */
		dio->iocb->ki_pos += transferred;

		if (ret > 0 && dio_op == REQ_OP_WRITE)
			ret = generic_write_sync(dio->iocb, ret);
		dio->iocb->ki_complete(dio->iocb, ret);
	}

	kmem_cache_free(dio_cache, dio);
	return ret;
}

static void dio_aio_complete_work(struct work_struct *work)
{
	struct dio *dio = container_of(work, struct dio, complete_work);

	dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
}

static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);

/*
 * Asynchronous IO callback.
 */
static void dio_bio_end_aio(struct bio *bio)
{
	struct dio *dio = bio->bi_private;
	const enum req_op dio_op = dio->opf & REQ_OP_MASK;
	unsigned long remaining;
	unsigned long flags;
	bool defer_completion = false;

	/* cleanup the bio */
	dio_bio_complete(dio, bio);

	spin_lock_irqsave(&dio->bio_lock, flags);
	remaining = --dio->refcount;
	if (remaining == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (remaining == 0) {
		/*
		 * Defer completion when defer_completion is set or
		 * when the inode has pages mapped and this is AIO write.
		 * We need to invalidate those pages because there is a
		 * chance they contain stale data in the case buffered IO
		 * went in between AIO submission and completion into the
		 * same region.
		 */
		if (dio->result)
			defer_completion = dio->defer_completion ||
					   (dio_op == REQ_OP_WRITE &&
					    dio->inode->i_mapping->nrpages);
		if (defer_completion) {
			INIT_WORK(&dio->complete_work, dio_aio_complete_work);
			queue_work(dio->inode->i_sb->s_dio_done_wq,
				   &dio->complete_work);
		} else {
			dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
		}
	}
}

/*
 * The BIO completion handler simply queues the BIO up for the process-context
 * handler.
 *
 * During I/O bi_private points at the dio.  After I/O, bi_private is used to
 * implement a singly-linked list of completed BIOs, at dio->bio_list.
 */
static void dio_bio_end_io(struct bio *bio)
{
	struct dio *dio = bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dio->bio_lock, flags);
	bio->bi_private = dio->bio_list;
	dio->bio_list = bio;
	if (--dio->refcount == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);
}

static inline void
dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
	      struct block_device *bdev,
	      sector_t first_sector, int nr_vecs)
{
	struct bio *bio;

	/*
	 * bio_alloc() is guaranteed to return a bio when allowed to sleep and
	 * we request a valid number of vectors.
	 */
	bio = bio_alloc(bdev, nr_vecs, dio->opf, GFP_KERNEL);
	bio->bi_iter.bi_sector = first_sector;
	if (dio->is_async)
		bio->bi_end_io = dio_bio_end_aio;
	else
		bio->bi_end_io = dio_bio_end_io;
	sdio->bio = bio;
	sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
}

/*
 * In the AIO read case we speculatively dirty the pages before starting IO.
 * During IO completion, any of these pages which happen to have been written
 * back will be redirtied by bio_check_pages_dirty().
 *
 * bios hold a dio reference between submit_bio and ->end_io.
 */
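
/*
 * Rough sketch of the reference counting this implies (an editorial
 * illustration, not a statement from the original source): dio->refcount
 * is one reference held by the submitter plus one per bio in flight, so
 * with three bios outstanding it sits at 4.  Each bio completion drops one
 * reference, dio_await_one() sleeps while refcount > 1, and the
 * submitter's own reference is dropped last via drop_refcount().
 */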
static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
{
	const enum req_op dio_op = dio->opf & REQ_OP_MASK;
	struct bio *bio = sdio->bio;
	unsigned long flags;

	bio->bi_private = dio;

	spin_lock_irqsave(&dio->bio_lock, flags);
	dio->refcount++;
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (dio->is_async && dio_op == REQ_OP_READ && dio->should_dirty)
		bio_set_pages_dirty(bio);

	dio->bio_disk = bio->bi_bdev->bd_disk;

	submit_bio(bio);

	sdio->bio = NULL;
	sdio->boundary = 0;
	sdio->logical_offset_in_bio = 0;
}

/*
 * Release any resources in case of a failure
 */
static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
{
	while (sdio->head < sdio->tail)
		put_page(dio->pages[sdio->head++]);
}

/*
 * Wait for the next BIO to complete.  Remove it and return it.  NULL is
 * returned once all BIOs have been completed.  This must only be called once
 * all bios have been issued so that dio->refcount can only decrease.  This
 * requires that the caller hold a reference on the dio.
 */
static struct bio *dio_await_one(struct dio *dio)
{
	unsigned long flags;
	struct bio *bio = NULL;

	spin_lock_irqsave(&dio->bio_lock, flags);

	/*
	 * Wait as long as the list is empty and there are bios in flight.  bio
	 * completion drops the count, maybe adds to the list, and wakes while
	 * holding the bio_lock so we don't need set_current_state()'s barrier
	 * and can call it after testing our condition.
	 */
	while (dio->refcount > 1 && dio->bio_list == NULL) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		dio->waiter = current;
		spin_unlock_irqrestore(&dio->bio_lock, flags);
		blk_io_schedule();
		/* wake up sets us TASK_RUNNING */
		spin_lock_irqsave(&dio->bio_lock, flags);
		dio->waiter = NULL;
	}
	if (dio->bio_list) {
		bio = dio->bio_list;
		dio->bio_list = bio->bi_private;
	}
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return bio;
}

/*
 * Process one completed BIO.  No locks are held.
 */
static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
{
	blk_status_t err = bio->bi_status;
	const enum req_op dio_op = dio->opf & REQ_OP_MASK;
	bool should_dirty = dio_op == REQ_OP_READ && dio->should_dirty;

	if (err) {
		if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT))
			dio->io_error = -EAGAIN;
		else
			dio->io_error = -EIO;
	}

	if (dio->is_async && should_dirty) {
		bio_check_pages_dirty(bio);	/* transfers ownership */
	} else {
		bio_release_pages(bio, should_dirty);
		bio_put(bio);
	}
	return err;
}

/*
 * Wait on and process all in-flight BIOs.  This must only be called once
 * all bios have been issued so that the refcount can only decrease.
 * This just waits for all bios to make it through dio_bio_complete.  IO
 * errors are propagated through dio->io_error and should be propagated via
 * dio_complete().
 */
static void dio_await_completion(struct dio *dio)
{
	struct bio *bio;
	do {
		bio = dio_await_one(dio);
		if (bio)
			dio_bio_complete(dio, bio);
	} while (bio);
}

/*
 * A really large O_DIRECT read or write can generate a lot of BIOs.  So
 * to keep the memory consumption sane we periodically reap any completed BIOs
 * during the BIO generation phase.
 *
 * This also helps to limit the peak amount of pinned userspace memory.
 */
static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
{
	int ret = 0;

	if (sdio->reap_counter++ >= 64) {
		while (dio->bio_list) {
			unsigned long flags;
			struct bio *bio;
			int ret2;

			spin_lock_irqsave(&dio->bio_lock, flags);
			bio = dio->bio_list;
			dio->bio_list = bio->bi_private;
			spin_unlock_irqrestore(&dio->bio_lock, flags);
			ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
			if (ret == 0)
				ret = ret2;
		}
		sdio->reap_counter = 0;
	}
	return ret;
}

static int dio_set_defer_completion(struct dio *dio)
{
	struct super_block *sb = dio->inode->i_sb;

	if (dio->defer_completion)
		return 0;
	dio->defer_completion = true;
	if (!sb->s_dio_done_wq)
		return sb_init_dio_done_wq(sb);
	return 0;
}

/*
 * Call into the fs to map some more disk blocks.  We record the current number
 * of available blocks at sdio->blocks_available.  These are in units of the
 * fs blocksize, i_blocksize(inode).
 *
 * The fs is allowed to map lots of blocks at once.  If it wants to do that,
 * it uses the passed inode-relative block number as the file offset, as usual.
 *
 * get_block() is passed the number of i_blkbits-sized blocks which direct_io
 * has remaining to do.  The fs should not map more than this number of blocks.
 *
 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
 * indicate how much contiguous disk space has been made available at
 * bh->b_blocknr.
 *
 * If *any* of the mapped blocks are new, then the fs must set buffer_new().
 * This isn't very efficient...
 *
 * In the case of filesystem holes: the fs may return an arbitrarily-large
 * hole by returning an appropriate value in b_size and by clearing
 * buffer_mapped().  However the direct-io code will only process holes one
 * block at a time - it will repeatedly call get_block() as it walks the hole.
 */
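
/*
 * Hypothetical call under this contract (numbers are illustrative, not from
 * the original source): with 4096-byte fs blocks (i_blkbits == 12) and
 * 32 KiB of the request remaining, the caller sets map_bh->b_size to
 * 8 << 12 == 32768 before calling ->get_block().  The filesystem may map
 * fewer bytes; whatever it maps contiguously it reports back through
 * b_blocknr and b_size, and it sets buffer_new() if any of those blocks
 * were just allocated.
 */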
static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
			   struct buffer_head *map_bh)
{
	const enum req_op dio_op = dio->opf & REQ_OP_MASK;
	int ret;
	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
	sector_t fs_endblk;	/* Into file, in filesystem-sized blocks */
	unsigned long fs_count;	/* Number of filesystem-sized blocks */
	int create;
	unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;
	loff_t i_size;

	/*
	 * If there was a memory error and we've overwritten all the
	 * mapped blocks then we can now return that memory error
	 */
	ret = dio->page_errors;
	if (ret == 0) {
		BUG_ON(sdio->block_in_file >= sdio->final_block_in_request);
		fs_startblk = sdio->block_in_file >> sdio->blkfactor;
		fs_endblk = (sdio->final_block_in_request - 1) >>
					sdio->blkfactor;
		fs_count = fs_endblk - fs_startblk + 1;

		map_bh->b_state = 0;
		map_bh->b_size = fs_count << i_blkbits;

		/*
		 * For writes that could fill holes inside i_size on a
		 * DIO_SKIP_HOLES filesystem we forbid block creations: only
		 * overwrites are permitted. We will return early to the caller
		 * once we see an unmapped buffer head returned, and the caller
		 * will fall back to buffered I/O.
		 *
		 * Otherwise the decision is left to the get_blocks method,
		 * which may decide to handle it or also return an unmapped
		 * buffer head.
		 */
		create = dio_op == REQ_OP_WRITE;
		if (dio->flags & DIO_SKIP_HOLES) {
			i_size = i_size_read(dio->inode);
			if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits)
				create = 0;
		}

		ret = (*sdio->get_block)(dio->inode, fs_startblk,
						map_bh, create);

		/* Store for completion */
		dio->private = map_bh->b_private;

		if (ret == 0 && buffer_defer_completion(map_bh))
			ret = dio_set_defer_completion(dio);
	}
	return ret;
}

/*
 * There is no bio.  Make one now.
 */
static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
		sector_t start_sector, struct buffer_head *map_bh)
{
	sector_t sector;
	int ret, nr_pages;

	ret = dio_bio_reap(dio, sdio);
	if (ret)
		goto out;
	sector = start_sector << (sdio->blkbits - 9);
	nr_pages = bio_max_segs(sdio->pages_in_io);
	BUG_ON(nr_pages <= 0);
	dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
	sdio->boundary = 0;
out:
	return ret;
}

/*
 * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
 * that was successful then update final_block_in_bio and take a ref against
 * the just-added page.
 *
 * Return zero on success.  Non-zero means the caller needs to start a new BIO.
 */
static inline int dio_bio_add_page(struct dio_submit *sdio)
{
	int ret;

	ret = bio_add_page(sdio->bio, sdio->cur_page,
			sdio->cur_page_len, sdio->cur_page_offset);
	if (ret == sdio->cur_page_len) {
		/*
		 * Decrement count only, if we are done with this page
		 */
		if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
			sdio->pages_in_io--;
		get_page(sdio->cur_page);
		sdio->final_block_in_bio = sdio->cur_page_block +
			(sdio->cur_page_len >> sdio->blkbits);
		ret = 0;
	} else {
		ret = 1;
	}
	return ret;
}

/*
 * Put cur_page under IO.  The section of cur_page which is described by
 * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
 * starts on-disk at cur_page_block.
 *
 * We take a ref against the page here (on behalf of its presence in the bio).
 *
 * The caller of this function is responsible for removing cur_page from the
 * dio, and for dropping the refcount which came from that presence.
 */
static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
		struct buffer_head *map_bh)
{
	int ret = 0;

	if (sdio->bio) {
		loff_t cur_offset = sdio->cur_page_fs_offset;
		loff_t bio_next_offset = sdio->logical_offset_in_bio +
			sdio->bio->bi_iter.bi_size;

		/*
		 * See whether this new request is contiguous with the old.
		 *
		 * Btrfs cannot handle having logically non-contiguous requests
		 * submitted.  For example if you have
		 *
		 * Logical:  [0-4095][HOLE][8192-12287]
		 * Physical: [0-4095]      [4096-8191]
		 *
		 * We cannot submit those pages together as one BIO.  So if our
		 * current logical offset in the file does not equal what would
		 * be the next logical offset in the bio, submit the bio we
		 * have.
		 */
		if (sdio->final_block_in_bio != sdio->cur_page_block ||
		    cur_offset != bio_next_offset)
			dio_bio_submit(dio, sdio);
	}

	if (sdio->bio == NULL) {
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret)
			goto out;
	}

	if (dio_bio_add_page(sdio) != 0) {
		dio_bio_submit(dio, sdio);
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret == 0) {
			ret = dio_bio_add_page(sdio);
			BUG_ON(ret != 0);
		}
	}
out:
	return ret;
}

/*
 * An autonomous function to put a chunk of a page under deferred IO.
 *
 * The caller doesn't actually know (or care) whether this piece of page is in
 * a BIO, or is under IO or whatever.  We just take care of all possible
 * situations here.  The separation between the logic of do_direct_IO() and
 * that of submit_page_section() is important for clarity.  Please don't break.
 *
 * The chunk of page starts on-disk at blocknr.
 *
 * We perform deferred IO, by recording the last-submitted page inside our
 * private part of the dio structure.  If possible, we just expand the IO
 * across that page here.
 *
 * If that doesn't work out then we put the old page into the bio and add this
 * page to the dio instead.
 */
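
/*
 * Illustrative example of the deferred-IO coalescing described above
 * (made-up values): two consecutive 512-byte chunks of the same page, at
 * page offsets 0 and 512 and backed by adjacent dio blocks, arrive as two
 * calls; the second call only grows sdio->cur_page_len by 512, and nothing
 * is sent to a bio until the page changes, the run stops being contiguous,
 * or a boundary block forces a submit.
 */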
static inline int
submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
		    unsigned offset, unsigned len, sector_t blocknr,
		    struct buffer_head *map_bh)
{
	const enum req_op dio_op = dio->opf & REQ_OP_MASK;
	int ret = 0;
	int boundary = sdio->boundary;	/* dio_send_cur_page may clear it */

	if (dio_op == REQ_OP_WRITE) {
		/*
		 * Read accounting is performed in submit_bio()
		 */
		task_io_account_write(len);
	}

	/*
	 * Can we just grow the current page's presence in the dio?
	 */
	if (sdio->cur_page == page &&
	    sdio->cur_page_offset + sdio->cur_page_len == offset &&
	    sdio->cur_page_block +
	    (sdio->cur_page_len >> sdio->blkbits) == blocknr) {
		sdio->cur_page_len += len;
		goto out;
	}

	/*
	 * If there's a deferred page already there then send it.
	 */
	if (sdio->cur_page) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		put_page(sdio->cur_page);
		sdio->cur_page = NULL;
		if (ret)
			return ret;
	}

	get_page(page);		/* It is in dio */
	sdio->cur_page = page;
	sdio->cur_page_offset = offset;
	sdio->cur_page_len = len;
	sdio->cur_page_block = blocknr;
	sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
out:
	/*
	 * If boundary then we want to schedule the IO now to
	 * avoid metadata seeks.
	 */
	if (boundary) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		if (sdio->bio)
			dio_bio_submit(dio, sdio);
		put_page(sdio->cur_page);
		sdio->cur_page = NULL;
	}
	return ret;
}

/*
 * If we are not writing the entire block and get_block() allocated
 * the block for us, we need to fill-in the unused portion of the
 * block with zeros. This happens only if user-buffer, fileoffset or
 * io length is not filesystem block-size multiple.
 *
 * `end' is zero if we're doing the start of the IO, 1 at the end of the
 * IO.
 */
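
/*
 * Worked example for the zeroing below (illustrative, assuming 4096-byte
 * fs blocks and 512-byte dio blocks, i.e. blkfactor == 3): a write that
 * starts at dio block 5 within a newly allocated fs block has
 * this_chunk_blocks == 5 at the front (end == 0), so the first five dio
 * blocks of that fs block are zeroed; symmetrically, a write that ends at
 * dio block 5 within its last fs block gets 8 - 5 == 3 trailing dio blocks
 * zeroed (end == 1).
 */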
static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
		int end, struct buffer_head *map_bh)
{
	unsigned dio_blocks_per_fs_block;
	unsigned this_chunk_blocks;	/* In dio_blocks */
	unsigned this_chunk_bytes;
	struct page *page;

	sdio->start_zero_done = 1;
	if (!sdio->blkfactor || !buffer_new(map_bh))
		return;

	dio_blocks_per_fs_block = 1 << sdio->blkfactor;
	this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1);

	if (!this_chunk_blocks)
		return;

	/*
	 * We need to zero out part of an fs block.  It is either at the
	 * beginning or the end of the fs block.
	 */
	if (end)
		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;

	this_chunk_bytes = this_chunk_blocks << sdio->blkbits;

	page = ZERO_PAGE(0);
	if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
				sdio->next_block_for_io, map_bh))
		return;

	sdio->next_block_for_io += this_chunk_blocks;
}

/*
 * Walk the user pages, and the file, mapping blocks to disk and generating
 * a sequence of (page,offset,len,block) mappings.  These mappings are injected
 * into submit_page_section(), which takes care of the next stage of submission
 *
 * Direct IO against a blockdev is different from a file.  Because we can
 * happily perform page-sized but 512-byte aligned IOs.  It is important that
 * blockdev IO be able to have fine alignment and large sizes.
 *
 * So what we do is to permit the ->get_block function to populate bh.b_size
 * with the size of IO which is permitted at this offset and this i_blkbits.
 *
 * For best results, the blockdev should be set up with 512-byte i_blkbits and
 * it should set b_size to PAGE_SIZE or more inside get_block().  This gives
 * fine alignment but still allows this function to work in PAGE_SIZE units.
 */
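
/*
 * Illustrative blockdev case for the above (assumed numbers, 4 KiB pages):
 * with 512-byte i_blkbits the dio block size is 512, so a request only has
 * to be 512-byte aligned; if the blockdev's get_block() reports
 * b_size == PAGE_SIZE, blocks_available covers eight 512-byte dio blocks
 * at once and the loop below can still move a whole page per iteration.
 */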
static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
			struct buffer_head *map_bh)
{
	const enum req_op dio_op = dio->opf & REQ_OP_MASK;
	const unsigned blkbits = sdio->blkbits;
	const unsigned i_blkbits = blkbits + sdio->blkfactor;
	int ret = 0;

	while (sdio->block_in_file < sdio->final_block_in_request) {
		struct page *page;
		size_t from, to;

		page = dio_get_page(dio, sdio);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		from = sdio->head ? 0 : sdio->from;
		to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
		sdio->head++;

		while (from < to) {
			unsigned this_chunk_bytes;	/* # of bytes mapped */
			unsigned this_chunk_blocks;	/* # of blocks */
			unsigned u;

			if (sdio->blocks_available == 0) {
				/*
				 * Need to go and map some more disk
				 */
				unsigned long blkmask;
				unsigned long dio_remainder;

				ret = get_more_blocks(dio, sdio, map_bh);
				if (ret) {
					put_page(page);
					goto out;
				}
				if (!buffer_mapped(map_bh))
					goto do_holes;

				sdio->blocks_available =
						map_bh->b_size >> blkbits;
				sdio->next_block_for_io =
					map_bh->b_blocknr << sdio->blkfactor;
				if (buffer_new(map_bh)) {
					clean_bdev_aliases(
						map_bh->b_bdev,
						map_bh->b_blocknr,
						map_bh->b_size >> i_blkbits);
				}

				if (!sdio->blkfactor)
					goto do_holes;

				blkmask = (1 << sdio->blkfactor) - 1;
				dio_remainder = (sdio->block_in_file & blkmask);

				/*
				 * If we are at the start of IO and that IO
				 * starts partway into a fs-block,
				 * dio_remainder will be non-zero.  If the IO
				 * is a read then we can simply advance the IO
				 * cursor to the first block which is to be
				 * read.  But if the IO is a write and the
				 * block was newly allocated we cannot do that;
				 * the start of the fs block must be zeroed out
				 * on-disk
				 */
				if (!buffer_new(map_bh))
					sdio->next_block_for_io += dio_remainder;
				sdio->blocks_available -= dio_remainder;
			}
do_holes:
			/* Handle holes */
			if (!buffer_mapped(map_bh)) {
				loff_t i_size_aligned;

				/* AKPM: eargh, -ENOTBLK is a hack */
				if (dio_op == REQ_OP_WRITE) {
					put_page(page);
					return -ENOTBLK;
				}

				/*
				 * Be sure to account for a partial block as the
				 * last block in the file
				 */
				i_size_aligned = ALIGN(i_size_read(dio->inode),
							1 << blkbits);
				if (sdio->block_in_file >=
						i_size_aligned >> blkbits) {
					/* We hit eof */
					put_page(page);
					goto out;
				}
				zero_user(page, from, 1 << blkbits);
				sdio->block_in_file++;
				from += 1 << blkbits;
				dio->result += 1 << blkbits;
				goto next_block;
			}

			/*
			 * If we're performing IO which has an alignment which
			 * is finer than the underlying fs, go check to see if
			 * we must zero out the start of this block.
			 */
			if (unlikely(sdio->blkfactor && !sdio->start_zero_done))
				dio_zero_block(dio, sdio, 0, map_bh);

			/*
			 * Work out, in this_chunk_blocks, how much disk we
			 * can add to this page
			 */
			this_chunk_blocks = sdio->blocks_available;
			u = (to - from) >> blkbits;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			u = sdio->final_block_in_request - sdio->block_in_file;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			this_chunk_bytes = this_chunk_blocks << blkbits;
			BUG_ON(this_chunk_bytes == 0);

			if (this_chunk_blocks == sdio->blocks_available)
				sdio->boundary = buffer_boundary(map_bh);
			ret = submit_page_section(dio, sdio, page,
						  from,
						  this_chunk_bytes,
						  sdio->next_block_for_io,
						  map_bh);
			if (ret) {
				put_page(page);
				goto out;
			}
			sdio->next_block_for_io += this_chunk_blocks;

			sdio->block_in_file += this_chunk_blocks;
			from += this_chunk_bytes;
			dio->result += this_chunk_bytes;
			sdio->blocks_available -= this_chunk_blocks;
next_block:
			BUG_ON(sdio->block_in_file > sdio->final_block_in_request);
			if (sdio->block_in_file == sdio->final_block_in_request)
				break;
		}

		/* Drop the ref which was taken in get_user_pages() */
		put_page(page);
	}
out:
	return ret;
}

static inline int drop_refcount(struct dio *dio)
{
	int ret2;
	unsigned long flags;

	/*
	 * Sync will always be dropping the final ref and completing the
	 * operation.  AIO can if it was a broken operation described above or
	 * in fact if all the bios race to complete before we get here.  In
	 * that case dio_complete() translates the EIOCBQUEUED into the proper
	 * return code that the caller will hand to ->complete().
	 *
	 * This is managed by the bio_lock instead of being an atomic_t so that
	 * completion paths can drop their ref and use the remaining count to
	 * decide to wake the submission path atomically.
	 */
	spin_lock_irqsave(&dio->bio_lock, flags);
	ret2 = --dio->refcount;
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return ret2;
}

/*
 * This is a library function for use by filesystem drivers.
 *
 * The locking rules are governed by the flags parameter:
 *  - if the flags value contains DIO_LOCKING we use a fancy locking
 *    scheme for dumb filesystems.
 *    For writes this function is called under i_mutex and returns with
 *    i_mutex held, for reads, i_mutex is not held on entry, but it is
 *    taken and dropped again before returning.
 *  - if the flags value does NOT contain DIO_LOCKING we don't use any
 *    internal locking but rather rely on the filesystem to synchronize
 *    direct I/O reads/writes versus each other and truncate.
 *
 * To help with locking against truncate we incremented the i_dio_count
 * counter before starting direct I/O, and decrement it once we are done.
1084df2d6f26SChristoph Hellwig * Truncate can wait for it to reach zero to provide exclusion. It is
1085df2d6f26SChristoph Hellwig * expected that filesystems provide exclusion between new direct I/O
1086df2d6f26SChristoph Hellwig * and truncates. For DIO_LOCKING filesystems this is done by i_mutex,
1087df2d6f26SChristoph Hellwig * but other filesystems need to take care of this on their own.
1088ba253fbfSAndi Kleen *
1089ba253fbfSAndi Kleen * NOTE: if you pass "sdio" to anything by pointer make sure that function
1090ba253fbfSAndi Kleen * is always inlined. Otherwise gcc is unable to split the structure into
1091ba253fbfSAndi Kleen * individual fields and will generate much worse code. This is important
1092ba253fbfSAndi Kleen * for the whole file.
1093eafdc7d1SChristoph Hellwig */
1094c22198e7SChristoph Hellwig ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
109517f8c842SOmar Sandoval struct block_device *bdev, struct iov_iter *iter,
1096c8b8e32dSChristoph Hellwig get_block_t get_block, dio_iodone_t end_io,
1097*0aaf08deSAl Viro int flags)
10981da177e4SLinus Torvalds {
10996aa7de05SMark Rutland unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
1100ab73857eSLinus Torvalds unsigned blkbits = i_blkbits;
11011da177e4SLinus Torvalds unsigned blocksize_mask = (1 << blkbits) - 1;
11021da177e4SLinus Torvalds ssize_t retval = -EINVAL;
11031c0ff0f1SNikolay Borisov const size_t count = iov_iter_count(iter);
1104c8b8e32dSChristoph Hellwig loff_t offset = iocb->ki_pos;
11051c0ff0f1SNikolay Borisov const loff_t end = offset + count;
11061da177e4SLinus Torvalds struct dio *dio;
1107eb28be2bSAndi Kleen struct dio_submit sdio = { 0, };
1108847cc637SAndi Kleen struct buffer_head map_bh = { 0, };
1109647d1e4cSFengguang Wu struct blk_plug plug;
1110886a3911SAl Viro unsigned long align = offset | iov_iter_alignment(iter);
11111da177e4SLinus Torvalds
111265dd2aa9SAndi Kleen /*
111265dd2aa9SAndi Kleen * Avoid references to bdev if not absolutely needed to give
111465dd2aa9SAndi Kleen * the early prefetch in the caller enough time.
111565dd2aa9SAndi Kleen */
11161da177e4SLinus Torvalds
1117f9b5570dSChristoph Hellwig /* watch out for a 0 len io from a tricksy fs */
11181c0ff0f1SNikolay Borisov if (iov_iter_rw(iter) == READ && !count)
1119f9b5570dSChristoph Hellwig return 0;
1120f9b5570dSChristoph Hellwig
11216e8267f5SAndi Kleen dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
11221da177e4SLinus Torvalds if (!dio)
112346d71602SGabriel Krisman Bertazi return -ENOMEM;
112423aee091SJeff Moyer /*
112523aee091SJeff Moyer * Believe it or not, zeroing out the page array caused a .5%
112623aee091SJeff Moyer * performance regression in a database benchmark. So, we take
112723aee091SJeff Moyer * care to only zero out what's needed.
112823aee091SJeff Moyer */
112923aee091SJeff Moyer memset(dio, 0, offsetof(struct dio, pages));
11301da177e4SLinus Torvalds
11315fe878aeSChristoph Hellwig dio->flags = flags;
11320a9164cbSGabriel Krisman Bertazi if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
11335fe878aeSChristoph Hellwig /* will be released by direct_io_worker */
11345955102cSAl Viro inode_lock(inode);
1135df2d6f26SChristoph Hellwig }
11361da177e4SLinus Torvalds
113774cedf9bSJan Kara /* Once we sampled i_size check for reads beyond EOF */
113874cedf9bSJan Kara dio->i_size = i_size_read(inode);
113974cedf9bSJan Kara if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
11402d4594acSAl Viro retval = 0;
114146d71602SGabriel Krisman Bertazi goto fail_dio;
114274cedf9bSJan Kara }
114374cedf9bSJan Kara
114441b21af3SGabriel Krisman Bertazi if (align & blocksize_mask) {
114541b21af3SGabriel Krisman Bertazi if (bdev)
114641b21af3SGabriel Krisman Bertazi blkbits = blksize_bits(bdev_logical_block_size(bdev));
114741b21af3SGabriel Krisman Bertazi blocksize_mask = (1 << blkbits) - 1;
114841b21af3SGabriel Krisman Bertazi if (align & blocksize_mask)
114941b21af3SGabriel Krisman Bertazi goto fail_dio;
115041b21af3SGabriel Krisman Bertazi }
115141b21af3SGabriel Krisman Bertazi
11520a9164cbSGabriel Krisman Bertazi if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
11530a9164cbSGabriel Krisman Bertazi struct address_space *mapping = iocb->ki_filp->f_mapping;
11540a9164cbSGabriel Krisman Bertazi
11550a9164cbSGabriel Krisman Bertazi retval = filemap_write_and_wait_range(mapping, offset, end - 1);
11560a9164cbSGabriel Krisman Bertazi if (retval)
11570a9164cbSGabriel Krisman Bertazi goto fail_dio;
11581da177e4SLinus Torvalds }
11591da177e4SLinus Torvalds
11605fe878aeSChristoph Hellwig /*
116160392573SChristoph Hellwig * For file-extending writes, updating i_size before data writeouts
116260392573SChristoph Hellwig * complete can expose uninitialized blocks in dumb filesystems.
116360392573SChristoph Hellwig * In that case we need to wait for I/O completion even if asked
116460392573SChristoph Hellwig * for an asynchronous write.
11651da177e4SLinus Torvalds */
116660392573SChristoph Hellwig if (is_sync_kiocb(iocb))
116760392573SChristoph Hellwig dio->is_async = false;
1168c8f4c36fSNikolay Borisov else if (iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
116960392573SChristoph Hellwig dio->is_async = false;
117060392573SChristoph Hellwig else
117160392573SChristoph Hellwig dio->is_async = true;
117260392573SChristoph Hellwig
1173847cc637SAndi Kleen dio->inode = inode;
11748a4c1e42SMike Christie if (iov_iter_rw(iter) == WRITE) {
1175c6293eacSBart Van Assche dio->opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
117603a07c92SGoldwyn Rodrigues if (iocb->ki_flags & IOCB_NOWAIT)
1177c6293eacSBart Van Assche dio->opf |= REQ_NOWAIT;
11788a4c1e42SMike Christie } else {
1179c6293eacSBart Van Assche dio->opf = REQ_OP_READ;
11808a4c1e42SMike Christie }
118102afc27fSChristoph Hellwig
118202afc27fSChristoph Hellwig /*
118302afc27fSChristoph Hellwig * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
118402afc27fSChristoph Hellwig * so that we can call ->fsync.
118502afc27fSChristoph Hellwig */
1186332391a9SLukas Czerner if (dio->is_async && iov_iter_rw(iter) == WRITE) {
1187332391a9SLukas Czerner retval = 0;
118891b94c5dSAl Viro if (iocb_is_dsync(iocb))
118902afc27fSChristoph Hellwig retval = dio_set_defer_completion(dio);
1190332391a9SLukas Czerner else if (!dio->inode->i_sb->s_dio_done_wq) {
1191332391a9SLukas Czerner /*
1192332391a9SLukas Czerner * In case of AIO write racing with buffered read we
1193332391a9SLukas Czerner * need to defer completion. We can't decide this now,
1194332391a9SLukas Czerner * however the workqueue needs to be initialized here.
1195332391a9SLukas Czerner */
1196332391a9SLukas Czerner retval = sb_init_dio_done_wq(dio->inode->i_sb);
1197332391a9SLukas Czerner }
119846d71602SGabriel Krisman Bertazi if (retval)
119946d71602SGabriel Krisman Bertazi goto fail_dio;
120002afc27fSChristoph Hellwig }
120102afc27fSChristoph Hellwig
120202afc27fSChristoph Hellwig /*
120302afc27fSChristoph Hellwig * Will be decremented at I/O completion time.
120402afc27fSChristoph Hellwig */
1205fe0f07d0SJens Axboe inode_dio_begin(inode);
120602afc27fSChristoph Hellwig
120702afc27fSChristoph Hellwig retval = 0;
1208847cc637SAndi Kleen sdio.blkbits = blkbits;
1209ab73857eSLinus Torvalds sdio.blkfactor = i_blkbits - blkbits;
1210847cc637SAndi Kleen sdio.block_in_file = offset >> blkbits;
1211847cc637SAndi Kleen
1212847cc637SAndi Kleen sdio.get_block = get_block;
1213847cc637SAndi Kleen dio->end_io = end_io;
1214847cc637SAndi Kleen sdio.final_block_in_bio = -1;
1215847cc637SAndi Kleen sdio.next_block_for_io = -1;
1216847cc637SAndi Kleen
1217847cc637SAndi Kleen dio->iocb = iocb;
1218847cc637SAndi Kleen
1219847cc637SAndi Kleen spin_lock_init(&dio->bio_lock);
1220847cc637SAndi Kleen dio->refcount = 1;
1221847cc637SAndi Kleen
1222fcb14cb1SAl Viro dio->should_dirty = user_backed_iter(iter) && iov_iter_rw(iter) == READ;
12237b2c99d1SAl Viro sdio.iter = iter;
12241c0ff0f1SNikolay Borisov sdio.final_block_in_request = end >> blkbits;
12257b2c99d1SAl Viro
1226847cc637SAndi Kleen /*
1227847cc637SAndi Kleen * In case of non-aligned buffers, we may need 2 more
1228847cc637SAndi Kleen * pages since we need to zero out first and last block.
1229847cc637SAndi Kleen */
1230847cc637SAndi Kleen if (unlikely(sdio.blkfactor))
1231847cc637SAndi Kleen sdio.pages_in_io = 2;
1232847cc637SAndi Kleen
1233f67da30cSAl Viro sdio.pages_in_io += iov_iter_npages(iter, INT_MAX);
1234847cc637SAndi Kleen
1235647d1e4cSFengguang Wu blk_start_plug(&plug);
1236647d1e4cSFengguang Wu
1237847cc637SAndi Kleen retval = do_direct_IO(dio, &sdio, &map_bh);
12387b2c99d1SAl Viro if (retval)
1239847cc637SAndi Kleen dio_cleanup(dio, &sdio);
1240847cc637SAndi Kleen
1241847cc637SAndi Kleen if (retval == -ENOTBLK) {
1242847cc637SAndi Kleen /*
1243847cc637SAndi Kleen * The remaining part of the request will be
12443d742d4bSRandy Dunlap * handled by buffered I/O when we return
1245847cc637SAndi Kleen */
1246847cc637SAndi Kleen retval = 0;
1247847cc637SAndi Kleen }
1248847cc637SAndi Kleen /*
1249847cc637SAndi Kleen * There may be some unwritten disk at the end of a part-written
1250847cc637SAndi Kleen * fs-block-sized block. Go zero that now.
1251847cc637SAndi Kleen */
1252847cc637SAndi Kleen dio_zero_block(dio, &sdio, 1, &map_bh);
1253847cc637SAndi Kleen
1254847cc637SAndi Kleen if (sdio.cur_page) {
1255847cc637SAndi Kleen ssize_t ret2;
1256847cc637SAndi Kleen
1257847cc637SAndi Kleen ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
1258847cc637SAndi Kleen if (retval == 0)
1259847cc637SAndi Kleen retval = ret2;
126009cbfeafSKirill A. Shutemov put_page(sdio.cur_page);
1261847cc637SAndi Kleen sdio.cur_page = NULL;
1262847cc637SAndi Kleen }
1263847cc637SAndi Kleen if (sdio.bio)
1264847cc637SAndi Kleen dio_bio_submit(dio, &sdio);
1265847cc637SAndi Kleen
1266647d1e4cSFengguang Wu blk_finish_plug(&plug);
1267647d1e4cSFengguang Wu
1268847cc637SAndi Kleen /*
1269847cc637SAndi Kleen * It is possible that we return a short IO due to end of file.
1270847cc637SAndi Kleen * In that case, we need to release all the pages we got hold of.
1271847cc637SAndi Kleen */
1272847cc637SAndi Kleen dio_cleanup(dio, &sdio);
1273847cc637SAndi Kleen
1274847cc637SAndi Kleen /*
1275847cc637SAndi Kleen * All block lookups have been performed. For READ requests
1276847cc637SAndi Kleen * we can let i_mutex go now that it has achieved its purpose
1277847cc637SAndi Kleen * of protecting us from looking up uninitialized blocks.
1278847cc637SAndi Kleen */
127917f8c842SOmar Sandoval if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
12805955102cSAl Viro inode_unlock(dio->inode);
1281847cc637SAndi Kleen
1282847cc637SAndi Kleen /*
1283847cc637SAndi Kleen * The only time we want to leave bios in flight is when a successful
1284847cc637SAndi Kleen * partial aio read or full aio write has been set up. In that case
1285847cc637SAndi Kleen * bio completion will call aio_complete. The only time it's safe to
1286847cc637SAndi Kleen * call aio_complete is when we return -EIOCBQUEUED, so we key on that.
1287847cc637SAndi Kleen * This had *better* be the only place that raises -EIOCBQUEUED.
1288847cc637SAndi Kleen */
1289847cc637SAndi Kleen BUG_ON(retval == -EIOCBQUEUED);
1290847cc637SAndi Kleen if (dio->is_async && retval == 0 && dio->result &&
129117f8c842SOmar Sandoval (iov_iter_rw(iter) == READ || dio->result == count))
1292847cc637SAndi Kleen retval = -EIOCBQUEUED;
1293af436472SChristoph Hellwig else
1294847cc637SAndi Kleen dio_await_completion(dio);
1295847cc637SAndi Kleen
1296847cc637SAndi Kleen if (drop_refcount(dio) == 0) {
1297ffe51f01SLukas Czerner retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
1298847cc637SAndi Kleen } else
1299847cc637SAndi Kleen BUG_ON(retval != -EIOCBQUEUED);
13001da177e4SLinus Torvalds
130146d71602SGabriel Krisman Bertazi return retval;
130246d71602SGabriel Krisman Bertazi
130346d71602SGabriel Krisman Bertazi fail_dio:
130446d71602SGabriel Krisman Bertazi if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ)
130546d71602SGabriel Krisman Bertazi inode_unlock(inode);
130646d71602SGabriel Krisman Bertazi
130746d71602SGabriel Krisman Bertazi kmem_cache_free(dio_cache, dio);
13087bb46a67Snpiggin@suse.de return retval;
13097bb46a67Snpiggin@suse.de }
13101da177e4SLinus Torvalds EXPORT_SYMBOL(__blockdev_direct_IO);
13116e8267f5SAndi Kleen
13126e8267f5SAndi Kleen static __init int dio_init(void)
13136e8267f5SAndi Kleen {
13146e8267f5SAndi Kleen dio_cache = KMEM_CACHE(dio, SLAB_PANIC);
13156e8267f5SAndi Kleen return 0;
13166e8267f5SAndi Kleen }
13176e8267f5SAndi Kleen module_init(dio_init)
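/*
 * Editorial example (not part of fs/direct-io.c): a minimal sketch of how a
 * filesystem driver is expected to consume this library, assuming the
 * blockdev_direct_IO() convenience wrapper declared in <linux/fs.h>, which
 * passes DIO_LOCKING | DIO_SKIP_HOLES and a NULL end_io down to
 * __blockdev_direct_IO(). All myfs_* names below are hypothetical.
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/uio.h>

/* Hypothetical get_block_t callback: map logical block @iblock of @inode. */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	/*
	 * A real filesystem would look up (and, when @create is set for a
	 * write, allocate) the on-disk block here and describe it with
	 * map_bh(bh_result, inode->i_sb, pblock); marking freshly allocated
	 * blocks with set_buffer_new() is what lets the dio code zero the
	 * unwritten parts of a partially used fs block.
	 */
	return -EIO;		/* placeholder only */
}

/* Hypothetical ->direct_IO method in myfs's address_space_operations. */
static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/* Locking (DIO_LOCKING) and hole handling are left to the library. */
	return blockdev_direct_IO(iocb, inode, iter, myfs_get_block);
}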