xref: /openbmc/linux/fs/direct-io.c (revision 03a07c92a9ed9938d828ca7f1d11b8bc63a7bb89)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * fs/direct-io.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright (C) 2002, Linus Torvalds.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  * O_DIRECT
71da177e4SLinus Torvalds  *
8e1f8e874SFrancois Cami  * 04Jul2002	Andrew Morton
91da177e4SLinus Torvalds  *		Initial version
101da177e4SLinus Torvalds  * 11Sep2002	janetinc@us.ibm.com
111da177e4SLinus Torvalds  * 		added readv/writev support.
12e1f8e874SFrancois Cami  * 29Oct2002	Andrew Morton
131da177e4SLinus Torvalds  *		rewrote bio_add_page() support.
141da177e4SLinus Torvalds  * 30Oct2002	pbadari@us.ibm.com
151da177e4SLinus Torvalds  *		added support for non-aligned IO.
161da177e4SLinus Torvalds  * 06Nov2002	pbadari@us.ibm.com
171da177e4SLinus Torvalds  *		added asynchronous IO support.
181da177e4SLinus Torvalds  * 21Jul2003	nathans@sgi.com
191da177e4SLinus Torvalds  *		added IO completion notifier.
201da177e4SLinus Torvalds  */
211da177e4SLinus Torvalds 
221da177e4SLinus Torvalds #include <linux/kernel.h>
231da177e4SLinus Torvalds #include <linux/module.h>
241da177e4SLinus Torvalds #include <linux/types.h>
251da177e4SLinus Torvalds #include <linux/fs.h>
261da177e4SLinus Torvalds #include <linux/mm.h>
271da177e4SLinus Torvalds #include <linux/slab.h>
281da177e4SLinus Torvalds #include <linux/highmem.h>
291da177e4SLinus Torvalds #include <linux/pagemap.h>
3098c4d57dSAndrew Morton #include <linux/task_io_accounting_ops.h>
311da177e4SLinus Torvalds #include <linux/bio.h>
321da177e4SLinus Torvalds #include <linux/wait.h>
331da177e4SLinus Torvalds #include <linux/err.h>
341da177e4SLinus Torvalds #include <linux/blkdev.h>
351da177e4SLinus Torvalds #include <linux/buffer_head.h>
361da177e4SLinus Torvalds #include <linux/rwsem.h>
371da177e4SLinus Torvalds #include <linux/uio.h>
3860063497SArun Sharma #include <linux/atomic.h>
3965dd2aa9SAndi Kleen #include <linux/prefetch.h>
401da177e4SLinus Torvalds 
411da177e4SLinus Torvalds /*
421da177e4SLinus Torvalds  * How many user pages to map in one call to get_user_pages().  This determines
43cde1ecb3SAndi Kleen  * the size of a structure in the slab cache.
441da177e4SLinus Torvalds  */
451da177e4SLinus Torvalds #define DIO_PAGES	64
461da177e4SLinus Torvalds 
471da177e4SLinus Torvalds /*
481da177e4SLinus Torvalds  * This code generally works in units of "dio_blocks".  A dio_block is
491da177e4SLinus Torvalds  * somewhere between the hard sector size and the filesystem block size.  It
501da177e4SLinus Torvalds  * is determined on a per-invocation basis.   When talking to the filesystem
511da177e4SLinus Torvalds  * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
521da177e4SLinus Torvalds  * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
531da177e4SLinus Torvalds  * to dio_block quantities by shifting left by blkfactor.
541da177e4SLinus Torvalds  *
551da177e4SLinus Torvalds  * If blkfactor is zero then the user's request was aligned to the filesystem's
561da177e4SLinus Torvalds  * blocksize.
571da177e4SLinus Torvalds  */
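
/*
 * Worked example (illustrative numbers, not taken from this file): with
 * 512-byte dio_blocks (blkbits = 9) on a filesystem using 4096-byte blocks
 * (i_blkbits = 12), blkfactor = 12 - 9 = 3, so
 *
 *	fs_block  = dio_block >> 3;	(scale down by blkfactor)
 *	dio_block = fs_block << 3;	(scale back up)
 *
 * A request whose file offset, length and user buffer are all 4096-byte
 * aligned would instead run with blkbits = 12 and blkfactor = 0.
 */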
581da177e4SLinus Torvalds 
59eb28be2bSAndi Kleen /* dio_state only used in the submission path */
60eb28be2bSAndi Kleen 
61eb28be2bSAndi Kleen struct dio_submit {
621da177e4SLinus Torvalds 	struct bio *bio;		/* bio under assembly */
631da177e4SLinus Torvalds 	unsigned blkbits;		/* doesn't change */
641da177e4SLinus Torvalds 	unsigned blkfactor;		/* When we're using an alignment which
651da177e4SLinus Torvalds 					   is finer than the filesystem's soft
661da177e4SLinus Torvalds 					   blocksize, this specifies how much
671da177e4SLinus Torvalds 					   finer.  blkfactor=2 means 1/4-block
681da177e4SLinus Torvalds 					   alignment.  Does not change */
691da177e4SLinus Torvalds 	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
701da177e4SLinus Torvalds 					   been performed at the start of a
711da177e4SLinus Torvalds 					   write */
721da177e4SLinus Torvalds 	int pages_in_io;		/* approximate total IO pages */
731da177e4SLinus Torvalds 	sector_t block_in_file;		/* Current offset into the underlying
741da177e4SLinus Torvalds 					   file in dio_block units. */
751da177e4SLinus Torvalds 	unsigned blocks_available;	/* At block_in_file.  changes */
760dc2bc49SAndi Kleen 	int reap_counter;		/* rate limit reaping */
771da177e4SLinus Torvalds 	sector_t final_block_in_request;/* doesn't change */
781da177e4SLinus Torvalds 	int boundary;			/* prev block is at a boundary */
791d8fa7a2SBadari Pulavarty 	get_block_t *get_block;		/* block mapping function */
80facd07b0SJosef Bacik 	dio_submit_t *submit_io;	/* IO submission function */
81eb28be2bSAndi Kleen 
82facd07b0SJosef Bacik 	loff_t logical_offset_in_bio;	/* current first logical block in bio */
831da177e4SLinus Torvalds 	sector_t final_block_in_bio;	/* current final block in bio + 1 */
841da177e4SLinus Torvalds 	sector_t next_block_for_io;	/* next block to be put under IO,
851da177e4SLinus Torvalds 					   in dio_blocks units */
861da177e4SLinus Torvalds 
871da177e4SLinus Torvalds 	/*
881da177e4SLinus Torvalds 	 * Deferred addition of a page to the dio.  These variables are
891da177e4SLinus Torvalds 	 * private to dio_send_cur_page(), submit_page_section() and
901da177e4SLinus Torvalds 	 * dio_bio_add_page().
911da177e4SLinus Torvalds 	 */
921da177e4SLinus Torvalds 	struct page *cur_page;		/* The page */
931da177e4SLinus Torvalds 	unsigned cur_page_offset;	/* Offset into it, in bytes */
941da177e4SLinus Torvalds 	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
951da177e4SLinus Torvalds 	sector_t cur_page_block;	/* Where it starts */
96facd07b0SJosef Bacik 	loff_t cur_page_fs_offset;	/* Offset in file */
971da177e4SLinus Torvalds 
987b2c99d1SAl Viro 	struct iov_iter *iter;
9923aee091SJeff Moyer 	/*
10023aee091SJeff Moyer 	 * Page queue.  These variables belong to dio_refill_pages() and
10123aee091SJeff Moyer 	 * dio_get_page().
10223aee091SJeff Moyer 	 */
10323aee091SJeff Moyer 	unsigned head;			/* next page to process */
10423aee091SJeff Moyer 	unsigned tail;			/* last valid page + 1 */
1057b2c99d1SAl Viro 	size_t from, to;
106eb28be2bSAndi Kleen };
107eb28be2bSAndi Kleen 
108eb28be2bSAndi Kleen /* dio_state communicated between submission path and end_io */
109eb28be2bSAndi Kleen struct dio {
110eb28be2bSAndi Kleen 	int flags;			/* doesn't change */
1118a4c1e42SMike Christie 	int op;
1128a4c1e42SMike Christie 	int op_flags;
11315c4f638SJens Axboe 	blk_qc_t bio_cookie;
11415c4f638SJens Axboe 	struct block_device *bio_bdev;
1150dc2bc49SAndi Kleen 	struct inode *inode;
116eb28be2bSAndi Kleen 	loff_t i_size;			/* i_size when submitted */
117eb28be2bSAndi Kleen 	dio_iodone_t *end_io;		/* IO completion function */
118eb28be2bSAndi Kleen 
11918772641SAndi Kleen 	void *private;			/* copy from map_bh.b_private */
120eb28be2bSAndi Kleen 
121eb28be2bSAndi Kleen 	/* BIO completion state */
122eb28be2bSAndi Kleen 	spinlock_t bio_lock;		/* protects BIO fields below */
1230dc2bc49SAndi Kleen 	int page_errors;		/* errno from get_user_pages() */
1240dc2bc49SAndi Kleen 	int is_async;			/* is IO async ? */
1257b7a8665SChristoph Hellwig 	bool defer_completion;		/* defer AIO completion to workqueue? */
12653cbf3b1SMing Lei 	bool should_dirty;		/* if pages should be dirtied */
1270dc2bc49SAndi Kleen 	int io_error;			/* IO error in completion path */
128eb28be2bSAndi Kleen 	unsigned long refcount;		/* direct_io_worker() and bios */
129eb28be2bSAndi Kleen 	struct bio *bio_list;		/* singly linked via bi_private */
130eb28be2bSAndi Kleen 	struct task_struct *waiter;	/* waiting task (NULL if none) */
131eb28be2bSAndi Kleen 
132eb28be2bSAndi Kleen 	/* AIO related stuff */
133eb28be2bSAndi Kleen 	struct kiocb *iocb;		/* kiocb */
134eb28be2bSAndi Kleen 	ssize_t result;                 /* IO result */
135eb28be2bSAndi Kleen 
13623aee091SJeff Moyer 	/*
13723aee091SJeff Moyer 	 * pages[] (and any fields placed after it) are not zeroed out at
13823aee091SJeff Moyer 	 * allocation time.  Don't add new fields after pages[] unless you
13923aee091SJeff Moyer 	 * wish that they not be zeroed.
14023aee091SJeff Moyer 	 */
1417b7a8665SChristoph Hellwig 	union {
14223aee091SJeff Moyer 		struct page *pages[DIO_PAGES];	/* page buffer */
1437b7a8665SChristoph Hellwig 		struct work_struct complete_work;/* deferred AIO completion */
1447b7a8665SChristoph Hellwig 	};
1456e8267f5SAndi Kleen } ____cacheline_aligned_in_smp;
1466e8267f5SAndi Kleen 
1476e8267f5SAndi Kleen static struct kmem_cache *dio_cache __read_mostly;
1481da177e4SLinus Torvalds 
1491da177e4SLinus Torvalds /*
1501da177e4SLinus Torvalds  * How many pages are in the queue?
1511da177e4SLinus Torvalds  */
152eb28be2bSAndi Kleen static inline unsigned dio_pages_present(struct dio_submit *sdio)
1531da177e4SLinus Torvalds {
154eb28be2bSAndi Kleen 	return sdio->tail - sdio->head;
1551da177e4SLinus Torvalds }
1561da177e4SLinus Torvalds 
1571da177e4SLinus Torvalds /*
1581da177e4SLinus Torvalds  * Go grab and pin some userspace pages.   Typically we'll get 64 at a time.
1591da177e4SLinus Torvalds  */
160ba253fbfSAndi Kleen static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
1611da177e4SLinus Torvalds {
1627b2c99d1SAl Viro 	ssize_t ret;
1631da177e4SLinus Torvalds 
1642c80929cSMiklos Szeredi 	ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
1657b2c99d1SAl Viro 				&sdio->from);
1661da177e4SLinus Torvalds 
1678a4c1e42SMike Christie 	if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) {
168557ed1faSNick Piggin 		struct page *page = ZERO_PAGE(0);
1691da177e4SLinus Torvalds 		/*
1701da177e4SLinus Torvalds 		 * A memory fault, but the filesystem has some outstanding
1711da177e4SLinus Torvalds 		 * mapped blocks.  We need to use those blocks up to avoid
1721da177e4SLinus Torvalds 		 * leaking stale data in the file.
1731da177e4SLinus Torvalds 		 */
1741da177e4SLinus Torvalds 		if (dio->page_errors == 0)
1751da177e4SLinus Torvalds 			dio->page_errors = ret;
17609cbfeafSKirill A. Shutemov 		get_page(page);
177b5810039SNick Piggin 		dio->pages[0] = page;
178eb28be2bSAndi Kleen 		sdio->head = 0;
179eb28be2bSAndi Kleen 		sdio->tail = 1;
1807b2c99d1SAl Viro 		sdio->from = 0;
1817b2c99d1SAl Viro 		sdio->to = PAGE_SIZE;
1827b2c99d1SAl Viro 		return 0;
1831da177e4SLinus Torvalds 	}
1841da177e4SLinus Torvalds 
1851da177e4SLinus Torvalds 	if (ret >= 0) {
1867b2c99d1SAl Viro 		iov_iter_advance(sdio->iter, ret);
1877b2c99d1SAl Viro 		ret += sdio->from;
188eb28be2bSAndi Kleen 		sdio->head = 0;
1897b2c99d1SAl Viro 		sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
1907b2c99d1SAl Viro 		sdio->to = ((ret - 1) & (PAGE_SIZE - 1)) + 1;
1917b2c99d1SAl Viro 		return 0;
1921da177e4SLinus Torvalds 	}
1931da177e4SLinus Torvalds 	return ret;
1941da177e4SLinus Torvalds }
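
/*
 * Illustrative example of the bookkeeping above (assumed numbers, not from
 * the original source): if iov_iter_get_pages() pins three pages with the
 * data starting 100 bytes into the first one, it returns
 * ret = 3 * PAGE_SIZE - 100 and sets sdio->from = 100; after the adjustment
 * above, head = 0, tail = 3, from = 100 and to = PAGE_SIZE.
 */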
1951da177e4SLinus Torvalds 
1961da177e4SLinus Torvalds /*
1971da177e4SLinus Torvalds  * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
1981da177e4SLinus Torvalds  * buffered inside the dio so that we can call get_user_pages() against a
1991da177e4SLinus Torvalds  * decent number of pages, less frequently, to provide nicer use of the
2001da177e4SLinus Torvalds  * L1 cache.
2011da177e4SLinus Torvalds  */
202ba253fbfSAndi Kleen static inline struct page *dio_get_page(struct dio *dio,
2036fcc5420SBoaz Harrosh 					struct dio_submit *sdio)
2041da177e4SLinus Torvalds {
205eb28be2bSAndi Kleen 	if (dio_pages_present(sdio) == 0) {
2061da177e4SLinus Torvalds 		int ret;
2071da177e4SLinus Torvalds 
208eb28be2bSAndi Kleen 		ret = dio_refill_pages(dio, sdio);
2091da177e4SLinus Torvalds 		if (ret)
2101da177e4SLinus Torvalds 			return ERR_PTR(ret);
211eb28be2bSAndi Kleen 		BUG_ON(dio_pages_present(sdio) == 0);
2121da177e4SLinus Torvalds 	}
2136fcc5420SBoaz Harrosh 	return dio->pages[sdio->head];
2141da177e4SLinus Torvalds }
2151da177e4SLinus Torvalds 
2166d544bb4SZach Brown /**
2176d544bb4SZach Brown  * dio_complete() - called when all DIO BIO I/O has been completed
2186d544bb4SZach Brown  * @dio: the dio structure for the completed operation
2196d544bb4SZach Brown  *
2207b7a8665SChristoph Hellwig  * This drops i_dio_count, lets interested parties know that a DIO operation
2217b7a8665SChristoph Hellwig  * has completed, and calculates the resulting return code for the operation.
2226d544bb4SZach Brown  *
2236d544bb4SZach Brown  * It lets the filesystem know if it registered an interest earlier via
2246d544bb4SZach Brown  * get_block.  Pass the private field of the map buffer_head so that
2256d544bb4SZach Brown  * filesystems can use it to hold additional state between get_block calls and
2266d544bb4SZach Brown  * dio_complete.
2271da177e4SLinus Torvalds  */
228716b9bc0SChristoph Hellwig static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
2291da177e4SLinus Torvalds {
230716b9bc0SChristoph Hellwig 	loff_t offset = dio->iocb->ki_pos;
2316d544bb4SZach Brown 	ssize_t transferred = 0;
2326d544bb4SZach Brown 
2338459d86aSZach Brown 	/*
2348459d86aSZach Brown 	 * AIO submission can race with bio completion to get here while
2358459d86aSZach Brown 	 * expecting to have the last io completed by bio completion.
2368459d86aSZach Brown 	 * In that case -EIOCBQUEUED is in fact not an error we want
2378459d86aSZach Brown 	 * to preserve through this call.
2388459d86aSZach Brown 	 */
2398459d86aSZach Brown 	if (ret == -EIOCBQUEUED)
2408459d86aSZach Brown 		ret = 0;
2418459d86aSZach Brown 
2426d544bb4SZach Brown 	if (dio->result) {
2436d544bb4SZach Brown 		transferred = dio->result;
2446d544bb4SZach Brown 
2456d544bb4SZach Brown 		/* Check for short read case */
2468a4c1e42SMike Christie 		if ((dio->op == REQ_OP_READ) &&
2478a4c1e42SMike Christie 		    ((offset + transferred) > dio->i_size))
2486d544bb4SZach Brown 			transferred = dio->i_size - offset;
2494038acdbSAl Viro 		/* ignore EFAULT if some IO has been done */
2504038acdbSAl Viro 		if (unlikely(ret == -EFAULT) && transferred)
2514038acdbSAl Viro 			ret = 0;
2526d544bb4SZach Brown 	}
2536d544bb4SZach Brown 
2546d544bb4SZach Brown 	if (ret == 0)
2556d544bb4SZach Brown 		ret = dio->page_errors;
2566d544bb4SZach Brown 	if (ret == 0)
2576d544bb4SZach Brown 		ret = dio->io_error;
2586d544bb4SZach Brown 	if (ret == 0)
2596d544bb4SZach Brown 		ret = transferred;
2606d544bb4SZach Brown 
261187372a3SChristoph Hellwig 	if (dio->end_io) {
262187372a3SChristoph Hellwig 		int err;
263187372a3SChristoph Hellwig 
264e2592217SChristoph Hellwig 		// XXX: ki_pos??
265187372a3SChristoph Hellwig 		err = dio->end_io(dio->iocb, offset, ret, dio->private);
266187372a3SChristoph Hellwig 		if (err)
267187372a3SChristoph Hellwig 			ret = err;
268187372a3SChristoph Hellwig 	}
2697b7a8665SChristoph Hellwig 
270fe0f07d0SJens Axboe 	if (!(dio->flags & DIO_SKIP_DIO_COUNT))
271fe0f07d0SJens Axboe 		inode_dio_end(dio->inode);
272fe0f07d0SJens Axboe 
27302afc27fSChristoph Hellwig 	if (is_async) {
274e2592217SChristoph Hellwig 		/*
275e2592217SChristoph Hellwig 		 * generic_write_sync expects ki_pos to have been updated
276e2592217SChristoph Hellwig 		 * already, but the submission path only does this for
277e2592217SChristoph Hellwig 		 * synchronous I/O.
278e2592217SChristoph Hellwig 		 */
279e2592217SChristoph Hellwig 		dio->iocb->ki_pos += transferred;
28002afc27fSChristoph Hellwig 
2818a4c1e42SMike Christie 		if (dio->op == REQ_OP_WRITE)
282e2592217SChristoph Hellwig 			ret = generic_write_sync(dio->iocb,  transferred);
28304b2fa9fSChristoph Hellwig 		dio->iocb->ki_complete(dio->iocb, ret, 0);
28402afc27fSChristoph Hellwig 	}
28540e2e973SChristoph Hellwig 
2867b7a8665SChristoph Hellwig 	kmem_cache_free(dio_cache, dio);
2876d544bb4SZach Brown 	return ret;
2881da177e4SLinus Torvalds }
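
/*
 * Hypothetical example of the short-read trimming above (numbers are
 * assumptions, not from this file): an O_DIRECT read of one 4096-byte
 * block at offset 61440 in a 63488-byte file may complete with
 * dio->result = 4096, but offset + transferred exceeds i_size, so
 * transferred is trimmed to i_size - offset = 2048 and that is what the
 * caller ultimately sees returned.
 */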
2891da177e4SLinus Torvalds 
2907b7a8665SChristoph Hellwig static void dio_aio_complete_work(struct work_struct *work)
2917b7a8665SChristoph Hellwig {
2927b7a8665SChristoph Hellwig 	struct dio *dio = container_of(work, struct dio, complete_work);
2937b7a8665SChristoph Hellwig 
294716b9bc0SChristoph Hellwig 	dio_complete(dio, 0, true);
2957b7a8665SChristoph Hellwig }
2967b7a8665SChristoph Hellwig 
2974e4cbee9SChristoph Hellwig static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
2987b7a8665SChristoph Hellwig 
2991da177e4SLinus Torvalds /*
3001da177e4SLinus Torvalds  * Asynchronous IO callback.
3011da177e4SLinus Torvalds  */
3024246a0b6SChristoph Hellwig static void dio_bio_end_aio(struct bio *bio)
3031da177e4SLinus Torvalds {
3041da177e4SLinus Torvalds 	struct dio *dio = bio->bi_private;
3055eb6c7a2SZach Brown 	unsigned long remaining;
3065eb6c7a2SZach Brown 	unsigned long flags;
3071da177e4SLinus Torvalds 
3081da177e4SLinus Torvalds 	/* cleanup the bio */
3091da177e4SLinus Torvalds 	dio_bio_complete(dio, bio);
3100273201eSZach Brown 
3115eb6c7a2SZach Brown 	spin_lock_irqsave(&dio->bio_lock, flags);
3125eb6c7a2SZach Brown 	remaining = --dio->refcount;
3135eb6c7a2SZach Brown 	if (remaining == 1 && dio->waiter)
31420258b2bSZach Brown 		wake_up_process(dio->waiter);
3155eb6c7a2SZach Brown 	spin_unlock_irqrestore(&dio->bio_lock, flags);
31620258b2bSZach Brown 
3178459d86aSZach Brown 	if (remaining == 0) {
3187b7a8665SChristoph Hellwig 		if (dio->result && dio->defer_completion) {
3197b7a8665SChristoph Hellwig 			INIT_WORK(&dio->complete_work, dio_aio_complete_work);
3207b7a8665SChristoph Hellwig 			queue_work(dio->inode->i_sb->s_dio_done_wq,
3217b7a8665SChristoph Hellwig 				   &dio->complete_work);
3227b7a8665SChristoph Hellwig 		} else {
323716b9bc0SChristoph Hellwig 			dio_complete(dio, 0, true);
3247b7a8665SChristoph Hellwig 		}
3258459d86aSZach Brown 	}
3261da177e4SLinus Torvalds }
3271da177e4SLinus Torvalds 
3281da177e4SLinus Torvalds /*
3291da177e4SLinus Torvalds  * The BIO completion handler simply queues the BIO up for the process-context
3301da177e4SLinus Torvalds  * handler.
3311da177e4SLinus Torvalds  *
3321da177e4SLinus Torvalds  * During I/O bi_private points at the dio.  After I/O, bi_private is used to
3331da177e4SLinus Torvalds  * implement a singly-linked list of completed BIOs, at dio->bio_list.
3341da177e4SLinus Torvalds  */
3354246a0b6SChristoph Hellwig static void dio_bio_end_io(struct bio *bio)
3361da177e4SLinus Torvalds {
3371da177e4SLinus Torvalds 	struct dio *dio = bio->bi_private;
3381da177e4SLinus Torvalds 	unsigned long flags;
3391da177e4SLinus Torvalds 
3401da177e4SLinus Torvalds 	spin_lock_irqsave(&dio->bio_lock, flags);
3411da177e4SLinus Torvalds 	bio->bi_private = dio->bio_list;
3421da177e4SLinus Torvalds 	dio->bio_list = bio;
3435eb6c7a2SZach Brown 	if (--dio->refcount == 1 && dio->waiter)
3441da177e4SLinus Torvalds 		wake_up_process(dio->waiter);
3451da177e4SLinus Torvalds 	spin_unlock_irqrestore(&dio->bio_lock, flags);
3461da177e4SLinus Torvalds }
3471da177e4SLinus Torvalds 
348facd07b0SJosef Bacik /**
349facd07b0SJosef Bacik  * dio_end_io - handle the end io action for the given bio
350facd07b0SJosef Bacik  * @bio: The direct io bio that's being completed
351facd07b0SJosef Bacik  *
352facd07b0SJosef Bacik  * This is meant to be called by any filesystem that uses its own dio_submit_t
353facd07b0SJosef Bacik  * so that the DIO specific endio actions are dealt with after the filesystem
354facd07b0SJosef Bacik  * has done its completion work.
355facd07b0SJosef Bacik  */
3564055351cSChristoph Hellwig void dio_end_io(struct bio *bio)
357facd07b0SJosef Bacik {
358facd07b0SJosef Bacik 	struct dio *dio = bio->bi_private;
359facd07b0SJosef Bacik 
360facd07b0SJosef Bacik 	if (dio->is_async)
3614246a0b6SChristoph Hellwig 		dio_bio_end_aio(bio);
362facd07b0SJosef Bacik 	else
3634246a0b6SChristoph Hellwig 		dio_bio_end_io(bio);
364facd07b0SJosef Bacik }
365facd07b0SJosef Bacik EXPORT_SYMBOL_GPL(dio_end_io);
366facd07b0SJosef Bacik 
367ba253fbfSAndi Kleen static inline void
368eb28be2bSAndi Kleen dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
369eb28be2bSAndi Kleen 	      struct block_device *bdev,
3701da177e4SLinus Torvalds 	      sector_t first_sector, int nr_vecs)
3711da177e4SLinus Torvalds {
3721da177e4SLinus Torvalds 	struct bio *bio;
3731da177e4SLinus Torvalds 
37420d9600cSDavid Dillow 	/*
37520d9600cSDavid Dillow 	 * bio_alloc() is guaranteed to return a bio when called with
37671baba4bSMel Gorman 	 * __GFP_RECLAIM and we request a valid number of vectors.
37720d9600cSDavid Dillow 	 */
3781da177e4SLinus Torvalds 	bio = bio_alloc(GFP_KERNEL, nr_vecs);
3791da177e4SLinus Torvalds 
3801da177e4SLinus Torvalds 	bio->bi_bdev = bdev;
3814f024f37SKent Overstreet 	bio->bi_iter.bi_sector = first_sector;
3828a4c1e42SMike Christie 	bio_set_op_attrs(bio, dio->op, dio->op_flags);
3831da177e4SLinus Torvalds 	if (dio->is_async)
3841da177e4SLinus Torvalds 		bio->bi_end_io = dio_bio_end_aio;
3851da177e4SLinus Torvalds 	else
3861da177e4SLinus Torvalds 		bio->bi_end_io = dio_bio_end_io;
3871da177e4SLinus Torvalds 
388eb28be2bSAndi Kleen 	sdio->bio = bio;
389eb28be2bSAndi Kleen 	sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
3901da177e4SLinus Torvalds }
3911da177e4SLinus Torvalds 
3921da177e4SLinus Torvalds /*
3931da177e4SLinus Torvalds  * In the AIO read case we speculatively dirty the pages before starting IO.
3941da177e4SLinus Torvalds  * During IO completion, any of these pages which happen to have been written
3951da177e4SLinus Torvalds  * back will be redirtied by bio_check_pages_dirty().
3960273201eSZach Brown  *
3970273201eSZach Brown  * bios hold a dio reference between submit_bio and ->end_io.
3981da177e4SLinus Torvalds  */
399ba253fbfSAndi Kleen static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
4001da177e4SLinus Torvalds {
401eb28be2bSAndi Kleen 	struct bio *bio = sdio->bio;
4025eb6c7a2SZach Brown 	unsigned long flags;
4031da177e4SLinus Torvalds 
4041da177e4SLinus Torvalds 	bio->bi_private = dio;
4055eb6c7a2SZach Brown 
4065eb6c7a2SZach Brown 	spin_lock_irqsave(&dio->bio_lock, flags);
4075eb6c7a2SZach Brown 	dio->refcount++;
4085eb6c7a2SZach Brown 	spin_unlock_irqrestore(&dio->bio_lock, flags);
4095eb6c7a2SZach Brown 
4108a4c1e42SMike Christie 	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
4111da177e4SLinus Torvalds 		bio_set_pages_dirty(bio);
4125eb6c7a2SZach Brown 
413c1c53460SJens Axboe 	dio->bio_bdev = bio->bi_bdev;
414c1c53460SJens Axboe 
41515c4f638SJens Axboe 	if (sdio->submit_io) {
4168a4c1e42SMike Christie 		sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
41715c4f638SJens Axboe 		dio->bio_cookie = BLK_QC_T_NONE;
418c1c53460SJens Axboe 	} else
4194e49ea4aSMike Christie 		dio->bio_cookie = submit_bio(bio);
4201da177e4SLinus Torvalds 
421eb28be2bSAndi Kleen 	sdio->bio = NULL;
422eb28be2bSAndi Kleen 	sdio->boundary = 0;
423eb28be2bSAndi Kleen 	sdio->logical_offset_in_bio = 0;
4241da177e4SLinus Torvalds }
4251da177e4SLinus Torvalds 
4261da177e4SLinus Torvalds /*
4271da177e4SLinus Torvalds  * Release any resources in case of a failure
4281da177e4SLinus Torvalds  */
429ba253fbfSAndi Kleen static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
4301da177e4SLinus Torvalds {
4317b2c99d1SAl Viro 	while (sdio->head < sdio->tail)
43209cbfeafSKirill A. Shutemov 		put_page(dio->pages[sdio->head++]);
4331da177e4SLinus Torvalds }
4341da177e4SLinus Torvalds 
4351da177e4SLinus Torvalds /*
4360273201eSZach Brown  * Wait for the next BIO to complete.  Remove it and return it.  NULL is
4370273201eSZach Brown  * returned once all BIOs have been completed.  This must only be called once
4380273201eSZach Brown  * all bios have been issued so that dio->refcount can only decrease.  This
4390273201eSZach Brown  * requires that the caller hold a reference on the dio.
4401da177e4SLinus Torvalds  */
4411da177e4SLinus Torvalds static struct bio *dio_await_one(struct dio *dio)
4421da177e4SLinus Torvalds {
4431da177e4SLinus Torvalds 	unsigned long flags;
4440273201eSZach Brown 	struct bio *bio = NULL;
4451da177e4SLinus Torvalds 
4461da177e4SLinus Torvalds 	spin_lock_irqsave(&dio->bio_lock, flags);
4475eb6c7a2SZach Brown 
4485eb6c7a2SZach Brown 	/*
4495eb6c7a2SZach Brown 	 * Wait as long as the list is empty and there are bios in flight.  bio
4505eb6c7a2SZach Brown 	 * completion drops the count, maybe adds to the list, and wakes while
4515eb6c7a2SZach Brown 	 * holding the bio_lock so we don't need set_current_state()'s barrier
4525eb6c7a2SZach Brown 	 * and can call it after testing our condition.
4535eb6c7a2SZach Brown 	 */
4545eb6c7a2SZach Brown 	while (dio->refcount > 1 && dio->bio_list == NULL) {
4555eb6c7a2SZach Brown 		__set_current_state(TASK_UNINTERRUPTIBLE);
4561da177e4SLinus Torvalds 		dio->waiter = current;
4571da177e4SLinus Torvalds 		spin_unlock_irqrestore(&dio->bio_lock, flags);
458c43c83a2SChristoph Hellwig 		if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
459bbd7bb70SJens Axboe 		    !blk_mq_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie))
4601da177e4SLinus Torvalds 			io_schedule();
4615eb6c7a2SZach Brown 		/* wake up sets us TASK_RUNNING */
4621da177e4SLinus Torvalds 		spin_lock_irqsave(&dio->bio_lock, flags);
4631da177e4SLinus Torvalds 		dio->waiter = NULL;
4641da177e4SLinus Torvalds 	}
4650273201eSZach Brown 	if (dio->bio_list) {
4661da177e4SLinus Torvalds 		bio = dio->bio_list;
4671da177e4SLinus Torvalds 		dio->bio_list = bio->bi_private;
4680273201eSZach Brown 	}
4691da177e4SLinus Torvalds 	spin_unlock_irqrestore(&dio->bio_lock, flags);
4701da177e4SLinus Torvalds 	return bio;
4711da177e4SLinus Torvalds }
4721da177e4SLinus Torvalds 
4731da177e4SLinus Torvalds /*
4741da177e4SLinus Torvalds  * Process one completed BIO.  No locks are held.
4751da177e4SLinus Torvalds  */
4764e4cbee9SChristoph Hellwig static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
4771da177e4SLinus Torvalds {
478cb34e057SKent Overstreet 	struct bio_vec *bvec;
479cb34e057SKent Overstreet 	unsigned i;
4804e4cbee9SChristoph Hellwig 	blk_status_t err = bio->bi_status;
4811da177e4SLinus Torvalds 
482*03a07c92SGoldwyn Rodrigues 	if (err) {
483*03a07c92SGoldwyn Rodrigues 		if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT))
484*03a07c92SGoldwyn Rodrigues 			dio->io_error = -EAGAIN;
485*03a07c92SGoldwyn Rodrigues 		else
486174e27c6SChen, Kenneth W 			dio->io_error = -EIO;
487*03a07c92SGoldwyn Rodrigues 	}
4881da177e4SLinus Torvalds 
4898a4c1e42SMike Christie 	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) {
4907ddc971fSMike Krinkin 		bio_check_pages_dirty(bio);	/* transfers ownership */
4911da177e4SLinus Torvalds 	} else {
492cb34e057SKent Overstreet 		bio_for_each_segment_all(bvec, bio, i) {
493cb34e057SKent Overstreet 			struct page *page = bvec->bv_page;
4941da177e4SLinus Torvalds 
4958a4c1e42SMike Christie 			if (dio->op == REQ_OP_READ && !PageCompound(page) &&
49653cbf3b1SMing Lei 					dio->should_dirty)
4971da177e4SLinus Torvalds 				set_page_dirty_lock(page);
49809cbfeafSKirill A. Shutemov 			put_page(page);
4991da177e4SLinus Torvalds 		}
5001da177e4SLinus Torvalds 		bio_put(bio);
5011da177e4SLinus Torvalds 	}
5029b81c842SSasha Levin 	return err;
5031da177e4SLinus Torvalds }
5041da177e4SLinus Torvalds 
5051da177e4SLinus Torvalds /*
5060273201eSZach Brown  * Wait on and process all in-flight BIOs.  This must only be called once
5070273201eSZach Brown  * all bios have been issued so that the refcount can only decrease.
5080273201eSZach Brown  * This just waits for all bios to make it through dio_bio_complete.  IO
509beb7dd86SRobert P. J. Day  * errors are propagated through dio->io_error and are then returned via
5100273201eSZach Brown  * dio_complete().
5111da177e4SLinus Torvalds  */
5126d544bb4SZach Brown static void dio_await_completion(struct dio *dio)
5131da177e4SLinus Torvalds {
5140273201eSZach Brown 	struct bio *bio;
5150273201eSZach Brown 	do {
5160273201eSZach Brown 		bio = dio_await_one(dio);
5170273201eSZach Brown 		if (bio)
5186d544bb4SZach Brown 			dio_bio_complete(dio, bio);
5190273201eSZach Brown 	} while (bio);
5201da177e4SLinus Torvalds }
5211da177e4SLinus Torvalds 
5221da177e4SLinus Torvalds /*
5231da177e4SLinus Torvalds  * A really large O_DIRECT read or write can generate a lot of BIOs.  So
5241da177e4SLinus Torvalds  * to keep the memory consumption sane we periodically reap any completed BIOs
5251da177e4SLinus Torvalds  * during the BIO generation phase.
5261da177e4SLinus Torvalds  *
5271da177e4SLinus Torvalds  * This also helps to limit the peak amount of pinned userspace memory.
5281da177e4SLinus Torvalds  */
529ba253fbfSAndi Kleen static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
5301da177e4SLinus Torvalds {
5311da177e4SLinus Torvalds 	int ret = 0;
5321da177e4SLinus Torvalds 
533eb28be2bSAndi Kleen 	if (sdio->reap_counter++ >= 64) {
5341da177e4SLinus Torvalds 		while (dio->bio_list) {
5351da177e4SLinus Torvalds 			unsigned long flags;
5361da177e4SLinus Torvalds 			struct bio *bio;
5371da177e4SLinus Torvalds 			int ret2;
5381da177e4SLinus Torvalds 
5391da177e4SLinus Torvalds 			spin_lock_irqsave(&dio->bio_lock, flags);
5401da177e4SLinus Torvalds 			bio = dio->bio_list;
5411da177e4SLinus Torvalds 			dio->bio_list = bio->bi_private;
5421da177e4SLinus Torvalds 			spin_unlock_irqrestore(&dio->bio_lock, flags);
5434e4cbee9SChristoph Hellwig 			ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
5441da177e4SLinus Torvalds 			if (ret == 0)
5451da177e4SLinus Torvalds 				ret = ret2;
5461da177e4SLinus Torvalds 		}
547eb28be2bSAndi Kleen 		sdio->reap_counter = 0;
5481da177e4SLinus Torvalds 	}
5491da177e4SLinus Torvalds 	return ret;
5501da177e4SLinus Torvalds }
5511da177e4SLinus Torvalds 
5521da177e4SLinus Torvalds /*
5537b7a8665SChristoph Hellwig  * Create a workqueue for deferred direct IO completions. We allocate the
5547b7a8665SChristoph Hellwig  * workqueue when it's first needed. This avoids creating the workqueue for
5557b7a8665SChristoph Hellwig  * filesystems that don't need it and also allows us to create the workqueue
5567b7a8665SChristoph Hellwig  * late enough so that we can include s_id in the name of the workqueue.
5577b7a8665SChristoph Hellwig  */
558ec1b8260SChristoph Hellwig int sb_init_dio_done_wq(struct super_block *sb)
5597b7a8665SChristoph Hellwig {
56045150c43SOlof Johansson 	struct workqueue_struct *old;
5617b7a8665SChristoph Hellwig 	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
5627b7a8665SChristoph Hellwig 						      WQ_MEM_RECLAIM, 0,
5637b7a8665SChristoph Hellwig 						      sb->s_id);
5647b7a8665SChristoph Hellwig 	if (!wq)
5657b7a8665SChristoph Hellwig 		return -ENOMEM;
5667b7a8665SChristoph Hellwig 	/*
5677b7a8665SChristoph Hellwig 	 * This has to be atomic as more DIOs can race to create the workqueue
5687b7a8665SChristoph Hellwig 	 */
56945150c43SOlof Johansson 	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
5707b7a8665SChristoph Hellwig 	/* Someone created workqueue before us? Free ours... */
57145150c43SOlof Johansson 	if (old)
5727b7a8665SChristoph Hellwig 		destroy_workqueue(wq);
5737b7a8665SChristoph Hellwig 	return 0;
5747b7a8665SChristoph Hellwig }
5757b7a8665SChristoph Hellwig 
5767b7a8665SChristoph Hellwig static int dio_set_defer_completion(struct dio *dio)
5777b7a8665SChristoph Hellwig {
5787b7a8665SChristoph Hellwig 	struct super_block *sb = dio->inode->i_sb;
5797b7a8665SChristoph Hellwig 
5807b7a8665SChristoph Hellwig 	if (dio->defer_completion)
5817b7a8665SChristoph Hellwig 		return 0;
5827b7a8665SChristoph Hellwig 	dio->defer_completion = true;
5837b7a8665SChristoph Hellwig 	if (!sb->s_dio_done_wq)
5847b7a8665SChristoph Hellwig 		return sb_init_dio_done_wq(sb);
5857b7a8665SChristoph Hellwig 	return 0;
5867b7a8665SChristoph Hellwig }
5877b7a8665SChristoph Hellwig 
5887b7a8665SChristoph Hellwig /*
5891da177e4SLinus Torvalds  * Call into the fs to map some more disk blocks.  We record the current number
590eb28be2bSAndi Kleen  * of available blocks at sdio->blocks_available.  These are in units of the
59193407472SFabian Frederick  * fs blocksize, i_blocksize(inode).
5921da177e4SLinus Torvalds  *
5931da177e4SLinus Torvalds  * The fs is allowed to map lots of blocks at once.  If it wants to do that,
5941da177e4SLinus Torvalds  * it uses the passed inode-relative block number as the file offset, as usual.
5951da177e4SLinus Torvalds  *
5961d8fa7a2SBadari Pulavarty  * get_block() is passed the number of i_blkbits-sized blocks which direct_io
5971da177e4SLinus Torvalds  * has remaining to do.  The fs should not map more than this number of blocks.
5981da177e4SLinus Torvalds  *
5991da177e4SLinus Torvalds  * If the fs has mapped a lot of blocks, it should populate bh->b_size to
6001da177e4SLinus Torvalds  * indicate how much contiguous disk space has been made available at
6011da177e4SLinus Torvalds  * bh->b_blocknr.
6021da177e4SLinus Torvalds  *
6031da177e4SLinus Torvalds  * If *any* of the mapped blocks are new, then the fs must set buffer_new().
6041da177e4SLinus Torvalds  * This isn't very efficient...
6051da177e4SLinus Torvalds  *
6061da177e4SLinus Torvalds  * In the case of filesystem holes: the fs may return an arbitrarily-large
6071da177e4SLinus Torvalds  * hole by returning an appropriate value in b_size and by clearing
6081da177e4SLinus Torvalds  * buffer_mapped().  However the direct-io code will only process holes one
6091d8fa7a2SBadari Pulavarty  * block at a time - it will repeatedly call get_block() as it walks the hole.
6101da177e4SLinus Torvalds  */
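
/*
 * Illustrative sketch of the get_block contract described above, for a
 * hypothetical filesystem (every name below is made up and not part of this
 * file): the method may map a whole contiguous extent in one call by
 * growing bh->b_size, and must flag freshly allocated blocks with
 * buffer_new().
 *
 *	static int example_get_block(struct inode *inode, sector_t iblock,
 *				     struct buffer_head *bh, int create)
 *	{
 *		sector_t phys;
 *		unsigned long len;	// extent length, in fs blocks
 *		bool new = false;
 *
 *		// look up (or, if create, allocate) the extent at iblock,
 *		// filling phys/len/new; the details are fs specific
 *		...
 *		map_bh(bh, inode->i_sb, phys);
 *		bh->b_size = (size_t)len << inode->i_blkbits;
 *		if (new)
 *			set_buffer_new(bh);
 *		return 0;
 *	}
 */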
61118772641SAndi Kleen static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
61218772641SAndi Kleen 			   struct buffer_head *map_bh)
6131da177e4SLinus Torvalds {
6141da177e4SLinus Torvalds 	int ret;
6151da177e4SLinus Torvalds 	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
616ae55e1aaSTao Ma 	sector_t fs_endblk;	/* Into file, in filesystem-sized blocks */
6171da177e4SLinus Torvalds 	unsigned long fs_count;	/* Number of filesystem-sized blocks */
6181da177e4SLinus Torvalds 	int create;
619ab73857eSLinus Torvalds 	unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;
6201da177e4SLinus Torvalds 
6211da177e4SLinus Torvalds 	/*
6221da177e4SLinus Torvalds 	 * If there was a memory error and we've overwritten all the
6231da177e4SLinus Torvalds 	 * mapped blocks then we can now return that memory error
6241da177e4SLinus Torvalds 	 */
6251da177e4SLinus Torvalds 	ret = dio->page_errors;
6261da177e4SLinus Torvalds 	if (ret == 0) {
627eb28be2bSAndi Kleen 		BUG_ON(sdio->block_in_file >= sdio->final_block_in_request);
628eb28be2bSAndi Kleen 		fs_startblk = sdio->block_in_file >> sdio->blkfactor;
629ae55e1aaSTao Ma 		fs_endblk = (sdio->final_block_in_request - 1) >>
630ae55e1aaSTao Ma 					sdio->blkfactor;
631ae55e1aaSTao Ma 		fs_count = fs_endblk - fs_startblk + 1;
6321da177e4SLinus Torvalds 
6333c674e74SNathan Scott 		map_bh->b_state = 0;
634ab73857eSLinus Torvalds 		map_bh->b_size = fs_count << i_blkbits;
6353c674e74SNathan Scott 
6365fe878aeSChristoph Hellwig 		/*
6379ecd10b7SEryu Guan 		 * For writes that could fill holes inside i_size on a
6389ecd10b7SEryu Guan 		 * DIO_SKIP_HOLES filesystem we forbid block creations: only
6399ecd10b7SEryu Guan 		 * overwrites are permitted. We will return early to the caller
6409ecd10b7SEryu Guan 		 * once we see an unmapped buffer head returned, and the caller
6419ecd10b7SEryu Guan 		 * will fall back to buffered I/O.
6425fe878aeSChristoph Hellwig 		 *
6435fe878aeSChristoph Hellwig 		 * Otherwise the decision is left to the get_blocks method,
6445fe878aeSChristoph Hellwig 		 * which may decide to handle it or also return an unmapped
6455fe878aeSChristoph Hellwig 		 * buffer head.
6465fe878aeSChristoph Hellwig 		 */
6478a4c1e42SMike Christie 		create = dio->op == REQ_OP_WRITE;
6485fe878aeSChristoph Hellwig 		if (dio->flags & DIO_SKIP_HOLES) {
6499ecd10b7SEryu Guan 			if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
6509ecd10b7SEryu Guan 							i_blkbits))
6511da177e4SLinus Torvalds 				create = 0;
6521da177e4SLinus Torvalds 		}
6533c674e74SNathan Scott 
654eb28be2bSAndi Kleen 		ret = (*sdio->get_block)(dio->inode, fs_startblk,
6551da177e4SLinus Torvalds 						map_bh, create);
65618772641SAndi Kleen 
65718772641SAndi Kleen 		/* Store for completion */
65818772641SAndi Kleen 		dio->private = map_bh->b_private;
6597b7a8665SChristoph Hellwig 
6607b7a8665SChristoph Hellwig 		if (ret == 0 && buffer_defer_completion(map_bh))
6617b7a8665SChristoph Hellwig 			ret = dio_set_defer_completion(dio);
6621da177e4SLinus Torvalds 	}
6631da177e4SLinus Torvalds 	return ret;
6641da177e4SLinus Torvalds }
6651da177e4SLinus Torvalds 
6661da177e4SLinus Torvalds /*
6671da177e4SLinus Torvalds  * There is no bio.  Make one now.
6681da177e4SLinus Torvalds  */
669ba253fbfSAndi Kleen static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
67018772641SAndi Kleen 		sector_t start_sector, struct buffer_head *map_bh)
6711da177e4SLinus Torvalds {
6721da177e4SLinus Torvalds 	sector_t sector;
6731da177e4SLinus Torvalds 	int ret, nr_pages;
6741da177e4SLinus Torvalds 
675eb28be2bSAndi Kleen 	ret = dio_bio_reap(dio, sdio);
6761da177e4SLinus Torvalds 	if (ret)
6771da177e4SLinus Torvalds 		goto out;
678eb28be2bSAndi Kleen 	sector = start_sector << (sdio->blkbits - 9);
679b54ffb73SKent Overstreet 	nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES);
6801da177e4SLinus Torvalds 	BUG_ON(nr_pages <= 0);
68118772641SAndi Kleen 	dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
682eb28be2bSAndi Kleen 	sdio->boundary = 0;
6831da177e4SLinus Torvalds out:
6841da177e4SLinus Torvalds 	return ret;
6851da177e4SLinus Torvalds }
6861da177e4SLinus Torvalds 
6871da177e4SLinus Torvalds /*
6881da177e4SLinus Torvalds  * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
6891da177e4SLinus Torvalds  * that was successful then update final_block_in_bio and take a ref against
6901da177e4SLinus Torvalds  * the just-added page.
6911da177e4SLinus Torvalds  *
6921da177e4SLinus Torvalds  * Return zero on success.  Non-zero means the caller needs to start a new BIO.
6931da177e4SLinus Torvalds  */
694ba253fbfSAndi Kleen static inline int dio_bio_add_page(struct dio_submit *sdio)
6951da177e4SLinus Torvalds {
6961da177e4SLinus Torvalds 	int ret;
6971da177e4SLinus Torvalds 
698eb28be2bSAndi Kleen 	ret = bio_add_page(sdio->bio, sdio->cur_page,
699eb28be2bSAndi Kleen 			sdio->cur_page_len, sdio->cur_page_offset);
700eb28be2bSAndi Kleen 	if (ret == sdio->cur_page_len) {
7011da177e4SLinus Torvalds 		/*
7021da177e4SLinus Torvalds 		 * Decrement count only, if we are done with this page
7031da177e4SLinus Torvalds 		 */
704eb28be2bSAndi Kleen 		if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
705eb28be2bSAndi Kleen 			sdio->pages_in_io--;
70609cbfeafSKirill A. Shutemov 		get_page(sdio->cur_page);
707eb28be2bSAndi Kleen 		sdio->final_block_in_bio = sdio->cur_page_block +
708eb28be2bSAndi Kleen 			(sdio->cur_page_len >> sdio->blkbits);
7091da177e4SLinus Torvalds 		ret = 0;
7101da177e4SLinus Torvalds 	} else {
7111da177e4SLinus Torvalds 		ret = 1;
7121da177e4SLinus Torvalds 	}
7131da177e4SLinus Torvalds 	return ret;
7141da177e4SLinus Torvalds }
7151da177e4SLinus Torvalds 
7161da177e4SLinus Torvalds /*
7171da177e4SLinus Torvalds  * Put cur_page under IO.  The section of cur_page which is described by
7181da177e4SLinus Torvalds  * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
7191da177e4SLinus Torvalds  * starts on-disk at cur_page_block.
7201da177e4SLinus Torvalds  *
7211da177e4SLinus Torvalds  * We take a ref against the page here (on behalf of its presence in the bio).
7221da177e4SLinus Torvalds  *
7231da177e4SLinus Torvalds  * The caller of this function is responsible for removing cur_page from the
7241da177e4SLinus Torvalds  * dio, and for dropping the refcount which came from that presence.
7251da177e4SLinus Torvalds  */
726ba253fbfSAndi Kleen static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
72718772641SAndi Kleen 		struct buffer_head *map_bh)
7281da177e4SLinus Torvalds {
7291da177e4SLinus Torvalds 	int ret = 0;
7301da177e4SLinus Torvalds 
731eb28be2bSAndi Kleen 	if (sdio->bio) {
732eb28be2bSAndi Kleen 		loff_t cur_offset = sdio->cur_page_fs_offset;
733eb28be2bSAndi Kleen 		loff_t bio_next_offset = sdio->logical_offset_in_bio +
7344f024f37SKent Overstreet 			sdio->bio->bi_iter.bi_size;
735c2c6ca41SJosef Bacik 
7361da177e4SLinus Torvalds 		/*
737c2c6ca41SJosef Bacik 		 * See whether this new request is contiguous with the old.
738c2c6ca41SJosef Bacik 		 *
739f0940ceeSNamhyung Kim 		 * Btrfs cannot handle having logically non-contiguous requests
740f0940ceeSNamhyung Kim 		 * submitted.  For example if you have
741c2c6ca41SJosef Bacik 		 *
742c2c6ca41SJosef Bacik 		 * Logical:  [0-4095][HOLE][8192-12287]
743f0940ceeSNamhyung Kim 		 * Physical: [0-4095]      [4096-8191]
744c2c6ca41SJosef Bacik 		 *
745c2c6ca41SJosef Bacik 		 * We cannot submit those pages together as one BIO.  So if our
746c2c6ca41SJosef Bacik 		 * current logical offset in the file does not equal what would
747c2c6ca41SJosef Bacik 		 * be the next logical offset in the bio, submit the bio we
748c2c6ca41SJosef Bacik 		 * have.
7491da177e4SLinus Torvalds 		 */
750eb28be2bSAndi Kleen 		if (sdio->final_block_in_bio != sdio->cur_page_block ||
751c2c6ca41SJosef Bacik 		    cur_offset != bio_next_offset)
752eb28be2bSAndi Kleen 			dio_bio_submit(dio, sdio);
7531da177e4SLinus Torvalds 	}
7541da177e4SLinus Torvalds 
755eb28be2bSAndi Kleen 	if (sdio->bio == NULL) {
75618772641SAndi Kleen 		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
7571da177e4SLinus Torvalds 		if (ret)
7581da177e4SLinus Torvalds 			goto out;
7591da177e4SLinus Torvalds 	}
7601da177e4SLinus Torvalds 
761eb28be2bSAndi Kleen 	if (dio_bio_add_page(sdio) != 0) {
762eb28be2bSAndi Kleen 		dio_bio_submit(dio, sdio);
76318772641SAndi Kleen 		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
7641da177e4SLinus Torvalds 		if (ret == 0) {
765eb28be2bSAndi Kleen 			ret = dio_bio_add_page(sdio);
7661da177e4SLinus Torvalds 			BUG_ON(ret != 0);
7671da177e4SLinus Torvalds 		}
7681da177e4SLinus Torvalds 	}
7691da177e4SLinus Torvalds out:
7701da177e4SLinus Torvalds 	return ret;
7711da177e4SLinus Torvalds }
7721da177e4SLinus Torvalds 
7731da177e4SLinus Torvalds /*
7741da177e4SLinus Torvalds  * An autonomous function to put a chunk of a page under deferred IO.
7751da177e4SLinus Torvalds  *
7761da177e4SLinus Torvalds  * The caller doesn't actually know (or care) whether this piece of page is in
7771da177e4SLinus Torvalds  * a BIO, or is under IO or whatever.  We just take care of all possible
7781da177e4SLinus Torvalds  * situations here.  The separation between the logic of do_direct_IO() and
7791da177e4SLinus Torvalds  * that of submit_page_section() is important for clarity.  Please don't break.
7801da177e4SLinus Torvalds  *
7811da177e4SLinus Torvalds  * The chunk of page starts on-disk at blocknr.
7821da177e4SLinus Torvalds  *
7831da177e4SLinus Torvalds  * We perform deferred IO, by recording the last-submitted page inside our
7841da177e4SLinus Torvalds  * private part of the dio structure.  If possible, we just expand the IO
7851da177e4SLinus Torvalds  * across that page here.
7861da177e4SLinus Torvalds  *
7871da177e4SLinus Torvalds  * If that doesn't work out then we put the old page into the bio and add this
7881da177e4SLinus Torvalds  * page to the dio instead.
7891da177e4SLinus Torvalds  */
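
/*
 * Worked example of the "grow the current page" case below (illustrative
 * numbers only, assuming 512-byte dio blocks): two consecutive 512-byte
 * chunks of the same page, at page offsets 0 and 512 and mapped to adjacent
 * disk blocks N and N + 1, are coalesced into a single chunk with
 * cur_page_offset = 0 and cur_page_len = 1024; nothing is sent to a BIO
 * until a discontiguity or a boundary block forces it out.
 */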
790ba253fbfSAndi Kleen static inline int
791eb28be2bSAndi Kleen submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
79218772641SAndi Kleen 		    unsigned offset, unsigned len, sector_t blocknr,
79318772641SAndi Kleen 		    struct buffer_head *map_bh)
7941da177e4SLinus Torvalds {
7951da177e4SLinus Torvalds 	int ret = 0;
7961da177e4SLinus Torvalds 
7978a4c1e42SMike Christie 	if (dio->op == REQ_OP_WRITE) {
79898c4d57dSAndrew Morton 		/*
79998c4d57dSAndrew Morton 		 * Read accounting is performed in submit_bio()
80098c4d57dSAndrew Morton 		 */
80198c4d57dSAndrew Morton 		task_io_account_write(len);
80298c4d57dSAndrew Morton 	}
80398c4d57dSAndrew Morton 
8041da177e4SLinus Torvalds 	/*
8051da177e4SLinus Torvalds 	 * Can we just grow the current page's presence in the dio?
8061da177e4SLinus Torvalds 	 */
807eb28be2bSAndi Kleen 	if (sdio->cur_page == page &&
808eb28be2bSAndi Kleen 	    sdio->cur_page_offset + sdio->cur_page_len == offset &&
809eb28be2bSAndi Kleen 	    sdio->cur_page_block +
810eb28be2bSAndi Kleen 	    (sdio->cur_page_len >> sdio->blkbits) == blocknr) {
811eb28be2bSAndi Kleen 		sdio->cur_page_len += len;
8121da177e4SLinus Torvalds 		goto out;
8131da177e4SLinus Torvalds 	}
8141da177e4SLinus Torvalds 
8151da177e4SLinus Torvalds 	/*
8161da177e4SLinus Torvalds 	 * If there's a deferred page already there then send it.
8171da177e4SLinus Torvalds 	 */
818eb28be2bSAndi Kleen 	if (sdio->cur_page) {
81918772641SAndi Kleen 		ret = dio_send_cur_page(dio, sdio, map_bh);
82009cbfeafSKirill A. Shutemov 		put_page(sdio->cur_page);
821eb28be2bSAndi Kleen 		sdio->cur_page = NULL;
8221da177e4SLinus Torvalds 		if (ret)
823b1058b98SJan Kara 			return ret;
8241da177e4SLinus Torvalds 	}
8251da177e4SLinus Torvalds 
82609cbfeafSKirill A. Shutemov 	get_page(page);		/* It is in dio */
827eb28be2bSAndi Kleen 	sdio->cur_page = page;
828eb28be2bSAndi Kleen 	sdio->cur_page_offset = offset;
829eb28be2bSAndi Kleen 	sdio->cur_page_len = len;
830eb28be2bSAndi Kleen 	sdio->cur_page_block = blocknr;
831eb28be2bSAndi Kleen 	sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
8321da177e4SLinus Torvalds out:
833b1058b98SJan Kara 	/*
834b1058b98SJan Kara 	 * If sdio->boundary then we want to schedule the IO now to
835b1058b98SJan Kara 	 * avoid metadata seeks.
836b1058b98SJan Kara 	 */
837b1058b98SJan Kara 	if (sdio->boundary) {
838b1058b98SJan Kara 		ret = dio_send_cur_page(dio, sdio, map_bh);
839b1058b98SJan Kara 		dio_bio_submit(dio, sdio);
84009cbfeafSKirill A. Shutemov 		put_page(sdio->cur_page);
841b1058b98SJan Kara 		sdio->cur_page = NULL;
842b1058b98SJan Kara 	}
8431da177e4SLinus Torvalds 	return ret;
8441da177e4SLinus Torvalds }
8451da177e4SLinus Torvalds 
8461da177e4SLinus Torvalds /*
8471da177e4SLinus Torvalds  * If we are not writing the entire block and get_block() allocated
8481da177e4SLinus Torvalds  * the block for us, we need to fill-in the unused portion of the
8491da177e4SLinus Torvalds  * block with zeros. This happens only if user-buffer, fileoffset or
8501da177e4SLinus Torvalds  * io length is not filesystem block-size multiple.
8511da177e4SLinus Torvalds  * io length is not a filesystem block-size multiple.
8521da177e4SLinus Torvalds  * `end' is zero if we're doing the start of the IO, 1 at the end of the
8531da177e4SLinus Torvalds  * IO.
8541da177e4SLinus Torvalds  */
855ba253fbfSAndi Kleen static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
856ba253fbfSAndi Kleen 		int end, struct buffer_head *map_bh)
8571da177e4SLinus Torvalds {
8581da177e4SLinus Torvalds 	unsigned dio_blocks_per_fs_block;
8591da177e4SLinus Torvalds 	unsigned this_chunk_blocks;	/* In dio_blocks */
8601da177e4SLinus Torvalds 	unsigned this_chunk_bytes;
8611da177e4SLinus Torvalds 	struct page *page;
8621da177e4SLinus Torvalds 
863eb28be2bSAndi Kleen 	sdio->start_zero_done = 1;
86418772641SAndi Kleen 	if (!sdio->blkfactor || !buffer_new(map_bh))
8651da177e4SLinus Torvalds 		return;
8661da177e4SLinus Torvalds 
867eb28be2bSAndi Kleen 	dio_blocks_per_fs_block = 1 << sdio->blkfactor;
868eb28be2bSAndi Kleen 	this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1);
8691da177e4SLinus Torvalds 
8701da177e4SLinus Torvalds 	if (!this_chunk_blocks)
8711da177e4SLinus Torvalds 		return;
8721da177e4SLinus Torvalds 
8731da177e4SLinus Torvalds 	/*
8741da177e4SLinus Torvalds 	 * We need to zero out part of an fs block.  It is either at the
8751da177e4SLinus Torvalds 	 * beginning or the end of the fs block.
8761da177e4SLinus Torvalds 	 */
8771da177e4SLinus Torvalds 	if (end)
8781da177e4SLinus Torvalds 		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;
8791da177e4SLinus Torvalds 
880eb28be2bSAndi Kleen 	this_chunk_bytes = this_chunk_blocks << sdio->blkbits;
8811da177e4SLinus Torvalds 
882557ed1faSNick Piggin 	page = ZERO_PAGE(0);
883eb28be2bSAndi Kleen 	if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
88418772641SAndi Kleen 				sdio->next_block_for_io, map_bh))
8851da177e4SLinus Torvalds 		return;
8861da177e4SLinus Torvalds 
887eb28be2bSAndi Kleen 	sdio->next_block_for_io += this_chunk_blocks;
8881da177e4SLinus Torvalds }
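
/*
 * Worked example for dio_zero_block() (assumed geometry, not from this
 * file): with 512-byte dio blocks inside 4096-byte fs blocks (blkfactor = 3,
 * i.e. 8 dio blocks per fs block), a write that begins 1024 bytes into a
 * freshly allocated fs block has block_in_file & 7 == 2, so the start pass
 * (end == 0) zeroes this_chunk_blocks = 2 dio blocks (1024 bytes) at the
 * head of the new block before the user's data is submitted.
 */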
8891da177e4SLinus Torvalds 
8901da177e4SLinus Torvalds /*
8911da177e4SLinus Torvalds  * Walk the user pages, and the file, mapping blocks to disk and generating
8921da177e4SLinus Torvalds  * a sequence of (page,offset,len,block) mappings.  These mappings are injected
8931da177e4SLinus Torvalds  * into submit_page_section(), which takes care of the next stage of submission
8941da177e4SLinus Torvalds  *
8951da177e4SLinus Torvalds  * Direct IO against a blockdev is different from a file, because we can
8961da177e4SLinus Torvalds  * happily perform page-sized but 512-byte aligned IOs.  It is important that
8971da177e4SLinus Torvalds  * blockdev IO be able to have fine alignment and large sizes.
8981da177e4SLinus Torvalds  *
8991d8fa7a2SBadari Pulavarty  * So what we do is to permit the ->get_block function to populate bh.b_size
9001da177e4SLinus Torvalds  * with the size of IO which is permitted at this offset and this i_blkbits.
9011da177e4SLinus Torvalds  *
9021da177e4SLinus Torvalds  * For best results, the blockdev should be set up with 512-byte i_blkbits and
9031d8fa7a2SBadari Pulavarty  * it should set b_size to PAGE_SIZE or more inside get_block().  This gives
9041da177e4SLinus Torvalds  * fine alignment but still allows this function to work in PAGE_SIZE units.
9051da177e4SLinus Torvalds  */
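
/*
 * Illustrative consequence of the above (assumed numbers, not from this
 * file): a blockdev set up with 512-byte i_blkbits can take an 8192-byte
 * read at byte offset 512 with blkbits = 9 and blkfactor = 0, and if its
 * get_block() reports b_size of PAGE_SIZE or more per call, the loop below
 * still builds page-sized BIO segments despite the 512-byte alignment.
 */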
90618772641SAndi Kleen static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
90718772641SAndi Kleen 			struct buffer_head *map_bh)
9081da177e4SLinus Torvalds {
909eb28be2bSAndi Kleen 	const unsigned blkbits = sdio->blkbits;
910dd545b52SChandan Rajendra 	const unsigned i_blkbits = blkbits + sdio->blkfactor;
9111da177e4SLinus Torvalds 	int ret = 0;
9121da177e4SLinus Torvalds 
913eb28be2bSAndi Kleen 	while (sdio->block_in_file < sdio->final_block_in_request) {
9147b2c99d1SAl Viro 		struct page *page;
9157b2c99d1SAl Viro 		size_t from, to;
9166fcc5420SBoaz Harrosh 
9176fcc5420SBoaz Harrosh 		page = dio_get_page(dio, sdio);
9181da177e4SLinus Torvalds 		if (IS_ERR(page)) {
9191da177e4SLinus Torvalds 			ret = PTR_ERR(page);
9201da177e4SLinus Torvalds 			goto out;
9211da177e4SLinus Torvalds 		}
9226fcc5420SBoaz Harrosh 		from = sdio->head ? 0 : sdio->from;
9236fcc5420SBoaz Harrosh 		to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
9246fcc5420SBoaz Harrosh 		sdio->head++;
9251da177e4SLinus Torvalds 
9267b2c99d1SAl Viro 		while (from < to) {
9271da177e4SLinus Torvalds 			unsigned this_chunk_bytes;	/* # of bytes mapped */
9281da177e4SLinus Torvalds 			unsigned this_chunk_blocks;	/* # of blocks */
9291da177e4SLinus Torvalds 			unsigned u;
9301da177e4SLinus Torvalds 
931eb28be2bSAndi Kleen 			if (sdio->blocks_available == 0) {
9321da177e4SLinus Torvalds 				/*
9331da177e4SLinus Torvalds 				 * Need to go and map some more disk
9341da177e4SLinus Torvalds 				 */
9351da177e4SLinus Torvalds 				unsigned long blkmask;
9361da177e4SLinus Torvalds 				unsigned long dio_remainder;
9371da177e4SLinus Torvalds 
93818772641SAndi Kleen 				ret = get_more_blocks(dio, sdio, map_bh);
9391da177e4SLinus Torvalds 				if (ret) {
94009cbfeafSKirill A. Shutemov 					put_page(page);
9411da177e4SLinus Torvalds 					goto out;
9421da177e4SLinus Torvalds 				}
9431da177e4SLinus Torvalds 				if (!buffer_mapped(map_bh))
9441da177e4SLinus Torvalds 					goto do_holes;
9451da177e4SLinus Torvalds 
946eb28be2bSAndi Kleen 				sdio->blocks_available =
947f734c89cSJan Kara 						map_bh->b_size >> blkbits;
948eb28be2bSAndi Kleen 				sdio->next_block_for_io =
949eb28be2bSAndi Kleen 					map_bh->b_blocknr << sdio->blkfactor;
950f734c89cSJan Kara 				if (buffer_new(map_bh)) {
951f734c89cSJan Kara 					clean_bdev_aliases(
952f734c89cSJan Kara 						map_bh->b_bdev,
953f734c89cSJan Kara 						map_bh->b_blocknr,
954dd545b52SChandan Rajendra 						map_bh->b_size >> i_blkbits);
955f734c89cSJan Kara 				}
9561da177e4SLinus Torvalds 
957eb28be2bSAndi Kleen 				if (!sdio->blkfactor)
9581da177e4SLinus Torvalds 					goto do_holes;
9591da177e4SLinus Torvalds 
960eb28be2bSAndi Kleen 				blkmask = (1 << sdio->blkfactor) - 1;
961eb28be2bSAndi Kleen 				dio_remainder = (sdio->block_in_file & blkmask);
9621da177e4SLinus Torvalds 
9631da177e4SLinus Torvalds 				/*
9641da177e4SLinus Torvalds 				 * If we are at the start of IO and that IO
9651da177e4SLinus Torvalds 				 * starts partway into a fs-block,
9661da177e4SLinus Torvalds 				 * dio_remainder will be non-zero.  If the IO
9671da177e4SLinus Torvalds 				 * is a read then we can simply advance the IO
9681da177e4SLinus Torvalds 				 * cursor to the first block which is to be
9691da177e4SLinus Torvalds 				 * read.  But if the IO is a write and the
9701da177e4SLinus Torvalds 				 * block was newly allocated we cannot do that;
9711da177e4SLinus Torvalds 				 * the start of the fs block must be zeroed out
9721da177e4SLinus Torvalds 				 * on-disk
9731da177e4SLinus Torvalds 				 */
9741da177e4SLinus Torvalds 				if (!buffer_new(map_bh))
975eb28be2bSAndi Kleen 					sdio->next_block_for_io += dio_remainder;
976eb28be2bSAndi Kleen 				sdio->blocks_available -= dio_remainder;
9771da177e4SLinus Torvalds 			}
9781da177e4SLinus Torvalds do_holes:
9791da177e4SLinus Torvalds 			/* Handle holes */
9801da177e4SLinus Torvalds 			if (!buffer_mapped(map_bh)) {
98135dc8161SJeff Moyer 				loff_t i_size_aligned;
9821da177e4SLinus Torvalds 
9831da177e4SLinus Torvalds 				/* AKPM: eargh, -ENOTBLK is a hack */
9848a4c1e42SMike Christie 				if (dio->op == REQ_OP_WRITE) {
98509cbfeafSKirill A. Shutemov 					put_page(page);
9861da177e4SLinus Torvalds 					return -ENOTBLK;
9871da177e4SLinus Torvalds 				}
9881da177e4SLinus Torvalds 
98935dc8161SJeff Moyer 				/*
99035dc8161SJeff Moyer 				 * Be sure to account for a partial block as the
99135dc8161SJeff Moyer 				 * last block in the file
99235dc8161SJeff Moyer 				 */
99335dc8161SJeff Moyer 				i_size_aligned = ALIGN(i_size_read(dio->inode),
99435dc8161SJeff Moyer 							1 << blkbits);
995eb28be2bSAndi Kleen 				if (sdio->block_in_file >=
99635dc8161SJeff Moyer 						i_size_aligned >> blkbits) {
9971da177e4SLinus Torvalds 					/* We hit eof */
99809cbfeafSKirill A. Shutemov 					put_page(page);
9991da177e4SLinus Torvalds 					goto out;
10001da177e4SLinus Torvalds 				}
10017b2c99d1SAl Viro 				zero_user(page, from, 1 << blkbits);
1002eb28be2bSAndi Kleen 				sdio->block_in_file++;
10037b2c99d1SAl Viro 				from += 1 << blkbits;
10043320c60bSAl Viro 				dio->result += 1 << blkbits;
10051da177e4SLinus Torvalds 				goto next_block;
10061da177e4SLinus Torvalds 			}
10071da177e4SLinus Torvalds 
10081da177e4SLinus Torvalds 			/*
10091da177e4SLinus Torvalds 			 * If we're performing IO which has an alignment which
10101da177e4SLinus Torvalds 			 * is finer than the underlying fs, go check to see if
10111da177e4SLinus Torvalds 			 * we must zero out the start of this block.
10121da177e4SLinus Torvalds 			 */
1013eb28be2bSAndi Kleen 			if (unlikely(sdio->blkfactor && !sdio->start_zero_done))
101418772641SAndi Kleen 				dio_zero_block(dio, sdio, 0, map_bh);
10151da177e4SLinus Torvalds 
10161da177e4SLinus Torvalds 			/*
10171da177e4SLinus Torvalds 			 * Work out, in this_chunk_blocks, how much disk we
10181da177e4SLinus Torvalds 			 * can add to this page
10191da177e4SLinus Torvalds 			 */
1020eb28be2bSAndi Kleen 			this_chunk_blocks = sdio->blocks_available;
10217b2c99d1SAl Viro 			u = (to - from) >> blkbits;
10221da177e4SLinus Torvalds 			if (this_chunk_blocks > u)
10231da177e4SLinus Torvalds 				this_chunk_blocks = u;
1024eb28be2bSAndi Kleen 			u = sdio->final_block_in_request - sdio->block_in_file;
10251da177e4SLinus Torvalds 			if (this_chunk_blocks > u)
10261da177e4SLinus Torvalds 				this_chunk_blocks = u;
10271da177e4SLinus Torvalds 			this_chunk_bytes = this_chunk_blocks << blkbits;
10281da177e4SLinus Torvalds 			BUG_ON(this_chunk_bytes == 0);
10291da177e4SLinus Torvalds 
1030092c8d46SJan Kara 			if (this_chunk_blocks == sdio->blocks_available)
1031eb28be2bSAndi Kleen 				sdio->boundary = buffer_boundary(map_bh);
1032eb28be2bSAndi Kleen 			ret = submit_page_section(dio, sdio, page,
10337b2c99d1SAl Viro 						  from,
1034eb28be2bSAndi Kleen 						  this_chunk_bytes,
103518772641SAndi Kleen 						  sdio->next_block_for_io,
103618772641SAndi Kleen 						  map_bh);
10371da177e4SLinus Torvalds 			if (ret) {
103809cbfeafSKirill A. Shutemov 				put_page(page);
10391da177e4SLinus Torvalds 				goto out;
10401da177e4SLinus Torvalds 			}
1041eb28be2bSAndi Kleen 			sdio->next_block_for_io += this_chunk_blocks;
10421da177e4SLinus Torvalds 
1043eb28be2bSAndi Kleen 			sdio->block_in_file += this_chunk_blocks;
10447b2c99d1SAl Viro 			from += this_chunk_bytes;
10457b2c99d1SAl Viro 			dio->result += this_chunk_bytes;
1046eb28be2bSAndi Kleen 			sdio->blocks_available -= this_chunk_blocks;
10471da177e4SLinus Torvalds next_block:
1048eb28be2bSAndi Kleen 			BUG_ON(sdio->block_in_file > sdio->final_block_in_request);
1049eb28be2bSAndi Kleen 			if (sdio->block_in_file == sdio->final_block_in_request)
10501da177e4SLinus Torvalds 				break;
10511da177e4SLinus Torvalds 		}
10521da177e4SLinus Torvalds 
10531da177e4SLinus Torvalds 		/* Drop the ref which was taken in get_user_pages() */
105409cbfeafSKirill A. Shutemov 		put_page(page);
10551da177e4SLinus Torvalds 	}
10561da177e4SLinus Torvalds out:
10571da177e4SLinus Torvalds 	return ret;
10581da177e4SLinus Torvalds }
10591da177e4SLinus Torvalds 
1060847cc637SAndi Kleen static inline int drop_refcount(struct dio *dio)
10611da177e4SLinus Torvalds {
1062847cc637SAndi Kleen 	int ret2;
10635eb6c7a2SZach Brown 	unsigned long flags;
106420258b2bSZach Brown 
10651da177e4SLinus Torvalds 	/*
10668459d86aSZach Brown 	 * Sync will always be dropping the final ref and completing the
10675eb6c7a2SZach Brown 	 * operation.  AIO can do so too, either for a broken operation as
10685eb6c7a2SZach Brown 	 * described above or if all the bios race to complete before we get
10695eb6c7a2SZach Brown 	 * here.  In that case dio_complete() translates the EIOCBQUEUED into
107004b2fa9fSChristoph Hellwig 	 * the proper return code that the caller will hand to ->complete().
10715eb6c7a2SZach Brown 	 *
10725eb6c7a2SZach Brown 	 * This is managed by the bio_lock instead of being an atomic_t so that
10735eb6c7a2SZach Brown 	 * completion paths can drop their ref and use the remaining count to
10745eb6c7a2SZach Brown 	 * decide to wake the submission path atomically.
10751da177e4SLinus Torvalds 	 */
10765eb6c7a2SZach Brown 	spin_lock_irqsave(&dio->bio_lock, flags);
10775eb6c7a2SZach Brown 	ret2 = --dio->refcount;
10785eb6c7a2SZach Brown 	spin_unlock_irqrestore(&dio->bio_lock, flags);
1079847cc637SAndi Kleen 	return ret2;
10801da177e4SLinus Torvalds }
10811da177e4SLinus Torvalds 
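/*
 * For reference, the bio completion side (dio_bio_end_io() and friends
 * earlier in this file) follows roughly the mirror-image pattern under the
 * same lock, using the remaining count to decide whether to wake the
 * submitter:
 *
 *	spin_lock_irqsave(&dio->bio_lock, flags);
 *	if (--dio->refcount == 1 && dio->waiter)
 *		wake_up_process(dio->waiter);
 *	spin_unlock_irqrestore(&dio->bio_lock, flags);
 *
 * This is only a sketch; see the actual completion handlers for the exact
 * bookkeeping.
 */
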
1082eafdc7d1SChristoph Hellwig /*
1083eafdc7d1SChristoph Hellwig  * This is a library function for use by filesystem drivers.
1084eafdc7d1SChristoph Hellwig  *
1085eafdc7d1SChristoph Hellwig  * The locking rules are governed by the flags parameter:
1086eafdc7d1SChristoph Hellwig  *  - if the flags value contains DIO_LOCKING we use a fancy locking
1087eafdc7d1SChristoph Hellwig  *    scheme for dumb filesystems.
1088eafdc7d1SChristoph Hellwig  *    For writes this function is called under i_mutex and returns with
1089eafdc7d1SChristoph Hellwig  *    i_mutex held; for reads, i_mutex is not held on entry, but it is
1090eafdc7d1SChristoph Hellwig  *    taken and dropped again before returning.
1091eafdc7d1SChristoph Hellwig  *  - if the flags value does NOT contain DIO_LOCKING we don't use any
1092eafdc7d1SChristoph Hellwig  *    internal locking but rather rely on the filesystem to synchronize
1093eafdc7d1SChristoph Hellwig  *    direct I/O reads/writes versus each other and truncate.
1094df2d6f26SChristoph Hellwig  *
1095df2d6f26SChristoph Hellwig  * To help with locking against truncate we increment the i_dio_count
1096df2d6f26SChristoph Hellwig  * counter before starting direct I/O, and decrement it once we are done.
1097df2d6f26SChristoph Hellwig  * Truncate can wait for it to reach zero to provide exclusion.  It is
1098df2d6f26SChristoph Hellwig  * expected that filesystems provide exclusion between new direct I/O
1099df2d6f26SChristoph Hellwig  * and truncates.  For DIO_LOCKING filesystems this is done by i_mutex,
1100df2d6f26SChristoph Hellwig  * but other filesystems need to take care of this on their own.
1101ba253fbfSAndi Kleen  *
1102ba253fbfSAndi Kleen  * NOTE: if you pass "sdio" to anything by pointer make sure that function
1103ba253fbfSAndi Kleen  * is always inlined. Otherwise gcc is unable to split the structure into
1104ba253fbfSAndi Kleen  * individual fields and will generate much worse code. This is important
1105ba253fbfSAndi Kleen  * for the whole file.
1106eafdc7d1SChristoph Hellwig  */
110765dd2aa9SAndi Kleen static inline ssize_t
110817f8c842SOmar Sandoval do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
110917f8c842SOmar Sandoval 		      struct block_device *bdev, struct iov_iter *iter,
1110c8b8e32dSChristoph Hellwig 		      get_block_t get_block, dio_iodone_t end_io,
1111facd07b0SJosef Bacik 		      dio_submit_t submit_io, int flags)
11121da177e4SLinus Torvalds {
1113ab73857eSLinus Torvalds 	unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
1114ab73857eSLinus Torvalds 	unsigned blkbits = i_blkbits;
11151da177e4SLinus Torvalds 	unsigned blocksize_mask = (1 << blkbits) - 1;
11161da177e4SLinus Torvalds 	ssize_t retval = -EINVAL;
1117af436472SChristoph Hellwig 	size_t count = iov_iter_count(iter);
1118c8b8e32dSChristoph Hellwig 	loff_t offset = iocb->ki_pos;
1119af436472SChristoph Hellwig 	loff_t end = offset + count;
11201da177e4SLinus Torvalds 	struct dio *dio;
1121eb28be2bSAndi Kleen 	struct dio_submit sdio = { 0, };
1122847cc637SAndi Kleen 	struct buffer_head map_bh = { 0, };
1123647d1e4cSFengguang Wu 	struct blk_plug plug;
1124886a3911SAl Viro 	unsigned long align = offset | iov_iter_alignment(iter);
11251da177e4SLinus Torvalds 
112665dd2aa9SAndi Kleen 	/*
112765dd2aa9SAndi Kleen 	 * Avoid references to bdev if not absolutely needed, to give
112865dd2aa9SAndi Kleen 	 * the early prefetch in the caller enough time.
112965dd2aa9SAndi Kleen 	 */
11301da177e4SLinus Torvalds 
1131886a3911SAl Viro 	if (align & blocksize_mask) {
11321da177e4SLinus Torvalds 		if (bdev)
113365dd2aa9SAndi Kleen 			blkbits = blksize_bits(bdev_logical_block_size(bdev));
11341da177e4SLinus Torvalds 		blocksize_mask = (1 << blkbits) - 1;
1135886a3911SAl Viro 		if (align & blocksize_mask)
11361da177e4SLinus Torvalds 			goto out;
11371da177e4SLinus Torvalds 	}
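
	/*
	 * From here on the request is known to be aligned to 1 << blkbits,
	 * where blkbits is either the filesystem's block size or, for finer
	 * grained I/O, the device's logical block size.  In the latter case
	 * sdio.blkfactor (set up below) records the difference and the
	 * sub-block zeroing paths handle the edges.
	 */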
11381da177e4SLinus Torvalds 
1139f9b5570dSChristoph Hellwig 	/* watch out for a 0 len io from a tricksy fs */
114017f8c842SOmar Sandoval 	if (iov_iter_rw(iter) == READ && !iov_iter_count(iter))
1141f9b5570dSChristoph Hellwig 		return 0;
1142f9b5570dSChristoph Hellwig 
11436e8267f5SAndi Kleen 	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
11441da177e4SLinus Torvalds 	retval = -ENOMEM;
11451da177e4SLinus Torvalds 	if (!dio)
11461da177e4SLinus Torvalds 		goto out;
114723aee091SJeff Moyer 	/*
114823aee091SJeff Moyer 	 * Believe it or not, zeroing out the page array caused a .5%
114923aee091SJeff Moyer 	 * performance regression in a database benchmark.  So, we take
115023aee091SJeff Moyer 	 * care to only zero out what's needed.
115123aee091SJeff Moyer 	 */
115223aee091SJeff Moyer 	memset(dio, 0, offsetof(struct dio, pages));
11531da177e4SLinus Torvalds 
11545fe878aeSChristoph Hellwig 	dio->flags = flags;
11555fe878aeSChristoph Hellwig 	if (dio->flags & DIO_LOCKING) {
115617f8c842SOmar Sandoval 		if (iov_iter_rw(iter) == READ) {
11575fe878aeSChristoph Hellwig 			struct address_space *mapping =
11585fe878aeSChristoph Hellwig 					iocb->ki_filp->f_mapping;
11591da177e4SLinus Torvalds 
11605fe878aeSChristoph Hellwig 			/* will be released by direct_io_worker */
11615955102cSAl Viro 			inode_lock(inode);
11621da177e4SLinus Torvalds 
11631da177e4SLinus Torvalds 			retval = filemap_write_and_wait_range(mapping, offset,
11641da177e4SLinus Torvalds 							      end - 1);
11651da177e4SLinus Torvalds 			if (retval) {
11665955102cSAl Viro 				inode_unlock(inode);
11676e8267f5SAndi Kleen 				kmem_cache_free(dio_cache, dio);
11681da177e4SLinus Torvalds 				goto out;
11691da177e4SLinus Torvalds 			}
11701da177e4SLinus Torvalds 		}
1171df2d6f26SChristoph Hellwig 	}
11721da177e4SLinus Torvalds 
117374cedf9bSJan Kara 	/* Once we have sampled i_size, check for reads beyond EOF */
117474cedf9bSJan Kara 	dio->i_size = i_size_read(inode);
117574cedf9bSJan Kara 	if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
117674cedf9bSJan Kara 		if (dio->flags & DIO_LOCKING)
11775955102cSAl Viro 			inode_unlock(inode);
117874cedf9bSJan Kara 		kmem_cache_free(dio_cache, dio);
11792d4594acSAl Viro 		retval = 0;
118074cedf9bSJan Kara 		goto out;
118174cedf9bSJan Kara 	}
118274cedf9bSJan Kara 
11835fe878aeSChristoph Hellwig 	/*
118660392573SChristoph Hellwig 	 * For file-extending writes, updating i_size before data writeouts
118560392573SChristoph Hellwig 	 * complete can expose uninitialized blocks in dumb filesystems.
118660392573SChristoph Hellwig 	 * In that case we need to wait for I/O completion even if asked
118760392573SChristoph Hellwig 	 * for an asynchronous write.
11881da177e4SLinus Torvalds 	 */
118960392573SChristoph Hellwig 	if (is_sync_kiocb(iocb))
119060392573SChristoph Hellwig 		dio->is_async = false;
119160392573SChristoph Hellwig 	else if (!(dio->flags & DIO_ASYNC_EXTEND) &&
119217f8c842SOmar Sandoval 		 iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
119360392573SChristoph Hellwig 		dio->is_async = false;
119460392573SChristoph Hellwig 	else
119560392573SChristoph Hellwig 		dio->is_async = true;
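
	/*
	 * For example, an AIO write whose range extends past the current
	 * i_size falls into the middle case above: dio->is_async is cleared
	 * and the request is waited on below, unless the filesystem passed
	 * DIO_ASYNC_EXTEND.
	 */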
119660392573SChristoph Hellwig 
1197847cc637SAndi Kleen 	dio->inode = inode;
11988a4c1e42SMike Christie 	if (iov_iter_rw(iter) == WRITE) {
11998a4c1e42SMike Christie 		dio->op = REQ_OP_WRITE;
120070fd7614SChristoph Hellwig 		dio->op_flags = REQ_SYNC | REQ_IDLE;
1201*03a07c92SGoldwyn Rodrigues 		if (iocb->ki_flags & IOCB_NOWAIT)
1202*03a07c92SGoldwyn Rodrigues 			dio->op_flags |= REQ_NOWAIT;
12038a4c1e42SMike Christie 	} else {
12048a4c1e42SMike Christie 		dio->op = REQ_OP_READ;
12058a4c1e42SMike Christie 	}
120602afc27fSChristoph Hellwig 
120702afc27fSChristoph Hellwig 	/*
120802afc27fSChristoph Hellwig 	 * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
120902afc27fSChristoph Hellwig 	 * so that we can call ->fsync.
121002afc27fSChristoph Hellwig 	 */
121117f8c842SOmar Sandoval 	if (dio->is_async && iov_iter_rw(iter) == WRITE &&
121202afc27fSChristoph Hellwig 	    ((iocb->ki_filp->f_flags & O_DSYNC) ||
121302afc27fSChristoph Hellwig 	     IS_SYNC(iocb->ki_filp->f_mapping->host))) {
121402afc27fSChristoph Hellwig 		retval = dio_set_defer_completion(dio);
121502afc27fSChristoph Hellwig 		if (retval) {
121602afc27fSChristoph Hellwig 			/*
121702afc27fSChristoph Hellwig 			 * We grab i_mutex only for reads so we don't have
121802afc27fSChristoph Hellwig 			 * to release it here
121902afc27fSChristoph Hellwig 			 */
122002afc27fSChristoph Hellwig 			kmem_cache_free(dio_cache, dio);
122102afc27fSChristoph Hellwig 			goto out;
122202afc27fSChristoph Hellwig 		}
122302afc27fSChristoph Hellwig 	}
122402afc27fSChristoph Hellwig 
122502afc27fSChristoph Hellwig 	/*
122602afc27fSChristoph Hellwig 	 * Will be decremented at I/O completion time.
122702afc27fSChristoph Hellwig 	 */
1228fe0f07d0SJens Axboe 	if (!(dio->flags & DIO_SKIP_DIO_COUNT))
1229fe0f07d0SJens Axboe 		inode_dio_begin(inode);
123002afc27fSChristoph Hellwig 
123102afc27fSChristoph Hellwig 	retval = 0;
1232847cc637SAndi Kleen 	sdio.blkbits = blkbits;
1233ab73857eSLinus Torvalds 	sdio.blkfactor = i_blkbits - blkbits;
1234847cc637SAndi Kleen 	sdio.block_in_file = offset >> blkbits;
1235847cc637SAndi Kleen 
1236847cc637SAndi Kleen 	sdio.get_block = get_block;
1237847cc637SAndi Kleen 	dio->end_io = end_io;
1238847cc637SAndi Kleen 	sdio.submit_io = submit_io;
1239847cc637SAndi Kleen 	sdio.final_block_in_bio = -1;
1240847cc637SAndi Kleen 	sdio.next_block_for_io = -1;
1241847cc637SAndi Kleen 
1242847cc637SAndi Kleen 	dio->iocb = iocb;
1243847cc637SAndi Kleen 
1244847cc637SAndi Kleen 	spin_lock_init(&dio->bio_lock);
1245847cc637SAndi Kleen 	dio->refcount = 1;
1246847cc637SAndi Kleen 
124753cbf3b1SMing Lei 	dio->should_dirty = (iter->type == ITER_IOVEC);
12487b2c99d1SAl Viro 	sdio.iter = iter;
12497b2c99d1SAl Viro 	sdio.final_block_in_request =
12507b2c99d1SAl Viro 		(offset + iov_iter_count(iter)) >> blkbits;
12517b2c99d1SAl Viro 
1252847cc637SAndi Kleen 	/*
1253847cc637SAndi Kleen 	 * In case of non-aligned buffers, we may need 2 more
1254847cc637SAndi Kleen 	 * pages since we need to zero out the first and last block.
1255847cc637SAndi Kleen 	 */
1256847cc637SAndi Kleen 	if (unlikely(sdio.blkfactor))
1257847cc637SAndi Kleen 		sdio.pages_in_io = 2;
1258847cc637SAndi Kleen 
1259f67da30cSAl Viro 	sdio.pages_in_io += iov_iter_npages(iter, INT_MAX);
1260847cc637SAndi Kleen 
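	/*
	 * Plugging lets the block layer batch the bios built up by
	 * do_direct_IO() below and gives it a chance to merge adjacent
	 * requests before they are dispatched when the plug is flushed.
	 */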
1261647d1e4cSFengguang Wu 	blk_start_plug(&plug);
1262647d1e4cSFengguang Wu 
1263847cc637SAndi Kleen 	retval = do_direct_IO(dio, &sdio, &map_bh);
12647b2c99d1SAl Viro 	if (retval)
1265847cc637SAndi Kleen 		dio_cleanup(dio, &sdio);
1266847cc637SAndi Kleen 
1267847cc637SAndi Kleen 	if (retval == -ENOTBLK) {
1268847cc637SAndi Kleen 		/*
1269847cc637SAndi Kleen 		 * The remaining part of the request will be
1270847cc637SAndi Kleen 		 * handled by buffered I/O when we return.
1271847cc637SAndi Kleen 		 */
1272847cc637SAndi Kleen 		retval = 0;
1273847cc637SAndi Kleen 	}
1274847cc637SAndi Kleen 	/*
1275847cc637SAndi Kleen 	 * There may be some unwritten disk at the end of a part-written
1276847cc637SAndi Kleen 	 * fs-block-sized block.  Go zero that now.
1277847cc637SAndi Kleen 	 */
1278847cc637SAndi Kleen 	dio_zero_block(dio, &sdio, 1, &map_bh);
1279847cc637SAndi Kleen 
1280847cc637SAndi Kleen 	if (sdio.cur_page) {
1281847cc637SAndi Kleen 		ssize_t ret2;
1282847cc637SAndi Kleen 
1283847cc637SAndi Kleen 		ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
1284847cc637SAndi Kleen 		if (retval == 0)
1285847cc637SAndi Kleen 			retval = ret2;
128609cbfeafSKirill A. Shutemov 		put_page(sdio.cur_page);
1287847cc637SAndi Kleen 		sdio.cur_page = NULL;
1288847cc637SAndi Kleen 	}
1289847cc637SAndi Kleen 	if (sdio.bio)
1290847cc637SAndi Kleen 		dio_bio_submit(dio, &sdio);
1291847cc637SAndi Kleen 
1292647d1e4cSFengguang Wu 	blk_finish_plug(&plug);
1293647d1e4cSFengguang Wu 
1294847cc637SAndi Kleen 	/*
1295847cc637SAndi Kleen 	 * It is possible that we return a short IO due to end of file.
1296847cc637SAndi Kleen 	 * In that case, we need to release all the pages we got hold of.
1297847cc637SAndi Kleen 	 */
1298847cc637SAndi Kleen 	dio_cleanup(dio, &sdio);
1299847cc637SAndi Kleen 
1300847cc637SAndi Kleen 	/*
1301847cc637SAndi Kleen 	 * All block lookups have been performed. For READ requests
1302847cc637SAndi Kleen 	 * we can let i_mutex go now that it has achieved its purpose
1303847cc637SAndi Kleen 	 * of protecting us from looking up uninitialized blocks.
1304847cc637SAndi Kleen 	 */
130517f8c842SOmar Sandoval 	if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
13065955102cSAl Viro 		inode_unlock(dio->inode);
1307847cc637SAndi Kleen 
1308847cc637SAndi Kleen 	/*
1309847cc637SAndi Kleen 	 * The only time we want to leave bios in flight is when a successful
1310847cc637SAndi Kleen 	 * partial aio read or full aio write has been set up.  In that case
1311847cc637SAndi Kleen 	 * bio completion will call aio_complete.  The only time it's safe to
1312847cc637SAndi Kleen 	 * call aio_complete is when we return -EIOCBQUEUED, so we key on that.
1313847cc637SAndi Kleen 	 * This had *better* be the only place that raises -EIOCBQUEUED.
1314847cc637SAndi Kleen 	 */
1315847cc637SAndi Kleen 	BUG_ON(retval == -EIOCBQUEUED);
1316847cc637SAndi Kleen 	if (dio->is_async && retval == 0 && dio->result &&
131717f8c842SOmar Sandoval 	    (iov_iter_rw(iter) == READ || dio->result == count))
1318847cc637SAndi Kleen 		retval = -EIOCBQUEUED;
1319af436472SChristoph Hellwig 	else
1320847cc637SAndi Kleen 		dio_await_completion(dio);
1321847cc637SAndi Kleen 
1322847cc637SAndi Kleen 	if (drop_refcount(dio) == 0) {
1323716b9bc0SChristoph Hellwig 		retval = dio_complete(dio, retval, false);
1324847cc637SAndi Kleen 	} else
1325847cc637SAndi Kleen 		BUG_ON(retval != -EIOCBQUEUED);
13261da177e4SLinus Torvalds 
13277bb46a67Snpiggin@suse.de out:
13287bb46a67Snpiggin@suse.de 	return retval;
13297bb46a67Snpiggin@suse.de }
133065dd2aa9SAndi Kleen 
133117f8c842SOmar Sandoval ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
133217f8c842SOmar Sandoval 			     struct block_device *bdev, struct iov_iter *iter,
1333c8b8e32dSChristoph Hellwig 			     get_block_t get_block,
133417f8c842SOmar Sandoval 			     dio_iodone_t end_io, dio_submit_t submit_io,
133517f8c842SOmar Sandoval 			     int flags)
133665dd2aa9SAndi Kleen {
133765dd2aa9SAndi Kleen 	/*
133865dd2aa9SAndi Kleen 	 * The block device state is needed in the end to finally
133965dd2aa9SAndi Kleen 	 * submit everything.  Since it's likely to be cache cold,
134065dd2aa9SAndi Kleen 	 * prefetch it here as the first thing to hide some of the
134165dd2aa9SAndi Kleen 	 * latency.
134265dd2aa9SAndi Kleen 	 *
134365dd2aa9SAndi Kleen 	 * Attempt to prefetch the pieces we likely need later.
134465dd2aa9SAndi Kleen 	 */
134565dd2aa9SAndi Kleen 	prefetch(&bdev->bd_disk->part_tbl);
134665dd2aa9SAndi Kleen 	prefetch(bdev->bd_queue);
134765dd2aa9SAndi Kleen 	prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
134865dd2aa9SAndi Kleen 
1349c8b8e32dSChristoph Hellwig 	return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block,
135017f8c842SOmar Sandoval 				     end_io, submit_io, flags);
135165dd2aa9SAndi Kleen }
135265dd2aa9SAndi Kleen 
13531da177e4SLinus Torvalds EXPORT_SYMBOL(__blockdev_direct_IO);
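
/*
 * Sketch, not code from any particular filesystem: a filesystem that wants
 * the dumb DIO_LOCKING scheme typically wires its ->direct_IO method up to
 * this helper much like the blockdev_direct_IO() wrapper in
 * include/linux/fs.h, passing its own get_block callback (myfs_get_block
 * below is a placeholder):
 *
 *	static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
 *					    iter, myfs_get_block, NULL, NULL,
 *					    DIO_LOCKING | DIO_SKIP_HOLES);
 *	}
 */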
13546e8267f5SAndi Kleen 
13556e8267f5SAndi Kleen static __init int dio_init(void)
13566e8267f5SAndi Kleen {
13576e8267f5SAndi Kleen 	dio_cache = KMEM_CACHE(dio, SLAB_PANIC);
13586e8267f5SAndi Kleen 	return 0;
13596e8267f5SAndi Kleen }
13606e8267f5SAndi Kleen module_init(dio_init)
1361