1 /*
2  * fs/direct-io.c
3  *
4  * Copyright (C) 2002, Linus Torvalds.
5  *
6  * O_DIRECT
7  *
8  * 04Jul2002	akpm@zip.com.au
9  *		Initial version
10  * 11Sep2002	janetinc@us.ibm.com
11  * 		added readv/writev support.
12  * 29Oct2002	akpm@zip.com.au
13  *		rewrote bio_add_page() support.
14  * 30Oct2002	pbadari@us.ibm.com
15  *		added support for non-aligned IO.
16  * 06Nov2002	pbadari@us.ibm.com
17  *		added asynchronous IO support.
18  * 21Jul2003	nathans@sgi.com
19  *		added IO completion notifier.
20  */
21 
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/fs.h>
26 #include <linux/mm.h>
27 #include <linux/slab.h>
28 #include <linux/highmem.h>
29 #include <linux/pagemap.h>
30 #include <linux/bio.h>
31 #include <linux/wait.h>
32 #include <linux/err.h>
33 #include <linux/blkdev.h>
34 #include <linux/buffer_head.h>
35 #include <linux/rwsem.h>
36 #include <linux/uio.h>
37 #include <asm/atomic.h>
38 
39 /*
40  * How many user pages to map in one call to get_user_pages().  This determines
41  * the size of a structure on the stack.
42  */
43 #define DIO_PAGES	64
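
/*
 * For illustration (hedged; pointer width is architecture-dependent):
 * DIO_PAGES sizes the pages[] array embedded in struct dio below, i.e.
 *
 *	64 * sizeof(struct page *)  == 256 bytes with 4-byte pointers
 *	                            == 512 bytes with 8-byte pointers
 */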
44 
45 /*
46  * This code generally works in units of "dio_blocks".  A dio_block is
47  * somewhere between the hard sector size and the filesystem block size.  It
48  * is determined on a per-invocation basis.  When talking to the filesystem
49  * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
50  * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
51  * to dio_block quantities by shifting left by blkfactor.
52  *
53  * If blkfactor is zero then the user's request was aligned to the filesystem's
54  * blocksize.
55  *
56  * lock_type is DIO_LOCKING for regular files on direct-IO-naive filesystems.
57  * This determines whether we need to do the fancy locking which prevents
58  * direct-IO from being able to read uninitialised disk blocks.  If it's zero
59  * (blockdev) this locking is not done, and if it is DIO_OWN_LOCKING i_sem is
60  * not held for the entire direct write (it is taken briefly, initially, during
61  * a direct read, but it's never held for the duration of a direct-IO).
62  */
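
/*
 * A worked example of the conversions described above (illustrative,
 * hypothetical sizes): with 512-byte dio_blocks inside 4096-byte fs
 * blocks, blkfactor == 3, so
 *
 *	sector_t dio_block = 21;
 *	sector_t fs_block  = dio_block >> 3;	== 2, the fs block to map
 *	sector_t fs_start  = fs_block << 3;	== 16, its first dio_block
 *
 * and dio_block & 7 (== 5 here) is the offset into that fs block.
 */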
63 
64 struct dio {
65 	/* BIO submission state */
66 	struct bio *bio;		/* bio under assembly */
67 	struct inode *inode;
68 	int rw;
69 	int lock_type;			/* doesn't change */
70 	unsigned blkbits;		/* doesn't change */
71 	unsigned blkfactor;		/* When we're using an alignment which
72 					   is finer than the filesystem's soft
73 					   blocksize, this specifies how much
74 					   finer.  blkfactor=2 means 1/4-block
75 					   alignment.  Does not change */
76 	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
77 					   been performed at the start of a
78 					   write */
79 	int pages_in_io;		/* approximate total IO pages */
80 	size_t	size;			/* total request size (doesn't change)*/
81 	sector_t block_in_file;		/* Current offset into the underlying
82 					   file in dio_block units. */
83 	unsigned blocks_available;	/* At block_in_file.  changes */
84 	sector_t final_block_in_request;/* doesn't change */
85 	unsigned first_block_in_page;	/* doesn't change, used only once */
86 	int boundary;			/* prev block is at a boundary */
87 	int reap_counter;		/* rate limit reaping */
88 	get_blocks_t *get_blocks;	/* block mapping function */
89 	dio_iodone_t *end_io;		/* IO completion function */
90 	sector_t final_block_in_bio;	/* current final block in bio + 1 */
91 	sector_t next_block_for_io;	/* next block to be put under IO,
92 					   in dio_blocks units */
93 	struct buffer_head map_bh;	/* last get_blocks() result */
94 
95 	/*
96 	 * Deferred addition of a page to the dio.  These variables are
97 	 * private to dio_send_cur_page(), submit_page_section() and
98 	 * dio_bio_add_page().
99 	 */
100 	struct page *cur_page;		/* The page */
101 	unsigned cur_page_offset;	/* Offset into it, in bytes */
102 	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
103 	sector_t cur_page_block;	/* Where it starts */
104 
105 	/*
106 	 * Page fetching state. These variables belong to dio_refill_pages().
107 	 */
108 	int curr_page;			/* changes */
109 	int total_pages;		/* doesn't change */
110 	unsigned long curr_user_address;/* changes */
111 
112 	/*
113 	 * Page queue.  These variables belong to dio_refill_pages() and
114 	 * dio_get_page().
115 	 */
116 	struct page *pages[DIO_PAGES];	/* page buffer */
117 	unsigned head;			/* next page to process */
118 	unsigned tail;			/* last valid page + 1 */
119 	int page_errors;		/* errno from get_user_pages() */
120 
121 	/* BIO completion state */
122 	spinlock_t bio_lock;		/* protects BIO fields below */
123 	int bio_count;			/* nr bios to be completed */
124 	int bios_in_flight;		/* nr bios in flight */
125 	struct bio *bio_list;		/* singly linked via bi_private */
126 	struct task_struct *waiter;	/* waiting task (NULL if none) */
127 
128 	/* AIO related stuff */
129 	struct kiocb *iocb;		/* kiocb */
130 	int is_async;			/* is IO async ? */
131 	ssize_t result;                 /* IO result */
132 };
133 
134 /*
135  * How many pages are in the queue?
136  */
137 static inline unsigned dio_pages_present(struct dio *dio)
138 {
139 	return dio->tail - dio->head;
140 }
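
/*
 * Worked example (illustrative): after a dio_refill_pages() which
 * pinned 64 pages, head == 0 and tail == 64, so 64 pages are present.
 * Each dio_get_page() hands out pages[head] and advances head; when
 * head == tail the queue is empty and must be refilled.
 */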
141 
142 /*
143  * Go grab and pin some userspace pages.   Typically we'll get 64 at a time.
144  */
145 static int dio_refill_pages(struct dio *dio)
146 {
147 	int ret;
148 	int nr_pages;
149 
150 	nr_pages = min(dio->total_pages - dio->curr_page, DIO_PAGES);
151 	down_read(&current->mm->mmap_sem);
152 	ret = get_user_pages(
153 		current,			/* Task for fault accounting */
154 		current->mm,			/* whose pages? */
155 		dio->curr_user_address,		/* Where from? */
156 		nr_pages,			/* How many pages? */
157 		dio->rw == READ,		/* Write to memory? */
158 		0,				/* force (?) */
159 		&dio->pages[0],
160 		NULL);				/* vmas */
161 	up_read(&current->mm->mmap_sem);
162 
163 	if (ret < 0 && dio->blocks_available && (dio->rw == WRITE)) {
164 		/*
165 		 * A memory fault, but the filesystem has some outstanding
166 		 * mapped blocks.  We need to use those blocks up to avoid
167 		 * leaking stale data in the file.
168 		 */
169 		if (dio->page_errors == 0)
170 			dio->page_errors = ret;
171 		dio->pages[0] = ZERO_PAGE(dio->curr_user_address);
172 		dio->head = 0;
173 		dio->tail = 1;
174 		ret = 0;
175 		goto out;
176 	}
177 
178 	if (ret >= 0) {
179 		dio->curr_user_address += ret * PAGE_SIZE;
180 		dio->curr_page += ret;
181 		dio->head = 0;
182 		dio->tail = ret;
183 		ret = 0;
184 	}
185 out:
186 	return ret;
187 }
188 
189 /*
190  * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
191  * buffered inside the dio so that we can call get_user_pages() against a
192  * decent number of pages, less frequently.  This also makes nicer use of
193  * the L1 cache.
194  */
195 static struct page *dio_get_page(struct dio *dio)
196 {
197 	if (dio_pages_present(dio) == 0) {
198 		int ret;
199 
200 		ret = dio_refill_pages(dio);
201 		if (ret)
202 			return ERR_PTR(ret);
203 		BUG_ON(dio_pages_present(dio) == 0);
204 	}
205 	return dio->pages[dio->head++];
206 }
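
/*
 * For illustration only, a minimal sketch of the consumer pattern
 * (this mirrors what do_direct_IO() does below; error handling is
 * abbreviated):
 *
 *	struct page *page = dio_get_page(dio);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... map blocks and queue sections of the page for IO ...
 *	page_cache_release(page);	drops the get_user_pages() ref
 */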
207 
208 /*
209  * Called when all DIO BIO I/O has been completed - let the filesystem
210  * know, if it registered an interest earlier via get_blocks.  Pass the
211  * private field of the map buffer_head so that filesystems can use it
212  * to hold additional state between get_blocks calls and dio_complete.
213  */
214 static void dio_complete(struct dio *dio, loff_t offset, ssize_t bytes)
215 {
216 	if (dio->end_io && dio->result)
217 		dio->end_io(dio->inode, offset, bytes, dio->map_bh.b_private);
218 	if (dio->lock_type == DIO_LOCKING)
219 		up_read(&dio->inode->i_alloc_sem);
220 }
221 
222 /*
223  * Called when a BIO has been processed.  If the count goes to zero then IO is
224  * complete and we can signal this to the AIO layer.
225  */
226 static void finished_one_bio(struct dio *dio)
227 {
228 	unsigned long flags;
229 
230 	spin_lock_irqsave(&dio->bio_lock, flags);
231 	if (dio->bio_count == 1) {
232 		if (dio->is_async) {
233 			/*
234 			 * Last reference to the dio is going away.
235 			 * Drop spinlock and complete the DIO.
236 			 */
237 			spin_unlock_irqrestore(&dio->bio_lock, flags);
238 			dio_complete(dio, dio->block_in_file << dio->blkbits,
239 					dio->result);
240 			/* Complete AIO later if falling back to buffered i/o */
241 			if (dio->result == dio->size ||
242 				((dio->rw == READ) && dio->result)) {
243 				aio_complete(dio->iocb, dio->result, 0);
244 				kfree(dio);
245 				return;
246 			} else {
247 				/*
248 				 * Falling back to buffered
249 				 */
250 				spin_lock_irqsave(&dio->bio_lock, flags);
251 				dio->bio_count--;
252 				if (dio->waiter)
253 					wake_up_process(dio->waiter);
254 				spin_unlock_irqrestore(&dio->bio_lock, flags);
255 				return;
256 			}
257 		}
258 	}
259 	dio->bio_count--;
260 	spin_unlock_irqrestore(&dio->bio_lock, flags);
261 }
262 
263 static int dio_bio_complete(struct dio *dio, struct bio *bio);
264 /*
265  * Asynchronous IO callback.
266  */
267 static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
268 {
269 	struct dio *dio = bio->bi_private;
270 
271 	if (bio->bi_size)
272 		return 1;
273 
274 	/* cleanup the bio */
275 	dio_bio_complete(dio, bio);
276 	return 0;
277 }
278 
279 /*
280  * The BIO completion handler simply queues the BIO up for the process-context
281  * handler.
282  *
283  * During I/O bi_private points at the dio.  After I/O, bi_private is used to
284  * implement a singly-linked list of completed BIOs, at dio->bio_list.
285  */
286 static int dio_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
287 {
288 	struct dio *dio = bio->bi_private;
289 	unsigned long flags;
290 
291 	if (bio->bi_size)
292 		return 1;
293 
294 	spin_lock_irqsave(&dio->bio_lock, flags);
295 	bio->bi_private = dio->bio_list;
296 	dio->bio_list = bio;
297 	dio->bios_in_flight--;
298 	if (dio->waiter && dio->bios_in_flight == 0)
299 		wake_up_process(dio->waiter);
300 	spin_unlock_irqrestore(&dio->bio_lock, flags);
301 	return 0;
302 }
303 
304 static int
305 dio_bio_alloc(struct dio *dio, struct block_device *bdev,
306 		sector_t first_sector, int nr_vecs)
307 {
308 	struct bio *bio;
309 
310 	bio = bio_alloc(GFP_KERNEL, nr_vecs);
311 	if (bio == NULL)
312 		return -ENOMEM;
313 
314 	bio->bi_bdev = bdev;
315 	bio->bi_sector = first_sector;
316 	if (dio->is_async)
317 		bio->bi_end_io = dio_bio_end_aio;
318 	else
319 		bio->bi_end_io = dio_bio_end_io;
320 
321 	dio->bio = bio;
322 	return 0;
323 }
324 
325 /*
326  * In the AIO read case we speculatively dirty the pages before starting IO.
327  * During IO completion, any of these pages which happen to have been written
328  * back will be redirtied by bio_check_pages_dirty().
329  */
330 static void dio_bio_submit(struct dio *dio)
331 {
332 	struct bio *bio = dio->bio;
333 	unsigned long flags;
334 
335 	bio->bi_private = dio;
336 	spin_lock_irqsave(&dio->bio_lock, flags);
337 	dio->bio_count++;
338 	dio->bios_in_flight++;
339 	spin_unlock_irqrestore(&dio->bio_lock, flags);
340 	if (dio->is_async && dio->rw == READ)
341 		bio_set_pages_dirty(bio);
342 	submit_bio(dio->rw, bio);
343 
344 	dio->bio = NULL;
345 	dio->boundary = 0;
346 }
347 
348 /*
349  * Release any resources in case of a failure
350  */
351 static void dio_cleanup(struct dio *dio)
352 {
353 	while (dio_pages_present(dio))
354 		page_cache_release(dio_get_page(dio));
355 }
356 
357 /*
358  * Wait for the next BIO to complete.  Remove it and return it.
359  */
360 static struct bio *dio_await_one(struct dio *dio)
361 {
362 	unsigned long flags;
363 	struct bio *bio;
364 
365 	spin_lock_irqsave(&dio->bio_lock, flags);
366 	while (dio->bio_list == NULL) {
367 		set_current_state(TASK_UNINTERRUPTIBLE);
368 		if (dio->bio_list == NULL) {
369 			dio->waiter = current;
370 			spin_unlock_irqrestore(&dio->bio_lock, flags);
371 			blk_run_address_space(dio->inode->i_mapping);
372 			io_schedule();
373 			spin_lock_irqsave(&dio->bio_lock, flags);
374 			dio->waiter = NULL;
375 		}
376 		set_current_state(TASK_RUNNING);
377 	}
378 	bio = dio->bio_list;
379 	dio->bio_list = bio->bi_private;
380 	spin_unlock_irqrestore(&dio->bio_lock, flags);
381 	return bio;
382 }
383 
384 /*
385  * Process one completed BIO.  No locks are held.
386  */
387 static int dio_bio_complete(struct dio *dio, struct bio *bio)
388 {
389 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
390 	struct bio_vec *bvec = bio->bi_io_vec;
391 	int page_no;
392 
393 	if (!uptodate)
394 		dio->result = -EIO;
395 
396 	if (dio->is_async && dio->rw == READ) {
397 		bio_check_pages_dirty(bio);	/* transfers ownership */
398 	} else {
399 		for (page_no = 0; page_no < bio->bi_vcnt; page_no++) {
400 			struct page *page = bvec[page_no].bv_page;
401 
402 			if (dio->rw == READ && !PageCompound(page))
403 				set_page_dirty_lock(page);
404 			page_cache_release(page);
405 		}
406 		bio_put(bio);
407 	}
408 	finished_one_bio(dio);
409 	return uptodate ? 0 : -EIO;
410 }
411 
412 /*
413  * Wait on and process all in-flight BIOs.
414  */
415 static int dio_await_completion(struct dio *dio)
416 {
417 	int ret = 0;
418 
419 	if (dio->bio)
420 		dio_bio_submit(dio);
421 
422 	/*
423 	 * The bio_lock is not held for the read of bio_count.
424 	 * This is ok since it is the dio_bio_complete() that changes
425 	 * bio_count.
426 	 */
427 	while (dio->bio_count) {
428 		struct bio *bio = dio_await_one(dio);
429 		int ret2;
430 
431 		ret2 = dio_bio_complete(dio, bio);
432 		if (ret == 0)
433 			ret = ret2;
434 	}
435 	return ret;
436 }
437 
438 /*
439  * A really large O_DIRECT read or write can generate a lot of BIOs.  So
440  * to keep the memory consumption sane we periodically reap any completed BIOs
441  * during the BIO generation phase.
442  *
443  * This also helps to limit the peak amount of pinned userspace memory.
444  */
445 static int dio_bio_reap(struct dio *dio)
446 {
447 	int ret = 0;
448 
449 	if (dio->reap_counter++ >= 64) {
450 		while (dio->bio_list) {
451 			unsigned long flags;
452 			struct bio *bio;
453 			int ret2;
454 
455 			spin_lock_irqsave(&dio->bio_lock, flags);
456 			bio = dio->bio_list;
457 			dio->bio_list = bio->bi_private;
458 			spin_unlock_irqrestore(&dio->bio_lock, flags);
459 			ret2 = dio_bio_complete(dio, bio);
460 			if (ret == 0)
461 				ret = ret2;
462 		}
463 		dio->reap_counter = 0;
464 	}
465 	return ret;
466 }
467 
468 /*
469  * Call into the fs to map some more disk blocks.  We record the current number
470  * of available blocks at dio->blocks_available.  These are in units of the
471  * fs blocksize, (1 << inode->i_blkbits).
472  *
473  * The fs is allowed to map lots of blocks at once.  If it wants to do that,
474  * it uses the passed inode-relative block number as the file offset, as usual.
475  *
476  * get_blocks() is passed the number of i_blkbits-sized blocks which direct_io
477  * has remaining to do.  The fs should not map more than this number of blocks.
478  *
479  * If the fs has mapped a lot of blocks, it should populate bh->b_size to
480  * indicate how much contiguous disk space has been made available at
481  * bh->b_blocknr.
482  *
483  * If *any* of the mapped blocks are new, then the fs must set buffer_new().
484  * This isn't very efficient...
485  *
486  * In the case of filesystem holes: the fs may return an arbitrarily-large
487  * hole by returning an appropriate value in b_size and by clearing
488  * buffer_mapped().  However the direct-io code will only process holes one
489  * block at a time - it will repeatedly call get_blocks() as it walks the hole.
490  */
491 static int get_more_blocks(struct dio *dio)
492 {
493 	int ret;
494 	struct buffer_head *map_bh = &dio->map_bh;
495 	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
496 	unsigned long fs_count;	/* Number of filesystem-sized blocks */
497 	unsigned long dio_count;/* Number of dio_block-sized blocks */
498 	unsigned long blkmask;
499 	int create;
500 
501 	/*
502 	 * If there was a memory error and we've overwritten all the
503 	 * mapped blocks then we can now return that memory error
504 	 */
505 	ret = dio->page_errors;
506 	if (ret == 0) {
507 		map_bh->b_state = 0;
508 		map_bh->b_size = 0;
509 		BUG_ON(dio->block_in_file >= dio->final_block_in_request);
510 		fs_startblk = dio->block_in_file >> dio->blkfactor;
511 		dio_count = dio->final_block_in_request - dio->block_in_file;
512 		fs_count = dio_count >> dio->blkfactor;
513 		blkmask = (1 << dio->blkfactor) - 1;
514 		if (dio_count & blkmask)
515 			fs_count++;
516 
517 		create = dio->rw == WRITE;
518 		if (dio->lock_type == DIO_LOCKING) {
519 			if (dio->block_in_file < (i_size_read(dio->inode) >>
520 							dio->blkbits))
521 				create = 0;
522 		} else if (dio->lock_type == DIO_NO_LOCKING) {
523 			create = 0;
524 		}
525 		/*
526 		 * For writes inside i_size we forbid block creations: only
527 		 * overwrites are permitted.  We fall back to buffered writes
528 		 * at a higher level for inside-i_size block-instantiating
529 		 * writes.
530 		 */
531 		ret = (*dio->get_blocks)(dio->inode, fs_startblk, fs_count,
532 						map_bh, create);
533 	}
534 	return ret;
535 }
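
/*
 * For illustration only: a hedged sketch of a get_blocks() method which
 * honours the contract described above.  The name and the block lookup
 * are hypothetical; only the buffer_head conventions are real.
 *
 *	static int example_get_blocks(struct inode *inode, sector_t iblock,
 *			unsigned long max_blocks, struct buffer_head *bh,
 *			int create)
 *	{
 *		sector_t phys;
 *		unsigned long nr;	nr <= max_blocks contiguous blocks
 *
 *		nr = example_lookup(inode, iblock, max_blocks, &phys, create);
 *		if (nr == 0)
 *			return 0;	bh left unmapped: a hole
 *		map_bh(bh, inode->i_sb, phys);
 *		bh->b_size = nr << inode->i_blkbits;
 *		if (any_of_those_blocks_were_just_allocated)
 *			set_buffer_new(bh);
 *		return 0;
 *	}
 */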
536 
537 /*
538  * There is no bio.  Make one now.
539  */
540 static int dio_new_bio(struct dio *dio, sector_t start_sector)
541 {
542 	sector_t sector;
543 	int ret, nr_pages;
544 
545 	ret = dio_bio_reap(dio);
546 	if (ret)
547 		goto out;
548 	sector = start_sector << (dio->blkbits - 9);
549 	nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
550 	BUG_ON(nr_pages <= 0);
551 	ret = dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
552 	dio->boundary = 0;
553 out:
554 	return ret;
555 }
556 
557 /*
558  * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
559  * that was successful then update final_block_in_bio and take a ref against
560  * the just-added page.
561  *
562  * Return zero on success.  Non-zero means the caller needs to start a new BIO.
563  */
564 static int dio_bio_add_page(struct dio *dio)
565 {
566 	int ret;
567 
568 	ret = bio_add_page(dio->bio, dio->cur_page,
569 			dio->cur_page_len, dio->cur_page_offset);
570 	if (ret == dio->cur_page_len) {
571 		/*
572 		 * Decrement count only, if we are done with this page
573 		 */
574 		if ((dio->cur_page_len + dio->cur_page_offset) == PAGE_SIZE)
575 			dio->pages_in_io--;
576 		page_cache_get(dio->cur_page);
577 		dio->final_block_in_bio = dio->cur_page_block +
578 			(dio->cur_page_len >> dio->blkbits);
579 		ret = 0;
580 	} else {
581 		ret = 1;
582 	}
583 	return ret;
584 }
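
/*
 * Illustrative note: bio_add_page() returns the number of bytes it
 * accepted.  If the bio is already at its vector or size limit it
 * accepts less than cur_page_len (typically zero), we return 1, and
 * dio_send_cur_page() reacts by submitting this bio and retrying the
 * same chunk against a freshly allocated one.
 */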
585 
586 /*
587  * Put cur_page under IO.  The section of cur_page which is described by
588  * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
589  * starts on-disk at cur_page_block.
590  *
591  * We take a ref against the page here (on behalf of its presence in the bio).
592  *
593  * The caller of this function is responsible for removing cur_page from the
594  * dio, and for dropping the refcount which came from that presence.
595  */
596 static int dio_send_cur_page(struct dio *dio)
597 {
598 	int ret = 0;
599 
600 	if (dio->bio) {
601 		/*
602 		 * See whether this new request is contiguous with the old
603 		 */
604 		if (dio->final_block_in_bio != dio->cur_page_block)
605 			dio_bio_submit(dio);
606 		/*
607 		 * Submit now if the underlying fs is about to perform a
608 		 * metadata read
609 		 */
610 		if (dio->boundary)
611 			dio_bio_submit(dio);
612 	}
613 
614 	if (dio->bio == NULL) {
615 		ret = dio_new_bio(dio, dio->cur_page_block);
616 		if (ret)
617 			goto out;
618 	}
619 
620 	if (dio_bio_add_page(dio) != 0) {
621 		dio_bio_submit(dio);
622 		ret = dio_new_bio(dio, dio->cur_page_block);
623 		if (ret == 0) {
624 			ret = dio_bio_add_page(dio);
625 			BUG_ON(ret != 0);
626 		}
627 	}
628 out:
629 	return ret;
630 }
631 
632 /*
633  * An autonomous function to put a chunk of a page under deferred IO.
634  *
635  * The caller doesn't actually know (or care) whether this piece of page is in
636  * a BIO, or is under IO or whatever.  We just take care of all possible
637  * situations here.  The separation between the logic of do_direct_IO() and
638  * that of submit_page_section() is important for clarity.  Please don't break.
639  *
640  * The chunk of page starts on-disk at blocknr.
641  *
642  * We perform deferred IO, by recording the last-submitted page inside our
643  * private part of the dio structure.  If possible, we just expand the IO
644  * across that page here.
645  *
646  * If that doesn't work out then we put the old page into the bio and add this
647  * page to the dio instead.
648  */
649 static int
650 submit_page_section(struct dio *dio, struct page *page,
651 		unsigned offset, unsigned len, sector_t blocknr)
652 {
653 	int ret = 0;
654 
655 	/*
656 	 * Can we just grow the current page's presence in the dio?
657 	 */
658 	if (	(dio->cur_page == page) &&
659 		(dio->cur_page_offset + dio->cur_page_len == offset) &&
660 		(dio->cur_page_block +
661 			(dio->cur_page_len >> dio->blkbits) == blocknr)) {
662 		dio->cur_page_len += len;
663 
664 		/*
665 		 * If dio->boundary then we want to schedule the IO now to
666 		 * avoid metadata seeks.
667 		 */
668 		if (dio->boundary) {
669 			ret = dio_send_cur_page(dio);
670 			page_cache_release(dio->cur_page);
671 			dio->cur_page = NULL;
672 		}
673 		goto out;
674 	}
675 
676 	/*
677 	 * If there's a deferred page already there then send it.
678 	 */
679 	if (dio->cur_page) {
680 		ret = dio_send_cur_page(dio);
681 		page_cache_release(dio->cur_page);
682 		dio->cur_page = NULL;
683 		if (ret)
684 			goto out;
685 	}
686 
687 	page_cache_get(page);		/* It is in dio */
688 	dio->cur_page = page;
689 	dio->cur_page_offset = offset;
690 	dio->cur_page_len = len;
691 	dio->cur_page_block = blocknr;
692 out:
693 	return ret;
694 }
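
/*
 * Worked example (illustrative, hypothetical numbers): with 512-byte
 * dio_blocks, the two calls
 *
 *	submit_page_section(dio, page, 0,   512, 1000);
 *	submit_page_section(dio, page, 512, 512, 1001);
 *
 * coalesce into one deferred chunk (offset 0, len 1024, block 1000),
 * because the second chunk continues the first both within the page
 * and on disk.  A non-contiguous third call would first send the
 * merged chunk, then install itself as the new cur_page.
 */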
695 
696 /*
697  * Clean any dirty buffers in the blockdev mapping which alias newly-created
698  * file blocks.  Only called for S_ISREG files - blockdevs do not set
699  * buffer_new
700  */
701 static void clean_blockdev_aliases(struct dio *dio)
702 {
703 	unsigned i;
704 	unsigned nblocks;
705 
706 	nblocks = dio->map_bh.b_size >> dio->inode->i_blkbits;
707 
708 	for (i = 0; i < nblocks; i++) {
709 		unmap_underlying_metadata(dio->map_bh.b_bdev,
710 					dio->map_bh.b_blocknr + i);
711 	}
712 }
713 
714 /*
715  * If we are not writing the entire block and get_blocks() allocated
716  * the block for us, we need to fill in the unused portion of the
717  * block with zeros.  This happens only if the user buffer, file offset
718  * or IO length is not a multiple of the filesystem block size.
719  *
720  * `end' is zero if we're doing the start of the IO, 1 at the end of the
721  * IO.
722  */
723 static void dio_zero_block(struct dio *dio, int end)
724 {
725 	unsigned dio_blocks_per_fs_block;
726 	unsigned this_chunk_blocks;	/* In dio_blocks */
727 	unsigned this_chunk_bytes;
728 	struct page *page;
729 
730 	dio->start_zero_done = 1;
731 	if (!dio->blkfactor || !buffer_new(&dio->map_bh))
732 		return;
733 
734 	dio_blocks_per_fs_block = 1 << dio->blkfactor;
735 	this_chunk_blocks = dio->block_in_file & (dio_blocks_per_fs_block - 1);
736 
737 	if (!this_chunk_blocks)
738 		return;
739 
740 	/*
741 	 * We need to zero out part of an fs block.  It is either at the
742 	 * beginning or the end of the fs block.
743 	 */
744 	if (end)
745 		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;
746 
747 	this_chunk_bytes = this_chunk_blocks << dio->blkbits;
748 
749 	page = ZERO_PAGE(dio->curr_user_address);
750 	if (submit_page_section(dio, page, 0, this_chunk_bytes,
751 				dio->next_block_for_io))
752 		return;
753 
754 	dio->next_block_for_io += this_chunk_blocks;
755 }
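
/*
 * Worked example (illustrative): with blkfactor == 3 (512-byte
 * dio_blocks in 4096-byte fs blocks), a write beginning at dio_block 21
 * starts 5 dio_blocks into a newly allocated fs block, so the start
 * pass (end == 0) queues 5 * 512 == 2560 bytes of ZERO_PAGE ahead of
 * the data.  If the write also ends 5 dio_blocks into its final new
 * fs block, the end pass (end == 1) zeroes the remaining 3 dio_blocks,
 * i.e. 1536 bytes.
 */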
756 
757 /*
758  * Walk the user pages, and the file, mapping blocks to disk and generating
759  * a sequence of (page,offset,len,block) mappings.  These mappings are injected
760  * into submit_page_section(), which takes care of the next stage of submission.
761  *
762  * Direct IO against a blockdev is different from a file, because we can
763  * happily perform page-sized but 512-byte aligned IOs.  It is important that
764  * blockdev IO be able to have fine alignment and large sizes.
765  *
766  * So what we do is to permit the ->get_blocks function to populate bh.b_size
767  * with the size of IO which is permitted at this offset and this i_blkbits.
768  *
769  * For best results, the blockdev should be set up with 512-byte i_blkbits and
770  * it should set b_size to PAGE_SIZE or more inside get_blocks().  This gives
771  * fine alignment but still allows this function to work in PAGE_SIZE units.
772  */
773 static int do_direct_IO(struct dio *dio)
774 {
775 	const unsigned blkbits = dio->blkbits;
776 	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
777 	struct page *page;
778 	unsigned block_in_page;
779 	struct buffer_head *map_bh = &dio->map_bh;
780 	int ret = 0;
781 
782 	/* The I/O can start at any block offset within the first page */
783 	block_in_page = dio->first_block_in_page;
784 
785 	while (dio->block_in_file < dio->final_block_in_request) {
786 		page = dio_get_page(dio);
787 		if (IS_ERR(page)) {
788 			ret = PTR_ERR(page);
789 			goto out;
790 		}
791 
792 		while (block_in_page < blocks_per_page) {
793 			unsigned offset_in_page = block_in_page << blkbits;
794 			unsigned this_chunk_bytes;	/* # of bytes mapped */
795 			unsigned this_chunk_blocks;	/* # of blocks */
796 			unsigned u;
797 
798 			if (dio->blocks_available == 0) {
799 				/*
800 				 * Need to go and map some more disk
801 				 */
802 				unsigned long blkmask;
803 				unsigned long dio_remainder;
804 
805 				ret = get_more_blocks(dio);
806 				if (ret) {
807 					page_cache_release(page);
808 					goto out;
809 				}
810 				if (!buffer_mapped(map_bh))
811 					goto do_holes;
812 
813 				dio->blocks_available =
814 						map_bh->b_size >> dio->blkbits;
815 				dio->next_block_for_io =
816 					map_bh->b_blocknr << dio->blkfactor;
817 				if (buffer_new(map_bh))
818 					clean_blockdev_aliases(dio);
819 
820 				if (!dio->blkfactor)
821 					goto do_holes;
822 
823 				blkmask = (1 << dio->blkfactor) - 1;
824 				dio_remainder = (dio->block_in_file & blkmask);
825 
826 				/*
827 				 * If we are at the start of IO and that IO
828 				 * starts partway into a fs-block,
829 				 * dio_remainder will be non-zero.  If the IO
830 				 * is a read then we can simply advance the IO
831 				 * cursor to the first block which is to be
832 				 * read.  But if the IO is a write and the
833 				 * block was newly allocated we cannot do that;
834 				 * the start of the fs block must be zeroed out
835 				 * on-disk
836 				 */
837 				if (!buffer_new(map_bh))
838 					dio->next_block_for_io += dio_remainder;
839 				dio->blocks_available -= dio_remainder;
840 			}
841 do_holes:
842 			/* Handle holes */
843 			if (!buffer_mapped(map_bh)) {
844 				char *kaddr;
845 
846 				/* AKPM: eargh, -ENOTBLK is a hack */
847 				if (dio->rw == WRITE) {
848 					page_cache_release(page);
849 					return -ENOTBLK;
850 				}
851 
852 				if (dio->block_in_file >=
853 					i_size_read(dio->inode)>>blkbits) {
854 					/* We hit eof */
855 					page_cache_release(page);
856 					goto out;
857 				}
858 				kaddr = kmap_atomic(page, KM_USER0);
859 				memset(kaddr + (block_in_page << blkbits),
860 						0, 1 << blkbits);
861 				flush_dcache_page(page);
862 				kunmap_atomic(kaddr, KM_USER0);
863 				dio->block_in_file++;
864 				block_in_page++;
865 				goto next_block;
866 			}
867 
868 			/*
869 			 * If we're performing IO which has an alignment which
870 			 * is finer than the underlying fs, go check to see if
871 			 * we must zero out the start of this block.
872 			 */
873 			if (unlikely(dio->blkfactor && !dio->start_zero_done))
874 				dio_zero_block(dio, 0);
875 
876 			/*
877 			 * Work out, in this_chunk_blocks, how much disk we
878 			 * can add to this page
879 			 */
880 			this_chunk_blocks = dio->blocks_available;
881 			u = (PAGE_SIZE - offset_in_page) >> blkbits;
882 			if (this_chunk_blocks > u)
883 				this_chunk_blocks = u;
884 			u = dio->final_block_in_request - dio->block_in_file;
885 			if (this_chunk_blocks > u)
886 				this_chunk_blocks = u;
887 			this_chunk_bytes = this_chunk_blocks << blkbits;
888 			BUG_ON(this_chunk_bytes == 0);
889 
890 			dio->boundary = buffer_boundary(map_bh);
891 			ret = submit_page_section(dio, page, offset_in_page,
892 				this_chunk_bytes, dio->next_block_for_io);
893 			if (ret) {
894 				page_cache_release(page);
895 				goto out;
896 			}
897 			dio->next_block_for_io += this_chunk_blocks;
898 
899 			dio->block_in_file += this_chunk_blocks;
900 			block_in_page += this_chunk_blocks;
901 			dio->blocks_available -= this_chunk_blocks;
902 next_block:
903 			if (dio->block_in_file > dio->final_block_in_request)
904 				BUG();
905 			if (dio->block_in_file == dio->final_block_in_request)
906 				break;
907 		}
908 
909 		/* Drop the ref which was taken in get_user_pages() */
910 		page_cache_release(page);
911 		block_in_page = 0;
912 	}
913 out:
914 	return ret;
915 }
916 
917 /*
918  * Releases both i_sem and i_alloc_sem
919  */
920 static ssize_t
921 direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
922 	const struct iovec *iov, loff_t offset, unsigned long nr_segs,
923 	unsigned blkbits, get_blocks_t get_blocks, dio_iodone_t end_io,
924 	struct dio *dio)
925 {
926 	unsigned long user_addr;
927 	int seg;
928 	ssize_t ret = 0;
929 	ssize_t ret2;
930 	size_t bytes;
931 
932 	dio->bio = NULL;
933 	dio->inode = inode;
934 	dio->rw = rw;
935 	dio->blkbits = blkbits;
936 	dio->blkfactor = inode->i_blkbits - blkbits;
937 	dio->start_zero_done = 0;
938 	dio->size = 0;
939 	dio->block_in_file = offset >> blkbits;
940 	dio->blocks_available = 0;
941 	dio->cur_page = NULL;
942 
943 	dio->boundary = 0;
944 	dio->reap_counter = 0;
945 	dio->get_blocks = get_blocks;
946 	dio->end_io = end_io;
947 	dio->map_bh.b_private = NULL;
948 	dio->final_block_in_bio = -1;
949 	dio->next_block_for_io = -1;
950 
951 	dio->page_errors = 0;
952 	dio->result = 0;
953 	dio->iocb = iocb;
954 
955 	/*
956 	 * BIO completion state.
957 	 *
958 	 * ->bio_count starts out at one, and we decrement it to zero after all
959 	 * BIOs are submitted.  This to avoid the situation where a really fast
960 	 * BIOs are submitted.  This is to avoid the situation where a really fast
961 	 * still submitting BIOs.
962 	 */
963 	dio->bio_count = 1;
964 	dio->bios_in_flight = 0;
965 	spin_lock_init(&dio->bio_lock);
966 	dio->bio_list = NULL;
967 	dio->waiter = NULL;
968 
969 	/*
970 	 * In the case of non-aligned buffers, we may need two more
971 	 * pages, since we need to zero out the first and last blocks.
972 	 */
973 	if (unlikely(dio->blkfactor))
974 		dio->pages_in_io = 2;
975 	else
976 		dio->pages_in_io = 0;
977 
978 	for (seg = 0; seg < nr_segs; seg++) {
979 		user_addr = (unsigned long)iov[seg].iov_base;
980 		dio->pages_in_io +=
981 			((user_addr + iov[seg].iov_len + PAGE_SIZE - 1) / PAGE_SIZE
982 				- user_addr / PAGE_SIZE);
983 	}
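
	/*
	 * Worked example of the sum above (illustrative, hypothetical
	 * values, 4096-byte pages): user_addr == 4608 and iov_len == 8192
	 * span bytes 4608..12799, i.e. pages 1, 2 and 3:
	 *
	 *	(4608 + 8192 + 4095)/4096 - 4608/4096  ==  4 - 1  ==  3
	 */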
984 
985 	for (seg = 0; seg < nr_segs; seg++) {
986 		user_addr = (unsigned long)iov[seg].iov_base;
987 		dio->size += bytes = iov[seg].iov_len;
988 
989 		/* Index into the first page of the first block */
990 		dio->first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
991 		dio->final_block_in_request = dio->block_in_file +
992 						(bytes >> blkbits);
993 		/* Page fetching state */
994 		dio->head = 0;
995 		dio->tail = 0;
996 		dio->curr_page = 0;
997 
998 		dio->total_pages = 0;
999 		if (user_addr & (PAGE_SIZE-1)) {
1000 			dio->total_pages++;
1001 			bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
1002 		}
1003 		dio->total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
1004 		dio->curr_user_address = user_addr;
1005 
1006 		ret = do_direct_IO(dio);
1007 
1008 		dio->result += iov[seg].iov_len -
1009 			((dio->final_block_in_request - dio->block_in_file) <<
1010 					blkbits);
1011 
1012 		if (ret) {
1013 			dio_cleanup(dio);
1014 			break;
1015 		}
1016 	} /* end iovec loop */
1017 
1018 	if (ret == -ENOTBLK && rw == WRITE) {
1019 		/*
1020 		 * The remaining part of the request will be
1021 		 * handled by buffered I/O when we return
1022 		 */
1023 		ret = 0;
1024 	}
1025 	/*
1026 	 * There may be some unwritten disk at the end of a part-written
1027 	 * fs-block-sized block.  Go zero that now.
1028 	 */
1029 	dio_zero_block(dio, 1);
1030 
1031 	if (dio->cur_page) {
1032 		ret2 = dio_send_cur_page(dio);
1033 		if (ret == 0)
1034 			ret = ret2;
1035 		page_cache_release(dio->cur_page);
1036 		dio->cur_page = NULL;
1037 	}
1038 	if (dio->bio)
1039 		dio_bio_submit(dio);
1040 
1041 	/*
1042 	 * It is possible that we return short IO due to end of file.
1043 	 * In that case, we need to release all the pages we got hold of.
1044 	 */
1045 	dio_cleanup(dio);
1046 
1047 	/*
1048 	 * All block lookups have been performed. For READ requests
1049 	 * we can let i_sem go now that it's achieved its purpose
1050 	 * of protecting us from looking up uninitialized blocks.
1051 	 */
1052 	if ((rw == READ) && (dio->lock_type == DIO_LOCKING))
1053 		up(&dio->inode->i_sem);
1054 
1055 	/*
1056 	 * OK, all BIOs are submitted, so we can decrement bio_count to truly
1057 	 * reflect the number of to-be-processed BIOs.
1058 	 */
1059 	if (dio->is_async) {
1060 		int should_wait = 0;
1061 
1062 		if (dio->result < dio->size && rw == WRITE) {
1063 			dio->waiter = current;
1064 			should_wait = 1;
1065 		}
1066 		if (ret == 0)
1067 			ret = dio->result;
1068 		finished_one_bio(dio);		/* This can free the dio */
1069 		blk_run_address_space(inode->i_mapping);
1070 		if (should_wait) {
1071 			unsigned long flags;
1072 			/*
1073 			 * Wait for already issued I/O to drain out and
1074 			 * release its references to user-space pages
1075 			 * before returning to fallback on buffered I/O
1076 			 */
1077 
1078 			spin_lock_irqsave(&dio->bio_lock, flags);
1079 			set_current_state(TASK_UNINTERRUPTIBLE);
1080 			while (dio->bio_count) {
1081 				spin_unlock_irqrestore(&dio->bio_lock, flags);
1082 				io_schedule();
1083 				spin_lock_irqsave(&dio->bio_lock, flags);
1084 				set_current_state(TASK_UNINTERRUPTIBLE);
1085 			}
1086 			spin_unlock_irqrestore(&dio->bio_lock, flags);
1087 			set_current_state(TASK_RUNNING);
1088 			kfree(dio);
1089 		}
1090 	} else {
1091 		ssize_t transferred = 0;
1092 
1093 		finished_one_bio(dio);
1094 		ret2 = dio_await_completion(dio);
1095 		if (ret == 0)
1096 			ret = ret2;
1097 		if (ret == 0)
1098 			ret = dio->page_errors;
1099 		if (dio->result) {
1100 			loff_t i_size = i_size_read(inode);
1101 
1102 			transferred = dio->result;
1103 			/*
1104 			 * Adjust the return value if the read crossed a
1105 			 * non-block-aligned EOF.
1106 			 */
1107 			if (rw == READ && (offset + transferred > i_size))
1108 				transferred = i_size - offset;
1109 		}
1110 		dio_complete(dio, offset, transferred);
1111 		if (ret == 0)
1112 			ret = transferred;
1113 
1114 		/* We could have also come here on an AIO file extend */
1115 		if (!is_sync_kiocb(iocb) && rw == WRITE &&
1116 		    ret >= 0 && dio->result == dio->size)
1117 			/*
1118 			 * For AIO writes where we have completed the
1119 			 * i/o, we have to mark the aio complete.
1120 			 */
1121 			aio_complete(iocb, ret, 0);
1122 		kfree(dio);
1123 	}
1124 	return ret;
1125 }
1126 
1127 /*
1128  * This is a library function for use by filesystem drivers.
1129  * The locking rules are governed by the dio_lock_type parameter.
1130  *
1131  * DIO_NO_LOCKING (no locking, for raw block device access)
1132  * For writes, i_sem is not held on entry; it is never taken.
1133  *
1134  * DIO_LOCKING (simple locking for regular files)
1135  * For writes we are called under i_sem and return with i_sem held, even though
1136  * it is internally dropped.
1137  * For reads, i_sem is not held on entry, but it is taken and dropped before
1138  * returning.
1139  *
1140  * DIO_OWN_LOCKING (filesystem provides synchronisation and handling of
1141  *	uninitialised data, allowing parallel direct readers and writers)
1142  * For writes we are called without i_sem, return without it, never touch it.
1143  * For reads, i_sem is held on entry and will be released before returning.
1144  *
1145  * Additional i_alloc_sem locking requirements described inline below.
1146  */
1147 ssize_t
1148 __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1149 	struct block_device *bdev, const struct iovec *iov, loff_t offset,
1150 	unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io,
1151 	int dio_lock_type)
1152 {
1153 	int seg;
1154 	size_t size;
1155 	unsigned long addr;
1156 	unsigned blkbits = inode->i_blkbits;
1157 	unsigned bdev_blkbits = 0;
1158 	unsigned blocksize_mask = (1 << blkbits) - 1;
1159 	ssize_t retval = -EINVAL;
1160 	loff_t end = offset;
1161 	struct dio *dio;
1162 	int reader_with_isem = (rw == READ && dio_lock_type == DIO_OWN_LOCKING);
1163 
1164 	if (rw & WRITE)
1165 		current->flags |= PF_SYNCWRITE;
1166 
1167 	if (bdev)
1168 		bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev));
1169 
1170 	if (offset & blocksize_mask) {
1171 		if (bdev)
1172 			 blkbits = bdev_blkbits;
1173 		blocksize_mask = (1 << blkbits) - 1;
1174 		if (offset & blocksize_mask)
1175 			goto out;
1176 	}
1177 
1178 	/* Check the memory alignment.  Blocks cannot straddle pages */
1179 	for (seg = 0; seg < nr_segs; seg++) {
1180 		addr = (unsigned long)iov[seg].iov_base;
1181 		size = iov[seg].iov_len;
1182 		end += size;
1183 		if ((addr & blocksize_mask) || (size & blocksize_mask))  {
1184 			if (bdev)
1185 				 blkbits = bdev_blkbits;
1186 			blocksize_mask = (1 << blkbits) - 1;
1187 			if ((addr & blocksize_mask) || (size & blocksize_mask))
1188 				goto out;
1189 		}
1190 	}
1191 
1192 	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
1193 	retval = -ENOMEM;
1194 	if (!dio)
1195 		goto out;
1196 
1197 	/*
1198 	 * For block device access DIO_NO_LOCKING is used,
1199 	 *	neither readers nor writers do any locking at all
1200 	 * For regular files using DIO_LOCKING,
1201 	 *	readers need to grab i_sem and i_alloc_sem
1202 	 *	writers need to grab i_alloc_sem only (i_sem is already held)
1203 	 * For regular files using DIO_OWN_LOCKING,
1204 	 *	neither readers nor writers take any locks here
1205 	 *	(for reads, i_sem is held on entry and is released here)
1206 	 */
1207 	dio->lock_type = dio_lock_type;
1208 	if (dio_lock_type != DIO_NO_LOCKING) {
1209 		/* watch out for a 0 len io from a tricksy fs */
1210 		if (rw == READ && end > offset) {
1211 			struct address_space *mapping;
1212 
1213 			mapping = iocb->ki_filp->f_mapping;
1214 			if (dio_lock_type != DIO_OWN_LOCKING) {
1215 				down(&inode->i_sem);
1216 				reader_with_isem = 1;
1217 			}
1218 
1219 			retval = filemap_write_and_wait_range(mapping, offset,
1220 							      end - 1);
1221 			if (retval) {
1222 				kfree(dio);
1223 				goto out;
1224 			}
1225 
1226 			if (dio_lock_type == DIO_OWN_LOCKING) {
1227 				up(&inode->i_sem);
1228 				reader_with_isem = 0;
1229 			}
1230 		}
1231 
1232 		if (dio_lock_type == DIO_LOCKING)
1233 			down_read(&inode->i_alloc_sem);
1234 	}
1235 
1236 	/*
1237 	 * For file-extending writes, updating i_size before data
1238 	 * writeouts complete can expose uninitialized blocks. So
1239 	 * even for AIO, we need to wait for i/o to complete before
1240 	 * returning in this case.
1241 	 */
1242 	dio->is_async = !is_sync_kiocb(iocb) && !((rw == WRITE) &&
1243 		(end > i_size_read(inode)));
1244 
1245 	retval = direct_io_worker(rw, iocb, inode, iov, offset,
1246 				nr_segs, blkbits, get_blocks, end_io, dio);
1247 
1248 	if (rw == READ && dio_lock_type == DIO_LOCKING)
1249 		reader_with_isem = 0;
1250 
1251 out:
1252 	if (reader_with_isem)
1253 		up(&inode->i_sem);
1254 	if (rw & WRITE)
1255 		current->flags &= ~PF_SYNCWRITE;
1256 	return retval;
1257 }
1258 EXPORT_SYMBOL(__blockdev_direct_IO);
1259
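
/*
 * For illustration only: a hedged sketch of how a filesystem typically
 * exposes this helper as its ->direct_IO address_space operation.  The
 * wrapper and its get_blocks callback are hypothetical; DIO_LOCKING is
 * the usual choice for a regular-file filesystem wanting the simple
 * locking described above.
 *
 *	static ssize_t example_direct_IO(int rw, struct kiocb *iocb,
 *			const struct iovec *iov, loff_t offset,
 *			unsigned long nr_segs)
 *	{
 *		struct file *file = iocb->ki_filp;
 *		struct inode *inode = file->f_mapping->host;
 *
 *		return __blockdev_direct_IO(rw, iocb, inode,
 *				inode->i_sb->s_bdev, iov, offset, nr_segs,
 *				example_get_blocks, NULL, DIO_LOCKING);
 *	}
 */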