xref: /openbmc/linux/block/fops.c (revision e7f127b2)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/module.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

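/*
 * A block device is a linear array of blocks, so the get_block callback
 * used by the buffer_head helpers below is trivial: logical block N of
 * the inode maps 1:1 to block N of the device, every block "exists", and
 * the create argument can be ignored since nothing is ever allocated.
 */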
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
	unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/*
	 * Use FUA for O_DSYNC writes so the data is durable when the write
	 * completes, avoiding the need for an I/O completion work item.
	 */
	if (iocb->ki_flags & IOCB_DSYNC)
		op |= REQ_FUA;
	return op;
}

#define DIO_INLINE_BIO_VECS 4

static void blkdev_bio_end_io_simple(struct bio *bio)
{
	struct task_struct *waiter = bio->bi_private;

	WRITE_ONCE(bio->bi_private, NULL);
	blk_wake_io_task(waiter);
}

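/*
 * Fast path for small synchronous direct I/O: build a single bio on the
 * stack (with the bio_vec array inline for requests of up to
 * DIO_INLINE_BIO_VECS segments), submit it, and sleep in-line until
 * blkdev_bio_end_io_simple() clears ->bi_private and wakes us up.
 */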
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, unsigned int nr_pages)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	bio_init(&bio, vecs, nr_pages);
	bio_set_dev(&bio, bdev);
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_write_hint = iocb->ki_hint;
	bio.bi_private = current;
	bio.bi_end_io = blkdev_bio_end_io_simple;
	bio.bi_ioprio = iocb->ki_ioprio;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == READ) {
		bio.bi_opf = REQ_OP_READ;
		if (iter_is_iovec(iter))
			should_dirty = true;
	} else {
		bio.bi_opf = dio_bio_write_op(iocb);
		task_io_account_write(ret);
	}
	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;
	if (iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(&bio, iocb);

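	/*
	 * Submit and wait in-line.  The completion handler clears
	 * ->bi_private, which is what terminates this loop.  For polled I/O
	 * (IOCB_HIPRI) we spin in bio_poll() instead of sleeping, falling
	 * back to blk_io_schedule() only when polling finds nothing.
	 */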
	submit_bio(&bio);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio.bi_private))
			break;
		if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, NULL, 0))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

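/*
 * State for the multi-bio and async direct I/O paths.  The blkdev_dio is
 * allocated in front of the first bio by way of blkdev_dio_pool's front
 * padding (see blkdev_init() at the bottom of this file), so a single
 * mempool allocation produces both.  The union reflects the two completion
 * models: async I/O completes a kiocb, sync I/O wakes the submitting task.
 */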
enum {
	DIO_SHOULD_DIRTY	= 1,
	DIO_IS_SYNC		= 2,
};

struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	unsigned int		flags;
	struct bio		bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

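/*
 * Completion handler for the multi-bio path.  The first error seen is
 * latched into the parent dio's bio status; whoever drops the last
 * reference either completes the iocb (async) or wakes the submitter
 * (sync).  Pages dirtied ahead of a read into user memory go through
 * bio_check_pages_dirty(), which re-dirties any page the VM cleaned
 * while the I/O was in flight.
 */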
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (atomic_dec_and_test(&dio->ref)) {
		if (!(dio->flags & DIO_IS_SYNC)) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

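/*
 * Slower path for direct I/O that needs more than BIO_MAX_VECS segments:
 * carve the request into a chain of bios submitted under a plug, with
 * dio->ref counting the bios still in flight.
 */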
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		unsigned int nr_pages)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);

	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure that the dio structure embedded
	 * in the first bio stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && iter_is_iovec(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

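	/*
	 * Each pass fills one bio with as much of the iter as fits.  If the
	 * iter is not exhausted, take another dio reference and allocate a
	 * follow-on bio; only the first bio carries the embedded dio.
	 */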
	for (;;) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_write_hint = iocb->ki_hint;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}

		if (is_read) {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			bio->bi_opf = dio_bio_write_op(iocb);
			task_io_account_write(bio->bi_iter.bi_size);
		}
		if (iocb->ki_flags & IOCB_NOWAIT)
			bio->bi_opf |= REQ_NOWAIT;

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(GFP_KERNEL, nr_pages);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}

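/*
 * Completion handler for the single-bio async path.  Since the dio is
 * embedded in the bio there is no separate reference count: complete the
 * iocb, and the final bio_put() frees both objects at once.
 */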
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

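/*
 * Fast path for async direct I/O that fits in a single bio: complete the
 * kiocb straight from the bi_end_io handler, with no atomic reference
 * counting.  bvec-backed iterators (e.g. io_uring fixed buffers) skip
 * page pinning entirely via bio_iov_bvec_set().
 */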
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					unsigned int nr_pages)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_write_hint = iocb->ki_hint;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}
	}
	dio->size = bio->bi_iter.bi_size;

	if (iov_iter_rw(iter) == READ) {
		bio->bi_opf = REQ_OP_READ;
		if (iter_is_iovec(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		bio->bi_opf = dio_bio_write_op(iocb);
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED | REQ_NOWAIT;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		if (iocb->ki_flags & IOCB_NOWAIT)
			bio->bi_opf |= REQ_NOWAIT;
		submit_bio(bio);
	}
	return -EIOCBQUEUED;
}

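/*
 * Dispatcher for the three direct I/O implementations above.  Asking
 * bio_iov_vecs_to_alloc() for up to BIO_MAX_VECS + 1 vectors is a cheap
 * way to learn whether the request overflows a single bio: if it does,
 * fall back to the multi-bio path.
 */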
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS)) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, nr_pages);
	}
	return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}

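/*
 * The buffered I/O side of block device files: standard buffer_head based
 * helpers wired up with the trivial blkdev_get_block() above.
 */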
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags, struct page **pagep,
		void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct page *page,
		void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}

static int blkdev_writepages(struct address_space *mapping,
			     struct writeback_control *wbc)
{
	return generic_writepages(mapping, wbc);
}

const struct address_space_operations def_blk_aops = {
	.set_page_dirty	= __set_page_dirty_buffers,
	.readpage	= blkdev_readpage,
	.readahead	= blkdev_readahead,
	.writepage	= blkdev_writepage,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.writepages	= blkdev_writepages,
	.direct_IO	= blkdev_direct_IO,
	.migratepage	= buffer_migrate_page_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};

/*
 * For a block special file, file_inode(file)->i_size is zero, so the
 * size has to be computed by hand from the block device inode.
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

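/*
 * fsync on a block device: write back and wait on the page cache, then
 * ask the device to flush its volatile write cache.  -EOPNOTSUPP means
 * the device has no such cache, so it is not an error.
 */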
static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = filp->private_data;
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly. Some mkfs
	 * binaries need it. We might want to drop this workaround
	 * during an unstable development cycle.
	 */
	filp->f_flags |= O_LARGEFILE;
	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

	if (filp->f_flags & O_NDELAY)
		filp->f_mode |= FMODE_NDELAY;
	if (filp->f_flags & O_EXCL)
		filp->f_mode |= FMODE_EXCL;
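	/*
	 * An access mode of 3 (both O_ACCMODE bits set) is a historical
	 * special case meaning "ioctl only": the file can be neither read
	 * nor written, but FMODE_WRITE_IOCTL still permits ioctls that
	 * would otherwise require write access.
	 */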
	if ((filp->f_flags & O_ACCMODE) == 3)
		filp->f_mode |= FMODE_WRITE_IOCTL;

	bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	filp->private_data = bdev;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	return 0;
}

static int blkdev_close(struct inode *inode, struct file *filp)
{
	struct block_device *bdev = filp->private_data;

	blkdev_put(bdev, filp->f_mode);
	return 0;
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver, which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	struct inode *bd_inode = bdev->bd_inode;
	loff_t size = bdev_nr_bytes(bdev);
	struct blk_plug plug;
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	blk_start_plug(&plug);
	ret = __generic_file_write_iter(iocb, from);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	blk_finish_plug(&plug);
	return ret;
}

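/*
 * Reads are clamped to the device size.  O_DIRECT reads go through
 * blkdev_direct_IO() first; anything it did not consume (e.g. after a
 * partial direct read) falls back to buffered I/O via filemap_read().
 */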
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	loff_t size = bdev_nr_bytes(bdev);
	size_t count = iov_iter_count(to);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;

	if (unlikely(pos + count > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		if (count > size) {
			shorted = count - size;
			iov_iter_truncate(to, size);
		}
	}

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = iocb->ki_filp->f_mapping;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
						iocb->ki_pos + count - 1))
				return -EAGAIN;
		} else {
			ret = filemap_write_and_wait_range(mapping,
						iocb->ki_pos,
						iocb->ki_pos + count - 1);
			if (ret < 0)
				return ret;
		}

		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret >= 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		if (ret < 0 || !count)
			return ret;
	}

	ret = filemap_read(iocb, to, ret);

	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}

#define	BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

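/*
 * fallocate() on a block device maps the supported mode combinations onto
 * block layer primitives: zero-range uses a zeroout that must not unmap,
 * punch-hole uses a zeroout that must not fall back to writing zeroes,
 * and punch-hole plus no-hide-stale becomes a plain discard.
 */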
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	filemap_invalidate_lock(inode->i_mapping);

	/* Invalidate the page cache, including dirty pages. */
	error = truncate_bdev_range(bdev, file->f_mode, start, end);
	if (error)
		goto fail;

	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOFALLBACK);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
		error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL, 0);
		break;
	default:
		error = -EOPNOTSUPP;
	}

 fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_close,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap		= generic_file_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
};

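/*
 * Set up the bio_set backing bio_alloc_kiocb().  The front pad places a
 * struct blkdev_dio immediately before each bio, and the per-cpu cache
 * speeds up allocation for polled I/O.
 */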
static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
				offsetof(struct blkdev_dio, bio),
				BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);
709