// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/module.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
        return file->f_mapping->host;
}

static int blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}

static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
        unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

        /* avoid the need for an I/O completion work item */
        if (iocb->ki_flags & IOCB_DSYNC)
                op |= REQ_FUA;
        return op;
}

#define DIO_INLINE_BIO_VECS 4

static void blkdev_bio_end_io_simple(struct bio *bio)
{
        struct task_struct *waiter = bio->bi_private;

        WRITE_ONCE(bio->bi_private, NULL);
        blk_wake_io_task(waiter);
}

static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
                struct iov_iter *iter, unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
        loff_t pos = iocb->ki_pos;
        bool should_dirty = false;
        struct bio bio;
        ssize_t ret;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;
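
        /*
         * Requests with at most DIO_INLINE_BIO_VECS segments can use the
         * on-stack vector; larger ones need a temporary allocation, which
         * is freed at "out:" below.
         */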
        if (nr_pages <= DIO_INLINE_BIO_VECS)
                vecs = inline_vecs;
        else {
                vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
                                     GFP_KERNEL);
                if (!vecs)
                        return -ENOMEM;
        }

        bio_init(&bio, vecs, nr_pages);
        bio_set_dev(&bio, bdev);
        bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio.bi_write_hint = iocb->ki_hint;
        bio.bi_private = current;
        bio.bi_end_io = blkdev_bio_end_io_simple;
        bio.bi_ioprio = iocb->ki_ioprio;

        ret = bio_iov_iter_get_pages(&bio, iter);
        if (unlikely(ret))
                goto out;
        ret = bio.bi_iter.bi_size;

        if (iov_iter_rw(iter) == READ) {
                bio.bi_opf = REQ_OP_READ;
                if (iter_is_iovec(iter))
                        should_dirty = true;
        } else {
                bio.bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(ret);
        }
        if (iocb->ki_flags & IOCB_NOWAIT)
                bio.bi_opf |= REQ_NOWAIT;
        if (iocb->ki_flags & IOCB_HIPRI)
                bio_set_polled(&bio, iocb);

        submit_bio(&bio);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(bio.bi_private))
                        break;
                if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, NULL, 0))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        bio_release_pages(&bio, should_dirty);
        if (unlikely(bio.bi_status))
                ret = blk_status_to_errno(bio.bi_status);

out:
        if (vecs != inline_vecs)
                kfree(vecs);

        bio_uninit(&bio);

        return ret;
}

enum {
        DIO_SHOULD_DIRTY        = 1,
        DIO_IS_SYNC             = 2,
};
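
/*
 * Per-request state for the dio paths below.  The structure is embedded in
 * the request's first bio (allocated from blkdev_dio_pool), so that bio has
 * to stay around until the whole request has completed.
 */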
struct blkdev_dio {
        union {
                struct kiocb            *iocb;
                struct task_struct      *waiter;
        };
        size_t                  size;
        atomic_t                ref;
        unsigned int            flags;
        struct bio              bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

static void blkdev_bio_end_io(struct bio *bio)
{
        struct blkdev_dio *dio = bio->bi_private;
        bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

        if (bio->bi_status && !dio->bio.bi_status)
                dio->bio.bi_status = bio->bi_status;

        if (atomic_dec_and_test(&dio->ref)) {
                if (!(dio->flags & DIO_IS_SYNC)) {
                        struct kiocb *iocb = dio->iocb;
                        ssize_t ret;

                        WRITE_ONCE(iocb->private, NULL);

                        if (likely(!dio->bio.bi_status)) {
                                ret = dio->size;
                                iocb->ki_pos += ret;
                        } else {
                                ret = blk_status_to_errno(dio->bio.bi_status);
                        }

                        dio->iocb->ki_complete(iocb, ret);
                        bio_put(&dio->bio);
                } else {
                        struct task_struct *waiter = dio->waiter;

                        WRITE_ONCE(dio->waiter, NULL);
                        blk_wake_io_task(waiter);
                }
        }

        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}
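
/*
 * Multi-bio direct I/O path, used when a request needs more than
 * BIO_MAX_VECS segments.  dio->ref counts the bios in flight; the request
 * is completed from blkdev_bio_end_io() once the last of them finishes.
 */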
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
        bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);

        dio = container_of(bio, struct blkdev_dio, bio);
        atomic_set(&dio->ref, 1);
        /*
         * Grab an extra reference to ensure the dio structure, which is
         * embedded into the first bio, stays around.
         */
        bio_get(bio);

        is_sync = is_sync_kiocb(iocb);
        if (is_sync) {
                dio->flags = DIO_IS_SYNC;
                dio->waiter = current;
        } else {
                dio->flags = 0;
                dio->iocb = iocb;
        }

        dio->size = 0;
        if (is_read && iter_is_iovec(iter))
                dio->flags |= DIO_SHOULD_DIRTY;

        blk_start_plug(&plug);

        for (;;) {
                bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
                bio->bi_write_hint = iocb->ki_hint;
                bio->bi_private = dio;
                bio->bi_end_io = blkdev_bio_end_io;
                bio->bi_ioprio = iocb->ki_ioprio;

                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                        break;
                }

                if (is_read) {
                        bio->bi_opf = REQ_OP_READ;
                        if (dio->flags & DIO_SHOULD_DIRTY)
                                bio_set_pages_dirty(bio);
                } else {
                        bio->bi_opf = dio_bio_write_op(iocb);
                        task_io_account_write(bio->bi_iter.bi_size);
                }
                if (iocb->ki_flags & IOCB_NOWAIT)
                        bio->bi_opf |= REQ_NOWAIT;

                dio->size += bio->bi_iter.bi_size;
                pos += bio->bi_iter.bi_size;

                nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
                if (!nr_pages) {
                        submit_bio(bio);
                        break;
                }
                atomic_inc(&dio->ref);
                submit_bio(bio);
                bio = bio_alloc(GFP_KERNEL, nr_pages);
        }

        blk_finish_plug(&plug);

        if (!is_sync)
                return -EIOCBQUEUED;

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(dio->waiter))
                        break;
                blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (!ret)
                ret = blk_status_to_errno(dio->bio.bi_status);
        if (likely(!ret))
                ret = dio->size;

        bio_put(&dio->bio);
        return ret;
}
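
/*
 * Completion handler for the single-bio async fast path below: the request
 * maps to exactly one bio, so the iocb is completed directly from here.
 */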
static void blkdev_bio_end_io_async(struct bio *bio)
{
        struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
        struct kiocb *iocb = dio->iocb;
        ssize_t ret;

        if (likely(!bio->bi_status)) {
                ret = dio->size;
                iocb->ki_pos += ret;
        } else {
                ret = blk_status_to_errno(bio->bi_status);
        }

        iocb->ki_complete(iocb, ret);

        if (dio->flags & DIO_SHOULD_DIRTY) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}
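
/*
 * Async fast path for requests that fit in a single bio.  Returns
 * -EIOCBQUEUED on success; completion then happens in
 * blkdev_bio_end_io_async().
 */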
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
                                        struct iov_iter *iter,
                                        unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct blkdev_dio *dio;
        struct bio *bio;
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);
        dio = container_of(bio, struct blkdev_dio, bio);
        dio->flags = 0;
        dio->iocb = iocb;
        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio->bi_write_hint = iocb->ki_hint;
        bio->bi_end_io = blkdev_bio_end_io_async;
        bio->bi_ioprio = iocb->ki_ioprio;

        if (iov_iter_is_bvec(iter)) {
                /*
                 * Users don't rely on the iterator being in any particular
                 * state for async I/O returning -EIOCBQUEUED, hence we can
                 * avoid expensive iov_iter_advance(). Bypass
                 * bio_iov_iter_get_pages() and set the bvec directly.
                 */
                bio_iov_bvec_set(bio, iter);
        } else {
                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio_put(bio);
                        return ret;
                }
        }
        dio->size = bio->bi_iter.bi_size;

        if (iov_iter_rw(iter) == READ) {
                bio->bi_opf = REQ_OP_READ;
                if (iter_is_iovec(iter)) {
                        dio->flags |= DIO_SHOULD_DIRTY;
                        bio_set_pages_dirty(bio);
                }
        } else {
                bio->bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(bio->bi_iter.bi_size);
        }

        if (iocb->ki_flags & IOCB_HIPRI) {
                bio->bi_opf |= REQ_POLLED | REQ_NOWAIT;
                submit_bio(bio);
                WRITE_ONCE(iocb->private, bio);
        } else {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        bio->bi_opf |= REQ_NOWAIT;
                submit_bio(bio);
        }
        return -EIOCBQUEUED;
}
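
/*
 * Dispatch direct I/O: requests that fit in a single bio take the simple
 * (synchronous) or async fast path above, larger ones fall back to the
 * multi-bio __blkdev_direct_IO().
 */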
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        unsigned int nr_pages;

        if (!iov_iter_count(iter))
                return 0;

        nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
        if (likely(nr_pages <= BIO_MAX_VECS)) {
                if (is_sync_kiocb(iocb))
                        return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
                return __blkdev_direct_IO_async(iocb, iter, nr_pages);
        }
        return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags, struct page **pagep,
                void **fsdata)
{
        return block_write_begin(mapping, pos, len, flags, pagep,
                                 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned copied, struct page *page,
                void *fsdata)
{
        int ret;
        ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

        unlock_page(page);
        put_page(page);

        return ret;
}

static int blkdev_writepages(struct address_space *mapping,
                             struct writeback_control *wbc)
{
        return generic_writepages(mapping, wbc);
}

const struct address_space_operations def_blk_aops = {
        .dirty_folio        = block_dirty_folio,
        .invalidate_folio   = block_invalidate_folio,
        .readpage           = blkdev_readpage,
        .readahead          = blkdev_readahead,
        .writepage          = blkdev_writepage,
        .write_begin        = blkdev_write_begin,
        .write_end          = blkdev_write_end,
        .writepages         = blkdev_writepages,
        .direct_IO          = blkdev_direct_IO,
        .migratepage        = buffer_migrate_page_norefs,
        .is_dirty_writeback = buffer_check_dirty_writeback,
};

/*
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t retval;

        inode_lock(bd_inode);
        retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
        inode_unlock(bd_inode);
        return retval;
}

static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
                int datasync)
{
        struct block_device *bdev = filp->private_data;
        int error;

        error = file_write_and_wait_range(filp, start, end);
        if (error)
                return error;

        /*
         * There is no need to serialise calls to blkdev_issue_flush with
         * i_mutex and doing so causes performance issues with concurrent
         * O_SYNC writers to a block device.
         */
        error = blkdev_issue_flush(bdev);
        if (error == -EOPNOTSUPP)
                error = 0;

        return error;
}

static int blkdev_open(struct inode *inode, struct file *filp)
{
        struct block_device *bdev;

        /*
         * Preserve backwards compatibility and allow large file access
         * even if userspace doesn't ask for it explicitly. Some mkfs
         * binaries need it. We might want to drop this workaround
         * during an unstable branch.
         */
        filp->f_flags |= O_LARGEFILE;
        filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

        if (filp->f_flags & O_NDELAY)
                filp->f_mode |= FMODE_NDELAY;
        if (filp->f_flags & O_EXCL)
                filp->f_mode |= FMODE_EXCL;
        if ((filp->f_flags & O_ACCMODE) == 3)
                filp->f_mode |= FMODE_WRITE_IOCTL;

        bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        filp->private_data = bdev;
        filp->f_mapping = bdev->bd_inode->i_mapping;
        filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
        return 0;
}

static int blkdev_close(struct inode *inode, struct file *filp)
{
        struct block_device *bdev = filp->private_data;

        blkdev_put(bdev, filp->f_mode);
        return 0;
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver, which is basically a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct inode *bd_inode = bdev->bd_inode;
        loff_t size = bdev_nr_bytes(bdev);
        struct blk_plug plug;
        size_t shorted = 0;
        ssize_t ret;

        if (bdev_read_only(bdev))
                return -EPERM;

        if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
                return -ETXTBSY;

        if (!iov_iter_count(from))
                return 0;

        if (iocb->ki_pos >= size)
                return -ENOSPC;

        if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
                return -EOPNOTSUPP;

        size -= iocb->ki_pos;
        if (iov_iter_count(from) > size) {
                shorted = iov_iter_count(from) - size;
                iov_iter_truncate(from, size);
        }

        blk_start_plug(&plug);
        ret = __generic_file_write_iter(iocb, from);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        iov_iter_reexpand(from, iov_iter_count(from) + shorted);
        blk_finish_plug(&plug);
        return ret;
}
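
/*
 * Read from the block device.  O_DIRECT reads go through blkdev_direct_IO();
 * whatever part of the request the direct path does not complete is finished
 * with buffered reads via filemap_read().
 */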
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        loff_t size = bdev_nr_bytes(bdev);
        loff_t pos = iocb->ki_pos;
        size_t shorted = 0;
        ssize_t ret = 0;
        size_t count;

        if (unlikely(pos + iov_iter_count(to) > size)) {
                if (pos >= size)
                        return 0;
                size -= pos;
                shorted = iov_iter_count(to) - size;
                iov_iter_truncate(to, size);
        }

        count = iov_iter_count(to);
        if (!count)
                goto reexpand; /* skip atime */

        if (iocb->ki_flags & IOCB_DIRECT) {
                struct address_space *mapping = iocb->ki_filp->f_mapping;

                if (iocb->ki_flags & IOCB_NOWAIT) {
                        if (filemap_range_needs_writeback(mapping, pos,
                                                          pos + count - 1)) {
                                ret = -EAGAIN;
                                goto reexpand;
                        }
                } else {
                        ret = filemap_write_and_wait_range(mapping, pos,
                                                           pos + count - 1);
                        if (ret < 0)
                                goto reexpand;
                }

                file_accessed(iocb->ki_filp);

                ret = blkdev_direct_IO(iocb, to);
                if (ret >= 0) {
                        iocb->ki_pos += ret;
                        count -= ret;
                }
                iov_iter_revert(to, count - iov_iter_count(to));
                if (ret < 0 || !count)
                        goto reexpand;
        }

        ret = filemap_read(iocb, to, ret);

reexpand:
        if (unlikely(shorted))
                iov_iter_reexpand(to, iov_iter_count(to) + shorted);
        return ret;
}

#define BLKDEV_FALLOC_FL_SUPPORTED                                      \
                (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |           \
                 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

static long blkdev_fallocate(struct file *file, int mode, loff_t start,
                             loff_t len)
{
        struct inode *inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(inode);
        loff_t end = start + len - 1;
        loff_t isize;
        int error;

        /* Fail if we don't recognize the flags. */
        if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
                return -EOPNOTSUPP;

        /* Don't go off the end of the device. */
        isize = bdev_nr_bytes(bdev);
        if (start >= isize)
                return -EINVAL;
        if (end >= isize) {
                if (mode & FALLOC_FL_KEEP_SIZE) {
                        len = isize - start;
                        end = start + len - 1;
                } else
                        return -EINVAL;
        }

        /*
         * Don't allow IO that isn't aligned to logical block size.
         */
        if ((start | len) & (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;
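
        /*
         * Take the mapping's invalidate lock so the page cache cannot be
         * repopulated in this range while it is being zeroed or discarded.
         */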
        filemap_invalidate_lock(inode->i_mapping);

        /* Invalidate the page cache, including dirty pages. */
        error = truncate_bdev_range(bdev, file->f_mode, start, end);
        if (error)
                goto fail;

        switch (mode) {
        case FALLOC_FL_ZERO_RANGE:
        case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOUNMAP);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOFALLBACK);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
                error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL, 0);
                break;
        default:
                error = -EOPNOTSUPP;
        }

fail:
        filemap_invalidate_unlock(inode->i_mapping);
        return error;
}

const struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
        .llseek         = blkdev_llseek,
        .read_iter      = blkdev_read_iter,
        .write_iter     = blkdev_write_iter,
        .iopoll         = iocb_bio_iopoll,
        .mmap           = generic_file_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = blkdev_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_blkdev_ioctl,
#endif
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = blkdev_fallocate,
};

static __init int blkdev_init(void)
{
        return bioset_init(&blkdev_dio_pool, 4,
                        offsetof(struct blkdev_dio, bio),
                        BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);