// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/module.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
        return file->f_mapping->host;
}

static int blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}

static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
        unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

        /* avoid the need for an I/O completion work item */
        if (iocb->ki_flags & IOCB_DSYNC)
                op |= REQ_FUA;
        return op;
}

#define DIO_INLINE_BIO_VECS 4

static void blkdev_bio_end_io_simple(struct bio *bio)
{
        struct task_struct *waiter = bio->bi_private;

        WRITE_ONCE(bio->bi_private, NULL);
        blk_wake_io_task(waiter);
}

static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
                struct iov_iter *iter, unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
        loff_t pos = iocb->ki_pos;
        bool should_dirty = false;
        struct bio bio;
        ssize_t ret;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        if (nr_pages <= DIO_INLINE_BIO_VECS)
                vecs = inline_vecs;
        else {
                vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
                                     GFP_KERNEL);
                if (!vecs)
                        return -ENOMEM;
        }

        bio_init(&bio, vecs, nr_pages);
        bio_set_dev(&bio, bdev);
        bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio.bi_write_hint = iocb->ki_hint;
        bio.bi_private = current;
        bio.bi_end_io = blkdev_bio_end_io_simple;
        bio.bi_ioprio = iocb->ki_ioprio;

        ret = bio_iov_iter_get_pages(&bio, iter);
        if (unlikely(ret))
                goto out;
        ret = bio.bi_iter.bi_size;

        if (iov_iter_rw(iter) == READ) {
                bio.bi_opf = REQ_OP_READ;
                if (iter_is_iovec(iter))
                        should_dirty = true;
        } else {
                bio.bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(ret);
        }
        if (iocb->ki_flags & IOCB_NOWAIT)
                bio.bi_opf |= REQ_NOWAIT;
        if (iocb->ki_flags & IOCB_HIPRI)
                bio_set_polled(&bio, iocb);

        submit_bio(&bio);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(bio.bi_private))
                        break;
                if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, NULL, 0))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        bio_release_pages(&bio, should_dirty);
        if (unlikely(bio.bi_status))
                ret = blk_status_to_errno(bio.bi_status);

out:
        if (vecs != inline_vecs)
                kfree(vecs);

        bio_uninit(&bio);

        return ret;
}

enum {
        DIO_SHOULD_DIRTY        = 1,
        DIO_IS_SYNC             = 2,
};

struct blkdev_dio {
        union {
                struct kiocb            *iocb;
                struct task_struct      *waiter;
        };
        size_t                  size;
        atomic_t                ref;
        unsigned int            flags;
        struct bio              bio ____cacheline_aligned_in_smp;
};
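/*
 * Bios for the multi-bio and async direct I/O paths come from
 * blkdev_dio_pool.  blkdev_init() front-pads each allocation by
 * offsetof(struct blkdev_dio, bio), so the first bio of every request
 * carries an embedded struct blkdev_dio that container_of() can
 * recover without a second allocation.
 */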
static struct bio_set blkdev_dio_pool;

static void blkdev_bio_end_io(struct bio *bio)
{
        struct blkdev_dio *dio = bio->bi_private;
        bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

        if (bio->bi_status && !dio->bio.bi_status)
                dio->bio.bi_status = bio->bi_status;

        if (atomic_dec_and_test(&dio->ref)) {
                if (!(dio->flags & DIO_IS_SYNC)) {
                        struct kiocb *iocb = dio->iocb;
                        ssize_t ret;

                        WRITE_ONCE(iocb->private, NULL);

                        if (likely(!dio->bio.bi_status)) {
                                ret = dio->size;
                                iocb->ki_pos += ret;
                        } else {
                                ret = blk_status_to_errno(dio->bio.bi_status);
                        }

                        dio->iocb->ki_complete(iocb, ret);
                        bio_put(&dio->bio);
                } else {
                        struct task_struct *waiter = dio->waiter;

                        WRITE_ONCE(dio->waiter, NULL);
                        blk_wake_io_task(waiter);
                }
        }

        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}

static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
        bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);

        dio = container_of(bio, struct blkdev_dio, bio);
        atomic_set(&dio->ref, 1);
        /*
         * Grab an extra reference to ensure the dio structure which is
         * embedded into the first bio stays around.
         */
        bio_get(bio);

        is_sync = is_sync_kiocb(iocb);
        if (is_sync) {
                dio->flags = DIO_IS_SYNC;
                dio->waiter = current;
        } else {
                dio->flags = 0;
                dio->iocb = iocb;
        }

        dio->size = 0;
        if (is_read && iter_is_iovec(iter))
                dio->flags |= DIO_SHOULD_DIRTY;

        blk_start_plug(&plug);

        for (;;) {
                bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
                bio->bi_write_hint = iocb->ki_hint;
                bio->bi_private = dio;
                bio->bi_end_io = blkdev_bio_end_io;
                bio->bi_ioprio = iocb->ki_ioprio;

                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                        break;
                }

                if (is_read) {
                        bio->bi_opf = REQ_OP_READ;
                        if (dio->flags & DIO_SHOULD_DIRTY)
                                bio_set_pages_dirty(bio);
                } else {
                        bio->bi_opf = dio_bio_write_op(iocb);
                        task_io_account_write(bio->bi_iter.bi_size);
                }
                if (iocb->ki_flags & IOCB_NOWAIT)
                        bio->bi_opf |= REQ_NOWAIT;

                dio->size += bio->bi_iter.bi_size;
                pos += bio->bi_iter.bi_size;

                nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
                if (!nr_pages) {
                        submit_bio(bio);
                        break;
                }
                atomic_inc(&dio->ref);
                submit_bio(bio);
                bio = bio_alloc(GFP_KERNEL, nr_pages);
        }

        blk_finish_plug(&plug);

        if (!is_sync)
                return -EIOCBQUEUED;

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(dio->waiter))
                        break;
                blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (!ret)
                ret = blk_status_to_errno(dio->bio.bi_status);
        if (likely(!ret))
                ret = dio->size;

        bio_put(&dio->bio);
        return ret;
}
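/*
 * Completion handler for the single-bio async path below.  Unlike
 * blkdev_bio_end_io() there is no reference count to drop: this path
 * submits exactly one bio per kiocb, so ->ki_complete() can be invoked
 * directly from the bi_end_io callback.
 */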
static void blkdev_bio_end_io_async(struct bio *bio)
{
        struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
        struct kiocb *iocb = dio->iocb;
        ssize_t ret;

        WRITE_ONCE(iocb->private, NULL);

        if (likely(!bio->bi_status)) {
                ret = dio->size;
                iocb->ki_pos += ret;
        } else {
                ret = blk_status_to_errno(bio->bi_status);
        }

        iocb->ki_complete(iocb, ret);

        if (dio->flags & DIO_SHOULD_DIRTY) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}

static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
                                        struct iov_iter *iter,
                                        unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct blkdev_dio *dio;
        struct bio *bio;
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);
        dio = container_of(bio, struct blkdev_dio, bio);
        dio->flags = 0;
        dio->iocb = iocb;
        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio->bi_write_hint = iocb->ki_hint;
        bio->bi_end_io = blkdev_bio_end_io_async;
        bio->bi_ioprio = iocb->ki_ioprio;

        if (iov_iter_is_bvec(iter)) {
                /*
                 * Users don't rely on the iterator being in any particular
                 * state for async I/O returning -EIOCBQUEUED, hence we can
                 * avoid expensive iov_iter_advance(). Bypass
                 * bio_iov_iter_get_pages() and set the bvec directly.
                 */
                bio_iov_bvec_set(bio, iter);
        } else {
                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio_put(bio);
                        return ret;
                }
        }
        dio->size = bio->bi_iter.bi_size;

        if (iov_iter_rw(iter) == READ) {
                bio->bi_opf = REQ_OP_READ;
                if (iter_is_iovec(iter)) {
                        dio->flags |= DIO_SHOULD_DIRTY;
                        bio_set_pages_dirty(bio);
                }
        } else {
                bio->bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(bio->bi_iter.bi_size);
        }

        if (iocb->ki_flags & IOCB_HIPRI) {
                bio->bi_opf |= REQ_POLLED | REQ_NOWAIT;
                submit_bio(bio);
                WRITE_ONCE(iocb->private, bio);
        } else {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        bio->bi_opf |= REQ_NOWAIT;
                submit_bio(bio);
        }
        return -EIOCBQUEUED;
}

static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        unsigned int nr_pages;

        if (!iov_iter_count(iter))
                return 0;

        nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
        if (likely(nr_pages <= BIO_MAX_VECS)) {
                if (is_sync_kiocb(iocb))
                        return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
                return __blkdev_direct_IO_async(iocb, iter, nr_pages);
        }
        return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags, struct page **pagep,
                void **fsdata)
{
        return block_write_begin(mapping, pos, len, flags, pagep,
                                 blkdev_get_block);
}
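/*
 * Finish a buffered write started by blkdev_write_begin(): commit the
 * copied bytes with block_write_end(), then drop the page lock and the
 * page reference that ->write_begin took.
 */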
static int blkdev_write_end(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned copied, struct page *page,
                void *fsdata)
{
        int ret;

        ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

        unlock_page(page);
        put_page(page);

        return ret;
}

static int blkdev_writepages(struct address_space *mapping,
                             struct writeback_control *wbc)
{
        return generic_writepages(mapping, wbc);
}

const struct address_space_operations def_blk_aops = {
        .set_page_dirty = __set_page_dirty_buffers,
        .readpage       = blkdev_readpage,
        .readahead      = blkdev_readahead,
        .writepage      = blkdev_writepage,
        .write_begin    = blkdev_write_begin,
        .write_end      = blkdev_write_end,
        .writepages     = blkdev_writepages,
        .direct_IO      = blkdev_direct_IO,
        .migratepage    = buffer_migrate_page_norefs,
        .is_dirty_writeback = buffer_check_dirty_writeback,
};

/*
 * For a block special file, file_inode(file)->i_size is zero, so we
 * compute the size by hand (just as in block_read/write above).
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t retval;

        inode_lock(bd_inode);
        retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
        inode_unlock(bd_inode);
        return retval;
}

static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
                        int datasync)
{
        struct block_device *bdev = filp->private_data;
        int error;

        error = file_write_and_wait_range(filp, start, end);
        if (error)
                return error;

        /*
         * There is no need to serialise calls to blkdev_issue_flush with
         * i_mutex and doing so causes performance issues with concurrent
         * O_SYNC writers to a block device.
         */
        error = blkdev_issue_flush(bdev);
        if (error == -EOPNOTSUPP)
                error = 0;

        return error;
}

static int blkdev_open(struct inode *inode, struct file *filp)
{
        struct block_device *bdev;

        /*
         * Preserve backwards compatibility and allow large file access
         * even if userspace doesn't ask for it explicitly. Some mkfs
         * binaries need it. We might want to drop this workaround
         * during an unstable branch.
         */
        filp->f_flags |= O_LARGEFILE;
        filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

        if (filp->f_flags & O_NDELAY)
                filp->f_mode |= FMODE_NDELAY;
        if (filp->f_flags & O_EXCL)
                filp->f_mode |= FMODE_EXCL;
        if ((filp->f_flags & O_ACCMODE) == 3)
                filp->f_mode |= FMODE_WRITE_IOCTL;

        bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        filp->private_data = bdev;
        filp->f_mapping = bdev->bd_inode->i_mapping;
        filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
        return 0;
}

static int blkdev_close(struct inode *inode, struct file *filp)
{
        struct block_device *bdev = filp->private_data;

        blkdev_put(bdev, filp->f_mode);
        return 0;
}
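/*
 * Illustration only (not part of this file, using a hypothetical device
 * path): how a userspace caller reaches the direct I/O paths above.
 * O_DIRECT transfers must have the file offset, buffer address, and
 * length aligned to the logical block size, or blkdev_direct_IO()
 * fails with -EINVAL.  A 4096-byte alignment satisfies any common
 * logical block size (512 or 4096):
 *
 *      int fd = open("/dev/sda", O_RDONLY | O_DIRECT);
 *      void *buf;
 *
 *      if (fd < 0 || posix_memalign(&buf, 4096, 4096))
 *              return -1;
 *      pread(fd, buf, 4096, 0);        // offset, address, length aligned
 */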
/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct inode *bd_inode = bdev->bd_inode;
        loff_t size = bdev_nr_bytes(bdev);
        struct blk_plug plug;
        size_t shorted = 0;
        ssize_t ret;

        if (bdev_read_only(bdev))
                return -EPERM;

        if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
                return -ETXTBSY;

        if (!iov_iter_count(from))
                return 0;

        if (iocb->ki_pos >= size)
                return -ENOSPC;

        if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
                return -EOPNOTSUPP;

        size -= iocb->ki_pos;
        if (iov_iter_count(from) > size) {
                shorted = iov_iter_count(from) - size;
                iov_iter_truncate(from, size);
        }

        blk_start_plug(&plug);
        ret = __generic_file_write_iter(iocb, from);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        iov_iter_reexpand(from, iov_iter_count(from) + shorted);
        blk_finish_plug(&plug);
        return ret;
}

static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        loff_t size = bdev_nr_bytes(bdev);
        loff_t pos = iocb->ki_pos;
        size_t shorted = 0;
        ssize_t ret = 0;
        size_t count;

        if (unlikely(pos + iov_iter_count(to) > size)) {
                if (pos >= size)
                        return 0;
                size -= pos;
                shorted = iov_iter_count(to) - size;
                iov_iter_truncate(to, size);
        }

        count = iov_iter_count(to);
        if (!count)
                goto reexpand; /* skip atime */

        if (iocb->ki_flags & IOCB_DIRECT) {
                struct address_space *mapping = iocb->ki_filp->f_mapping;

                if (iocb->ki_flags & IOCB_NOWAIT) {
                        if (filemap_range_needs_writeback(mapping, pos,
                                                          pos + count - 1)) {
                                ret = -EAGAIN;
                                goto reexpand;
                        }
                } else {
                        ret = filemap_write_and_wait_range(mapping, pos,
                                                           pos + count - 1);
                        if (ret < 0)
                                goto reexpand;
                }

                file_accessed(iocb->ki_filp);

                ret = blkdev_direct_IO(iocb, to);
                if (ret >= 0) {
                        iocb->ki_pos += ret;
                        count -= ret;
                }
                iov_iter_revert(to, count - iov_iter_count(to));
                if (ret < 0 || !count)
                        goto reexpand;
        }

        ret = filemap_read(iocb, to, ret);

reexpand:
        if (unlikely(shorted))
                iov_iter_reexpand(to, iov_iter_count(to) + shorted);
        return ret;
}

#define BLKDEV_FALLOC_FL_SUPPORTED                                      \
                (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |           \
                 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

static long blkdev_fallocate(struct file *file, int mode, loff_t start,
                             loff_t len)
{
        struct inode *inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(inode);
        loff_t end = start + len - 1;
        loff_t isize;
        int error;

        /* Fail if we don't recognize the flags. */
        if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
                return -EOPNOTSUPP;

        /* Don't go off the end of the device. */
        isize = bdev_nr_bytes(bdev);
        if (start >= isize)
                return -EINVAL;
        if (end >= isize) {
                if (mode & FALLOC_FL_KEEP_SIZE) {
                        len = isize - start;
                        end = start + len - 1;
                } else
                        return -EINVAL;
        }

        /*
         * Don't allow IO that isn't aligned to logical block size.
         */
        if ((start | len) & (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        filemap_invalidate_lock(inode->i_mapping);
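        /*
         * The invalidate lock stays held until the zeroout/discard below
         * completes, so concurrent page faults cannot repopulate the page
         * cache over the range while the device is rewriting it.
         */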
        /* Invalidate the page cache, including dirty pages. */
        error = truncate_bdev_range(bdev, file->f_mode, start, end);
        if (error)
                goto fail;

        switch (mode) {
        case FALLOC_FL_ZERO_RANGE:
        case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOUNMAP);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOFALLBACK);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
                error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL, 0);
                break;
        default:
                error = -EOPNOTSUPP;
        }

fail:
        filemap_invalidate_unlock(inode->i_mapping);
        return error;
}

const struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
        .llseek         = blkdev_llseek,
        .read_iter      = blkdev_read_iter,
        .write_iter     = blkdev_write_iter,
        .iopoll         = iocb_bio_iopoll,
        .mmap           = generic_file_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = blkdev_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_blkdev_ioctl,
#endif
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = blkdev_fallocate,
};

static __init int blkdev_init(void)
{
        return bioset_init(&blkdev_dio_pool, 4,
                           offsetof(struct blkdev_dio, bio),
                           BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);