--- loop.c (ab1cb278bc7027663adbfb0b81404f8398437e11)
+++ loop.c (bc07c10a3603a5ab3ef01ba42b3d41f9ac63d1b6)
 /*
  * linux/drivers/block/loop.c
  *
  * Written by Theodore Ts'o, 3/29/93
  *
  * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
  * permitted under the GNU General Public License.
  *
--- 431 unchanged lines hidden (view full) ---
 	struct file *file = lo->lo_backing_file;
 	int ret = vfs_fsync(file, 0);
 	if (unlikely(ret && ret != -EINVAL))
 		ret = -EIO;
 
 	return ret;
 }
 
+static inline void handle_partial_read(struct loop_cmd *cmd, long bytes)
+{
+	if (bytes < 0 || (cmd->rq->cmd_flags & REQ_WRITE))
+		return;
+
+	if (unlikely(bytes < blk_rq_bytes(cmd->rq))) {
+		struct bio *bio = cmd->rq->bio;
+
+		bio_advance(bio, bytes);
+		zero_fill_bio(bio);
+	}
+}
+
+static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
+{
+	struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
+	struct request *rq = cmd->rq;
+
+	handle_partial_read(cmd, ret);
+
+	if (ret > 0)
+		ret = 0;
+	else if (ret < 0)
+		ret = -EIO;
+
+	rq->errors = ret;
+	blk_mq_complete_request(rq);
+}
+
+static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
+		     loff_t pos, bool rw)
+{
+	struct iov_iter iter;
+	struct bio_vec *bvec;
+	struct bio *bio = cmd->rq->bio;
+	struct file *file = lo->lo_backing_file;
+	int ret;
+
+	/* nomerge for loop request queue */
+	WARN_ON(cmd->rq->bio != cmd->rq->biotail);
+
+	bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+	iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
+		      bio_segments(bio), blk_rq_bytes(cmd->rq));
+
+	cmd->iocb.ki_pos = pos;
+	cmd->iocb.ki_filp = file;
+	cmd->iocb.ki_complete = lo_rw_aio_complete;
+	cmd->iocb.ki_flags = IOCB_DIRECT;
+
+	if (rw == WRITE)
+		ret = file->f_op->write_iter(&cmd->iocb, &iter);
+	else
+		ret = file->f_op->read_iter(&cmd->iocb, &iter);
+
+	if (ret != -EIOCBQUEUED)
+		cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
+	return 0;
+}
+
+
+static inline int lo_rw_simple(struct loop_device *lo,
+		struct request *rq, loff_t pos, bool rw)
+{
+	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
+	if (cmd->use_aio)
+		return lo_rw_aio(lo, cmd, pos, rw);
+
+	/*
+	 * lo_write_simple and lo_read_simple should have been covered
+	 * by io submit style function like lo_rw_aio(), one blocker
+	 * is that lo_read_simple() need to call flush_dcache_page after
+	 * the page is written from kernel, and it isn't easy to handle
+	 * this in io submit style function which submits all segments
+	 * of the req at one time. And direct read IO doesn't need to
+	 * run flush_dcache_page().
+	 */
+	if (rw == WRITE)
+		return lo_write_simple(lo, rq, pos);
+	else
+		return lo_read_simple(lo, rq, pos);
+}
+
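The block added above is the core of the change: a request can be submitted to the backing file through its ->read_iter/->write_iter methods with an IOCB_DIRECT kiocb, and completion happens either inline (when the return value is not -EIOCBQUEUED) or later through the ki_complete callback, which also zero-fills the tail of the bio after a short read. Below is a minimal userspace sketch of that submit/complete shape, assuming made-up names (fake_submit, fake_complete, QUEUED); it only illustrates the pattern and is not kernel code.

/*
 * Userspace model of the pattern in lo_rw_aio()/lo_rw_aio_complete():
 * the submitter completes the command inline unless the backend reports
 * "queued", and the completion step pads a short read with zeroes before
 * turning the byte count into a status. All names here are hypothetical.
 */
#include <stdio.h>
#include <string.h>

#define QUEUED (-529)			/* stand-in for -EIOCBQUEUED */

struct fake_cmd {
	char *buf;
	long len;			/* bytes requested */
	int status;			/* 0 or -EIO, like rq->errors */
};

/* Mirrors handle_partial_read(): pad a short read with zeroes. */
static void fake_partial_read(struct fake_cmd *cmd, long bytes)
{
	if (bytes >= 0 && bytes < cmd->len)
		memset(cmd->buf + bytes, 0, cmd->len - bytes);
}

/* Mirrors lo_rw_aio_complete(): map the byte count to a status. */
static void fake_complete(struct fake_cmd *cmd, long ret)
{
	fake_partial_read(cmd, ret);
	cmd->status = (ret >= 0) ? 0 : -5;	/* -EIO */
	printf("completed, status=%d, buf=\"%s\"\n", cmd->status, cmd->buf);
}

/* Mirrors lo_rw_aio(): complete inline unless the I/O was queued. */
static void fake_submit(struct fake_cmd *cmd, long ret_from_backend)
{
	if (ret_from_backend != QUEUED)
		fake_complete(cmd, ret_from_backend);
	/* else: the backend calls fake_complete() later, asynchronously */
}

int main(void)
{
	char buf[8] = "XXXXXXX";
	struct fake_cmd cmd = { .buf = buf, .len = 7 };

	fake_submit(&cmd, 3);	/* short read: bytes 3..6 get zero-filled */
	return 0;
}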
 static int do_req_filebacked(struct loop_device *lo, struct request *rq)
 {
 	loff_t pos;
 	int ret;
 
 	pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
 
 	if (rq->cmd_flags & REQ_WRITE) {
 		if (rq->cmd_flags & REQ_FLUSH)
 			ret = lo_req_flush(lo, rq);
 		else if (rq->cmd_flags & REQ_DISCARD)
 			ret = lo_discard(lo, rq, pos);
 		else if (lo->transfer)
 			ret = lo_write_transfer(lo, rq, pos);
 		else
-			ret = lo_write_simple(lo, rq, pos);
+			ret = lo_rw_simple(lo, rq, pos, WRITE);
 
 	} else {
 		if (lo->transfer)
 			ret = lo_read_transfer(lo, rq, pos);
 		else
-			ret = lo_read_simple(lo, rq, pos);
+			ret = lo_rw_simple(lo, rq, pos, READ);
 	}
 
 	return ret;
 }
 
 struct switch_request {
 	struct file *file;
 	struct completion wait;
--- 1087 unchanged lines hidden (view full) ---
 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 	struct loop_device *lo = cmd->rq->q->queuedata;
 
 	blk_mq_start_request(bd->rq);
 
 	if (lo->lo_state != Lo_bound)
 		return -EIO;
 
+	if (lo->use_dio && !(cmd->rq->cmd_flags & (REQ_FLUSH |
+					REQ_DISCARD)))
+		cmd->use_aio = true;
+	else
+		cmd->use_aio = false;
+
 	queue_kthread_work(&lo->worker, &cmd->work);
 
 	return BLK_MQ_RQ_QUEUE_OK;
 }
 
 static void loop_handle_cmd(struct loop_cmd *cmd)
 {
 	const bool write = cmd->rq->cmd_flags & REQ_WRITE;
 	struct loop_device *lo = cmd->rq->q->queuedata;
 	int ret = -EIO;
 
 	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
 		goto failed;
 
 	ret = do_req_filebacked(lo, cmd->rq);
 
  failed:
 	if (ret)
 		cmd->rq->errors = -EIO;
-	blk_mq_complete_request(cmd->rq);
+	/* complete non-aio request */
+	if (!cmd->use_aio || ret)
+		blk_mq_complete_request(cmd->rq);
 }
 
 static void loop_queue_work(struct kthread_work *work)
 {
 	struct loop_cmd *cmd =
 		container_of(work, struct loop_cmd, work);
 
 	loop_handle_cmd(cmd);
--- 362 unchanged lines hidden ---
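The last two hunks decide which requests take the async path (use_dio set and the request is neither REQ_FLUSH nor REQ_DISCARD) and keep request completion single-owned: loop_handle_cmd() completes the request only for the synchronous path or when submission failed, otherwise lo_rw_aio_complete() does it later. A minimal userspace sketch of that ownership rule follows, with hypothetical names (fake_handle_cmd, fake_blk_mq_complete); it is an illustration of the rule, not driver code.

/*
 * Userspace model of the completion ownership introduced above: each
 * request is completed exactly once, either here (sync path, or failed
 * submission) or by the async completion callback, never both.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_rq {
	bool use_aio;	/* set at queue time, like cmd->use_aio */
	int errors;
	bool completed;
};

static void fake_blk_mq_complete(struct fake_rq *rq)
{
	rq->completed = true;
	printf("request completed, errors=%d\n", rq->errors);
}

/* Pretend handler: nonzero ret means the submission itself failed. */
static void fake_handle_cmd(struct fake_rq *rq, int ret)
{
	if (ret)
		rq->errors = -5;		/* -EIO */
	/* complete non-aio requests (or failed submissions) here */
	if (!rq->use_aio || ret)
		fake_blk_mq_complete(rq);
	/* otherwise the aio completion callback will do it later */
}

int main(void)
{
	struct fake_rq sync_rq = { .use_aio = false };
	struct fake_rq aio_rq  = { .use_aio = true };

	fake_handle_cmd(&sync_rq, 0);	/* completed here */
	fake_handle_cmd(&aio_rq, 0);	/* left for the aio callback */
	printf("aio request completed now? %s\n",
	       aio_rq.completed ? "yes" : "no");
	return 0;
}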