xen-blkfront.c: diff between 1decabc1a70b97ef7412224df3dcb59d1227e3af (old) and 288dab8a35a0bde426a09870943c8d3ee3a50dab (new)
1/*
2 * blkfront.c
3 *
4 * XenLinux virtual block device driver.
5 *
6 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
7 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
8 * Copyright (c) 2004, Christian Limpach

--- 182 unchanged lines hidden ---

191 struct gendisk *gd;
192 int vdevice;
193 blkif_vdev_t handle;
194 enum blkif_state connected;
195 /* Number of pages per ring buffer. */
196 unsigned int nr_ring_pages;
197 struct request_queue *rq;
198 unsigned int feature_flush;
199 unsigned int feature_fua;
199 unsigned int feature_discard:1;
200 unsigned int feature_secdiscard:1;
201 unsigned int discard_granularity;
202 unsigned int discard_alignment;
203 unsigned int feature_persistent:1;
204 /* Number of 4KB segments handled */
205 unsigned int max_indirect_segments;
206 int is_ready;

--- 332 unchanged lines hidden ---

539
540 /* Fill out a communications ring structure. */
541 id = blkif_ring_get_request(rinfo, req, &ring_req);
542
543 ring_req->operation = BLKIF_OP_DISCARD;
544 ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
545 ring_req->u.discard.id = id;
546 ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
547 if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
548 if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
548 ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
549 else
550 ring_req->u.discard.flag = 0;
551
552 /* Keep a private copy so we can reissue requests when recovering. */
553 rinfo->shadow[id].req = *ring_req;
554
555 return 0;
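A minimal sketch (not from this driver) of the new-style test the hunk above adopts: with the 4.8-era block layer the request's operation is a single value read with req_op() rather than a set of REQ_* bits in cmd_flags, and secure erase is its own operation; the helper name is hypothetical.

#include <linux/blkdev.h>

/* Hypothetical helper: secure erase is tested via req_op(),
 * not via a REQ_SECURE bit in cmd_flags. */
static bool rq_is_secure_erase(struct request *req)
{
	return req_op(req) == REQ_OP_SECURE_ERASE;
}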

--- 182 unchanged lines hidden ---

738
739 rinfo->shadow[id].num_sg = num_sg;
740 if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST &&
741 likely(!require_extra_req)) {
742 /*
743 * The indirect operation can only be a BLKIF_OP_READ or
744 * BLKIF_OP_WRITE
745 */
746 BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
747 BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
747 ring_req->operation = BLKIF_OP_INDIRECT;
748 ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
749 BLKIF_OP_WRITE : BLKIF_OP_READ;
750 ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
751 ring_req->u.indirect.handle = info->handle;
752 ring_req->u.indirect.nr_segments = num_grant;
753 } else {
754 ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
755 ring_req->u.rw.handle = info->handle;
756 ring_req->operation = rq_data_dir(req) ?
757 BLKIF_OP_WRITE : BLKIF_OP_READ;
758 if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
759 if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
759 /*
760 * Ideally we can do an unordered flush-to-disk.
761 * In case the backend only supports barriers, use that.
762 * A barrier request is a superset of FUA, so we can
763 * implement it the same way. (It's also a FLUSH+FUA,
764 * since it is guaranteed ordered WRT previous writes.)
765 */
766 switch (info->feature_flush &
767 ((REQ_FLUSH|REQ_FUA))) {
768 case REQ_FLUSH|REQ_FUA:
769 ring_req->operation =
770 BLKIF_OP_WRITE_BARRIER;
771 break;
772 case REQ_FLUSH:
773 ring_req->operation =
774 BLKIF_OP_FLUSH_DISKCACHE;
775 break;
776 default:
777 ring_req->operation = 0;
778 }
767 if (info->feature_flush && info->feature_fua)
768 ring_req->operation =
769 BLKIF_OP_WRITE_BARRIER;
770 else if (info->feature_flush)
771 ring_req->operation =
772 BLKIF_OP_FLUSH_DISKCACHE;
773 else
774 ring_req->operation = 0;
779 }
780 ring_req->u.rw.nr_segments = num_grant;
781 if (unlikely(require_extra_req)) {
782 extra_id = blkif_ring_get_request(rinfo, req,
783 &extra_ring_req);
784 /*
785 * Only the first request contains the scatter-gather
786 * list.

--- 52 unchanged lines hidden ---

839 *
840 * @req: a request struct
841 */
842static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
843{
844 if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
845 return 1;
846
847 if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE)))
843 if (unlikely(req_op(req) == REQ_OP_DISCARD ||
844 req_op(req) == REQ_OP_SECURE_ERASE))
848 return blkif_queue_discard_req(req, rinfo);
849 else
850 return blkif_queue_rw_req(req, rinfo);
851}
852
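The switch-to-if/else rewrite in the hunk above maps a flush/FUA request onto the backend operation using the two new booleans. A sketch (assumed, not in the file) of that decision pulled into a helper; it relies on the driver's own struct blkfront_info and the BLKIF_OP_* constants from the Xen blkif interface headers.

/* Sketch only: the same mapping the new code performs inline above;
 * a backend barrier behaves as FLUSH+FUA, so it covers both. */
static uint8_t blkif_pick_flush_op(const struct blkfront_info *info)
{
	if (info->feature_flush && info->feature_fua)
		return BLKIF_OP_WRITE_BARRIER;
	if (info->feature_flush)
		return BLKIF_OP_FLUSH_DISKCACHE;
	return 0;	/* no cache control available */
}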
853static inline void flush_requests(struct blkfront_ring_info *rinfo)
854{
855 int notify;
856
857 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);
858
859 if (notify)
860 notify_remote_via_irq(rinfo->irq);
861}
862
863static inline bool blkif_request_flush_invalid(struct request *req,
864 struct blkfront_info *info)
865{
866 return ((req->cmd_type != REQ_TYPE_FS) ||
867 ((req->cmd_flags & REQ_FLUSH) &&
868 !(info->feature_flush & REQ_FLUSH)) ||
869 ((req->cmd_flags & REQ_FUA) &&
870 !(info->feature_flush & REQ_FUA)));
864 ((req_op(req) == REQ_OP_FLUSH) &&
865 !info->feature_flush) ||
866 ((req->cmd_flags & REQ_FUA) &&
867 !info->feature_fua));
871}
872
873static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
874 const struct blk_mq_queue_data *qd)
875{
876 unsigned long flags;
877 int qid = hctx->queue_num;
878 struct blkfront_info *info = hctx->queue->queuedata;
879 struct blkfront_ring_info *rinfo = NULL;
874 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
880
881 BUG_ON(info->nr_rings <= qid);
882 rinfo = &info->rinfo[qid];
883 blk_mq_start_request(qd->rq);
884 spin_lock_irqsave(&rinfo->ring_lock, flags);
885 if (RING_FULL(&rinfo->ring))
886 goto out_busy;
887
888 if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
889 goto out_err;
890

--- 9 unchanged lines hidden ---

900 return BLK_MQ_RQ_QUEUE_ERROR;
901
902out_busy:
903 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
904 blk_mq_stop_hw_queue(hctx);
905 return BLK_MQ_RQ_QUEUE_BUSY;
906}
907
901static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
902 unsigned int index)
903{
904 struct blkfront_info *info = (struct blkfront_info *)data;
905
906 BUG_ON(info->nr_rings <= index);
907 hctx->driver_data = &info->rinfo[index];
908 return 0;
909}
910
908static struct blk_mq_ops blkfront_mq_ops = {
909 .queue_rq = blkif_queue_rq,
910 .map_queue = blk_mq_map_queue,
914 .init_hctx = blk_mq_init_hctx,
911};
912
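The added .init_hctx hook and the slimmed-down blkif_queue_rq() above follow a common blk-mq pattern: resolve the per-hardware-queue context once at init time, stash it in hctx->driver_data, and read it back in queue_rq instead of re-deriving it from hctx->queue_num on every request. A generic sketch under assumed names (example_*):

#include <linux/blk-mq.h>

struct example_ring { int index; /* per-queue state */ };
struct example_dev  { struct example_ring rings[4]; };

static int example_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			     unsigned int index)
{
	struct example_dev *dev = data;	/* blk-mq passes the tag set's driver_data here */

	hctx->driver_data = &dev->rings[index];
	return 0;
}

static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
			    const struct blk_mq_queue_data *qd)
{
	struct example_ring *ring = hctx->driver_data;

	/* ... issue qd->rq on 'ring' ... */
	return BLK_MQ_RQ_QUEUE_OK;
}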
913static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
914 unsigned int physical_sector_size,
915 unsigned int segments)
916{
917 struct request_queue *rq;
918 struct blkfront_info *info = gd->private_data;

--- 19 unchanged lines hidden ---

938 if (blk_mq_alloc_tag_set(&info->tag_set))
939 return -EINVAL;
940 rq = blk_mq_init_queue(&info->tag_set);
941 if (IS_ERR(rq)) {
942 blk_mq_free_tag_set(&info->tag_set);
943 return PTR_ERR(rq);
944 }
945
946 rq->queuedata = info;
947 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
948
949 if (info->feature_discard) {
950 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
951 blk_queue_max_discard_sectors(rq, get_capacity(gd));
952 rq->limits.discard_granularity = info->discard_granularity;
953 rq->limits.discard_alignment = info->discard_alignment;
954 if (info->feature_secdiscard)
955 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
958 queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
956 }
957
958 /* Hard sector size and max sectors impersonate the equiv. hardware. */
959 blk_queue_logical_block_size(rq, sector_size);
960 blk_queue_physical_block_size(rq, physical_sector_size);
961 blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
962
963 /* Each segment in a request is up to an aligned page in size. */

--- 9 unchanged lines hidden ---

973 /* Make sure we don't use bounce buffers. */
974 blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
975
976 gd->queue = rq;
977
978 return 0;
979}
980
981static const char *flush_info(unsigned int feature_flush)
984static const char *flush_info(struct blkfront_info *info)
982{
983 switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) {
984 case REQ_FLUSH|REQ_FUA:
985 return "barrier: enabled;";
986 case REQ_FLUSH:
987 return "flush diskcache: enabled;";
988 default:
989 return "barrier or flush: disabled;";
990 }
986 if (info->feature_flush && info->feature_fua)
987 return "barrier: enabled;";
988 else if (info->feature_flush)
989 return "flush diskcache: enabled;";
990 else
991 return "barrier or flush: disabled;";
991}
992
993static void xlvbd_flush(struct blkfront_info *info)
994{
995 blk_queue_write_cache(info->rq, info->feature_flush & REQ_FLUSH,
996 info->feature_flush & REQ_FUA);
996 blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
997 info->feature_fua ? true : false);
997 pr_info("blkfront: %s: %s %s %s %s %s\n",
998 info->gd->disk_name, flush_info(info->feature_flush),
999 info->gd->disk_name, flush_info(info),
999 "persistent grants:", info->feature_persistent ?
1000 "enabled;" : "disabled;", "indirect descriptors:",
1001 info->max_indirect_segments ? "enabled;" : "disabled;");
1002}
1003
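xlvbd_flush() above now feeds the two feature booleans straight into the block layer's write-cache API instead of testing REQ_FLUSH/REQ_FUA bits. A thin sketch (helper name assumed) of that call:

#include <linux/blkdev.h>

/* Sketch: the first bool enables flush support on the queue, the
 * second lets FUA writes pass through to the driver unmodified. */
static void example_setup_write_cache(struct request_queue *q,
				      bool has_flush, bool has_fua)
{
	blk_queue_write_cache(q, has_flush, has_fua);
}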
1004static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
1005{
1006 int major;

--- 582 unchanged lines hidden ---

1589 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
1590 struct request_queue *rq = info->rq;
1591 printk(KERN_WARNING "blkfront: %s: %s op failed\n",
1592 info->gd->disk_name, op_name(bret->operation));
1593 error = -EOPNOTSUPP;
1594 info->feature_discard = 0;
1595 info->feature_secdiscard = 0;
1596 queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
1000 "persistent grants:", info->feature_persistent ?
1001 "enabled;" : "disabled;", "indirect descriptors:",
1002 info->max_indirect_segments ? "enabled;" : "disabled;");
1003}
1004
1005static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
1006{
1007 int major;

--- 582 unchanged lines hidden (view full) ---

1590 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
1591 struct request_queue *rq = info->rq;
1592 printk(KERN_WARNING "blkfront: %s: %s op failed\n",
1593 info->gd->disk_name, op_name(bret->operation));
1594 error = -EOPNOTSUPP;
1595 info->feature_discard = 0;
1596 info->feature_secdiscard = 0;
1597 queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
1597 queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
1598 queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
1598 }
1599 blk_mq_complete_request(req, error);
1600 break;
1601 case BLKIF_OP_FLUSH_DISKCACHE:
1602 case BLKIF_OP_WRITE_BARRIER:
1603 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
1604 printk(KERN_WARNING "blkfront: %s: %s op failed\n",
1605 info->gd->disk_name, op_name(bret->operation));
1606 error = -EOPNOTSUPP;
1607 }
1608 if (unlikely(bret->status == BLKIF_RSP_ERROR &&
1609 rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
1610 printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
1611 info->gd->disk_name, op_name(bret->operation));
1612 error = -EOPNOTSUPP;
1613 }
1614 if (unlikely(error)) {
1615 if (error == -EOPNOTSUPP)
1616 error = 0;
1618 info->feature_fua = 0;
1617 info->feature_flush = 0;
1618 xlvbd_flush(info);
1619 }
1620 /* fall through */
1621 case BLKIF_OP_READ:
1622 case BLKIF_OP_WRITE:
1623 if (unlikely(bret->status != BLKIF_RSP_OKAY))
1624 dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "

--- 418 unchanged lines hidden ---

2043 for (i = 0; i < BLK_RING_SIZE(info); i++) {
2044 /* Not in use? */
2045 if (!copy[i].request)
2046 continue;
2047
2048 /*
2049 * Get the bios in the request so we can re-queue them.
2050 */
2051 if (copy[i].request->cmd_flags &
2052 (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
2053 if (req_op(copy[i].request) == REQ_OP_FLUSH ||
2054 req_op(copy[i].request) == REQ_OP_DISCARD ||
2055 req_op(copy[i].request) == REQ_OP_SECURE_ERASE ||
2056 copy[i].request->cmd_flags & REQ_FUA) {
2053 /*
2054 * Flush operations don't contain bios, so
2055 * we need to requeue the whole request
2060 *
2061 * XXX: but this doesn't make any sense for a
2062 * write with the FUA flag set..
2056 */
2057 list_add(&copy[i].request->queuelist, &requests);
2058 continue;
2059 }
2060 merge_bio.head = copy[i].request->bio;
2061 merge_bio.tail = copy[i].request->biotail;
2062 bio_list_merge(&bio_list, &merge_bio);
2063 copy[i].request->bio = NULL;

--- 39 unchanged lines hidden ---

2103 offset = (i * segs * XEN_PAGE_SIZE) >> 9;
2104 size = min((unsigned int)(segs * XEN_PAGE_SIZE) >> 9,
2105 (unsigned int)bio_sectors(bio) - offset);
2106 cloned_bio = bio_clone(bio, GFP_NOIO);
2107 BUG_ON(cloned_bio == NULL);
2108 bio_trim(cloned_bio, offset, size);
2109 cloned_bio->bi_private = split_bio;
2110 cloned_bio->bi_end_io = split_bio_end;
2111 submit_bio(cloned_bio->bi_rw, cloned_bio);
2118 submit_bio(cloned_bio);
2112 }
2113 /*
2114 * Now we have to wait for all those smaller bios to
2115 * end, so we can also end the "parent" bio.
2116 */
2117 continue;
2118 }
2119 /* We don't need to split this bio */
2120 submit_bio(bio->bi_rw, bio);
2127 submit_bio(bio);
2121 }
2122
2123 return 0;
2124}
2125
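The submit_bio() calls above lose their first argument because, with the 4.8 block layer, the operation and its flags travel in the bio itself (set with bio_set_op_attrs() or inherited by bio_clone()). A small sketch under an assumed function name:

#include <linux/bio.h>

/* Sketch: re-submitting a bio with the single-argument submit_bio();
 * the operation is carried by the bio, not passed alongside it. */
static void example_resubmit_read(struct bio *bio)
{
	bio_set_op_attrs(bio, REQ_OP_READ, 0);	/* store op + flags in the bio */
	submit_bio(bio);
}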
2126/**
2127 * We are reconnecting to the backend, due to a suspend/resume, or a backend
2128 * driver restart. We tear down our blkif structure and recreate it, but

--- 9 unchanged lines hidden ---

2138
2139 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
2140
2141 err = negotiate_mq(info);
2142 if (err)
2143 return err;
2144
2145 err = talk_to_blkback(dev, info);
2146 if (!err)
2147 blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
2148
2149 /*
2150 * We have to wait for the backend to switch to
2151 * connected state, since we want to read which
2152 * features it supports.
2153 */
2154
2155 return err;

--- 148 unchanged lines hidden ---

2304 */
2305static void blkfront_gather_backend_features(struct blkfront_info *info)
2306{
2307 int err;
2308 int barrier, flush, discard, persistent;
2309 unsigned int indirect_segments;
2310
2311 info->feature_flush = 0;
2317 info->feature_fua = 0;
2312
2313 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
2314 "feature-barrier", "%d", &barrier,
2315 NULL);
2316
2317 /*
2318 * If there's no "feature-barrier" defined, then it means
2319 * we're dealing with a very old backend which writes
2320 * synchronously; nothing to do.
2321 *
2322 * If there are barriers, then we use flush.
2323 */
2324 if (!err && barrier)
2325 info->feature_flush = REQ_FLUSH | REQ_FUA;
2330 if (!err && barrier) {
2331 info->feature_flush = 1;
2332 info->feature_fua = 1;
2333 }
2334
2326 /*
2327 * And if there is "feature-flush-cache" use that above
2328 * barriers.
2329 */
2330 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
2331 "feature-flush-cache", "%d", &flush,
2332 NULL);
2333
2334 if (!err && flush)
2335 info->feature_flush = REQ_FLUSH;
2343 if (!err && flush) {
2344 info->feature_flush = 1;
2345 info->feature_fua = 0;
2346 }
2336
2337 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
2338 "feature-discard", "%d", &discard,
2339 NULL);
2340
2341 if (!err && discard)
2342 blkfront_setup_discard(info);
2343
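blkfront_gather_backend_features() above encodes a precedence between the two xenstore nodes: "feature-barrier" enables flush plus FUA (barrier emulation), while "feature-flush-cache", if advertised, overrides it with a plain flush. A sketch of that decision with an assumed helper name, matching the logic of the new code:

/* Sketch only: precedence implemented by the hunk above.
 *   feature-barrier      -> flush + FUA (a barrier is FLUSH+FUA)
 *   feature-flush-cache  -> flush only, overriding the barrier path
 *   neither              -> no cache control; writes are synchronous
 */
static void example_pick_cache_features(bool barrier, bool flush_cache,
					bool *want_flush, bool *want_fua)
{
	*want_flush = barrier || flush_cache;
	*want_fua = barrier && !flush_cache;
}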

--- 132 unchanged lines hidden ---

2476 case XenbusStateInitialising:
2477 case XenbusStateInitialised:
2478 case XenbusStateReconfiguring:
2479 case XenbusStateReconfigured:
2480 case XenbusStateUnknown:
2481 break;
2482
2483 case XenbusStateConnected:
2484 /*
2485 * talk_to_blkback sets state to XenbusStateInitialised
2486 * and blkfront_connect sets it to XenbusStateConnected
2487 * (if connection went OK).
2488 *
2489 * If the backend (or toolstack) decides to poke at backend
2490 * state (and re-trigger the watch by setting the state repeatedly
2491 * to XenbusStateConnected (4)) we need to deal with this.
2492 * This is allowed as this is used to communicate to the guest
2493 * that the size of disk has changed!
2494 */
2495 if ((dev->state != XenbusStateInitialised) &&
2496 (dev->state != XenbusStateConnected)) {
2495 if (dev->state != XenbusStateInitialised) {
2497 if (talk_to_blkback(dev, info))
2498 break;
2499 }
2500
2501 blkfront_connect(info);
2502 break;
2503
2504 case XenbusStateClosed:
2505 if (dev->state == XenbusStateClosed)
2506 break;
2507 /* Missed the backend's Closing state -- fallthrough */
2508 case XenbusStateClosing:

--- 215 unchanged lines hidden ---