dm-rq.c: diff from commit 4cc96131afce3eaae7c13dff41c6ba771cf10e96 (old) to e83068a5faafb8ca65d3b58bd1e1e3959ce1ddce (new)

 /*
  * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */

 #include "dm-core.h"
 #include "dm-rq.h"

--- 216 unchanged lines hidden ---

 static void free_rq_clone(struct request *clone)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;

 	blk_rq_unprep_clone(clone);

-	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
+	/*
+	 * It is possible for a clone_old_rq() allocated clone to
+	 * get passed in -- it may not yet have a request_queue.
+	 * This is known to occur if the error target replaces
+	 * a multipath target that has a request_fn queue stacked
+	 * on blk-mq queue(s).
+	 */
+	if (clone->q && clone->q->mq_ops)
 		/* stacked on blk-mq queue(s) */
 		tio->ti->type->release_clone_rq(clone);
 	else if (!md->queue->mq_ops)
 		/* request_fn queue stacked on request_fn queue(s) */
 		free_old_clone_request(md, clone);

 	if (!md->queue->mq_ops)
 		free_old_rq_tio(tio);

--- 314 unchanged lines hidden ---

 	init_tio(tio, rq, md);

 	table = dm_get_live_table(md, &srcu_idx);
 	/*
 	 * Must clone a request if this .request_fn DM device
 	 * is stacked on .request_fn device(s).
 	 */
-	if (!dm_table_mq_request_based(table)) {
+	if (!dm_table_all_blk_mq_devices(table)) {
 		if (!clone_old_rq(rq, md, tio, gfp_mask)) {
 			dm_put_live_table(md, srcu_idx);
 			free_old_rq_tio(tio);
 			return NULL;
 		}
 	}
 	dm_put_live_table(md, srcu_idx);

--- 133 unchanged lines hidden ---

 #define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000

 ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
 						     const char *buf, size_t count)
 {
 	unsigned deadline;

-	if (!dm_request_based(md) || md->use_blk_mq)
+	if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
 		return count;

 	if (kstrtouint(buf, 10, &deadline))
 		return -EINVAL;

 	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
 		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

--- 158 unchanged lines hidden ---

 static struct blk_mq_ops dm_mq_ops = {
 	.queue_rq = dm_mq_queue_rq,
 	.map_queue = blk_mq_map_queue,
 	.complete = dm_softirq_done,
 	.init_request = dm_mq_init_request,
 };

-int dm_mq_init_request_queue(struct mapped_device *md, struct dm_target *immutable_tgt)
+int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
 {
 	struct request_queue *q;
+	struct dm_target *immutable_tgt;
 	int err;

-	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
+	if (!dm_table_all_blk_mq_devices(t)) {
 		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
 		return -EINVAL;
 	}

 	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
 	if (!md->tag_set)
 		return -ENOMEM;

 	md->tag_set->ops = &dm_mq_ops;
 	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
 	md->tag_set->numa_node = md->numa_node_id;
 	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
 	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
 	md->tag_set->driver_data = md;

 	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
+	immutable_tgt = dm_table_get_immutable_target(t);
 	if (immutable_tgt && immutable_tgt->per_io_data_size) {
 		/* any target-specific per-io data is immediately after the tio */
 		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
 		md->init_tio_pdu = true;
 	}

 	err = blk_mq_alloc_tag_set(md->tag_set);
 	if (err)

--- 41 unchanged lines hidden ---