dm-mpath.c — diff of commit 4cc96131afce3eaae7c13dff41c6ba771cf10e96 (old) against 76e33fe4e2c4363c2b9f627472bd43dc235c3406 (new)
1/*
2 * Copyright (C) 2003 Sistina Software Limited.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include <linux/device-mapper.h>
9
10#include "dm-rq.h"
11#include "dm-bio-record.h"
11#include "dm-path-selector.h"
12#include "dm-uevent.h"
13
14#include <linux/blkdev.h>
15#include <linux/ctype.h>
16#include <linux/init.h>
17#include <linux/mempool.h>
18#include <linux/module.h>

--- 73 unchanged lines hidden ---

92 /*
93 * We must use a mempool of dm_mpath_io structs so that we
94 * can resubmit bios on error.
95 */
96 mempool_t *mpio_pool;
97
98 struct mutex work_mutex;
99 struct work_struct trigger_event;
12#include "dm-path-selector.h"
13#include "dm-uevent.h"
14
15#include <linux/blkdev.h>
16#include <linux/ctype.h>
17#include <linux/init.h>
18#include <linux/mempool.h>
19#include <linux/module.h>

--- 73 unchanged lines hidden (view full) ---

93 /*
94 * We must use a mempool of dm_mpath_io structs so that we
95 * can resubmit bios on error.
96 */
97 mempool_t *mpio_pool;
98
99 struct mutex work_mutex;
100 struct work_struct trigger_event;
101
102 struct work_struct process_queued_bios;
103 struct bio_list queued_bios;
100};
101
102/*
103 * Context information attached to each bio we process.
107 * Context information attached to each io we process.
104 */
105struct dm_mpath_io {
106 struct pgpath *pgpath;
107 size_t nr_bytes;
112
113 /*
114 * FIXME: make request-based code _not_ include this member.
115 */
116 struct dm_bio_details bio_details;
108};
109
110typedef int (*action_fn) (struct pgpath *pgpath);
111
112static struct kmem_cache *_mpio_cache;
113
114static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
115static void trigger_event(struct work_struct *work);
116static void activate_path(struct work_struct *work);
126static void process_queued_bios(struct work_struct *work);
117
118/*-----------------------------------------------
119 * Multipath state flags.
120 *-----------------------------------------------*/
121
122#define MPATHF_QUEUE_IO 0 /* Must we queue all I/O? */
123#define MPATHF_QUEUE_IF_NO_PATH 1 /* Queue I/O if last path fails? */
124#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2 /* Saved state during suspension */
125#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3 /* If there's already a hw_handler present, don't change it. */
126#define MPATHF_PG_INIT_DISABLED 4 /* pg_init is not currently allowed */
127#define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */
128#define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? */
139#define MPATHF_BIO_BASED 7 /* Device is bio-based? */
129
130/*-----------------------------------------------
131 * Allocation routines
132 *-----------------------------------------------*/
133
134static struct pgpath *alloc_pgpath(void)
135{
136 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

--- 43 unchanged lines hidden ---

180 ps->type->destroy(ps);
181 dm_put_path_selector(ps->type);
182 }
183
184 free_pgpaths(&pg->pgpaths, ti);
185 kfree(pg);
186}
187
188static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
199static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq,
200 bool bio_based)
189{
190 struct multipath *m;
191
192 m = kzalloc(sizeof(*m), GFP_KERNEL);
193 if (m) {
194 INIT_LIST_HEAD(&m->priority_groups);
195 spin_lock_init(&m->lock);
196 set_bit(MPATHF_QUEUE_IO, &m->flags);
197 atomic_set(&m->nr_valid_paths, 0);
198 atomic_set(&m->pg_init_in_progress, 0);
199 atomic_set(&m->pg_init_count, 0);
200 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
201 INIT_WORK(&m->trigger_event, trigger_event);
202 init_waitqueue_head(&m->pg_init_wait);
203 mutex_init(&m->work_mutex);
204
205 m->mpio_pool = NULL;
206 if (!use_blk_mq) {
218 if (!use_blk_mq && !bio_based) {
207 unsigned min_ios = dm_get_reserved_rq_based_ios();
208
209 m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
210 if (!m->mpio_pool) {
211 kfree(m);
212 return NULL;
213 }
214 }
215
228 if (bio_based) {
229 INIT_WORK(&m->process_queued_bios, process_queued_bios);
230 set_bit(MPATHF_BIO_BASED, &m->flags);
231 /*
232 * bio-based doesn't support any direct scsi_dh management;
233 * it just discovers if a scsi_dh is attached.
234 */
235 set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
236 }
237
216 m->ti = ti;
217 ti->private = m;
218 }
219
220 return m;
221}
222
223static void free_multipath(struct multipath *m)

--- 43 unchanged lines hidden ---

267 if (m->mpio_pool) {
268 struct dm_mpath_io *mpio = info->ptr;
269
270 info->ptr = NULL;
271 mempool_free(mpio, m->mpio_pool);
272 }
273}
274
297static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
298{
299 return dm_per_bio_data(bio, sizeof(struct dm_mpath_io));
300}
301
302static struct dm_mpath_io *set_mpio_bio(struct multipath *m, struct bio *bio)
303{
304 struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
305
306 memset(mpio, 0, sizeof(*mpio));
307 dm_bio_record(&mpio->bio_details, bio);
308
309 return mpio;
310}
311
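dm_bio_record() above snapshots the bio fields that lower layers may advance during I/O, so do_end_io_bio() further down can dm_bio_restore() the bio before requeueing it along another path. A minimal userspace sketch of the same record/restore idea; fake_bio and friends are illustrative stand-ins, not kernel types:

#include <stdio.h>

/* Illustrative stand-ins for struct bio and struct dm_bio_details. */
struct fake_bio {
	unsigned long sector;	/* advanced by lower layers during I/O */
	unsigned int size;	/* bytes remaining */
	int dev;		/* which path the bio is aimed at */
};

struct fake_bio_details {
	unsigned long sector;
	unsigned int size;
};

static void fake_bio_record(struct fake_bio_details *d, const struct fake_bio *b)
{
	d->sector = b->sector;	/* snapshot before first submission */
	d->size = b->size;
}

static void fake_bio_restore(const struct fake_bio_details *d, struct fake_bio *b)
{
	b->sector = d->sector;	/* rewind so the whole bio can be resubmitted */
	b->size = d->size;
}

int main(void)
{
	struct fake_bio bio = { .sector = 2048, .size = 4096, .dev = 0 };
	struct fake_bio_details saved;

	fake_bio_record(&saved, &bio);
	bio.sector += 4;	/* pretend path 0 failed partway through */
	bio.size -= 2048;
	fake_bio_restore(&saved, &bio);
	bio.dev = 1;		/* retry on another path */
	printf("retry: sector=%lu size=%u dev=%d\n", bio.sector, bio.size, bio.dev);
	return 0;
}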
275/*-----------------------------------------------
276 * Path selection
277 *-----------------------------------------------*/
278
279static int __pg_init_all_paths(struct multipath *m)
280{
281 struct pgpath *pgpath;
282 unsigned long pg_init_delay = 0;

--- 143 unchanged lines hidden ---

426 * Check whether bios must be queued in the device-mapper core rather
427 * than here in the target.
428 *
429 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
430 * same value then we are not between multipath_presuspend()
431 * and multipath_resume() calls and we have no need to check
432 * for the DMF_NOFLUSH_SUSPENDING flag.
433 */
434static int must_push_back(struct multipath *m)
435{
436 return (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
437 ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
438 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
439 dm_noflush_suspending(m->ti)));
440}
441
471static bool __must_push_back(struct multipath *m)
472{
473 return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
474 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
475 dm_noflush_suspending(m->ti));
476}
477
478static bool must_push_back_rq(struct multipath *m)
479{
480 return (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
481 __must_push_back(m));
482}
483
484static bool must_push_back_bio(struct multipath *m)
485{
486 return __must_push_back(m);
487}
488
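The refactor above splits the old predicate in two: request-based multipath still pushes back whenever MPATHF_QUEUE_IF_NO_PATH is set, while bio-based pushes back only when a noflush suspend has flipped the flag away from its saved value. A compact userspace sketch of the two predicates, with plain bools standing in for the flag bits and dm_noflush_suspending() (all names here are illustrative, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the MPATHF_* flag bits and dm_noflush_suspending(). */
static bool must_push_back_common(bool queue_if_no_path,
				  bool saved_queue_if_no_path,
				  bool noflush_suspending)
{
	/* The flags differ only between presuspend and resume. */
	return queue_if_no_path != saved_queue_if_no_path && noflush_suspending;
}

static bool must_push_back_rq(bool q, bool saved, bool noflush)
{
	return q || must_push_back_common(q, saved, noflush);
}

static bool must_push_back_bio(bool q, bool saved, bool noflush)
{
	return must_push_back_common(q, saved, noflush);
}

int main(void)
{
	/* queue_if_no_path set, not suspending: rq queues (1), bio does not (0). */
	printf("rq=%d bio=%d\n",
	       must_push_back_rq(true, true, false),
	       must_push_back_bio(true, true, false));
	return 0;
}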
442/*
443 * Map cloned requests
444 */
489/*
490 * Map cloned requests (request-based multipath)
491 */
445static int __multipath_map(struct dm_target *ti, struct request *clone,
446 union map_info *map_context,
447 struct request *rq, struct request **__clone)
448{
449 struct multipath *m = ti->private;
450 int r = DM_MAPIO_REQUEUE;
451 size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
452 struct pgpath *pgpath;
453 struct block_device *bdev;
454 struct dm_mpath_io *mpio;
455
456 /* Do we need to select a new pgpath? */
457 pgpath = lockless_dereference(m->current_pgpath);
458 if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
459 pgpath = choose_pgpath(m, nr_bytes);
460
461 if (!pgpath) {
462 if (!must_push_back(m))
509 if (!must_push_back_rq(m))
463 r = -EIO; /* Failed */
464 return r;
465 } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
466 test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
467 pg_init_all_paths(m);
468 return r;
469 }
470

--- 54 unchanged lines hidden ---

525}
526
527static void multipath_release_clone(struct request *clone)
528{
529 blk_mq_free_request(clone);
530}
531
579/*
580 * Map cloned bios (bio-based multipath)
581 */
582static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
583{
584 size_t nr_bytes = bio->bi_iter.bi_size;
585 struct pgpath *pgpath;
586 unsigned long flags;
587 bool queue_io;
588
589 /* Do we need to select a new pgpath? */
590 pgpath = lockless_dereference(m->current_pgpath);
591 queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
592 if (!pgpath || !queue_io)
593 pgpath = choose_pgpath(m, nr_bytes);
594
595 if ((pgpath && queue_io) ||
596 (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
597 /* Queue for the daemon to resubmit */
598 spin_lock_irqsave(&m->lock, flags);
599 bio_list_add(&m->queued_bios, bio);
600 spin_unlock_irqrestore(&m->lock, flags);
601 /* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
602 if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
603 pg_init_all_paths(m);
604 else if (!queue_io)
605 queue_work(kmultipathd, &m->process_queued_bios);
606 return DM_MAPIO_SUBMITTED;
607 }
608
609 if (!pgpath) {
610 if (!must_push_back_bio(m))
611 return -EIO;
612 return DM_MAPIO_REQUEUE;
613 }
614
615 mpio->pgpath = pgpath;
616 mpio->nr_bytes = nr_bytes;
617
618 bio->bi_error = 0;
619 bio->bi_bdev = pgpath->path.dev->bdev;
620 bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
621
622 if (pgpath->pg->ps.type->start_io)
623 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
624 &pgpath->path,
625 nr_bytes);
626 return DM_MAPIO_REMAPPED;
627}
628
629static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
630{
631 struct multipath *m = ti->private;
632 struct dm_mpath_io *mpio = set_mpio_bio(m, bio);
633
634 return __multipath_map_bio(m, bio, mpio);
635}
636
637static void process_queued_bios_list(struct multipath *m)
638{
639 if (test_bit(MPATHF_BIO_BASED, &m->flags))
640 queue_work(kmultipathd, &m->process_queued_bios);
641}
642
643static void process_queued_bios(struct work_struct *work)
644{
645 int r;
646 unsigned long flags;
647 struct bio *bio;
648 struct bio_list bios;
649 struct blk_plug plug;
650 struct multipath *m =
651 container_of(work, struct multipath, process_queued_bios);
652
653 bio_list_init(&bios);
654
655 spin_lock_irqsave(&m->lock, flags);
656
657 if (bio_list_empty(&m->queued_bios)) {
658 spin_unlock_irqrestore(&m->lock, flags);
659 return;
660 }
661
662 bio_list_merge(&bios, &m->queued_bios);
663 bio_list_init(&m->queued_bios);
664
665 spin_unlock_irqrestore(&m->lock, flags);
666
667 blk_start_plug(&plug);
668 while ((bio = bio_list_pop(&bios))) {
669 r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
670 if (r < 0 || r == DM_MAPIO_REQUEUE) {
671 bio->bi_error = r;
672 bio_endio(bio);
673 } else if (r == DM_MAPIO_REMAPPED)
674 generic_make_request(bio);
675 }
676 blk_finish_plug(&plug);
677}
678
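process_queued_bios() above follows the usual splice-and-drain shape: hold the spinlock just long enough to steal the whole queued list, then submit everything with the lock dropped. A self-contained userspace sketch of that pattern, with a pthread mutex and a singly linked list standing in for the spinlock and struct bio_list (all names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *queued;	/* plays the role of m->queued_bios */

static void enqueue(int id)
{
	struct node *n = malloc(sizeof(*n));
	n->id = id;
	pthread_mutex_lock(&lock);
	n->next = queued;	/* LIFO for brevity; bio_list is FIFO */
	queued = n;
	pthread_mutex_unlock(&lock);
}

static void drain(void)
{
	/* Steal the whole list under the lock... */
	pthread_mutex_lock(&lock);
	struct node *work = queued;
	queued = NULL;
	pthread_mutex_unlock(&lock);

	/* ...then process it with the lock dropped. */
	while (work) {
		struct node *n = work;
		work = n->next;
		printf("submitting %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	enqueue(1); enqueue(2); enqueue(3);
	drain();
	return 0;
}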
679/*
533 * If we run out of usable paths, should we queue I/O or error it?
534 */
535static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
536 bool save_old_value)
537{
538 unsigned long flags;
539
540 spin_lock_irqsave(&m->lock, flags);

--- 11 unchanged lines hidden ---

552 }
553 if (queue_if_no_path)
554 set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
555 else
556 clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
557
558 spin_unlock_irqrestore(&m->lock, flags);
559
560 if (!queue_if_no_path)
561 dm_table_run_md_queue_async(m->ti->table);
707 if (!queue_if_no_path) {
708 dm_table_run_md_queue_async(m->ti->table);
709 process_queued_bios_list(m);
710 }
562
563 return 0;
564}
565
566/*
567 * An event is triggered whenever a path is taken out of use.
568 * Includes path failure and PG bypass.
569 */

--- 223 unchanged lines hidden ---

793 };
794
795 if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
796 return -EINVAL;
797
798 if (!hw_argc)
799 return 0;
800
950 if (test_bit(MPATHF_BIO_BASED, &m->flags)) {
951 dm_consume_args(as, hw_argc);
952 DMERR("bio-based multipath doesn't allow hardware handler args");
953 return 0;
954 }
955
801 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
802
803 if (hw_argc > 1) {
804 char *p;
805 int i, j, len = 4;
806
807 for (i = 0; i <= hw_argc - 2; i++)
808 len += strlen(as->argv[i]) + 1;

--- 66 unchanged lines hidden ---

875
876 ti->error = "Unrecognised multipath feature request";
877 r = -EINVAL;
878 } while (argc && !r);
879
880 return r;
881}
882
883static int multipath_ctr(struct dm_target *ti, unsigned int argc,
884 char **argv)
1038static int __multipath_ctr(struct dm_target *ti, unsigned int argc,
1039 char **argv, bool bio_based)
885{
886 /* target arguments */
887 static struct dm_arg _args[] = {
888 {0, 1024, "invalid number of priority groups"},
889 {0, 1024, "invalid initial priority group number"},
890 };
891
892 int r;
893 struct multipath *m;
894 struct dm_arg_set as;
895 unsigned pg_count = 0;
896 unsigned next_pg_num;
897 bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));
898
899 as.argc = argc;
900 as.argv = argv;
901
902 m = alloc_multipath(ti, use_blk_mq);
1057 m = alloc_multipath(ti, use_blk_mq, bio_based);
903 if (!m) {
904 ti->error = "can't allocate multipath";
905 return -EINVAL;
906 }
907
908 r = parse_features(&as, m);
909 if (r)
910 goto bad;

--- 42 unchanged lines hidden ---

953 ti->error = "priority group count mismatch";
954 r = -EINVAL;
955 goto bad;
956 }
957
958 ti->num_flush_bios = 1;
959 ti->num_discard_bios = 1;
960 ti->num_write_same_bios = 1;
961 if (use_blk_mq)
1116 if (use_blk_mq || bio_based)
962 ti->per_io_data_size = sizeof(struct dm_mpath_io);
963
964 return 0;
965
966 bad:
967 free_multipath(m);
968 return r;
969}
970
1126static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
1127{
1128 return __multipath_ctr(ti, argc, argv, false);
1129}
1130
1131static int multipath_bio_ctr(struct dm_target *ti, unsigned argc, char **argv)
1132{
1133 return __multipath_ctr(ti, argc, argv, true);
1134}
1135
971static void multipath_wait_for_pg_init_completion(struct multipath *m)
972{
973 DECLARE_WAITQUEUE(wait, current);
974
975 add_wait_queue(&m->pg_init_wait, &wait);
976
977 while (1) {
978 set_current_state(TASK_UNINTERRUPTIBLE);

--- 99 unchanged lines hidden ---

1078
1079 dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1080 pgpath->path.dev->name, nr_valid_paths);
1081
1082 schedule_work(&m->trigger_event);
1083
1084out:
1085 spin_unlock_irqrestore(&m->lock, flags);
1086 if (run_queue)
1087 dm_table_run_md_queue_async(m->ti->table);
1251 if (run_queue) {
1252 dm_table_run_md_queue_async(m->ti->table);
1253 process_queued_bios_list(m);
1254 }
1088
1089 return r;
1090}
1091
1092/*
1093 * Fail or reinstate all paths that match the provided struct dm_dev.
1094 */
1095static int action_dev(struct multipath *m, struct dm_dev *dev,

--- 180 unchanged lines hidden ---

1276 else
1277 clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1278
1279 if (__pg_init_all_paths(m))
1280 goto out;
1281 }
1282 clear_bit(MPATHF_QUEUE_IO, &m->flags);
1283
1451 process_queued_bios_list(m);
1452
1284 /*
1285 * Wake up any thread waiting to suspend.
1286 */
1287 wake_up(&m->pg_init_wait);
1288
1289out:
1290 spin_unlock_irqrestore(&m->lock, flags);
1291}

--- 50 unchanged lines hidden ---

1342 if (noretry_error(error))
1343 return error;
1344
1345 if (mpio->pgpath)
1346 fail_path(mpio->pgpath);
1347
1348 if (!atomic_read(&m->nr_valid_paths)) {
1349 if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1350 if (!must_push_back(m))
1519 if (!must_push_back_rq(m))
1351 r = -EIO;
1352 } else {
1353 if (error == -EBADE)
1354 r = error;
1355 }
1356 }
1357
1358 return r;

--- 17 unchanged lines hidden ---

1376 if (ps->type->end_io)
1377 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1378 }
1379 clear_request_fn_mpio(m, map_context);
1380
1381 return r;
1382}
1383
1553static int do_end_io_bio(struct multipath *m, struct bio *clone,
1554 int error, struct dm_mpath_io *mpio)
1555{
1556 unsigned long flags;
1557
1558 if (!error)
1559 return 0; /* I/O complete */
1560
1561 if (noretry_error(error))
1562 return error;
1563
1564 if (mpio->pgpath)
1565 fail_path(mpio->pgpath);
1566
1567 if (!atomic_read(&m->nr_valid_paths)) {
1568 if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1569 if (!must_push_back_bio(m))
1570 return -EIO;
1571 return DM_ENDIO_REQUEUE;
1572 } else {
1573 if (error == -EBADE)
1574 return error;
1575 }
1576 }
1577
1578 /* Queue for the daemon to resubmit */
1579 dm_bio_restore(&mpio->bio_details, clone);
1580
1581 spin_lock_irqsave(&m->lock, flags);
1582 bio_list_add(&m->queued_bios, clone);
1583 spin_unlock_irqrestore(&m->lock, flags);
1584 if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
1585 queue_work(kmultipathd, &m->process_queued_bios);
1586
1587 return DM_ENDIO_INCOMPLETE;
1588}
1589
1590static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int error)
1591{
1592 struct multipath *m = ti->private;
1593 struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
1594 struct pgpath *pgpath;
1595 struct path_selector *ps;
1596 int r;
1597
1598 BUG_ON(!mpio);
1599
1600 r = do_end_io_bio(m, clone, error, mpio);
1601 pgpath = mpio->pgpath;
1602 if (pgpath) {
1603 ps = &pgpath->pg->ps;
1604 if (ps->type->end_io)
1605 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1606 }
1607
1608 return r;
1609}
1610
1384/*
1385 * Suspend can't complete until all the I/O is processed so if
1386 * the last path fails we must error any remaining I/O.
1387 * Note that if the freeze_bdev fails while suspending, the
1388 * queue_if_no_path state is lost - userspace should reset it.
1389 */
1390static void multipath_presuspend(struct dm_target *ti)
1391{

--- 245 unchanged lines hidden ---

1637 if (r == -ENOTCONN) {
1638 if (!lockless_dereference(m->current_pg)) {
1639 /* Path status changed, redo selection */
1640 (void) choose_pgpath(m, 0);
1641 }
1642 if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1643 pg_init_all_paths(m);
1644 dm_table_run_md_queue_async(m->ti->table);
1872 process_queued_bios_list(m);
1645 }
1646
1647 /*
1648 * Only pass ioctls through if the device sizes match exactly.
1649 */
1650 if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
1651 return 1;
1652 return r;

--- 109 unchanged lines hidden ---

1762 .resume = multipath_resume,
1763 .status = multipath_status,
1764 .message = multipath_message,
1765 .prepare_ioctl = multipath_prepare_ioctl,
1766 .iterate_devices = multipath_iterate_devices,
1767 .busy = multipath_busy,
1768};
1769
1998static struct target_type multipath_bio_target = {
1999 .name = "multipath-bio",
2000 .version = {1, 0, 0},
2001 .module = THIS_MODULE,
2002 .ctr = multipath_bio_ctr,
2003 .dtr = multipath_dtr,
2004 .map = multipath_map_bio,
2005 .end_io = multipath_end_io_bio,
2006 .presuspend = multipath_presuspend,
2007 .postsuspend = multipath_postsuspend,
2008 .resume = multipath_resume,
2009 .status = multipath_status,
2010 .message = multipath_message,
2011 .prepare_ioctl = multipath_prepare_ioctl,
2012 .iterate_devices = multipath_iterate_devices,
2013 .busy = multipath_busy,
2014};
2015
1770static int __init dm_multipath_init(void)
1771{
1772 int r;
1773
1774 /* allocate a slab for the dm_ios */
2020 /* allocate a slab for the dm_mpath_ios */
1775 _mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
1776 if (!_mpio_cache)
1777 return -ENOMEM;
1778
1779 r = dm_register_target(&multipath_target);
1780 if (r < 0) {
1781 DMERR("register failed %d", r);
2027 DMERR("request-based register failed %d", r);
1782 r = -EINVAL;
1783 goto bad_register_target;
1784 }
1785
2032 r = dm_register_target(&multipath_bio_target);
2033 if (r < 0) {
2034 DMERR("bio-based register failed %d", r);
2035 r = -EINVAL;
2036 goto bad_register_bio_based_target;
2037 }
2038
1786 kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
1787 if (!kmultipathd) {
1788 DMERR("failed to create workqueue kmpathd");
1789 r = -ENOMEM;
1790 goto bad_alloc_kmultipathd;
1791 }
1792
1793 /*

--- 5 unchanged lines hidden ---

1799 kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
1800 WQ_MEM_RECLAIM);
1801 if (!kmpath_handlerd) {
1802 DMERR("failed to create workqueue kmpath_handlerd");
1803 r = -ENOMEM;
1804 goto bad_alloc_kmpath_handlerd;
1805 }
1806
1807 DMINFO("version %u.%u.%u loaded",
1808 multipath_target.version[0], multipath_target.version[1],
1809 multipath_target.version[2]);
1810
2060 return 0;
2061
2062bad_alloc_kmpath_handlerd:
2063 destroy_workqueue(kmultipathd);
2064bad_alloc_kmultipathd:
2065 dm_unregister_target(&multipath_bio_target);
2066bad_register_bio_based_target:
1816 dm_unregister_target(&multipath_target);
1817bad_register_target:
1818 kmem_cache_destroy(_mpio_cache);
1819
1820 return r;
1821}
1822
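dm_multipath_init() above extends the usual goto-based unwind: each acquired resource adds one label, and a failure jumps to the label that releases everything acquired before it, in reverse order of acquisition. The shape reduced to a runnable userspace sketch; acquire_a/acquire_b/release_a are hypothetical stand-ins, not kernel functions:

#include <stdio.h>

static int acquire_a(void) { return 0; }	/* 0 = success, like the kernel */
static int acquire_b(void) { return -1; }	/* pretend this one fails */
static void release_a(void) { puts("release_a"); }

static int init(void)
{
	int r;

	r = acquire_a();
	if (r < 0)
		goto bad_a;

	r = acquire_b();
	if (r < 0)
		goto bad_b;	/* unwind everything acquired so far */

	return 0;

bad_b:
	release_a();		/* reverse order of acquisition */
bad_a:
	return r;
}

int main(void)
{
	printf("init: %d\n", init());
	return 0;
}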
1823static void __exit dm_multipath_exit(void)
1824{
1825 destroy_workqueue(kmpath_handlerd);
1826 destroy_workqueue(kmultipathd);
1827
1828 dm_unregister_target(&multipath_target);
2080 dm_unregister_target(&multipath_bio_target);
1829 kmem_cache_destroy(_mpio_cache);
1830}
1831
1832module_init(dm_multipath_init);
1833module_exit(dm_multipath_exit);
1834
1835MODULE_DESCRIPTION(DM_NAME " multipath target");
1836MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
1837MODULE_LICENSE("GPL");