xref: /openbmc/linux/drivers/md/dm-mpath.c (revision 1c2dd16a)
1 /*
2  * Copyright (C) 2003 Sistina Software Limited.
3  * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include <linux/device-mapper.h>
9 
10 #include "dm-rq.h"
11 #include "dm-bio-record.h"
12 #include "dm-path-selector.h"
13 #include "dm-uevent.h"
14 
15 #include <linux/blkdev.h>
16 #include <linux/ctype.h>
17 #include <linux/init.h>
18 #include <linux/mempool.h>
19 #include <linux/module.h>
20 #include <linux/pagemap.h>
21 #include <linux/slab.h>
22 #include <linux/time.h>
23 #include <linux/workqueue.h>
24 #include <linux/delay.h>
25 #include <scsi/scsi_dh.h>
26 #include <linux/atomic.h>
27 #include <linux/blk-mq.h>
28 
29 #define DM_MSG_PREFIX "multipath"
30 #define DM_PG_INIT_DELAY_MSECS 2000
31 #define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
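/* ((unsigned) -1) marks pg_init_delay_msecs as "not configured";
 * __pg_init_all_paths() then falls back to DM_PG_INIT_DELAY_MSECS. */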
32 
33 /* Path properties */
34 struct pgpath {
35 	struct list_head list;
36 
37 	struct priority_group *pg;	/* Owning PG */
38 	unsigned fail_count;		/* Cumulative failure count */
39 
40 	struct dm_path path;
41 	struct delayed_work activate_path;
42 
43 	bool is_active:1;		/* Path status */
44 };
45 
46 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
47 
48 /*
49  * Paths are grouped into Priority Groups and numbered from 1 upwards.
50  * Each has a path selector which controls which path gets used.
51  */
52 struct priority_group {
53 	struct list_head list;
54 
55 	struct multipath *m;		/* Owning multipath instance */
56 	struct path_selector ps;
57 
58 	unsigned pg_num;		/* Reference number */
59 	unsigned nr_pgpaths;		/* Number of paths in PG */
60 	struct list_head pgpaths;
61 
62 	bool bypassed:1;		/* Temporarily bypass this PG? */
63 };
64 
65 /* Multipath context */
66 struct multipath {
67 	struct list_head list;
68 	struct dm_target *ti;
69 
70 	const char *hw_handler_name;
71 	char *hw_handler_params;
72 
73 	spinlock_t lock;
74 
75 	unsigned nr_priority_groups;
76 	struct list_head priority_groups;
77 
78 	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
79 
80 	struct pgpath *current_pgpath;
81 	struct priority_group *current_pg;
82 	struct priority_group *next_pg;	/* Switch to this PG if set */
83 
84 	unsigned long flags;		/* Multipath state flags */
85 
86 	unsigned pg_init_retries;	/* Number of times to retry pg_init */
87 	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
88 
89 	atomic_t nr_valid_paths;	/* Total number of usable paths */
90 	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
91 	atomic_t pg_init_count;		/* Number of times pg_init called */
92 
93 	unsigned queue_mode;
94 
95 	struct mutex work_mutex;
96 	struct work_struct trigger_event;
97 
98 	struct work_struct process_queued_bios;
99 	struct bio_list queued_bios;
100 };
101 
102 /*
103  * Context information attached to each io we process.
104  */
105 struct dm_mpath_io {
106 	struct pgpath *pgpath;
107 	size_t nr_bytes;
108 };
109 
110 typedef int (*action_fn) (struct pgpath *pgpath);
111 
112 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
113 static void trigger_event(struct work_struct *work);
114 static void activate_path(struct work_struct *work);
115 static void process_queued_bios(struct work_struct *work);
116 
117 /*-----------------------------------------------
118  * Multipath state flags.
119  *-----------------------------------------------*/
120 
121 #define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
122 #define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
123 #define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
124 #define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
125 #define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
126 #define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
127 #define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
128 
129 /*-----------------------------------------------
130  * Allocation routines
131  *-----------------------------------------------*/
132 
133 static struct pgpath *alloc_pgpath(void)
134 {
135 	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
136 
137 	if (pgpath) {
138 		pgpath->is_active = true;
139 		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
140 	}
141 
142 	return pgpath;
143 }
144 
145 static void free_pgpath(struct pgpath *pgpath)
146 {
147 	kfree(pgpath);
148 }
149 
150 static struct priority_group *alloc_priority_group(void)
151 {
152 	struct priority_group *pg;
153 
154 	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
155 
156 	if (pg)
157 		INIT_LIST_HEAD(&pg->pgpaths);
158 
159 	return pg;
160 }
161 
162 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
163 {
164 	struct pgpath *pgpath, *tmp;
165 
166 	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
167 		list_del(&pgpath->list);
168 		dm_put_device(ti, pgpath->path.dev);
169 		free_pgpath(pgpath);
170 	}
171 }
172 
173 static void free_priority_group(struct priority_group *pg,
174 				struct dm_target *ti)
175 {
176 	struct path_selector *ps = &pg->ps;
177 
178 	if (ps->type) {
179 		ps->type->destroy(ps);
180 		dm_put_path_selector(ps->type);
181 	}
182 
183 	free_pgpaths(&pg->pgpaths, ti);
184 	kfree(pg);
185 }
186 
187 static struct multipath *alloc_multipath(struct dm_target *ti)
188 {
189 	struct multipath *m;
190 
191 	m = kzalloc(sizeof(*m), GFP_KERNEL);
192 	if (m) {
193 		INIT_LIST_HEAD(&m->priority_groups);
194 		spin_lock_init(&m->lock);
195 		set_bit(MPATHF_QUEUE_IO, &m->flags);
196 		atomic_set(&m->nr_valid_paths, 0);
197 		atomic_set(&m->pg_init_in_progress, 0);
198 		atomic_set(&m->pg_init_count, 0);
199 		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
200 		INIT_WORK(&m->trigger_event, trigger_event);
201 		init_waitqueue_head(&m->pg_init_wait);
202 		mutex_init(&m->work_mutex);
203 
204 		m->queue_mode = DM_TYPE_NONE;
205 
206 		m->ti = ti;
207 		ti->private = m;
208 	}
209 
210 	return m;
211 }
212 
213 static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
214 {
215 	if (m->queue_mode == DM_TYPE_NONE) {
216 		/*
217 		 * Default to request-based.
218 		 */
219 		if (dm_use_blk_mq(dm_table_get_md(ti->table)))
220 			m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
221 		else
222 			m->queue_mode = DM_TYPE_REQUEST_BASED;
223 	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
224 		INIT_WORK(&m->process_queued_bios, process_queued_bios);
225 		/*
226 		 * bio-based doesn't support any direct scsi_dh management;
227 		 * it just discovers if a scsi_dh is attached.
228 		 */
229 		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
230 	}
231 
232 	dm_table_set_type(ti->table, m->queue_mode);
233 
234 	return 0;
235 }
236 
237 static void free_multipath(struct multipath *m)
238 {
239 	struct priority_group *pg, *tmp;
240 
241 	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
242 		list_del(&pg->list);
243 		free_priority_group(pg, m->ti);
244 	}
245 
246 	kfree(m->hw_handler_name);
247 	kfree(m->hw_handler_params);
248 	kfree(m);
249 }
250 
251 static struct dm_mpath_io *get_mpio(union map_info *info)
252 {
253 	return info->ptr;
254 }
255 
256 static size_t multipath_per_bio_data_size(void)
257 {
258 	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
259 }
260 
261 static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
262 {
263 	return dm_per_bio_data(bio, multipath_per_bio_data_size());
264 }
265 
266 static struct dm_bio_details *get_bio_details_from_bio(struct bio *bio)
267 {
268 	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
269 	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
270 	void *bio_details = mpio + 1;
271 
272 	return bio_details;
273 }
274 
275 static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p,
276 					struct dm_bio_details **bio_details_p)
277 {
278 	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
279 	struct dm_bio_details *bio_details = get_bio_details_from_bio(bio);
280 
281 	memset(mpio, 0, sizeof(*mpio));
282 	memset(bio_details, 0, sizeof(*bio_details));
283 	dm_bio_record(bio_details, bio);
284 
285 	if (mpio_p)
286 		*mpio_p = mpio;
287 	if (bio_details_p)
288 		*bio_details_p = bio_details;
289 }
290 
291 /*-----------------------------------------------
292  * Path selection
293  *-----------------------------------------------*/
294 
295 static int __pg_init_all_paths(struct multipath *m)
296 {
297 	struct pgpath *pgpath;
298 	unsigned long pg_init_delay = 0;
299 
300 	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
301 		return 0;
302 
303 	atomic_inc(&m->pg_init_count);
304 	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
305 
306 	/* Check here to reset pg_init_required */
307 	if (!m->current_pg)
308 		return 0;
309 
310 	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
311 		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
312 						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
313 	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
314 		/* Skip failed paths */
315 		if (!pgpath->is_active)
316 			continue;
317 		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
318 				       pg_init_delay))
319 			atomic_inc(&m->pg_init_in_progress);
320 	}
321 	return atomic_read(&m->pg_init_in_progress);
322 }
323 
324 static void pg_init_all_paths(struct multipath *m)
325 {
326 	unsigned long flags;
327 
328 	spin_lock_irqsave(&m->lock, flags);
329 	__pg_init_all_paths(m);
330 	spin_unlock_irqrestore(&m->lock, flags);
331 }
332 
333 static void __switch_pg(struct multipath *m, struct priority_group *pg)
334 {
335 	m->current_pg = pg;
336 
337 	/* Must we initialise the PG first, and queue I/O till it's ready? */
338 	if (m->hw_handler_name) {
339 		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
340 		set_bit(MPATHF_QUEUE_IO, &m->flags);
341 	} else {
342 		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
343 		clear_bit(MPATHF_QUEUE_IO, &m->flags);
344 	}
345 
346 	atomic_set(&m->pg_init_count, 0);
347 }
348 
349 static struct pgpath *choose_path_in_pg(struct multipath *m,
350 					struct priority_group *pg,
351 					size_t nr_bytes)
352 {
353 	unsigned long flags;
354 	struct dm_path *path;
355 	struct pgpath *pgpath;
356 
357 	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
358 	if (!path)
359 		return ERR_PTR(-ENXIO);
360 
361 	pgpath = path_to_pgpath(path);
362 
363 	if (unlikely(lockless_dereference(m->current_pg) != pg)) {
364 		/* Only update current_pgpath if pg changed */
365 		spin_lock_irqsave(&m->lock, flags);
366 		m->current_pgpath = pgpath;
367 		__switch_pg(m, pg);
368 		spin_unlock_irqrestore(&m->lock, flags);
369 	}
370 
371 	return pgpath;
372 }
373 
374 static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
375 {
376 	unsigned long flags;
377 	struct priority_group *pg;
378 	struct pgpath *pgpath;
379 	unsigned bypassed = 1;
380 
381 	if (!atomic_read(&m->nr_valid_paths)) {
382 		clear_bit(MPATHF_QUEUE_IO, &m->flags);
383 		goto failed;
384 	}
385 
386 	/* Were we instructed to switch PG? */
387 	if (lockless_dereference(m->next_pg)) {
388 		spin_lock_irqsave(&m->lock, flags);
389 		pg = m->next_pg;
390 		if (!pg) {
391 			spin_unlock_irqrestore(&m->lock, flags);
392 			goto check_current_pg;
393 		}
394 		m->next_pg = NULL;
395 		spin_unlock_irqrestore(&m->lock, flags);
396 		pgpath = choose_path_in_pg(m, pg, nr_bytes);
397 		if (!IS_ERR_OR_NULL(pgpath))
398 			return pgpath;
399 	}
400 
401 	/* Don't change PG until it has no remaining paths */
402 check_current_pg:
403 	pg = lockless_dereference(m->current_pg);
404 	if (pg) {
405 		pgpath = choose_path_in_pg(m, pg, nr_bytes);
406 		if (!IS_ERR_OR_NULL(pgpath))
407 			return pgpath;
408 	}
409 
410 	/*
411 	 * Loop through priority groups until we find a valid path.
412 	 * First time we skip PGs marked 'bypassed'.
413 	 * Second time we only try the ones we skipped, but set
414 	 * pg_init_delay_retry so we do not hammer controllers.
415 	 */
416 	do {
417 		list_for_each_entry(pg, &m->priority_groups, list) {
418 			if (pg->bypassed == !!bypassed)
419 				continue;
420 			pgpath = choose_path_in_pg(m, pg, nr_bytes);
421 			if (!IS_ERR_OR_NULL(pgpath)) {
422 				if (!bypassed)
423 					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
424 				return pgpath;
425 			}
426 		}
427 	} while (bypassed--);
428 
429 failed:
430 	spin_lock_irqsave(&m->lock, flags);
431 	m->current_pgpath = NULL;
432 	m->current_pg = NULL;
433 	spin_unlock_irqrestore(&m->lock, flags);
434 
435 	return NULL;
436 }
437 
438 /*
439  * Check whether bios must be queued in the device-mapper core rather
440  * than here in the target.
441  *
442  * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
443  * same value, then we are not between multipath_presuspend()
444  * and multipath_resume() calls and have no need to check
445  * for the DMF_NOFLUSH_SUSPENDING flag.
446  */
447 static bool __must_push_back(struct multipath *m)
448 {
449 	return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
450 		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
451 		dm_noflush_suspending(m->ti));
452 }
453 
454 static bool must_push_back_rq(struct multipath *m)
455 {
456 	bool r;
457 	unsigned long flags;
458 
459 	spin_lock_irqsave(&m->lock, flags);
460 	r = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
461 	     __must_push_back(m));
462 	spin_unlock_irqrestore(&m->lock, flags);
463 
464 	return r;
465 }
466 
467 static bool must_push_back_bio(struct multipath *m)
468 {
469 	bool r;
470 	unsigned long flags;
471 
472 	spin_lock_irqsave(&m->lock, flags);
473 	r = __must_push_back(m);
474 	spin_unlock_irqrestore(&m->lock, flags);
475 
476 	return r;
477 }
478 
479 /*
480  * Map cloned requests (request-based multipath)
481  */
482 static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
483 				   union map_info *map_context,
484 				   struct request **__clone)
485 {
486 	struct multipath *m = ti->private;
487 	int r = DM_MAPIO_REQUEUE;
488 	size_t nr_bytes = blk_rq_bytes(rq);
489 	struct pgpath *pgpath;
490 	struct block_device *bdev;
491 	struct dm_mpath_io *mpio = get_mpio(map_context);
492 	struct request *clone;
493 
494 	/* Do we need to select a new pgpath? */
495 	pgpath = lockless_dereference(m->current_pgpath);
496 	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
497 		pgpath = choose_pgpath(m, nr_bytes);
498 
499 	if (!pgpath) {
500 		if (must_push_back_rq(m))
501 			return DM_MAPIO_DELAY_REQUEUE;
502 		return -EIO;	/* Failed */
503 	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
504 		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
505 		pg_init_all_paths(m);
506 		return r;
507 	}
508 
509 	memset(mpio, 0, sizeof(*mpio));
510 	mpio->pgpath = pgpath;
511 	mpio->nr_bytes = nr_bytes;
512 
513 	bdev = pgpath->path.dev->bdev;
514 
515 	clone = blk_get_request(bdev_get_queue(bdev),
516 			rq->cmd_flags | REQ_NOMERGE,
517 			GFP_ATOMIC);
518 	if (IS_ERR(clone)) {
519 		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
520 		return r;
521 	}
522 	clone->bio = clone->biotail = NULL;
523 	clone->rq_disk = bdev->bd_disk;
524 	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
525 	*__clone = clone;
526 
527 	if (pgpath->pg->ps.type->start_io)
528 		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
529 					      &pgpath->path,
530 					      nr_bytes);
531 	return DM_MAPIO_REMAPPED;
532 }
533 
534 static void multipath_release_clone(struct request *clone)
535 {
536 	blk_put_request(clone);
537 }
538 
539 /*
540  * Map cloned bios (bio-based multipath)
541  */
542 static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
543 {
544 	size_t nr_bytes = bio->bi_iter.bi_size;
545 	struct pgpath *pgpath;
546 	unsigned long flags;
547 	bool queue_io;
548 
549 	/* Do we need to select a new pgpath? */
550 	pgpath = lockless_dereference(m->current_pgpath);
551 	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
552 	if (!pgpath || !queue_io)
553 		pgpath = choose_pgpath(m, nr_bytes);
554 
555 	if ((pgpath && queue_io) ||
556 	    (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
557 		/* Queue for the daemon to resubmit */
558 		spin_lock_irqsave(&m->lock, flags);
559 		bio_list_add(&m->queued_bios, bio);
560 		spin_unlock_irqrestore(&m->lock, flags);
561 		/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
562 		if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
563 			pg_init_all_paths(m);
564 		else if (!queue_io)
565 			queue_work(kmultipathd, &m->process_queued_bios);
566 		return DM_MAPIO_SUBMITTED;
567 	}
568 
569 	if (!pgpath) {
570 		if (!must_push_back_bio(m))
571 			return -EIO;
572 		return DM_MAPIO_REQUEUE;
573 	}
574 
575 	mpio->pgpath = pgpath;
576 	mpio->nr_bytes = nr_bytes;
577 
578 	bio->bi_error = 0;
579 	bio->bi_bdev = pgpath->path.dev->bdev;
580 	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
581 
582 	if (pgpath->pg->ps.type->start_io)
583 		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
584 					      &pgpath->path,
585 					      nr_bytes);
586 	return DM_MAPIO_REMAPPED;
587 }
588 
589 static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
590 {
591 	struct multipath *m = ti->private;
592 	struct dm_mpath_io *mpio = NULL;
593 
594 	multipath_init_per_bio_data(bio, &mpio, NULL);
595 
596 	return __multipath_map_bio(m, bio, mpio);
597 }
598 
599 static void process_queued_io_list(struct multipath *m)
600 {
601 	if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
602 		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
603 	else if (m->queue_mode == DM_TYPE_BIO_BASED)
604 		queue_work(kmultipathd, &m->process_queued_bios);
605 }
606 
607 static void process_queued_bios(struct work_struct *work)
608 {
609 	int r;
610 	unsigned long flags;
611 	struct bio *bio;
612 	struct bio_list bios;
613 	struct blk_plug plug;
614 	struct multipath *m =
615 		container_of(work, struct multipath, process_queued_bios);
616 
617 	bio_list_init(&bios);
618 
619 	spin_lock_irqsave(&m->lock, flags);
620 
621 	if (bio_list_empty(&m->queued_bios)) {
622 		spin_unlock_irqrestore(&m->lock, flags);
623 		return;
624 	}
625 
626 	bio_list_merge(&bios, &m->queued_bios);
627 	bio_list_init(&m->queued_bios);
628 
629 	spin_unlock_irqrestore(&m->lock, flags);
630 
631 	blk_start_plug(&plug);
632 	while ((bio = bio_list_pop(&bios))) {
633 		r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
634 		if (r < 0 || r == DM_MAPIO_REQUEUE) {
635 			bio->bi_error = r;
636 			bio_endio(bio);
637 		} else if (r == DM_MAPIO_REMAPPED)
638 			generic_make_request(bio);
639 	}
640 	blk_finish_plug(&plug);
641 }
642 
643 /*
644  * If we run out of usable paths, should we queue I/O or error it?
645  */
646 static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
647 			    bool save_old_value)
648 {
649 	unsigned long flags;
650 
651 	spin_lock_irqsave(&m->lock, flags);
652 
653 	if (save_old_value) {
654 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
655 			set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
656 		else
657 			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
658 	} else {
659 		if (queue_if_no_path)
660 			set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
661 		else
662 			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
663 	}
664 	if (queue_if_no_path)
665 		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
666 	else
667 		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
668 
669 	spin_unlock_irqrestore(&m->lock, flags);
670 
671 	if (!queue_if_no_path) {
672 		dm_table_run_md_queue_async(m->ti->table);
673 		process_queued_io_list(m);
674 	}
675 
676 	return 0;
677 }
678 
679 /*
680  * An event is triggered whenever a path is taken out of use.
681  * Includes path failure and PG bypass.
682  */
683 static void trigger_event(struct work_struct *work)
684 {
685 	struct multipath *m =
686 		container_of(work, struct multipath, trigger_event);
687 
688 	dm_table_event(m->ti->table);
689 }
690 
691 /*-----------------------------------------------------------------
692  * Constructor/argument parsing:
693  * <#multipath feature args> [<arg>]*
694  * <#hw_handler args> [hw_handler [<arg>]*]
695  * <#priority groups>
696  * <initial priority group>
697  *     [<selector> <#selector args> [<arg>]*
698  *      <#paths> <#per-path selector args>
699  *         [<path> [<arg>]* ]+ ]+
700  *---------------------------------------------------------------*/
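/*
 * Illustrative example only (device numbers and selector are hypothetical):
 * a full "dmsetup create" table line with no feature or hardware handler
 * args, two round-robin priority groups of two paths each, and group 1
 * selected initially might look like:
 *
 *   0 2097152 multipath 0 0 2 1 round-robin 0 2 1 8:16 1000 8:32 1000 \
 *                                round-robin 0 2 1 8:48 1000 8:64 1000
 *
 * Inside each group, "2 1" means two paths with one per-path selector arg
 * (the round-robin repeat count) following each device.
 */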
701 static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
702 			       struct dm_target *ti)
703 {
704 	int r;
705 	struct path_selector_type *pst;
706 	unsigned ps_argc;
707 
708 	static struct dm_arg _args[] = {
709 		{0, 1024, "invalid number of path selector args"},
710 	};
711 
712 	pst = dm_get_path_selector(dm_shift_arg(as));
713 	if (!pst) {
714 		ti->error = "unknown path selector type";
715 		return -EINVAL;
716 	}
717 
718 	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
719 	if (r) {
720 		dm_put_path_selector(pst);
721 		return -EINVAL;
722 	}
723 
724 	r = pst->create(&pg->ps, ps_argc, as->argv);
725 	if (r) {
726 		dm_put_path_selector(pst);
727 		ti->error = "path selector constructor failed";
728 		return r;
729 	}
730 
731 	pg->ps.type = pst;
732 	dm_consume_args(as, ps_argc);
733 
734 	return 0;
735 }
736 
737 static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
738 			       struct dm_target *ti)
739 {
740 	int r;
741 	struct pgpath *p;
742 	struct multipath *m = ti->private;
743 	struct request_queue *q = NULL;
744 	const char *attached_handler_name;
745 
746 	/* we need at least a path arg */
747 	if (as->argc < 1) {
748 		ti->error = "no device given";
749 		return ERR_PTR(-EINVAL);
750 	}
751 
752 	p = alloc_pgpath();
753 	if (!p)
754 		return ERR_PTR(-ENOMEM);
755 
756 	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
757 			  &p->path.dev);
758 	if (r) {
759 		ti->error = "error getting device";
760 		goto bad;
761 	}
762 
763 	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
764 		q = bdev_get_queue(p->path.dev->bdev);
765 
766 	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
767 retain:
768 		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
769 		if (attached_handler_name) {
770 			/*
771 			 * Clear any hw_handler_params associated with a
772 			 * handler that isn't already attached.
773 			 */
774 			if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
775 				kfree(m->hw_handler_params);
776 				m->hw_handler_params = NULL;
777 			}
778 
779 			/*
780 			 * Reset hw_handler_name to match the attached handler
781 			 *
782 			 * NB. This modifies the table line to show the actual
783 			 * handler instead of the original table passed in.
784 			 */
785 			kfree(m->hw_handler_name);
786 			m->hw_handler_name = attached_handler_name;
787 		}
788 	}
789 
790 	if (m->hw_handler_name) {
791 		r = scsi_dh_attach(q, m->hw_handler_name);
792 		if (r == -EBUSY) {
793 			char b[BDEVNAME_SIZE];
794 
795 			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
796 				bdevname(p->path.dev->bdev, b));
797 			goto retain;
798 		}
799 		if (r < 0) {
800 			ti->error = "error attaching hardware handler";
801 			dm_put_device(ti, p->path.dev);
802 			goto bad;
803 		}
804 
805 		if (m->hw_handler_params) {
806 			r = scsi_dh_set_params(q, m->hw_handler_params);
807 			if (r < 0) {
808 				ti->error = "unable to set hardware "
809 							"handler parameters";
810 				dm_put_device(ti, p->path.dev);
811 				goto bad;
812 			}
813 		}
814 	}
815 
816 	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
817 	if (r) {
818 		dm_put_device(ti, p->path.dev);
819 		goto bad;
820 	}
821 
822 	return p;
823 
824  bad:
825 	free_pgpath(p);
826 	return ERR_PTR(r);
827 }
828 
829 static struct priority_group *parse_priority_group(struct dm_arg_set *as,
830 						   struct multipath *m)
831 {
832 	static struct dm_arg _args[] = {
833 		{1, 1024, "invalid number of paths"},
834 		{0, 1024, "invalid number of selector args"}
835 	};
836 
837 	int r;
838 	unsigned i, nr_selector_args, nr_args;
839 	struct priority_group *pg;
840 	struct dm_target *ti = m->ti;
841 
842 	if (as->argc < 2) {
843 		as->argc = 0;
844 		ti->error = "not enough priority group arguments";
845 		return ERR_PTR(-EINVAL);
846 	}
847 
848 	pg = alloc_priority_group();
849 	if (!pg) {
850 		ti->error = "couldn't allocate priority group";
851 		return ERR_PTR(-ENOMEM);
852 	}
853 	pg->m = m;
854 
855 	r = parse_path_selector(as, pg, ti);
856 	if (r)
857 		goto bad;
858 
859 	/*
860 	 * read the paths
861 	 */
862 	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
863 	if (r)
864 		goto bad;
865 
866 	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
867 	if (r)
868 		goto bad;
869 
870 	nr_args = 1 + nr_selector_args;
871 	for (i = 0; i < pg->nr_pgpaths; i++) {
872 		struct pgpath *pgpath;
873 		struct dm_arg_set path_args;
874 
875 		if (as->argc < nr_args) {
876 			ti->error = "not enough path parameters";
877 			r = -EINVAL;
878 			goto bad;
879 		}
880 
881 		path_args.argc = nr_args;
882 		path_args.argv = as->argv;
883 
884 		pgpath = parse_path(&path_args, &pg->ps, ti);
885 		if (IS_ERR(pgpath)) {
886 			r = PTR_ERR(pgpath);
887 			goto bad;
888 		}
889 
890 		pgpath->pg = pg;
891 		list_add_tail(&pgpath->list, &pg->pgpaths);
892 		dm_consume_args(as, nr_args);
893 	}
894 
895 	return pg;
896 
897  bad:
898 	free_priority_group(pg, ti);
899 	return ERR_PTR(r);
900 }
901 
902 static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
903 {
904 	unsigned hw_argc;
905 	int ret;
906 	struct dm_target *ti = m->ti;
907 
908 	static struct dm_arg _args[] = {
909 		{0, 1024, "invalid number of hardware handler args"},
910 	};
911 
912 	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
913 		return -EINVAL;
914 
915 	if (!hw_argc)
916 		return 0;
917 
918 	if (m->queue_mode == DM_TYPE_BIO_BASED) {
919 		dm_consume_args(as, hw_argc);
920 		DMERR("bio-based multipath doesn't allow hardware handler args");
921 		return 0;
922 	}
923 
924 	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
925 	if (!m->hw_handler_name)
926 		return -EINVAL;
927 
928 	if (hw_argc > 1) {
929 		char *p;
930 		int i, j, len = 4;
931 
932 		for (i = 0; i <= hw_argc - 2; i++)
933 			len += strlen(as->argv[i]) + 1;
934 		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
935 		if (!p) {
936 			ti->error = "memory allocation failed";
937 			ret = -ENOMEM;
938 			goto fail;
939 		}
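		/*
		 * Build the parameter string later handed to
		 * scsi_dh_set_params(): the argument count first, then each
		 * argument.  Each sprintf() leaves its NUL terminator in
		 * place and p is advanced past it, so the tokens end up
		 * NUL-separated in hw_handler_params.
		 */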
940 		j = sprintf(p, "%d", hw_argc - 1);
941 		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
942 			j = sprintf(p, "%s", as->argv[i]);
943 	}
944 	dm_consume_args(as, hw_argc - 1);
945 
946 	return 0;
947 fail:
948 	kfree(m->hw_handler_name);
949 	m->hw_handler_name = NULL;
950 	return ret;
951 }
952 
953 static int parse_features(struct dm_arg_set *as, struct multipath *m)
954 {
955 	int r;
956 	unsigned argc;
957 	struct dm_target *ti = m->ti;
958 	const char *arg_name;
959 
960 	static struct dm_arg _args[] = {
961 		{0, 8, "invalid number of feature args"},
962 		{1, 50, "pg_init_retries must be between 1 and 50"},
963 		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
964 	};
965 
966 	r = dm_read_arg_group(_args, as, &argc, &ti->error);
967 	if (r)
968 		return -EINVAL;
969 
970 	if (!argc)
971 		return 0;
972 
973 	do {
974 		arg_name = dm_shift_arg(as);
975 		argc--;
976 
977 		if (!strcasecmp(arg_name, "queue_if_no_path")) {
978 			r = queue_if_no_path(m, true, false);
979 			continue;
980 		}
981 
982 		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
983 			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
984 			continue;
985 		}
986 
987 		if (!strcasecmp(arg_name, "pg_init_retries") &&
988 		    (argc >= 1)) {
989 			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
990 			argc--;
991 			continue;
992 		}
993 
994 		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
995 		    (argc >= 1)) {
996 			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
997 			argc--;
998 			continue;
999 		}
1000 
1001 		if (!strcasecmp(arg_name, "queue_mode") &&
1002 		    (argc >= 1)) {
1003 			const char *queue_mode_name = dm_shift_arg(as);
1004 
1005 			if (!strcasecmp(queue_mode_name, "bio"))
1006 				m->queue_mode = DM_TYPE_BIO_BASED;
1007 			else if (!strcasecmp(queue_mode_name, "rq"))
1008 				m->queue_mode = DM_TYPE_REQUEST_BASED;
1009 			else if (!strcasecmp(queue_mode_name, "mq"))
1010 				m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
1011 			else {
1012 				ti->error = "Unknown 'queue_mode' requested";
1013 				r = -EINVAL;
1014 			}
1015 			argc--;
1016 			continue;
1017 		}
1018 
1019 		ti->error = "Unrecognised multipath feature request";
1020 		r = -EINVAL;
1021 	} while (argc && !r);
1022 
1023 	return r;
1024 }
1025 
1026 static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
1027 {
1028 	/* target arguments */
1029 	static struct dm_arg _args[] = {
1030 		{0, 1024, "invalid number of priority groups"},
1031 		{0, 1024, "invalid initial priority group number"},
1032 	};
1033 
1034 	int r;
1035 	struct multipath *m;
1036 	struct dm_arg_set as;
1037 	unsigned pg_count = 0;
1038 	unsigned next_pg_num;
1039 
1040 	as.argc = argc;
1041 	as.argv = argv;
1042 
1043 	m = alloc_multipath(ti);
1044 	if (!m) {
1045 		ti->error = "can't allocate multipath";
1046 		return -EINVAL;
1047 	}
1048 
1049 	r = parse_features(&as, m);
1050 	if (r)
1051 		goto bad;
1052 
1053 	r = alloc_multipath_stage2(ti, m);
1054 	if (r)
1055 		goto bad;
1056 
1057 	r = parse_hw_handler(&as, m);
1058 	if (r)
1059 		goto bad;
1060 
1061 	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
1062 	if (r)
1063 		goto bad;
1064 
1065 	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
1066 	if (r)
1067 		goto bad;
1068 
1069 	if ((!m->nr_priority_groups && next_pg_num) ||
1070 	    (m->nr_priority_groups && !next_pg_num)) {
1071 		ti->error = "invalid initial priority group";
1072 		r = -EINVAL;
1073 		goto bad;
1074 	}
1075 
1076 	/* parse the priority groups */
1077 	while (as.argc) {
1078 		struct priority_group *pg;
1079 		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
1080 
1081 		pg = parse_priority_group(&as, m);
1082 		if (IS_ERR(pg)) {
1083 			r = PTR_ERR(pg);
1084 			goto bad;
1085 		}
1086 
1087 		nr_valid_paths += pg->nr_pgpaths;
1088 		atomic_set(&m->nr_valid_paths, nr_valid_paths);
1089 
1090 		list_add_tail(&pg->list, &m->priority_groups);
1091 		pg_count++;
1092 		pg->pg_num = pg_count;
1093 		if (!--next_pg_num)
1094 			m->next_pg = pg;
1095 	}
1096 
1097 	if (pg_count != m->nr_priority_groups) {
1098 		ti->error = "priority group count mismatch";
1099 		r = -EINVAL;
1100 		goto bad;
1101 	}
1102 
1103 	ti->num_flush_bios = 1;
1104 	ti->num_discard_bios = 1;
1105 	ti->num_write_same_bios = 1;
1106 	ti->num_write_zeroes_bios = 1;
1107 	if (m->queue_mode == DM_TYPE_BIO_BASED)
1108 		ti->per_io_data_size = multipath_per_bio_data_size();
1109 	else
1110 		ti->per_io_data_size = sizeof(struct dm_mpath_io);
1111 
1112 	return 0;
1113 
1114  bad:
1115 	free_multipath(m);
1116 	return r;
1117 }
1118 
1119 static void multipath_wait_for_pg_init_completion(struct multipath *m)
1120 {
1121 	DEFINE_WAIT(wait);
1122 
1123 	while (1) {
1124 		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
1125 
1126 		if (!atomic_read(&m->pg_init_in_progress))
1127 			break;
1128 
1129 		io_schedule();
1130 	}
1131 	finish_wait(&m->pg_init_wait, &wait);
1132 }
1133 
1134 static void flush_multipath_work(struct multipath *m)
1135 {
1136 	set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1137 	smp_mb__after_atomic();
1138 
1139 	flush_workqueue(kmpath_handlerd);
1140 	multipath_wait_for_pg_init_completion(m);
1141 	flush_workqueue(kmultipathd);
1142 	flush_work(&m->trigger_event);
1143 
1144 	clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1145 	smp_mb__after_atomic();
1146 }
1147 
1148 static void multipath_dtr(struct dm_target *ti)
1149 {
1150 	struct multipath *m = ti->private;
1151 
1152 	flush_multipath_work(m);
1153 	free_multipath(m);
1154 }
1155 
1156 /*
1157  * Take a path out of use.
1158  */
1159 static int fail_path(struct pgpath *pgpath)
1160 {
1161 	unsigned long flags;
1162 	struct multipath *m = pgpath->pg->m;
1163 
1164 	spin_lock_irqsave(&m->lock, flags);
1165 
1166 	if (!pgpath->is_active)
1167 		goto out;
1168 
1169 	DMWARN("Failing path %s.", pgpath->path.dev->name);
1170 
1171 	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
1172 	pgpath->is_active = false;
1173 	pgpath->fail_count++;
1174 
1175 	atomic_dec(&m->nr_valid_paths);
1176 
1177 	if (pgpath == m->current_pgpath)
1178 		m->current_pgpath = NULL;
1179 
1180 	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
1181 		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
1182 
1183 	schedule_work(&m->trigger_event);
1184 
1185 out:
1186 	spin_unlock_irqrestore(&m->lock, flags);
1187 
1188 	return 0;
1189 }
1190 
1191 /*
1192  * Reinstate a previously-failed path
1193  */
1194 static int reinstate_path(struct pgpath *pgpath)
1195 {
1196 	int r = 0, run_queue = 0;
1197 	unsigned long flags;
1198 	struct multipath *m = pgpath->pg->m;
1199 	unsigned nr_valid_paths;
1200 
1201 	spin_lock_irqsave(&m->lock, flags);
1202 
1203 	if (pgpath->is_active)
1204 		goto out;
1205 
1206 	DMWARN("Reinstating path %s.", pgpath->path.dev->name);
1207 
1208 	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1209 	if (r)
1210 		goto out;
1211 
1212 	pgpath->is_active = true;
1213 
1214 	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
1215 	if (nr_valid_paths == 1) {
1216 		m->current_pgpath = NULL;
1217 		run_queue = 1;
1218 	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1219 		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1220 			atomic_inc(&m->pg_init_in_progress);
1221 	}
1222 
1223 	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1224 		       pgpath->path.dev->name, nr_valid_paths);
1225 
1226 	schedule_work(&m->trigger_event);
1227 
1228 out:
1229 	spin_unlock_irqrestore(&m->lock, flags);
1230 	if (run_queue) {
1231 		dm_table_run_md_queue_async(m->ti->table);
1232 		process_queued_io_list(m);
1233 	}
1234 
1235 	return r;
1236 }
1237 
1238 /*
1239  * Fail or reinstate all paths that match the provided struct dm_dev.
1240  */
1241 static int action_dev(struct multipath *m, struct dm_dev *dev,
1242 		      action_fn action)
1243 {
1244 	int r = -EINVAL;
1245 	struct pgpath *pgpath;
1246 	struct priority_group *pg;
1247 
1248 	list_for_each_entry(pg, &m->priority_groups, list) {
1249 		list_for_each_entry(pgpath, &pg->pgpaths, list) {
1250 			if (pgpath->path.dev == dev)
1251 				r = action(pgpath);
1252 		}
1253 	}
1254 
1255 	return r;
1256 }
1257 
1258 /*
1259  * Temporarily try to avoid having to use the specified PG
1260  */
1261 static void bypass_pg(struct multipath *m, struct priority_group *pg,
1262 		      bool bypassed)
1263 {
1264 	unsigned long flags;
1265 
1266 	spin_lock_irqsave(&m->lock, flags);
1267 
1268 	pg->bypassed = bypassed;
1269 	m->current_pgpath = NULL;
1270 	m->current_pg = NULL;
1271 
1272 	spin_unlock_irqrestore(&m->lock, flags);
1273 
1274 	schedule_work(&m->trigger_event);
1275 }
1276 
1277 /*
1278  * Switch to using the specified PG from the next I/O that gets mapped
1279  */
1280 static int switch_pg_num(struct multipath *m, const char *pgstr)
1281 {
1282 	struct priority_group *pg;
1283 	unsigned pgnum;
1284 	unsigned long flags;
1285 	char dummy;
1286 
1287 	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1288 	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1289 		DMWARN("invalid PG number supplied to switch_pg_num");
1290 		return -EINVAL;
1291 	}
1292 
1293 	spin_lock_irqsave(&m->lock, flags);
1294 	list_for_each_entry(pg, &m->priority_groups, list) {
1295 		pg->bypassed = false;
1296 		if (--pgnum)
1297 			continue;
1298 
1299 		m->current_pgpath = NULL;
1300 		m->current_pg = NULL;
1301 		m->next_pg = pg;
1302 	}
1303 	spin_unlock_irqrestore(&m->lock, flags);
1304 
1305 	schedule_work(&m->trigger_event);
1306 	return 0;
1307 }
1308 
1309 /*
1310  * Set/clear bypassed status of a PG.
1311  * PGs are numbered upwards from 1 in the order they were declared.
1312  */
1313 static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
1314 {
1315 	struct priority_group *pg;
1316 	unsigned pgnum;
1317 	char dummy;
1318 
1319 	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1320 	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1321 		DMWARN("invalid PG number supplied to bypass_pg");
1322 		return -EINVAL;
1323 	}
1324 
1325 	list_for_each_entry(pg, &m->priority_groups, list) {
1326 		if (!--pgnum)
1327 			break;
1328 	}
1329 
1330 	bypass_pg(m, pg, bypassed);
1331 	return 0;
1332 }
1333 
1334 /*
1335  * Should we retry pg_init immediately?
1336  */
1337 static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1338 {
1339 	unsigned long flags;
1340 	bool limit_reached = false;
1341 
1342 	spin_lock_irqsave(&m->lock, flags);
1343 
1344 	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
1345 	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
1346 		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
1347 	else
1348 		limit_reached = true;
1349 
1350 	spin_unlock_irqrestore(&m->lock, flags);
1351 
1352 	return limit_reached;
1353 }
1354 
1355 static void pg_init_done(void *data, int errors)
1356 {
1357 	struct pgpath *pgpath = data;
1358 	struct priority_group *pg = pgpath->pg;
1359 	struct multipath *m = pg->m;
1360 	unsigned long flags;
1361 	bool delay_retry = false;
1362 
1363 	/* device or driver problems */
1364 	switch (errors) {
1365 	case SCSI_DH_OK:
1366 		break;
1367 	case SCSI_DH_NOSYS:
1368 		if (!m->hw_handler_name) {
1369 			errors = 0;
1370 			break;
1371 		}
1372 		DMERR("Could not failover the device: Handler scsi_dh_%s "
1373 		      "Error %d.", m->hw_handler_name, errors);
1374 		/*
1375 		 * Fail the path for now, so we do not ping-pong
1376 		 */
1377 		fail_path(pgpath);
1378 		break;
1379 	case SCSI_DH_DEV_TEMP_BUSY:
1380 		/*
1381 		 * The controller is probably doing something like a firmware
1382 		 * upgrade, so try the other pg.
1383 		 */
1384 		bypass_pg(m, pg, true);
1385 		break;
1386 	case SCSI_DH_RETRY:
1387 		/* Wait before retrying. */
1388 		delay_retry = true;
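		/* fall through to the shared retry handling below */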
1389 	case SCSI_DH_IMM_RETRY:
1390 	case SCSI_DH_RES_TEMP_UNAVAIL:
1391 		if (pg_init_limit_reached(m, pgpath))
1392 			fail_path(pgpath);
1393 		errors = 0;
1394 		break;
1395 	case SCSI_DH_DEV_OFFLINED:
1396 	default:
1397 		/*
1398 		 * We probably do not want to fail the path for a device
1399 		 * error, but this is what the old dm did. In future
1400 		 * patches we can do more advanced handling.
1401 		 */
1402 		fail_path(pgpath);
1403 	}
1404 
1405 	spin_lock_irqsave(&m->lock, flags);
1406 	if (errors) {
1407 		if (pgpath == m->current_pgpath) {
1408 			DMERR("Could not failover device. Error %d.", errors);
1409 			m->current_pgpath = NULL;
1410 			m->current_pg = NULL;
1411 		}
1412 	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1413 		pg->bypassed = false;
1414 
1415 	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
1416 		/* Activations of other paths are still ongoing */
1417 		goto out;
1418 
1419 	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
1420 		if (delay_retry)
1421 			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1422 		else
1423 			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1424 
1425 		if (__pg_init_all_paths(m))
1426 			goto out;
1427 	}
1428 	clear_bit(MPATHF_QUEUE_IO, &m->flags);
1429 
1430 	process_queued_io_list(m);
1431 
1432 	/*
1433 	 * Wake up any thread waiting to suspend.
1434 	 */
1435 	wake_up(&m->pg_init_wait);
1436 
1437 out:
1438 	spin_unlock_irqrestore(&m->lock, flags);
1439 }
1440 
1441 static void activate_path(struct work_struct *work)
1442 {
1443 	struct pgpath *pgpath =
1444 		container_of(work, struct pgpath, activate_path.work);
1445 	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1446 
1447 	if (pgpath->is_active && !blk_queue_dying(q))
1448 		scsi_dh_activate(q, pg_init_done, pgpath);
1449 	else
1450 		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1451 }
1452 
1453 static int noretry_error(int error)
1454 {
1455 	switch (error) {
1456 	case -EBADE:
1457 		/*
1458 		 * EBADE signals a reservation conflict.
1459 		 * We shouldn't fail the path here as we can communicate with
1460 		 * the target.  We should fail over to the next path, but in
1461 		 * doing so we might cause a ping-pong between paths.
1462 		 * So just return the reservation conflict error.
1463 		 */
1464 	case -EOPNOTSUPP:
1465 	case -EREMOTEIO:
1466 	case -EILSEQ:
1467 	case -ENODATA:
1468 	case -ENOSPC:
1469 		return 1;
1470 	}
1471 
1472 	/* Anything else could be a path failure, so should be retried */
1473 	return 0;
1474 }
1475 
1476 /*
1477  * end_io handling
1478  */
1479 static int do_end_io(struct multipath *m, struct request *clone,
1480 		     int error, struct dm_mpath_io *mpio)
1481 {
1482 	/*
1483 	 * We don't queue any clone request inside the multipath target
1484 	 * during end I/O handling, since those clone requests don't have
1485 	 * bio clones.  If we queue them inside the multipath target,
1486 	 * we need to make bio clones, that requires memory allocation.
1487 	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
1488 	 *  don't have bio clones.)
1489 	 * Instead of queueing the clone request here, we queue the original
1490 	 * request into dm core, which will remake a clone request and
1491 	 * clone bios for it and resubmit it later.
1492 	 */
1493 	int r = DM_ENDIO_REQUEUE;
1494 
1495 	if (!error)
1496 		return 0;	/* I/O complete */
1497 
1498 	if (noretry_error(error))
1499 		return error;
1500 
1501 	if (mpio->pgpath)
1502 		fail_path(mpio->pgpath);
1503 
1504 	if (!atomic_read(&m->nr_valid_paths)) {
1505 		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1506 			if (!must_push_back_rq(m))
1507 				r = -EIO;
1508 		}
1509 	}
1510 
1511 	return r;
1512 }
1513 
1514 static int multipath_end_io(struct dm_target *ti, struct request *clone,
1515 			    int error, union map_info *map_context)
1516 {
1517 	struct multipath *m = ti->private;
1518 	struct dm_mpath_io *mpio = get_mpio(map_context);
1519 	struct pgpath *pgpath;
1520 	struct path_selector *ps;
1521 	int r;
1522 
1523 	BUG_ON(!mpio);
1524 
1525 	r = do_end_io(m, clone, error, mpio);
1526 	pgpath = mpio->pgpath;
1527 	if (pgpath) {
1528 		ps = &pgpath->pg->ps;
1529 		if (ps->type->end_io)
1530 			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1531 	}
1532 
1533 	return r;
1534 }
1535 
1536 static int do_end_io_bio(struct multipath *m, struct bio *clone,
1537 			 int error, struct dm_mpath_io *mpio)
1538 {
1539 	unsigned long flags;
1540 
1541 	if (!error)
1542 		return 0;	/* I/O complete */
1543 
1544 	if (noretry_error(error))
1545 		return error;
1546 
1547 	if (mpio->pgpath)
1548 		fail_path(mpio->pgpath);
1549 
1550 	if (!atomic_read(&m->nr_valid_paths)) {
1551 		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1552 			if (!must_push_back_bio(m))
1553 				return -EIO;
1554 			return DM_ENDIO_REQUEUE;
1555 		}
1556 	}
1557 
1558 	/* Queue for the daemon to resubmit */
1559 	dm_bio_restore(get_bio_details_from_bio(clone), clone);
1560 
1561 	spin_lock_irqsave(&m->lock, flags);
1562 	bio_list_add(&m->queued_bios, clone);
1563 	spin_unlock_irqrestore(&m->lock, flags);
1564 	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
1565 		queue_work(kmultipathd, &m->process_queued_bios);
1566 
1567 	return DM_ENDIO_INCOMPLETE;
1568 }
1569 
1570 static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int error)
1571 {
1572 	struct multipath *m = ti->private;
1573 	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
1574 	struct pgpath *pgpath;
1575 	struct path_selector *ps;
1576 	int r;
1577 
1578 	BUG_ON(!mpio);
1579 
1580 	r = do_end_io_bio(m, clone, error, mpio);
1581 	pgpath = mpio->pgpath;
1582 	if (pgpath) {
1583 		ps = &pgpath->pg->ps;
1584 		if (ps->type->end_io)
1585 			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1586 	}
1587 
1588 	return r;
1589 }
1590 
1591 /*
1592  * Suspend can't complete until all the I/O is processed, so if
1593  * the last path fails we must error any remaining I/O.
1594  * Note that if the freeze_bdev fails while suspending, the
1595  * queue_if_no_path state is lost - userspace should reset it.
1596  */
1597 static void multipath_presuspend(struct dm_target *ti)
1598 {
1599 	struct multipath *m = ti->private;
1600 
1601 	queue_if_no_path(m, false, true);
1602 }
1603 
1604 static void multipath_postsuspend(struct dm_target *ti)
1605 {
1606 	struct multipath *m = ti->private;
1607 
1608 	mutex_lock(&m->work_mutex);
1609 	flush_multipath_work(m);
1610 	mutex_unlock(&m->work_mutex);
1611 }
1612 
1613 /*
1614  * Restore the queue_if_no_path setting.
1615  */
1616 static void multipath_resume(struct dm_target *ti)
1617 {
1618 	struct multipath *m = ti->private;
1619 	unsigned long flags;
1620 
1621 	spin_lock_irqsave(&m->lock, flags);
1622 	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags))
1623 		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
1624 	else
1625 		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
1626 	spin_unlock_irqrestore(&m->lock, flags);
1627 }
1628 
1629 /*
1630  * Info output has the following format:
1631  * num_multipath_feature_args [multipath_feature_args]*
1632  * num_handler_status_args [handler_status_args]*
1633  * num_groups init_group_number
1634  *            [A|D|E num_ps_status_args [ps_status_args]*
1635  *             num_paths num_selector_args
1636  *             [path_dev A|F fail_count [selector_args]* ]+ ]+
1637  *
1638  * Table output has the following format (identical to the constructor string):
1639  * num_feature_args [features_args]*
1640  * num_handler_args hw_handler [hw_handler_args]*
1641  * num_groups init_group_number
1642  *     [priority selector-name num_ps_args [ps_args]*
1643  *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1644  */
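/*
 * Hypothetical STATUSTYPE_INFO output for a healthy single-group map with
 * two round-robin paths (exact selector fields vary by path selector and
 * the device names are illustrative):
 *
 *   2 0 0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 *
 * i.e. two feature args (queue_io 0, pg_init_count 0), no hardware handler,
 * one group with group 1 selected, group state 'A', two paths, both
 * active ('A') with zero failures.
 */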
1645 static void multipath_status(struct dm_target *ti, status_type_t type,
1646 			     unsigned status_flags, char *result, unsigned maxlen)
1647 {
1648 	int sz = 0;
1649 	unsigned long flags;
1650 	struct multipath *m = ti->private;
1651 	struct priority_group *pg;
1652 	struct pgpath *p;
1653 	unsigned pg_num;
1654 	char state;
1655 
1656 	spin_lock_irqsave(&m->lock, flags);
1657 
1658 	/* Features */
1659 	if (type == STATUSTYPE_INFO)
1660 		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
1661 		       atomic_read(&m->pg_init_count));
1662 	else {
1663 		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
1664 			      (m->pg_init_retries > 0) * 2 +
1665 			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1666 			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
1667 			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
1668 
1669 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1670 			DMEMIT("queue_if_no_path ");
1671 		if (m->pg_init_retries)
1672 			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1673 		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1674 			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1675 		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
1676 			DMEMIT("retain_attached_hw_handler ");
1677 		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
1678 			switch(m->queue_mode) {
1679 			case DM_TYPE_BIO_BASED:
1680 				DMEMIT("queue_mode bio ");
1681 				break;
1682 			case DM_TYPE_MQ_REQUEST_BASED:
1683 				DMEMIT("queue_mode mq ");
1684 				break;
1685 			}
1686 		}
1687 	}
1688 
1689 	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1690 		DMEMIT("0 ");
1691 	else
1692 		DMEMIT("1 %s ", m->hw_handler_name);
1693 
1694 	DMEMIT("%u ", m->nr_priority_groups);
1695 
1696 	if (m->next_pg)
1697 		pg_num = m->next_pg->pg_num;
1698 	else if (m->current_pg)
1699 		pg_num = m->current_pg->pg_num;
1700 	else
1701 		pg_num = (m->nr_priority_groups ? 1 : 0);
1702 
1703 	DMEMIT("%u ", pg_num);
1704 
1705 	switch (type) {
1706 	case STATUSTYPE_INFO:
1707 		list_for_each_entry(pg, &m->priority_groups, list) {
1708 			if (pg->bypassed)
1709 				state = 'D';	/* Disabled */
1710 			else if (pg == m->current_pg)
1711 				state = 'A';	/* Currently Active */
1712 			else
1713 				state = 'E';	/* Enabled */
1714 
1715 			DMEMIT("%c ", state);
1716 
1717 			if (pg->ps.type->status)
1718 				sz += pg->ps.type->status(&pg->ps, NULL, type,
1719 							  result + sz,
1720 							  maxlen - sz);
1721 			else
1722 				DMEMIT("0 ");
1723 
1724 			DMEMIT("%u %u ", pg->nr_pgpaths,
1725 			       pg->ps.type->info_args);
1726 
1727 			list_for_each_entry(p, &pg->pgpaths, list) {
1728 				DMEMIT("%s %s %u ", p->path.dev->name,
1729 				       p->is_active ? "A" : "F",
1730 				       p->fail_count);
1731 				if (pg->ps.type->status)
1732 					sz += pg->ps.type->status(&pg->ps,
1733 					      &p->path, type, result + sz,
1734 					      maxlen - sz);
1735 			}
1736 		}
1737 		break;
1738 
1739 	case STATUSTYPE_TABLE:
1740 		list_for_each_entry(pg, &m->priority_groups, list) {
1741 			DMEMIT("%s ", pg->ps.type->name);
1742 
1743 			if (pg->ps.type->status)
1744 				sz += pg->ps.type->status(&pg->ps, NULL, type,
1745 							  result + sz,
1746 							  maxlen - sz);
1747 			else
1748 				DMEMIT("0 ");
1749 
1750 			DMEMIT("%u %u ", pg->nr_pgpaths,
1751 			       pg->ps.type->table_args);
1752 
1753 			list_for_each_entry(p, &pg->pgpaths, list) {
1754 				DMEMIT("%s ", p->path.dev->name);
1755 				if (pg->ps.type->status)
1756 					sz += pg->ps.type->status(&pg->ps,
1757 					      &p->path, type, result + sz,
1758 					      maxlen - sz);
1759 			}
1760 		}
1761 		break;
1762 	}
1763 
1764 	spin_unlock_irqrestore(&m->lock, flags);
1765 }
1766 
1767 static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1768 {
1769 	int r = -EINVAL;
1770 	struct dm_dev *dev;
1771 	struct multipath *m = ti->private;
1772 	action_fn action;
1773 
1774 	mutex_lock(&m->work_mutex);
1775 
1776 	if (dm_suspended(ti)) {
1777 		r = -EBUSY;
1778 		goto out;
1779 	}
1780 
1781 	if (argc == 1) {
1782 		if (!strcasecmp(argv[0], "queue_if_no_path")) {
1783 			r = queue_if_no_path(m, true, false);
1784 			goto out;
1785 		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1786 			r = queue_if_no_path(m, false, false);
1787 			goto out;
1788 		}
1789 	}
1790 
1791 	if (argc != 2) {
1792 		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
1793 		goto out;
1794 	}
1795 
1796 	if (!strcasecmp(argv[0], "disable_group")) {
1797 		r = bypass_pg_num(m, argv[1], true);
1798 		goto out;
1799 	} else if (!strcasecmp(argv[0], "enable_group")) {
1800 		r = bypass_pg_num(m, argv[1], false);
1801 		goto out;
1802 	} else if (!strcasecmp(argv[0], "switch_group")) {
1803 		r = switch_pg_num(m, argv[1]);
1804 		goto out;
1805 	} else if (!strcasecmp(argv[0], "reinstate_path"))
1806 		action = reinstate_path;
1807 	else if (!strcasecmp(argv[0], "fail_path"))
1808 		action = fail_path;
1809 	else {
1810 		DMWARN("Unrecognised multipath message received: %s", argv[0]);
1811 		goto out;
1812 	}
1813 
1814 	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
1815 	if (r) {
1816 		DMWARN("message: error getting device %s",
1817 		       argv[1]);
1818 		goto out;
1819 	}
1820 
1821 	r = action_dev(m, dev, action);
1822 
1823 	dm_put_device(ti, dev);
1824 
1825 out:
1826 	mutex_unlock(&m->work_mutex);
1827 	return r;
1828 }
1829 
1830 static int multipath_prepare_ioctl(struct dm_target *ti,
1831 		struct block_device **bdev, fmode_t *mode)
1832 {
1833 	struct multipath *m = ti->private;
1834 	struct pgpath *current_pgpath;
1835 	int r;
1836 
1837 	current_pgpath = lockless_dereference(m->current_pgpath);
1838 	if (!current_pgpath)
1839 		current_pgpath = choose_pgpath(m, 0);
1840 
1841 	if (current_pgpath) {
1842 		if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
1843 			*bdev = current_pgpath->path.dev->bdev;
1844 			*mode = current_pgpath->path.dev->mode;
1845 			r = 0;
1846 		} else {
1847 			/* pg_init has not started or completed */
1848 			r = -ENOTCONN;
1849 		}
1850 	} else {
1851 		/* No path is available */
1852 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1853 			r = -ENOTCONN;
1854 		else
1855 			r = -EIO;
1856 	}
1857 
1858 	if (r == -ENOTCONN) {
1859 		if (!lockless_dereference(m->current_pg)) {
1860 			/* Path status changed, redo selection */
1861 			(void) choose_pgpath(m, 0);
1862 		}
1863 		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1864 			pg_init_all_paths(m);
1865 		dm_table_run_md_queue_async(m->ti->table);
1866 		process_queued_io_list(m);
1867 	}
1868 
1869 	/*
1870 	 * Only pass ioctls through if the device sizes match exactly.
1871 	 */
1872 	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
1873 		return 1;
1874 	return r;
1875 }
1876 
1877 static int multipath_iterate_devices(struct dm_target *ti,
1878 				     iterate_devices_callout_fn fn, void *data)
1879 {
1880 	struct multipath *m = ti->private;
1881 	struct priority_group *pg;
1882 	struct pgpath *p;
1883 	int ret = 0;
1884 
1885 	list_for_each_entry(pg, &m->priority_groups, list) {
1886 		list_for_each_entry(p, &pg->pgpaths, list) {
1887 			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
1888 			if (ret)
1889 				goto out;
1890 		}
1891 	}
1892 
1893 out:
1894 	return ret;
1895 }
1896 
1897 static int pgpath_busy(struct pgpath *pgpath)
1898 {
1899 	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1900 
1901 	return blk_lld_busy(q);
1902 }
1903 
1904 /*
1905  * We return "busy" only when we can map I/Os but the underlying devices
1906  * are busy (so even if we mapped I/Os now, they would wait on
1907  * the underlying queue).
1908  * In other words, if we want to kill I/Os or queue them inside us
1909  * due to map unavailability, we don't return "busy".  Otherwise,
1910  * dm core won't give us the I/Os and we can't do what we want.
1911  */
1912 static int multipath_busy(struct dm_target *ti)
1913 {
1914 	bool busy = false, has_active = false;
1915 	struct multipath *m = ti->private;
1916 	struct priority_group *pg, *next_pg;
1917 	struct pgpath *pgpath;
1918 
1919 	/* pg_init in progress */
1920 	if (atomic_read(&m->pg_init_in_progress))
1921 		return true;
1922 
1923 	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
1924 	if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1925 		return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
1926 
1927 	/* Guess which priority_group will be used at next mapping time */
1928 	pg = lockless_dereference(m->current_pg);
1929 	next_pg = lockless_dereference(m->next_pg);
1930 	if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
1931 		pg = next_pg;
1932 
1933 	if (!pg) {
1934 		/*
1935 		 * We don't know which pg will be used at the next mapping time.
1936 		 * We don't call choose_pgpath() here to avoid triggering
1937 		 * pg_init just because of a busy check.
1938 		 * So we don't know whether the underlying devices we will use
1939 		 * at the next mapping time are busy or not. Just try mapping.
1940 		 */
1941 		return busy;
1942 	}
1943 
1944 	/*
1945 	 * If there is at least one non-busy active path, the path selector
1946 	 * will be able to select it. So we consider such a pg as not busy.
1947 	 */
1948 	busy = true;
1949 	list_for_each_entry(pgpath, &pg->pgpaths, list) {
1950 		if (pgpath->is_active) {
1951 			has_active = true;
1952 			if (!pgpath_busy(pgpath)) {
1953 				busy = false;
1954 				break;
1955 			}
1956 		}
1957 	}
1958 
1959 	if (!has_active) {
1960 		/*
1961 		 * No active path in this pg, so this pg won't be used and
1962 		 * the current_pg will be changed at the next mapping time.
1963 		 * We need to try mapping to determine it.
1964 		 */
1965 		busy = false;
1966 	}
1967 
1968 	return busy;
1969 }
1970 
1971 /*-----------------------------------------------------------------
1972  * Module setup
1973  *---------------------------------------------------------------*/
1974 static struct target_type multipath_target = {
1975 	.name = "multipath",
1976 	.version = {1, 12, 0},
1977 	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
1978 	.module = THIS_MODULE,
1979 	.ctr = multipath_ctr,
1980 	.dtr = multipath_dtr,
1981 	.clone_and_map_rq = multipath_clone_and_map,
1982 	.release_clone_rq = multipath_release_clone,
1983 	.rq_end_io = multipath_end_io,
1984 	.map = multipath_map_bio,
1985 	.end_io = multipath_end_io_bio,
1986 	.presuspend = multipath_presuspend,
1987 	.postsuspend = multipath_postsuspend,
1988 	.resume = multipath_resume,
1989 	.status = multipath_status,
1990 	.message = multipath_message,
1991 	.prepare_ioctl = multipath_prepare_ioctl,
1992 	.iterate_devices = multipath_iterate_devices,
1993 	.busy = multipath_busy,
1994 };
1995 
1996 static int __init dm_multipath_init(void)
1997 {
1998 	int r;
1999 
2000 	r = dm_register_target(&multipath_target);
2001 	if (r < 0) {
2002 		DMERR("request-based register failed %d", r);
2003 		r = -EINVAL;
2004 		goto bad_register_target;
2005 	}
2006 
2007 	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
2008 	if (!kmultipathd) {
2009 		DMERR("failed to create workqueue kmpathd");
2010 		r = -ENOMEM;
2011 		goto bad_alloc_kmultipathd;
2012 	}
2013 
2014 	/*
2015 	 * A separate workqueue is used to handle the device handlers
2016 	 * to avoid overloading the existing workqueue. Overloading the
2017 	 * old workqueue would also create a bottleneck in the
2018 	 * path of storage hardware device activation.
2019 	 */
2020 	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
2021 						  WQ_MEM_RECLAIM);
2022 	if (!kmpath_handlerd) {
2023 		DMERR("failed to create workqueue kmpath_handlerd");
2024 		r = -ENOMEM;
2025 		goto bad_alloc_kmpath_handlerd;
2026 	}
2027 
2028 	return 0;
2029 
2030 bad_alloc_kmpath_handlerd:
2031 	destroy_workqueue(kmultipathd);
2032 bad_alloc_kmultipathd:
2033 	dm_unregister_target(&multipath_target);
2034 bad_register_target:
2035 	return r;
2036 }
2037 
2038 static void __exit dm_multipath_exit(void)
2039 {
2040 	destroy_workqueue(kmpath_handlerd);
2041 	destroy_workqueue(kmultipathd);
2042 
2043 	dm_unregister_target(&multipath_target);
2044 }
2045 
2046 module_init(dm_multipath_init);
2047 module_exit(dm_multipath_exit);
2048 
2049 MODULE_DESCRIPTION(DM_NAME " multipath target");
2050 MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
2051 MODULE_LICENSE("GPL");
2052