xref: /openbmc/linux/drivers/md/dm-mpath.c (revision c8f14e2b)
1 /*
2  * Copyright (C) 2003 Sistina Software Limited.
3  * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include <linux/device-mapper.h>
9 
10 #include "dm-rq.h"
11 #include "dm-bio-record.h"
12 #include "dm-path-selector.h"
13 #include "dm-uevent.h"
14 
15 #include <linux/blkdev.h>
16 #include <linux/ctype.h>
17 #include <linux/init.h>
18 #include <linux/mempool.h>
19 #include <linux/module.h>
20 #include <linux/pagemap.h>
21 #include <linux/slab.h>
22 #include <linux/time.h>
23 #include <linux/timer.h>
24 #include <linux/workqueue.h>
25 #include <linux/delay.h>
26 #include <scsi/scsi_dh.h>
27 #include <linux/atomic.h>
28 #include <linux/blk-mq.h>
29 
30 #define DM_MSG_PREFIX "multipath"
31 #define DM_PG_INIT_DELAY_MSECS 2000
32 #define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
33 #define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0
34 
35 static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;
36 
37 /* Path properties */
38 struct pgpath {
39 	struct list_head list;
40 
41 	struct priority_group *pg;	/* Owning PG */
42 	unsigned fail_count;		/* Cumulative failure count */
43 
44 	struct dm_path path;
45 	struct delayed_work activate_path;
46 
47 	bool is_active:1;		/* Path status */
48 };
49 
50 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
51 
52 /*
53  * Paths are grouped into Priority Groups and numbered from 1 upwards.
54  * Each has a path selector which controls which path gets used.
55  */
56 struct priority_group {
57 	struct list_head list;
58 
59 	struct multipath *m;		/* Owning multipath instance */
60 	struct path_selector ps;
61 
62 	unsigned pg_num;		/* Reference number */
63 	unsigned nr_pgpaths;		/* Number of paths in PG */
64 	struct list_head pgpaths;
65 
66 	bool bypassed:1;		/* Temporarily bypass this PG? */
67 };
68 
69 /* Multipath context */
70 struct multipath {
71 	unsigned long flags;		/* Multipath state flags */
72 
73 	spinlock_t lock;
74 	enum dm_queue_mode queue_mode;
75 
76 	struct pgpath *current_pgpath;
77 	struct priority_group *current_pg;
78 	struct priority_group *next_pg;	/* Switch to this PG if set */
79 
80 	atomic_t nr_valid_paths;	/* Total number of usable paths */
81 	unsigned nr_priority_groups;
82 	struct list_head priority_groups;
83 
84 	const char *hw_handler_name;
85 	char *hw_handler_params;
86 	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
87 	unsigned pg_init_retries;	/* Number of times to retry pg_init */
88 	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
89 	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
90 	atomic_t pg_init_count;		/* Number of times pg_init called */
91 
92 	struct mutex work_mutex;
93 	struct work_struct trigger_event;
94 	struct dm_target *ti;
95 
96 	struct work_struct process_queued_bios;
97 	struct bio_list queued_bios;
98 
99 	struct timer_list nopath_timer;	/* Timeout for queue_if_no_path */
100 };
101 
102 /*
103  * Context information attached to each io we process.
104  */
105 struct dm_mpath_io {
106 	struct pgpath *pgpath;
107 	size_t nr_bytes;
108 };
109 
110 typedef int (*action_fn) (struct pgpath *pgpath);
111 
112 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
113 static void trigger_event(struct work_struct *work);
114 static void activate_or_offline_path(struct pgpath *pgpath);
115 static void activate_path_work(struct work_struct *work);
116 static void process_queued_bios(struct work_struct *work);
117 static void queue_if_no_path_timeout_work(struct timer_list *t);
118 
119 /*-----------------------------------------------
120  * Multipath state flags.
121  *-----------------------------------------------*/
122 
123 #define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
124 #define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
125 #define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
126 #define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
127 #define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
128 #define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
129 #define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
130 
131 static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
132 {
133 	bool r = test_bit(MPATHF_bit, &m->flags);
134 
135 	if (r) {
136 		unsigned long flags;
137 		spin_lock_irqsave(&m->lock, flags);
138 		r = test_bit(MPATHF_bit, &m->flags);
139 		spin_unlock_irqrestore(&m->lock, flags);
140 	}
141 
142 	return r;
143 }
144 
145 /*-----------------------------------------------
146  * Allocation routines
147  *-----------------------------------------------*/
148 
149 static struct pgpath *alloc_pgpath(void)
150 {
151 	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
152 
153 	if (!pgpath)
154 		return NULL;
155 
156 	pgpath->is_active = true;
157 
158 	return pgpath;
159 }
160 
161 static void free_pgpath(struct pgpath *pgpath)
162 {
163 	kfree(pgpath);
164 }
165 
166 static struct priority_group *alloc_priority_group(void)
167 {
168 	struct priority_group *pg;
169 
170 	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
171 
172 	if (pg)
173 		INIT_LIST_HEAD(&pg->pgpaths);
174 
175 	return pg;
176 }
177 
178 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
179 {
180 	struct pgpath *pgpath, *tmp;
181 
182 	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
183 		list_del(&pgpath->list);
184 		dm_put_device(ti, pgpath->path.dev);
185 		free_pgpath(pgpath);
186 	}
187 }
188 
189 static void free_priority_group(struct priority_group *pg,
190 				struct dm_target *ti)
191 {
192 	struct path_selector *ps = &pg->ps;
193 
194 	if (ps->type) {
195 		ps->type->destroy(ps);
196 		dm_put_path_selector(ps->type);
197 	}
198 
199 	free_pgpaths(&pg->pgpaths, ti);
200 	kfree(pg);
201 }
202 
203 static struct multipath *alloc_multipath(struct dm_target *ti)
204 {
205 	struct multipath *m;
206 
207 	m = kzalloc(sizeof(*m), GFP_KERNEL);
208 	if (m) {
209 		INIT_LIST_HEAD(&m->priority_groups);
210 		spin_lock_init(&m->lock);
211 		atomic_set(&m->nr_valid_paths, 0);
212 		INIT_WORK(&m->trigger_event, trigger_event);
213 		mutex_init(&m->work_mutex);
214 
215 		m->queue_mode = DM_TYPE_NONE;
216 
217 		m->ti = ti;
218 		ti->private = m;
219 
220 		timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0);
221 	}
222 
223 	return m;
224 }
225 
226 static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
227 {
228 	if (m->queue_mode == DM_TYPE_NONE) {
229 		m->queue_mode = DM_TYPE_REQUEST_BASED;
230 	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
231 		INIT_WORK(&m->process_queued_bios, process_queued_bios);
232 		/*
233 		 * bio-based doesn't support any direct scsi_dh management;
234 		 * it just discovers if a scsi_dh is attached.
235 		 */
236 		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
237 	}
238 
239 	dm_table_set_type(ti->table, m->queue_mode);
240 
241 	/*
242 	 * Init fields that are only used when a scsi_dh is attached
243 	 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
244 	 */
245 	set_bit(MPATHF_QUEUE_IO, &m->flags);
246 	atomic_set(&m->pg_init_in_progress, 0);
247 	atomic_set(&m->pg_init_count, 0);
248 	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
249 	init_waitqueue_head(&m->pg_init_wait);
250 
251 	return 0;
252 }
253 
254 static void free_multipath(struct multipath *m)
255 {
256 	struct priority_group *pg, *tmp;
257 
258 	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
259 		list_del(&pg->list);
260 		free_priority_group(pg, m->ti);
261 	}
262 
263 	kfree(m->hw_handler_name);
264 	kfree(m->hw_handler_params);
265 	mutex_destroy(&m->work_mutex);
266 	kfree(m);
267 }
268 
269 static struct dm_mpath_io *get_mpio(union map_info *info)
270 {
271 	return info->ptr;
272 }
273 
274 static size_t multipath_per_bio_data_size(void)
275 {
276 	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
277 }
278 
279 static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
280 {
281 	return dm_per_bio_data(bio, multipath_per_bio_data_size());
282 }
283 
284 static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
285 {
286 	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
287 	void *bio_details = mpio + 1;
288 	return bio_details;
289 }
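/*
 * Illustrative sketch of the per-bio-data layout implied by the helpers
 * above (not literal code):
 *
 *     [ struct dm_mpath_io | struct dm_bio_details ]
 *     ^ dm_per_bio_data()    ^ mpio + 1
 *
 * multipath_per_bio_data_size() sums the two sizes, and
 * get_bio_details_from_mpio() simply steps past the dm_mpath_io.
 */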
290 
291 static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
292 {
293 	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
294 	struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);
295 
296 	mpio->nr_bytes = bio->bi_iter.bi_size;
297 	mpio->pgpath = NULL;
298 	*mpio_p = mpio;
299 
300 	dm_bio_record(bio_details, bio);
301 }
302 
303 /*-----------------------------------------------
304  * Path selection
305  *-----------------------------------------------*/
306 
307 static int __pg_init_all_paths(struct multipath *m)
308 {
309 	struct pgpath *pgpath;
310 	unsigned long pg_init_delay = 0;
311 
312 	lockdep_assert_held(&m->lock);
313 
314 	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
315 		return 0;
316 
317 	atomic_inc(&m->pg_init_count);
318 	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
319 
320 	/* pg_init_required was reset above; nothing more to do without a current PG */
321 	if (!m->current_pg)
322 		return 0;
323 
324 	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
325 		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
326 						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
327 	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
328 		/* Skip failed paths */
329 		if (!pgpath->is_active)
330 			continue;
331 		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
332 				       pg_init_delay))
333 			atomic_inc(&m->pg_init_in_progress);
334 	}
335 	return atomic_read(&m->pg_init_in_progress);
336 }
337 
338 static int pg_init_all_paths(struct multipath *m)
339 {
340 	int ret;
341 	unsigned long flags;
342 
343 	spin_lock_irqsave(&m->lock, flags);
344 	ret = __pg_init_all_paths(m);
345 	spin_unlock_irqrestore(&m->lock, flags);
346 
347 	return ret;
348 }
349 
350 static void __switch_pg(struct multipath *m, struct priority_group *pg)
351 {
352 	lockdep_assert_held(&m->lock);
353 
354 	m->current_pg = pg;
355 
356 	/* Must we initialise the PG first, and queue I/O till it's ready? */
357 	if (m->hw_handler_name) {
358 		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
359 		set_bit(MPATHF_QUEUE_IO, &m->flags);
360 	} else {
361 		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
362 		clear_bit(MPATHF_QUEUE_IO, &m->flags);
363 	}
364 
365 	atomic_set(&m->pg_init_count, 0);
366 }
367 
368 static struct pgpath *choose_path_in_pg(struct multipath *m,
369 					struct priority_group *pg,
370 					size_t nr_bytes)
371 {
372 	unsigned long flags;
373 	struct dm_path *path;
374 	struct pgpath *pgpath;
375 
376 	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
377 	if (!path)
378 		return ERR_PTR(-ENXIO);
379 
380 	pgpath = path_to_pgpath(path);
381 
382 	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
383 		/* Only update current_pgpath if pg changed */
384 		spin_lock_irqsave(&m->lock, flags);
385 		m->current_pgpath = pgpath;
386 		__switch_pg(m, pg);
387 		spin_unlock_irqrestore(&m->lock, flags);
388 	}
389 
390 	return pgpath;
391 }
392 
393 static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
394 {
395 	unsigned long flags;
396 	struct priority_group *pg;
397 	struct pgpath *pgpath;
398 	unsigned bypassed = 1;
399 
400 	if (!atomic_read(&m->nr_valid_paths)) {
401 		spin_lock_irqsave(&m->lock, flags);
402 		clear_bit(MPATHF_QUEUE_IO, &m->flags);
403 		spin_unlock_irqrestore(&m->lock, flags);
404 		goto failed;
405 	}
406 
407 	/* Were we instructed to switch PG? */
408 	if (READ_ONCE(m->next_pg)) {
409 		spin_lock_irqsave(&m->lock, flags);
410 		pg = m->next_pg;
411 		if (!pg) {
412 			spin_unlock_irqrestore(&m->lock, flags);
413 			goto check_current_pg;
414 		}
415 		m->next_pg = NULL;
416 		spin_unlock_irqrestore(&m->lock, flags);
417 		pgpath = choose_path_in_pg(m, pg, nr_bytes);
418 		if (!IS_ERR_OR_NULL(pgpath))
419 			return pgpath;
420 	}
421 
422 	/* Don't change PG until it has no remaining paths */
423 check_current_pg:
424 	pg = READ_ONCE(m->current_pg);
425 	if (pg) {
426 		pgpath = choose_path_in_pg(m, pg, nr_bytes);
427 		if (!IS_ERR_OR_NULL(pgpath))
428 			return pgpath;
429 	}
430 
431 	/*
432 	 * Loop through priority groups until we find a valid path.
433 	 * First time we skip PGs marked 'bypassed'.
434 	 * Second time we only try the ones we skipped, but set
435 	 * pg_init_delay_retry so we do not hammer controllers.
436 	 */
437 	do {
438 		list_for_each_entry(pg, &m->priority_groups, list) {
439 			if (pg->bypassed == !!bypassed)
440 				continue;
441 			pgpath = choose_path_in_pg(m, pg, nr_bytes);
442 			if (!IS_ERR_OR_NULL(pgpath)) {
443 				if (!bypassed) {
444 					spin_lock_irqsave(&m->lock, flags);
445 					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
446 					spin_unlock_irqrestore(&m->lock, flags);
447 				}
448 				return pgpath;
449 			}
450 		}
451 	} while (bypassed--);
452 
453 failed:
454 	spin_lock_irqsave(&m->lock, flags);
455 	m->current_pgpath = NULL;
456 	m->current_pg = NULL;
457 	spin_unlock_irqrestore(&m->lock, flags);
458 
459 	return NULL;
460 }
461 
462 /*
463  * dm_report_EIO() is a macro instead of a function to make pr_debug_ratelimited()
464  * report the name and line number of the function from which
465  * it has been invoked.
466  */
467 #define dm_report_EIO(m)						\
468 do {									\
469 	DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
470 		      dm_table_device_name((m)->ti->table),		\
471 		      test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
472 		      test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
473 		      dm_noflush_suspending((m)->ti));			\
474 } while (0)
475 
476 /*
477  * Check whether bios must be queued in the device-mapper core rather
478  * than here in the target.
479  */
480 static bool __must_push_back(struct multipath *m)
481 {
482 	return dm_noflush_suspending(m->ti);
483 }
484 
485 static bool must_push_back_rq(struct multipath *m)
486 {
487 	unsigned long flags;
488 	bool ret;
489 
490 	spin_lock_irqsave(&m->lock, flags);
491 	ret = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m));
492 	spin_unlock_irqrestore(&m->lock, flags);
493 
494 	return ret;
495 }
496 
497 /*
498  * Map cloned requests (request-based multipath)
499  */
500 static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
501 				   union map_info *map_context,
502 				   struct request **__clone)
503 {
504 	struct multipath *m = ti->private;
505 	size_t nr_bytes = blk_rq_bytes(rq);
506 	struct pgpath *pgpath;
507 	struct block_device *bdev;
508 	struct dm_mpath_io *mpio = get_mpio(map_context);
509 	struct request_queue *q;
510 	struct request *clone;
511 
512 	/* Do we need to select a new pgpath? */
513 	pgpath = READ_ONCE(m->current_pgpath);
514 	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
515 		pgpath = choose_pgpath(m, nr_bytes);
516 
517 	if (!pgpath) {
518 		if (must_push_back_rq(m))
519 			return DM_MAPIO_DELAY_REQUEUE;
520 		dm_report_EIO(m);	/* Failed */
521 		return DM_MAPIO_KILL;
522 	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
523 		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
524 		pg_init_all_paths(m);
525 		return DM_MAPIO_DELAY_REQUEUE;
526 	}
527 
528 	mpio->pgpath = pgpath;
529 	mpio->nr_bytes = nr_bytes;
530 
531 	bdev = pgpath->path.dev->bdev;
532 	q = bdev_get_queue(bdev);
533 	clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
534 			BLK_MQ_REQ_NOWAIT);
535 	if (IS_ERR(clone)) {
536 		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
537 		if (blk_queue_dying(q)) {
538 			atomic_inc(&m->pg_init_in_progress);
539 			activate_or_offline_path(pgpath);
540 			return DM_MAPIO_DELAY_REQUEUE;
541 		}
542 
543 		/*
544 		 * blk-mq's SCHED_RESTART can cover this requeue, so there is
545 		 * no need to handle it with DELAY_REQUEUE. More importantly,
546 		 * we must return DM_MAPIO_REQUEUE so that blk-mq gets the
547 		 * queue-busy feedback (via BLK_STS_RESOURCE); otherwise
548 		 * I/O merging can suffer.
549 		 */
550 		return DM_MAPIO_REQUEUE;
551 	}
552 	clone->bio = clone->biotail = NULL;
553 	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
554 	*__clone = clone;
555 
556 	if (pgpath->pg->ps.type->start_io)
557 		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
558 					      &pgpath->path,
559 					      nr_bytes);
560 	return DM_MAPIO_REMAPPED;
561 }
562 
563 static void multipath_release_clone(struct request *clone,
564 				    union map_info *map_context)
565 {
566 	if (unlikely(map_context)) {
567 		/*
568 		 * A non-NULL map_context means the caller is still in the
569 		 * map method, so multipath_clone_and_map() must be undone.
570 		 */
571 		struct dm_mpath_io *mpio = get_mpio(map_context);
572 		struct pgpath *pgpath = mpio->pgpath;
573 
574 		if (pgpath && pgpath->pg->ps.type->end_io)
575 			pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
576 						    &pgpath->path,
577 						    mpio->nr_bytes,
578 						    clone->io_start_time_ns);
579 	}
580 
581 	blk_mq_free_request(clone);
582 }
583 
584 /*
585  * Map cloned bios (bio-based multipath)
586  */
587 
588 static void __multipath_queue_bio(struct multipath *m, struct bio *bio)
589 {
590 	/* Queue for the daemon to resubmit */
591 	bio_list_add(&m->queued_bios, bio);
592 	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
593 		queue_work(kmultipathd, &m->process_queued_bios);
594 }
595 
596 static void multipath_queue_bio(struct multipath *m, struct bio *bio)
597 {
598 	unsigned long flags;
599 
600 	spin_lock_irqsave(&m->lock, flags);
601 	__multipath_queue_bio(m, bio);
602 	spin_unlock_irqrestore(&m->lock, flags);
603 }
604 
605 static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
606 {
607 	struct pgpath *pgpath;
608 	unsigned long flags;
609 
610 	/* Do we need to select a new pgpath? */
611 	pgpath = READ_ONCE(m->current_pgpath);
612 	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
613 		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
614 
615 	if (!pgpath) {
616 		spin_lock_irqsave(&m->lock, flags);
617 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
618 			__multipath_queue_bio(m, bio);
619 			pgpath = ERR_PTR(-EAGAIN);
620 		}
621 		spin_unlock_irqrestore(&m->lock, flags);
622 
623 	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
624 		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
625 		multipath_queue_bio(m, bio);
626 		pg_init_all_paths(m);
627 		return ERR_PTR(-EAGAIN);
628 	}
629 
630 	return pgpath;
631 }
632 
633 static int __multipath_map_bio(struct multipath *m, struct bio *bio,
634 			       struct dm_mpath_io *mpio)
635 {
636 	struct pgpath *pgpath = __map_bio(m, bio);
637 
638 	if (IS_ERR(pgpath))
639 		return DM_MAPIO_SUBMITTED;
640 
641 	if (!pgpath) {
642 		if (__must_push_back(m))
643 			return DM_MAPIO_REQUEUE;
644 		dm_report_EIO(m);
645 		return DM_MAPIO_KILL;
646 	}
647 
648 	mpio->pgpath = pgpath;
649 
650 	bio->bi_status = 0;
651 	bio_set_dev(bio, pgpath->path.dev->bdev);
652 	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
653 
654 	if (pgpath->pg->ps.type->start_io)
655 		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
656 					      &pgpath->path,
657 					      mpio->nr_bytes);
658 	return DM_MAPIO_REMAPPED;
659 }
660 
661 static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
662 {
663 	struct multipath *m = ti->private;
664 	struct dm_mpath_io *mpio = NULL;
665 
666 	multipath_init_per_bio_data(bio, &mpio);
667 	return __multipath_map_bio(m, bio, mpio);
668 }
669 
670 static void process_queued_io_list(struct multipath *m)
671 {
672 	if (m->queue_mode == DM_TYPE_REQUEST_BASED)
673 		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
674 	else if (m->queue_mode == DM_TYPE_BIO_BASED)
675 		queue_work(kmultipathd, &m->process_queued_bios);
676 }
677 
678 static void process_queued_bios(struct work_struct *work)
679 {
680 	int r;
681 	unsigned long flags;
682 	struct bio *bio;
683 	struct bio_list bios;
684 	struct blk_plug plug;
685 	struct multipath *m =
686 		container_of(work, struct multipath, process_queued_bios);
687 
688 	bio_list_init(&bios);
689 
690 	spin_lock_irqsave(&m->lock, flags);
691 
692 	if (bio_list_empty(&m->queued_bios)) {
693 		spin_unlock_irqrestore(&m->lock, flags);
694 		return;
695 	}
696 
697 	bio_list_merge(&bios, &m->queued_bios);
698 	bio_list_init(&m->queued_bios);
699 
700 	spin_unlock_irqrestore(&m->lock, flags);
701 
702 	blk_start_plug(&plug);
703 	while ((bio = bio_list_pop(&bios))) {
704 		struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
705 		dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
706 		r = __multipath_map_bio(m, bio, mpio);
707 		switch (r) {
708 		case DM_MAPIO_KILL:
709 			bio->bi_status = BLK_STS_IOERR;
710 			bio_endio(bio);
711 			break;
712 		case DM_MAPIO_REQUEUE:
713 			bio->bi_status = BLK_STS_DM_REQUEUE;
714 			bio_endio(bio);
715 			break;
716 		case DM_MAPIO_REMAPPED:
717 			submit_bio_noacct(bio);
718 			break;
719 		case DM_MAPIO_SUBMITTED:
720 			break;
721 		default:
722 			WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
723 		}
724 	}
725 	blk_finish_plug(&plug);
726 }
727 
728 /*
729  * If we run out of usable paths, should we queue I/O or error it?
730  */
731 static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
732 			    bool save_old_value, const char *caller)
733 {
734 	unsigned long flags;
735 	bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
736 	const char *dm_dev_name = dm_table_device_name(m->ti->table);
737 
738 	DMDEBUG("%s: %s caller=%s queue_if_no_path=%d save_old_value=%d",
739 		dm_dev_name, __func__, caller, queue_if_no_path, save_old_value);
740 
741 	spin_lock_irqsave(&m->lock, flags);
742 
743 	queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
744 	saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
745 
746 	if (save_old_value) {
747 		if (unlikely(!queue_if_no_path_bit && saved_queue_if_no_path_bit)) {
748 			DMERR("%s: QIFNP disabled but saved as enabled, saving again loses state, not saving!",
749 			      dm_dev_name);
750 		} else
751 			assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
752 	} else if (!queue_if_no_path && saved_queue_if_no_path_bit) {
753 		/* "fail_if_no_path" message disabled queuing; clear the saved state too. */
754 		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
755 	}
756 	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
757 
758 	DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
759 		dm_dev_name, __func__,
760 		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
761 		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
762 		dm_noflush_suspending(m->ti));
763 
764 	spin_unlock_irqrestore(&m->lock, flags);
765 
766 	if (!queue_if_no_path) {
767 		dm_table_run_md_queue_async(m->ti->table);
768 		process_queued_io_list(m);
769 	}
770 
771 	return 0;
772 }
773 
774 /*
775  * If the queue_if_no_path timeout fires, turn off queue_if_no_path and
776  * process any queued I/O.
777  */
778 static void queue_if_no_path_timeout_work(struct timer_list *t)
779 {
780 	struct multipath *m = from_timer(m, t, nopath_timer);
781 
782 	DMWARN("queue_if_no_path timeout on %s, failing queued IO",
783 	       dm_table_device_name(m->ti->table));
784 	queue_if_no_path(m, false, false, __func__);
785 }
786 
787 /*
788  * Enable the queue_if_no_path timeout if necessary.
789  * Called with m->lock held.
790  */
791 static void enable_nopath_timeout(struct multipath *m)
792 {
793 	unsigned long queue_if_no_path_timeout =
794 		READ_ONCE(queue_if_no_path_timeout_secs) * HZ;
795 
796 	lockdep_assert_held(&m->lock);
797 
798 	if (queue_if_no_path_timeout > 0 &&
799 	    atomic_read(&m->nr_valid_paths) == 0 &&
800 	    test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
801 		mod_timer(&m->nopath_timer,
802 			  jiffies + queue_if_no_path_timeout);
803 	}
804 }
805 
806 static void disable_nopath_timeout(struct multipath *m)
807 {
808 	del_timer_sync(&m->nopath_timer);
809 }
810 
811 /*
812  * An event is triggered whenever a path is taken out of use.
813  * Includes path failure and PG bypass.
814  */
815 static void trigger_event(struct work_struct *work)
816 {
817 	struct multipath *m =
818 		container_of(work, struct multipath, trigger_event);
819 
820 	dm_table_event(m->ti->table);
821 }
822 
823 /*-----------------------------------------------------------------
824  * Constructor/argument parsing:
825  * <#multipath feature args> [<arg>]*
826  * <#hw_handler args> [hw_handler [<arg>]*]
827  * <#priority groups>
828  * <initial priority group>
829  *     [<selector> <#selector args> [<arg>]*
830  *      <#paths> <#per-path selector args>
831  *         [<path> [<arg>]* ]+ ]+
832  *---------------------------------------------------------------*/
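/*
 * A hypothetical table line using this format (device numbers, length
 * and selector arguments are placeholders; "0 2097152 multipath" is the
 * generic dm-table <start> <length> <target> prefix):
 *
 *   0 2097152 multipath 1 queue_if_no_path 0 2 1
 *       round-robin 0 2 1 8:16 100 8:32 100
 *       round-robin 0 1 1 8:48 100
 *
 * i.e. one feature argument, no hw_handler arguments, two priority
 * groups with group 1 tried first, each group using a selector with no
 * selector args and one per-path selector argument per path.
 */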
833 static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
834 			       struct dm_target *ti)
835 {
836 	int r;
837 	struct path_selector_type *pst;
838 	unsigned ps_argc;
839 
840 	static const struct dm_arg _args[] = {
841 		{0, 1024, "invalid number of path selector args"},
842 	};
843 
844 	pst = dm_get_path_selector(dm_shift_arg(as));
845 	if (!pst) {
846 		ti->error = "unknown path selector type";
847 		return -EINVAL;
848 	}
849 
850 	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
851 	if (r) {
852 		dm_put_path_selector(pst);
853 		return -EINVAL;
854 	}
855 
856 	r = pst->create(&pg->ps, ps_argc, as->argv);
857 	if (r) {
858 		dm_put_path_selector(pst);
859 		ti->error = "path selector constructor failed";
860 		return r;
861 	}
862 
863 	pg->ps.type = pst;
864 	dm_consume_args(as, ps_argc);
865 
866 	return 0;
867 }
868 
869 static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
870 			 const char **attached_handler_name, char **error)
871 {
872 	struct request_queue *q = bdev_get_queue(bdev);
873 	int r;
874 
875 	if (mpath_double_check_test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, m)) {
876 retain:
877 		if (*attached_handler_name) {
878 			/*
879 			 * Clear any hw_handler_params associated with a
880 			 * handler that isn't already attached.
881 			 */
882 			if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
883 				kfree(m->hw_handler_params);
884 				m->hw_handler_params = NULL;
885 			}
886 
887 			/*
888 			 * Reset hw_handler_name to match the attached handler
889 			 *
890 			 * NB. This modifies the table line to show the actual
891 			 * handler instead of the one originally requested.
892 			 */
893 			kfree(m->hw_handler_name);
894 			m->hw_handler_name = *attached_handler_name;
895 			*attached_handler_name = NULL;
896 		}
897 	}
898 
899 	if (m->hw_handler_name) {
900 		r = scsi_dh_attach(q, m->hw_handler_name);
901 		if (r == -EBUSY) {
902 			DMINFO("retaining handler on device %pg", bdev);
903 			goto retain;
904 		}
905 		if (r < 0) {
906 			*error = "error attaching hardware handler";
907 			return r;
908 		}
909 
910 		if (m->hw_handler_params) {
911 			r = scsi_dh_set_params(q, m->hw_handler_params);
912 			if (r < 0) {
913 				*error = "unable to set hardware handler parameters";
914 				return r;
915 			}
916 		}
917 	}
918 
919 	return 0;
920 }
921 
922 static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
923 				 struct dm_target *ti)
924 {
925 	int r;
926 	struct pgpath *p;
927 	struct multipath *m = ti->private;
928 	struct request_queue *q;
929 	const char *attached_handler_name = NULL;
930 
931 	/* we need at least a path arg */
932 	if (as->argc < 1) {
933 		ti->error = "no device given";
934 		return ERR_PTR(-EINVAL);
935 	}
936 
937 	p = alloc_pgpath();
938 	if (!p)
939 		return ERR_PTR(-ENOMEM);
940 
941 	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
942 			  &p->path.dev);
943 	if (r) {
944 		ti->error = "error getting device";
945 		goto bad;
946 	}
947 
948 	q = bdev_get_queue(p->path.dev->bdev);
949 	attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
950 	if (attached_handler_name || m->hw_handler_name) {
951 		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
952 		r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
953 		kfree(attached_handler_name);
954 		if (r) {
955 			dm_put_device(ti, p->path.dev);
956 			goto bad;
957 		}
958 	}
959 
960 	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
961 	if (r) {
962 		dm_put_device(ti, p->path.dev);
963 		goto bad;
964 	}
965 
966 	return p;
967  bad:
968 	free_pgpath(p);
969 	return ERR_PTR(r);
970 }
971 
972 static struct priority_group *parse_priority_group(struct dm_arg_set *as,
973 						   struct multipath *m)
974 {
975 	static const struct dm_arg _args[] = {
976 		{1, 1024, "invalid number of paths"},
977 		{0, 1024, "invalid number of selector args"}
978 	};
979 
980 	int r;
981 	unsigned i, nr_selector_args, nr_args;
982 	struct priority_group *pg;
983 	struct dm_target *ti = m->ti;
984 
985 	if (as->argc < 2) {
986 		as->argc = 0;
987 		ti->error = "not enough priority group arguments";
988 		return ERR_PTR(-EINVAL);
989 	}
990 
991 	pg = alloc_priority_group();
992 	if (!pg) {
993 		ti->error = "couldn't allocate priority group";
994 		return ERR_PTR(-ENOMEM);
995 	}
996 	pg->m = m;
997 
998 	r = parse_path_selector(as, pg, ti);
999 	if (r)
1000 		goto bad;
1001 
1002 	/*
1003 	 * read the paths
1004 	 */
1005 	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
1006 	if (r)
1007 		goto bad;
1008 
1009 	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
1010 	if (r)
1011 		goto bad;
1012 
1013 	nr_args = 1 + nr_selector_args;
1014 	for (i = 0; i < pg->nr_pgpaths; i++) {
1015 		struct pgpath *pgpath;
1016 		struct dm_arg_set path_args;
1017 
1018 		if (as->argc < nr_args) {
1019 			ti->error = "not enough path parameters";
1020 			r = -EINVAL;
1021 			goto bad;
1022 		}
1023 
1024 		path_args.argc = nr_args;
1025 		path_args.argv = as->argv;
1026 
1027 		pgpath = parse_path(&path_args, &pg->ps, ti);
1028 		if (IS_ERR(pgpath)) {
1029 			r = PTR_ERR(pgpath);
1030 			goto bad;
1031 		}
1032 
1033 		pgpath->pg = pg;
1034 		list_add_tail(&pgpath->list, &pg->pgpaths);
1035 		dm_consume_args(as, nr_args);
1036 	}
1037 
1038 	return pg;
1039 
1040  bad:
1041 	free_priority_group(pg, ti);
1042 	return ERR_PTR(r);
1043 }
1044 
1045 static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
1046 {
1047 	unsigned hw_argc;
1048 	int ret;
1049 	struct dm_target *ti = m->ti;
1050 
1051 	static const struct dm_arg _args[] = {
1052 		{0, 1024, "invalid number of hardware handler args"},
1053 	};
1054 
1055 	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
1056 		return -EINVAL;
1057 
1058 	if (!hw_argc)
1059 		return 0;
1060 
1061 	if (m->queue_mode == DM_TYPE_BIO_BASED) {
1062 		dm_consume_args(as, hw_argc);
1063 		DMERR("bio-based multipath doesn't allow hardware handler args");
1064 		return 0;
1065 	}
1066 
1067 	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
1068 	if (!m->hw_handler_name)
1069 		return -EINVAL;
1070 
1071 	if (hw_argc > 1) {
1072 		char *p;
1073 		int i, j, len = 4;
1074 
1075 		for (i = 0; i <= hw_argc - 2; i++)
1076 			len += strlen(as->argv[i]) + 1;
1077 		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
1078 		if (!p) {
1079 			ti->error = "memory allocation failed";
1080 			ret = -ENOMEM;
1081 			goto fail;
1082 		}
1083 		j = sprintf(p, "%d", hw_argc - 1);
1084 		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
1085 			j = sprintf(p, "%s", as->argv[i]);
1086 	}
1087 	dm_consume_args(as, hw_argc - 1);
1088 
1089 	return 0;
1090 fail:
1091 	kfree(m->hw_handler_name);
1092 	m->hw_handler_name = NULL;
1093 	return ret;
1094 }
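/*
 * Hypothetical hw_handler argument strings for the format above (the
 * handler name is only an example): "0" (no handler), "1 alua", or
 * "<n> <handler> <arg>..." - any words after the handler name are
 * stored in hw_handler_params, preceded by their count, for a later
 * scsi_dh_set_params() call.
 */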
1095 
1096 static int parse_features(struct dm_arg_set *as, struct multipath *m)
1097 {
1098 	int r;
1099 	unsigned argc;
1100 	struct dm_target *ti = m->ti;
1101 	const char *arg_name;
1102 
1103 	static const struct dm_arg _args[] = {
1104 		{0, 8, "invalid number of feature args"},
1105 		{1, 50, "pg_init_retries must be between 1 and 50"},
1106 		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
1107 	};
1108 
1109 	r = dm_read_arg_group(_args, as, &argc, &ti->error);
1110 	if (r)
1111 		return -EINVAL;
1112 
1113 	if (!argc)
1114 		return 0;
1115 
1116 	do {
1117 		arg_name = dm_shift_arg(as);
1118 		argc--;
1119 
1120 		if (!strcasecmp(arg_name, "queue_if_no_path")) {
1121 			r = queue_if_no_path(m, true, false, __func__);
1122 			continue;
1123 		}
1124 
1125 		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
1126 			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
1127 			continue;
1128 		}
1129 
1130 		if (!strcasecmp(arg_name, "pg_init_retries") &&
1131 		    (argc >= 1)) {
1132 			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
1133 			argc--;
1134 			continue;
1135 		}
1136 
1137 		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
1138 		    (argc >= 1)) {
1139 			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
1140 			argc--;
1141 			continue;
1142 		}
1143 
1144 		if (!strcasecmp(arg_name, "queue_mode") &&
1145 		    (argc >= 1)) {
1146 			const char *queue_mode_name = dm_shift_arg(as);
1147 
1148 			if (!strcasecmp(queue_mode_name, "bio"))
1149 				m->queue_mode = DM_TYPE_BIO_BASED;
1150 			else if (!strcasecmp(queue_mode_name, "rq") ||
1151 				 !strcasecmp(queue_mode_name, "mq"))
1152 				m->queue_mode = DM_TYPE_REQUEST_BASED;
1153 			else {
1154 				ti->error = "Unknown 'queue_mode' requested";
1155 				r = -EINVAL;
1156 			}
1157 			argc--;
1158 			continue;
1159 		}
1160 
1161 		ti->error = "Unrecognised multipath feature request";
1162 		r = -EINVAL;
1163 	} while (argc && !r);
1164 
1165 	return r;
1166 }
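/*
 * Hypothetical feature strings accepted above (values are placeholders):
 * "0", "1 queue_if_no_path", "3 queue_if_no_path pg_init_retries 5",
 * "2 queue_mode bio" - the leading count covers every following word,
 * including option values.
 */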
1167 
1168 static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
1169 {
1170 	/* target arguments */
1171 	static const struct dm_arg _args[] = {
1172 		{0, 1024, "invalid number of priority groups"},
1173 		{0, 1024, "invalid initial priority group number"},
1174 	};
1175 
1176 	int r;
1177 	struct multipath *m;
1178 	struct dm_arg_set as;
1179 	unsigned pg_count = 0;
1180 	unsigned next_pg_num;
1181 	unsigned long flags;
1182 
1183 	as.argc = argc;
1184 	as.argv = argv;
1185 
1186 	m = alloc_multipath(ti);
1187 	if (!m) {
1188 		ti->error = "can't allocate multipath";
1189 		return -EINVAL;
1190 	}
1191 
1192 	r = parse_features(&as, m);
1193 	if (r)
1194 		goto bad;
1195 
1196 	r = alloc_multipath_stage2(ti, m);
1197 	if (r)
1198 		goto bad;
1199 
1200 	r = parse_hw_handler(&as, m);
1201 	if (r)
1202 		goto bad;
1203 
1204 	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
1205 	if (r)
1206 		goto bad;
1207 
1208 	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
1209 	if (r)
1210 		goto bad;
1211 
1212 	if ((!m->nr_priority_groups && next_pg_num) ||
1213 	    (m->nr_priority_groups && !next_pg_num)) {
1214 		ti->error = "invalid initial priority group";
1215 		r = -EINVAL;
1216 		goto bad;
1217 	}
1218 
1219 	/* parse the priority groups */
1220 	while (as.argc) {
1221 		struct priority_group *pg;
1222 		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
1223 
1224 		pg = parse_priority_group(&as, m);
1225 		if (IS_ERR(pg)) {
1226 			r = PTR_ERR(pg);
1227 			goto bad;
1228 		}
1229 
1230 		nr_valid_paths += pg->nr_pgpaths;
1231 		atomic_set(&m->nr_valid_paths, nr_valid_paths);
1232 
1233 		list_add_tail(&pg->list, &m->priority_groups);
1234 		pg_count++;
1235 		pg->pg_num = pg_count;
1236 		if (!--next_pg_num)
1237 			m->next_pg = pg;
1238 	}
1239 
1240 	if (pg_count != m->nr_priority_groups) {
1241 		ti->error = "priority group count mismatch";
1242 		r = -EINVAL;
1243 		goto bad;
1244 	}
1245 
1246 	spin_lock_irqsave(&m->lock, flags);
1247 	enable_nopath_timeout(m);
1248 	spin_unlock_irqrestore(&m->lock, flags);
1249 
1250 	ti->num_flush_bios = 1;
1251 	ti->num_discard_bios = 1;
1252 	ti->num_write_zeroes_bios = 1;
1253 	if (m->queue_mode == DM_TYPE_BIO_BASED)
1254 		ti->per_io_data_size = multipath_per_bio_data_size();
1255 	else
1256 		ti->per_io_data_size = sizeof(struct dm_mpath_io);
1257 
1258 	return 0;
1259 
1260  bad:
1261 	free_multipath(m);
1262 	return r;
1263 }
1264 
1265 static void multipath_wait_for_pg_init_completion(struct multipath *m)
1266 {
1267 	DEFINE_WAIT(wait);
1268 
1269 	while (1) {
1270 		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
1271 
1272 		if (!atomic_read(&m->pg_init_in_progress))
1273 			break;
1274 
1275 		io_schedule();
1276 	}
1277 	finish_wait(&m->pg_init_wait, &wait);
1278 }
1279 
1280 static void flush_multipath_work(struct multipath *m)
1281 {
1282 	if (m->hw_handler_name) {
1283 		unsigned long flags;
1284 
1285 		if (!atomic_read(&m->pg_init_in_progress))
1286 			goto skip;
1287 
1288 		spin_lock_irqsave(&m->lock, flags);
1289 		if (atomic_read(&m->pg_init_in_progress) &&
1290 		    !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
1291 			spin_unlock_irqrestore(&m->lock, flags);
1292 
1293 			flush_workqueue(kmpath_handlerd);
1294 			multipath_wait_for_pg_init_completion(m);
1295 
1296 			spin_lock_irqsave(&m->lock, flags);
1297 			clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1298 		}
1299 		spin_unlock_irqrestore(&m->lock, flags);
1300 	}
1301 skip:
1302 	if (m->queue_mode == DM_TYPE_BIO_BASED)
1303 		flush_work(&m->process_queued_bios);
1304 	flush_work(&m->trigger_event);
1305 }
1306 
1307 static void multipath_dtr(struct dm_target *ti)
1308 {
1309 	struct multipath *m = ti->private;
1310 
1311 	disable_nopath_timeout(m);
1312 	flush_multipath_work(m);
1313 	free_multipath(m);
1314 }
1315 
1316 /*
1317  * Take a path out of use.
1318  */
1319 static int fail_path(struct pgpath *pgpath)
1320 {
1321 	unsigned long flags;
1322 	struct multipath *m = pgpath->pg->m;
1323 
1324 	spin_lock_irqsave(&m->lock, flags);
1325 
1326 	if (!pgpath->is_active)
1327 		goto out;
1328 
1329 	DMWARN("%s: Failing path %s.",
1330 	       dm_table_device_name(m->ti->table),
1331 	       pgpath->path.dev->name);
1332 
1333 	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
1334 	pgpath->is_active = false;
1335 	pgpath->fail_count++;
1336 
1337 	atomic_dec(&m->nr_valid_paths);
1338 
1339 	if (pgpath == m->current_pgpath)
1340 		m->current_pgpath = NULL;
1341 
1342 	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
1343 		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
1344 
1345 	schedule_work(&m->trigger_event);
1346 
1347 	enable_nopath_timeout(m);
1348 
1349 out:
1350 	spin_unlock_irqrestore(&m->lock, flags);
1351 
1352 	return 0;
1353 }
1354 
1355 /*
1356  * Reinstate a previously-failed path
1357  */
1358 static int reinstate_path(struct pgpath *pgpath)
1359 {
1360 	int r = 0, run_queue = 0;
1361 	unsigned long flags;
1362 	struct multipath *m = pgpath->pg->m;
1363 	unsigned nr_valid_paths;
1364 
1365 	spin_lock_irqsave(&m->lock, flags);
1366 
1367 	if (pgpath->is_active)
1368 		goto out;
1369 
1370 	DMWARN("%s: Reinstating path %s.",
1371 	       dm_table_device_name(m->ti->table),
1372 	       pgpath->path.dev->name);
1373 
1374 	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1375 	if (r)
1376 		goto out;
1377 
1378 	pgpath->is_active = true;
1379 
1380 	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
1381 	if (nr_valid_paths == 1) {
1382 		m->current_pgpath = NULL;
1383 		run_queue = 1;
1384 	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1385 		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1386 			atomic_inc(&m->pg_init_in_progress);
1387 	}
1388 
1389 	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1390 		       pgpath->path.dev->name, nr_valid_paths);
1391 
1392 	schedule_work(&m->trigger_event);
1393 
1394 out:
1395 	spin_unlock_irqrestore(&m->lock, flags);
1396 	if (run_queue) {
1397 		dm_table_run_md_queue_async(m->ti->table);
1398 		process_queued_io_list(m);
1399 	}
1400 
1401 	if (pgpath->is_active)
1402 		disable_nopath_timeout(m);
1403 
1404 	return r;
1405 }
1406 
1407 /*
1408  * Fail or reinstate all paths that match the provided struct dm_dev.
1409  */
1410 static int action_dev(struct multipath *m, struct dm_dev *dev,
1411 		      action_fn action)
1412 {
1413 	int r = -EINVAL;
1414 	struct pgpath *pgpath;
1415 	struct priority_group *pg;
1416 
1417 	list_for_each_entry(pg, &m->priority_groups, list) {
1418 		list_for_each_entry(pgpath, &pg->pgpaths, list) {
1419 			if (pgpath->path.dev == dev)
1420 				r = action(pgpath);
1421 		}
1422 	}
1423 
1424 	return r;
1425 }
1426 
1427 /*
1428  * Temporarily try to avoid having to use the specified PG
1429  */
1430 static void bypass_pg(struct multipath *m, struct priority_group *pg,
1431 		      bool bypassed)
1432 {
1433 	unsigned long flags;
1434 
1435 	spin_lock_irqsave(&m->lock, flags);
1436 
1437 	pg->bypassed = bypassed;
1438 	m->current_pgpath = NULL;
1439 	m->current_pg = NULL;
1440 
1441 	spin_unlock_irqrestore(&m->lock, flags);
1442 
1443 	schedule_work(&m->trigger_event);
1444 }
1445 
1446 /*
1447  * Switch to using the specified PG from the next I/O that gets mapped
1448  */
1449 static int switch_pg_num(struct multipath *m, const char *pgstr)
1450 {
1451 	struct priority_group *pg;
1452 	unsigned pgnum;
1453 	unsigned long flags;
1454 	char dummy;
1455 
1456 	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1457 	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1458 		DMWARN("invalid PG number supplied to switch_pg_num");
1459 		return -EINVAL;
1460 	}
1461 
1462 	spin_lock_irqsave(&m->lock, flags);
1463 	list_for_each_entry(pg, &m->priority_groups, list) {
1464 		pg->bypassed = false;
1465 		if (--pgnum)
1466 			continue;
1467 
1468 		m->current_pgpath = NULL;
1469 		m->current_pg = NULL;
1470 		m->next_pg = pg;
1471 	}
1472 	spin_unlock_irqrestore(&m->lock, flags);
1473 
1474 	schedule_work(&m->trigger_event);
1475 	return 0;
1476 }
1477 
1478 /*
1479  * Set/clear bypassed status of a PG.
1480  * PGs are numbered upwards from 1 in the order they were declared.
1481  */
1482 static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
1483 {
1484 	struct priority_group *pg;
1485 	unsigned pgnum;
1486 	char dummy;
1487 
1488 	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1489 	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1490 		DMWARN("invalid PG number supplied to bypass_pg");
1491 		return -EINVAL;
1492 	}
1493 
1494 	list_for_each_entry(pg, &m->priority_groups, list) {
1495 		if (!--pgnum)
1496 			break;
1497 	}
1498 
1499 	bypass_pg(m, pg, bypassed);
1500 	return 0;
1501 }
1502 
1503 /*
1504  * Decide whether pg_init may be retried; if so, flag it as required again.
1505  */
1506 static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1507 {
1508 	unsigned long flags;
1509 	bool limit_reached = false;
1510 
1511 	spin_lock_irqsave(&m->lock, flags);
1512 
1513 	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
1514 	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
1515 		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
1516 	else
1517 		limit_reached = true;
1518 
1519 	spin_unlock_irqrestore(&m->lock, flags);
1520 
1521 	return limit_reached;
1522 }
1523 
1524 static void pg_init_done(void *data, int errors)
1525 {
1526 	struct pgpath *pgpath = data;
1527 	struct priority_group *pg = pgpath->pg;
1528 	struct multipath *m = pg->m;
1529 	unsigned long flags;
1530 	bool delay_retry = false;
1531 
1532 	/* device or driver problems */
1533 	switch (errors) {
1534 	case SCSI_DH_OK:
1535 		break;
1536 	case SCSI_DH_NOSYS:
1537 		if (!m->hw_handler_name) {
1538 			errors = 0;
1539 			break;
1540 		}
1541 		DMERR("Could not failover the device: Handler scsi_dh_%s "
1542 		      "Error %d.", m->hw_handler_name, errors);
1543 		/*
1544 		 * Fail the path for now, so we do not ping-pong
1545 		 */
1546 		fail_path(pgpath);
1547 		break;
1548 	case SCSI_DH_DEV_TEMP_BUSY:
1549 		/*
1550 		 * The controller is probably doing something like a FW
1551 		 * upgrade, so try the other PG.
1552 		 */
1553 		bypass_pg(m, pg, true);
1554 		break;
1555 	case SCSI_DH_RETRY:
1556 		/* Wait before retrying. */
1557 		delay_retry = true;
1558 		fallthrough;
1559 	case SCSI_DH_IMM_RETRY:
1560 	case SCSI_DH_RES_TEMP_UNAVAIL:
1561 		if (pg_init_limit_reached(m, pgpath))
1562 			fail_path(pgpath);
1563 		errors = 0;
1564 		break;
1565 	case SCSI_DH_DEV_OFFLINED:
1566 	default:
1567 		/*
1568 		 * We probably do not want to fail the path for a device
1569 		 * error, but this is what the old dm did. In future
1570 		 * patches we can do more advanced handling.
1571 		 */
1572 		fail_path(pgpath);
1573 	}
1574 
1575 	spin_lock_irqsave(&m->lock, flags);
1576 	if (errors) {
1577 		if (pgpath == m->current_pgpath) {
1578 			DMERR("Could not failover device. Error %d.", errors);
1579 			m->current_pgpath = NULL;
1580 			m->current_pg = NULL;
1581 		}
1582 	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1583 		pg->bypassed = false;
1584 
1585 	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
1586 		/* Activations of other paths are still ongoing */
1587 		goto out;
1588 
1589 	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
1590 		if (delay_retry)
1591 			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1592 		else
1593 			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1594 
1595 		if (__pg_init_all_paths(m))
1596 			goto out;
1597 	}
1598 	clear_bit(MPATHF_QUEUE_IO, &m->flags);
1599 
1600 	process_queued_io_list(m);
1601 
1602 	/*
1603 	 * Wake up any thread waiting to suspend.
1604 	 */
1605 	wake_up(&m->pg_init_wait);
1606 
1607 out:
1608 	spin_unlock_irqrestore(&m->lock, flags);
1609 }
1610 
1611 static void activate_or_offline_path(struct pgpath *pgpath)
1612 {
1613 	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1614 
1615 	if (pgpath->is_active && !blk_queue_dying(q))
1616 		scsi_dh_activate(q, pg_init_done, pgpath);
1617 	else
1618 		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1619 }
1620 
1621 static void activate_path_work(struct work_struct *work)
1622 {
1623 	struct pgpath *pgpath =
1624 		container_of(work, struct pgpath, activate_path.work);
1625 
1626 	activate_or_offline_path(pgpath);
1627 }
1628 
1629 static int multipath_end_io(struct dm_target *ti, struct request *clone,
1630 			    blk_status_t error, union map_info *map_context)
1631 {
1632 	struct dm_mpath_io *mpio = get_mpio(map_context);
1633 	struct pgpath *pgpath = mpio->pgpath;
1634 	int r = DM_ENDIO_DONE;
1635 
1636 	/*
1637 	 * We don't queue any clone request inside the multipath target
1638 	 * during end I/O handling, since those clone requests don't have
1639 	 * bio clones.  If we queue them inside the multipath target,
1640 	 * we need to make bio clones, that requires memory allocation.
1641 	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
1642 	 *  don't have bio clones.)
1643 	 * Instead of queueing the clone request here, we queue the original
1644 	 * request into dm core, which will remake a clone request and
1645 	 * clone bios for it and resubmit it later.
1646 	 */
1647 	if (error && blk_path_error(error)) {
1648 		struct multipath *m = ti->private;
1649 
1650 		if (error == BLK_STS_RESOURCE)
1651 			r = DM_ENDIO_DELAY_REQUEUE;
1652 		else
1653 			r = DM_ENDIO_REQUEUE;
1654 
1655 		if (pgpath)
1656 			fail_path(pgpath);
1657 
1658 		if (!atomic_read(&m->nr_valid_paths) &&
1659 		    !must_push_back_rq(m)) {
1660 			if (error == BLK_STS_IOERR)
1661 				dm_report_EIO(m);
1662 			/* complete with the original error */
1663 			r = DM_ENDIO_DONE;
1664 		}
1665 	}
1666 
1667 	if (pgpath) {
1668 		struct path_selector *ps = &pgpath->pg->ps;
1669 
1670 		if (ps->type->end_io)
1671 			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
1672 					 clone->io_start_time_ns);
1673 	}
1674 
1675 	return r;
1676 }
1677 
1678 static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
1679 				blk_status_t *error)
1680 {
1681 	struct multipath *m = ti->private;
1682 	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
1683 	struct pgpath *pgpath = mpio->pgpath;
1684 	unsigned long flags;
1685 	int r = DM_ENDIO_DONE;
1686 
1687 	if (!*error || !blk_path_error(*error))
1688 		goto done;
1689 
1690 	if (pgpath)
1691 		fail_path(pgpath);
1692 
1693 	if (!atomic_read(&m->nr_valid_paths)) {
1694 		spin_lock_irqsave(&m->lock, flags);
1695 		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1696 			if (__must_push_back(m)) {
1697 				r = DM_ENDIO_REQUEUE;
1698 			} else {
1699 				dm_report_EIO(m);
1700 				*error = BLK_STS_IOERR;
1701 			}
1702 			spin_unlock_irqrestore(&m->lock, flags);
1703 			goto done;
1704 		}
1705 		spin_unlock_irqrestore(&m->lock, flags);
1706 	}
1707 
1708 	multipath_queue_bio(m, clone);
1709 	r = DM_ENDIO_INCOMPLETE;
1710 done:
1711 	if (pgpath) {
1712 		struct path_selector *ps = &pgpath->pg->ps;
1713 
1714 		if (ps->type->end_io)
1715 			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
1716 					 dm_start_time_ns_from_clone(clone));
1717 	}
1718 
1719 	return r;
1720 }
1721 
1722 /*
1723  * Suspend with flush can't complete until all the I/O is processed,
1724  * so if the last path fails we must error any remaining I/O.
1725  * - Note that if freeze_bdev fails while suspending, the
1726  *   queue_if_no_path state is lost - userspace should reset it.
1727  * Otherwise, during noflush suspend, queue_if_no_path will not change.
1728  */
1729 static void multipath_presuspend(struct dm_target *ti)
1730 {
1731 	struct multipath *m = ti->private;
1732 
1733 	/* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
1734 	if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
1735 		queue_if_no_path(m, false, true, __func__);
1736 }
1737 
1738 static void multipath_postsuspend(struct dm_target *ti)
1739 {
1740 	struct multipath *m = ti->private;
1741 
1742 	mutex_lock(&m->work_mutex);
1743 	flush_multipath_work(m);
1744 	mutex_unlock(&m->work_mutex);
1745 }
1746 
1747 /*
1748  * Restore the queue_if_no_path setting.
1749  */
1750 static void multipath_resume(struct dm_target *ti)
1751 {
1752 	struct multipath *m = ti->private;
1753 	unsigned long flags;
1754 
1755 	spin_lock_irqsave(&m->lock, flags);
1756 	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
1757 		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
1758 		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
1759 	}
1760 
1761 	DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
1762 		dm_table_device_name(m->ti->table), __func__,
1763 		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
1764 		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
1765 
1766 	spin_unlock_irqrestore(&m->lock, flags);
1767 }
1768 
1769 /*
1770  * Info output has the following format:
1771  * num_multipath_feature_args [multipath_feature_args]*
1772  * num_handler_status_args [handler_status_args]*
1773  * num_groups init_group_number
1774  *            [A|D|E num_ps_status_args [ps_status_args]*
1775  *             num_paths num_selector_args
1776  *             [path_dev A|F fail_count [selector_args]* ]+ ]+
1777  *
1778  * Table output has the following format (identical to the constructor string):
1779  * num_feature_args [features_args]*
1780  * num_handler_args hw_handler [hw_handler_args]*
1781  * num_groups init_group_number
1782  *     [priority selector-name num_ps_args [ps_args]*
1783  *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1784  */
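/*
 * A hypothetical STATUSTYPE_INFO line for one group of two healthy
 * paths, assuming a path selector that emits no status of its own
 * (all numbers are placeholders):
 *
 *   2 0 3 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 *
 * i.e. two feature status values (the queue_io flag and pg_init_count),
 * no handler status args, one group with group 1 active ('A'), then
 * each path listed as <dev> A|F <fail_count>.
 */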
1785 static void multipath_status(struct dm_target *ti, status_type_t type,
1786 			     unsigned status_flags, char *result, unsigned maxlen)
1787 {
1788 	int sz = 0, pg_counter, pgpath_counter;
1789 	unsigned long flags;
1790 	struct multipath *m = ti->private;
1791 	struct priority_group *pg;
1792 	struct pgpath *p;
1793 	unsigned pg_num;
1794 	char state;
1795 
1796 	spin_lock_irqsave(&m->lock, flags);
1797 
1798 	/* Features */
1799 	if (type == STATUSTYPE_INFO)
1800 		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
1801 		       atomic_read(&m->pg_init_count));
1802 	else {
1803 		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
1804 			      (m->pg_init_retries > 0) * 2 +
1805 			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1806 			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
1807 			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
1808 
1809 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1810 			DMEMIT("queue_if_no_path ");
1811 		if (m->pg_init_retries)
1812 			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1813 		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1814 			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1815 		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
1816 			DMEMIT("retain_attached_hw_handler ");
1817 		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
1818 			switch (m->queue_mode) {
1819 			case DM_TYPE_BIO_BASED:
1820 				DMEMIT("queue_mode bio ");
1821 				break;
1822 			default:
1823 				WARN_ON_ONCE(true);
1824 				break;
1825 			}
1826 		}
1827 	}
1828 
1829 	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1830 		DMEMIT("0 ");
1831 	else
1832 		DMEMIT("1 %s ", m->hw_handler_name);
1833 
1834 	DMEMIT("%u ", m->nr_priority_groups);
1835 
1836 	if (m->next_pg)
1837 		pg_num = m->next_pg->pg_num;
1838 	else if (m->current_pg)
1839 		pg_num = m->current_pg->pg_num;
1840 	else
1841 		pg_num = (m->nr_priority_groups ? 1 : 0);
1842 
1843 	DMEMIT("%u ", pg_num);
1844 
1845 	switch (type) {
1846 	case STATUSTYPE_INFO:
1847 		list_for_each_entry(pg, &m->priority_groups, list) {
1848 			if (pg->bypassed)
1849 				state = 'D';	/* Disabled */
1850 			else if (pg == m->current_pg)
1851 				state = 'A';	/* Currently Active */
1852 			else
1853 				state = 'E';	/* Enabled */
1854 
1855 			DMEMIT("%c ", state);
1856 
1857 			if (pg->ps.type->status)
1858 				sz += pg->ps.type->status(&pg->ps, NULL, type,
1859 							  result + sz,
1860 							  maxlen - sz);
1861 			else
1862 				DMEMIT("0 ");
1863 
1864 			DMEMIT("%u %u ", pg->nr_pgpaths,
1865 			       pg->ps.type->info_args);
1866 
1867 			list_for_each_entry(p, &pg->pgpaths, list) {
1868 				DMEMIT("%s %s %u ", p->path.dev->name,
1869 				       p->is_active ? "A" : "F",
1870 				       p->fail_count);
1871 				if (pg->ps.type->status)
1872 					sz += pg->ps.type->status(&pg->ps,
1873 					      &p->path, type, result + sz,
1874 					      maxlen - sz);
1875 			}
1876 		}
1877 		break;
1878 
1879 	case STATUSTYPE_TABLE:
1880 		list_for_each_entry(pg, &m->priority_groups, list) {
1881 			DMEMIT("%s ", pg->ps.type->name);
1882 
1883 			if (pg->ps.type->status)
1884 				sz += pg->ps.type->status(&pg->ps, NULL, type,
1885 							  result + sz,
1886 							  maxlen - sz);
1887 			else
1888 				DMEMIT("0 ");
1889 
1890 			DMEMIT("%u %u ", pg->nr_pgpaths,
1891 			       pg->ps.type->table_args);
1892 
1893 			list_for_each_entry(p, &pg->pgpaths, list) {
1894 				DMEMIT("%s ", p->path.dev->name);
1895 				if (pg->ps.type->status)
1896 					sz += pg->ps.type->status(&pg->ps,
1897 					      &p->path, type, result + sz,
1898 					      maxlen - sz);
1899 			}
1900 		}
1901 		break;
1902 
1903 	case STATUSTYPE_IMA:
1904 		sz = 0; /* reset the result pointer */
1905 
1906 		DMEMIT_TARGET_NAME_VERSION(ti->type);
1907 		DMEMIT(",nr_priority_groups=%u", m->nr_priority_groups);
1908 
1909 		pg_counter = 0;
1910 		list_for_each_entry(pg, &m->priority_groups, list) {
1911 			if (pg->bypassed)
1912 				state = 'D';	/* Disabled */
1913 			else if (pg == m->current_pg)
1914 				state = 'A';	/* Currently Active */
1915 			else
1916 				state = 'E';	/* Enabled */
1917 			DMEMIT(",pg_state_%d=%c", pg_counter, state);
1918 			DMEMIT(",nr_pgpaths_%d=%u", pg_counter, pg->nr_pgpaths);
1919 			DMEMIT(",path_selector_name_%d=%s", pg_counter, pg->ps.type->name);
1920 
1921 			pgpath_counter = 0;
1922 			list_for_each_entry(p, &pg->pgpaths, list) {
1923 				DMEMIT(",path_name_%d_%d=%s,is_active_%d_%d=%c,fail_count_%d_%d=%u",
1924 				       pg_counter, pgpath_counter, p->path.dev->name,
1925 				       pg_counter, pgpath_counter, p->is_active ? 'A' : 'F',
1926 				       pg_counter, pgpath_counter, p->fail_count);
1927 				if (pg->ps.type->status) {
1928 					DMEMIT(",path_selector_status_%d_%d=",
1929 					       pg_counter, pgpath_counter);
1930 					sz += pg->ps.type->status(&pg->ps, &p->path,
1931 								  type, result + sz,
1932 								  maxlen - sz);
1933 				}
1934 				pgpath_counter++;
1935 			}
1936 			pg_counter++;
1937 		}
1938 		DMEMIT(";");
1939 		break;
1940 	}
1941 
1942 	spin_unlock_irqrestore(&m->lock, flags);
1943 }
1944 
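/*
 * Handle a message sent to the multipath target, typically issued from
 * userspace as "dmsetup message <map_name> 0 <message>".  Supported
 * messages are:
 *
 *	queue_if_no_path / fail_if_no_path
 *	disable_group <pg_num> / enable_group <pg_num> / switch_group <pg_num>
 *	reinstate_path <path_dev> / fail_path <path_dev>
 *
 * e.g. "dmsetup message mpatha 0 fail_path 8:32"
 */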
1945 static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
1946 			     char *result, unsigned maxlen)
1947 {
1948 	int r = -EINVAL;
1949 	struct dm_dev *dev;
1950 	struct multipath *m = ti->private;
1951 	action_fn action;
1952 	unsigned long flags;
1953 
1954 	mutex_lock(&m->work_mutex);
1955 
1956 	if (dm_suspended(ti)) {
1957 		r = -EBUSY;
1958 		goto out;
1959 	}
1960 
1961 	if (argc == 1) {
1962 		if (!strcasecmp(argv[0], "queue_if_no_path")) {
1963 			r = queue_if_no_path(m, true, false, __func__);
1964 			spin_lock_irqsave(&m->lock, flags);
1965 			enable_nopath_timeout(m);
1966 			spin_unlock_irqrestore(&m->lock, flags);
1967 			goto out;
1968 		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1969 			r = queue_if_no_path(m, false, false, __func__);
1970 			disable_nopath_timeout(m);
1971 			goto out;
1972 		}
1973 	}
1974 
1975 	if (argc != 2) {
1976 		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
1977 		goto out;
1978 	}
1979 
1980 	if (!strcasecmp(argv[0], "disable_group")) {
1981 		r = bypass_pg_num(m, argv[1], true);
1982 		goto out;
1983 	} else if (!strcasecmp(argv[0], "enable_group")) {
1984 		r = bypass_pg_num(m, argv[1], false);
1985 		goto out;
1986 	} else if (!strcasecmp(argv[0], "switch_group")) {
1987 		r = switch_pg_num(m, argv[1]);
1988 		goto out;
1989 	} else if (!strcasecmp(argv[0], "reinstate_path"))
1990 		action = reinstate_path;
1991 	else if (!strcasecmp(argv[0], "fail_path"))
1992 		action = fail_path;
1993 	else {
1994 		DMWARN("Unrecognised multipath message received: %s", argv[0]);
1995 		goto out;
1996 	}
1997 
1998 	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
1999 	if (r) {
2000 		DMWARN("message: error getting device %s",
2001 		       argv[1]);
2002 		goto out;
2003 	}
2004 
2005 	r = action_dev(m, dev, action);
2006 
2007 	dm_put_device(ti, dev);
2008 
2009 out:
2010 	mutex_unlock(&m->work_mutex);
2011 	return r;
2012 }
2013 
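/*
 * Select the path whose block device should service pass-through ioctls.
 * Returns -ENOTCONN while pg_init is outstanding or while I/O is being
 * queued because no path is usable yet, and -EIO when there is no path
 * and queue_if_no_path is not set.
 */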
2014 static int multipath_prepare_ioctl(struct dm_target *ti,
2015 				   struct block_device **bdev)
2016 {
2017 	struct multipath *m = ti->private;
2018 	struct pgpath *pgpath;
2019 	unsigned long flags;
2020 	int r;
2021 
2022 	pgpath = READ_ONCE(m->current_pgpath);
2023 	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
2024 		pgpath = choose_pgpath(m, 0);
2025 
2026 	if (pgpath) {
2027 		if (!mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) {
2028 			*bdev = pgpath->path.dev->bdev;
2029 			r = 0;
2030 		} else {
2031 			/* pg_init has not started or completed */
2032 			r = -ENOTCONN;
2033 		}
2034 	} else {
2035 		/* No path is available */
2036 		r = -EIO;
2037 		spin_lock_irqsave(&m->lock, flags);
2038 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
2039 			r = -ENOTCONN;
2040 		spin_unlock_irqrestore(&m->lock, flags);
2041 	}
2042 
2043 	if (r == -ENOTCONN) {
2044 		if (!READ_ONCE(m->current_pg)) {
2045 			/* Path status changed, redo selection */
2046 			(void) choose_pgpath(m, 0);
2047 		}
2048 		spin_lock_irqsave(&m->lock, flags);
2049 		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
2050 			(void) __pg_init_all_paths(m);
2051 		spin_unlock_irqrestore(&m->lock, flags);
2052 		dm_table_run_md_queue_async(m->ti->table);
2053 		process_queued_io_list(m);
2054 	}
2055 
2056 	/*
2057 	 * Only pass ioctls through if the device sizes match exactly.
2058 	 */
2059 	if (!r && ti->len != bdev_nr_sectors(*bdev))
2060 		return 1;
2061 	return r;
2062 }
2063 
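/*
 * Invoke @fn on every underlying path device in every priority group,
 * stopping at the first non-zero return value.
 */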
2064 static int multipath_iterate_devices(struct dm_target *ti,
2065 				     iterate_devices_callout_fn fn, void *data)
2066 {
2067 	struct multipath *m = ti->private;
2068 	struct priority_group *pg;
2069 	struct pgpath *p;
2070 	int ret = 0;
2071 
2072 	list_for_each_entry(pg, &m->priority_groups, list) {
2073 		list_for_each_entry(p, &pg->pgpaths, list) {
2074 			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
2075 			if (ret)
2076 				goto out;
2077 		}
2078 	}
2079 
2080 out:
2081 	return ret;
2082 }
2083 
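/*
 * Ask the low-level driver of the path's device whether it is currently
 * too busy to take another request.
 */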
2084 static int pgpath_busy(struct pgpath *pgpath)
2085 {
2086 	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
2087 
2088 	return blk_lld_busy(q);
2089 }
2090 
2091 /*
2092  * We return "busy" only when we can map I/Os but the underlying devices
2093  * are busy (so even if we map I/Os now, the I/Os will wait on
2094  * the underlying queue).
2095  * In other words, if we want to kill I/Os or queue them inside us
2096  * due to map unavailability, we don't return "busy".  Otherwise,
2097  * dm core won't give us the I/Os and we can't do what we want.
2098  */
2099 static int multipath_busy(struct dm_target *ti)
2100 {
2101 	bool busy = false, has_active = false;
2102 	struct multipath *m = ti->private;
2103 	struct priority_group *pg, *next_pg;
2104 	struct pgpath *pgpath;
2105 
2106 	/* pg_init in progress */
2107 	if (atomic_read(&m->pg_init_in_progress))
2108 		return true;
2109 
2110 	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
2111 	if (!atomic_read(&m->nr_valid_paths)) {
2112 		unsigned long flags;
2113 		spin_lock_irqsave(&m->lock, flags);
2114 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
2115 			spin_unlock_irqrestore(&m->lock, flags);
2116 			return (m->queue_mode != DM_TYPE_REQUEST_BASED);
2117 		}
2118 		spin_unlock_irqrestore(&m->lock, flags);
2119 	}
2120 
2121 	/* Guess which priority_group will be used at next mapping time */
2122 	pg = READ_ONCE(m->current_pg);
2123 	next_pg = READ_ONCE(m->next_pg);
2124 	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
2125 		pg = next_pg;
2126 
2127 	if (!pg) {
2128 		/*
2129 		 * We don't know which pg will be used at next mapping time.
2130 		 * We don't call choose_pgpath() here to avoid triggering
2131 		 * pg_init just by busy checking.
2132 		 * So we don't know whether the underlying devices we will be
2133 		 * using at next mapping time are busy or not. Just try mapping.
2134 		 */
2135 		return busy;
2136 	}
2137 
2138 	/*
2139 	 * If there is at least one non-busy active path, the path selector
2140 	 * will be able to select it. So we consider such a pg as not busy.
2141 	 */
2142 	busy = true;
2143 	list_for_each_entry(pgpath, &pg->pgpaths, list) {
2144 		if (pgpath->is_active) {
2145 			has_active = true;
2146 			if (!pgpath_busy(pgpath)) {
2147 				busy = false;
2148 				break;
2149 			}
2150 		}
2151 	}
2152 
2153 	if (!has_active) {
2154 		/*
2155 		 * No active path in this pg, so this pg won't be used and
2156 		 * the current_pg will be changed at the next mapping time.
2157 		 * We need to try mapping to determine the new pg.
2158 		 */
2159 		busy = false;
2160 	}
2161 
2162 	return busy;
2163 }
2164 
2165 /*-----------------------------------------------------------------
2166  * Module setup
2167  *---------------------------------------------------------------*/
2168 static struct target_type multipath_target = {
2169 	.name = "multipath",
2170 	.version = {1, 14, 0},
2171 	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
2172 		    DM_TARGET_PASSES_INTEGRITY,
2173 	.module = THIS_MODULE,
2174 	.ctr = multipath_ctr,
2175 	.dtr = multipath_dtr,
2176 	.clone_and_map_rq = multipath_clone_and_map,
2177 	.release_clone_rq = multipath_release_clone,
2178 	.rq_end_io = multipath_end_io,
2179 	.map = multipath_map_bio,
2180 	.end_io = multipath_end_io_bio,
2181 	.presuspend = multipath_presuspend,
2182 	.postsuspend = multipath_postsuspend,
2183 	.resume = multipath_resume,
2184 	.status = multipath_status,
2185 	.message = multipath_message,
2186 	.prepare_ioctl = multipath_prepare_ioctl,
2187 	.iterate_devices = multipath_iterate_devices,
2188 	.busy = multipath_busy,
2189 };
2190 
2191 static int __init dm_multipath_init(void)
2192 {
2193 	int r;
2194 
2195 	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
2196 	if (!kmultipathd) {
2197 		DMERR("failed to create workqueue kmpathd");
2198 		r = -ENOMEM;
2199 		goto bad_alloc_kmultipathd;
2200 	}
2201 
2202 	/*
2203 	 * A separate workqueue is used to handle the device handlers
2204 	 * to avoid overloading the existing workqueue. Overloading the
2205 	 * old workqueue would also create a bottleneck in the
2206 	 * storage hardware device activation path.
2207 	 */
2208 	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
2209 						  WQ_MEM_RECLAIM);
2210 	if (!kmpath_handlerd) {
2211 		DMERR("failed to create workqueue kmpath_handlerd");
2212 		r = -ENOMEM;
2213 		goto bad_alloc_kmpath_handlerd;
2214 	}
2215 
2216 	r = dm_register_target(&multipath_target);
2217 	if (r < 0) {
2218 		DMERR("request-based register failed %d", r);
2219 		r = -EINVAL;
2220 		goto bad_register_target;
2221 	}
2222 
2223 	return 0;
2224 
2225 bad_register_target:
2226 	destroy_workqueue(kmpath_handlerd);
2227 bad_alloc_kmpath_handlerd:
2228 	destroy_workqueue(kmultipathd);
2229 bad_alloc_kmultipathd:
2230 	return r;
2231 }
2232 
2233 static void __exit dm_multipath_exit(void)
2234 {
2235 	destroy_workqueue(kmpath_handlerd);
2236 	destroy_workqueue(kmultipathd);
2237 
2238 	dm_unregister_target(&multipath_target);
2239 }
2240 
2241 module_init(dm_multipath_init);
2242 module_exit(dm_multipath_exit);
2243 
2244 module_param_named(queue_if_no_path_timeout_secs,
2245 		   queue_if_no_path_timeout_secs, ulong, S_IRUGO | S_IWUSR);
2246 MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
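/*
 * With S_IWUSR set the timeout can also be changed at runtime, e.g. via
 * /sys/module/dm_multipath/parameters/queue_if_no_path_timeout_secs.
 */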
2247 
2248 MODULE_DESCRIPTION(DM_NAME " multipath target");
2249 MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
2250 MODULE_LICENSE("GPL");
2251