Lines matching refs:pgpath (drivers/md/dm-mpath.c)

The cross-references below trace struct pgpath through the device-mapper multipath driver, from allocation and parsing to path selection, I/O dispatch, failure and reinstatement, hardware-handler activation, and completion accounting. The leading number on each entry is the line in dm-mpath.c; the trailing annotation is the symbol kind or enclosing function.

41 struct pgpath {  struct
53 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path) argument
79 struct pgpath *current_pgpath;
109 struct pgpath *pgpath; member
114 typedef int (*action_fn) (struct pgpath *pgpath);
118 static void activate_or_offline_path(struct pgpath *pgpath);
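
The declarations above already show the central trick: struct path is embedded inside struct pgpath, so the path_to_pgpath() macro at line 53 can recover the enclosing pgpath from a bare path pointer with container_of(). A minimal userspace demo of the pattern (the reduced struct layout is an assumption for illustration; the macro bodies mirror the kernel's):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Same arithmetic as the kernel's container_of(), minus type checks. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct path { void *dev; };

    struct pgpath {
        bool is_active;
        struct path path;   /* embedded, so container_of() can walk back */
    };

    #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

    int main(void)
    {
        struct pgpath pgp = { .is_active = true };
        struct path *p = &pgp.path;

        /* Recover the enclosing pgpath from the embedded member. */
        printf("recovered == original: %d\n", path_to_pgpath(p) == &pgp);
        return 0;
    }
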
156 static struct pgpath *alloc_pgpath(void) in alloc_pgpath()
158 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL); in alloc_pgpath() local
160 if (!pgpath) in alloc_pgpath()
163 pgpath->is_active = true; in alloc_pgpath()
165 return pgpath; in alloc_pgpath()
168 static void free_pgpath(struct pgpath *pgpath) in free_pgpath() argument
170 kfree(pgpath); in free_pgpath()
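
Lines 156-170 are the allocation pair, and the fragments reassemble almost completely (a sketch; nothing beyond the NULL check is needed here):

    static struct pgpath *alloc_pgpath(void)
    {
        /* kzalloc() zeroes the struct, so counters such as fail_count
         * start at 0 without explicit initialization. */
        struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

        if (!pgpath)
            return NULL;

        pgpath->is_active = true;   /* new paths start usable */
        return pgpath;
    }

    static void free_pgpath(struct pgpath *pgpath)
    {
        kfree(pgpath);  /* kfree(NULL) is a no-op, so callers need no check */
    }
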
187 struct pgpath *pgpath, *tmp; in free_pgpaths() local
189 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) { in free_pgpaths()
190 list_del(&pgpath->list); in free_pgpaths()
191 dm_put_device(ti, pgpath->path.dev); in free_pgpaths()
192 free_pgpath(pgpath); in free_pgpaths()
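
The teardown loop at lines 187-192 must use the _safe iterator because it unlinks and frees the node it is standing on; the plain iterator would read freed memory to find the next entry. Reassembled:

    static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
    {
        struct pgpath *pgpath, *tmp;    /* tmp caches the next node */

        list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
            list_del(&pgpath->list);
            dm_put_device(ti, pgpath->path.dev);    /* drop the dm device ref */
            free_pgpath(pgpath);
        }
    }
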
304 mpio->pgpath = NULL; in multipath_init_per_bio_data()
318 struct pgpath *pgpath; in __pg_init_all_paths() local
336 list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) { in __pg_init_all_paths()
338 if (!pgpath->is_active) in __pg_init_all_paths()
340 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path, in __pg_init_all_paths()
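
In __pg_init_all_paths() (lines 318-340), hardware-handler initialization fans out as delayed work, one item per live path in the current priority group. The loop, reassembled (pg_init_delay stands in for the delay the real function computes earlier, and the in-progress accounting follows my reading of the surrounding code):

    list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
        if (!pgpath->is_active)
            continue;   /* failed paths get no activation work */
        /* queue_delayed_work() returns false if the item was already queued */
        if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
                               pg_init_delay))
            atomic_inc(&m->pg_init_in_progress);
    }
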
377 static struct pgpath *choose_path_in_pg(struct multipath *m, in choose_path_in_pg()
383 struct pgpath *pgpath; in choose_path_in_pg() local
389 pgpath = path_to_pgpath(path); in choose_path_in_pg()
394 m->current_pgpath = pgpath; in choose_path_in_pg()
399 return pgpath; in choose_path_in_pg()
402 static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) in choose_pgpath()
406 struct pgpath *pgpath; in choose_pgpath() local
426 pgpath = choose_path_in_pg(m, pg, nr_bytes); in choose_pgpath()
427 if (!IS_ERR_OR_NULL(pgpath)) in choose_pgpath()
428 return pgpath; in choose_pgpath()
435 pgpath = choose_path_in_pg(m, pg, nr_bytes); in choose_pgpath()
436 if (!IS_ERR_OR_NULL(pgpath)) in choose_pgpath()
437 return pgpath; in choose_pgpath()
450 pgpath = choose_path_in_pg(m, pg, nr_bytes); in choose_pgpath()
451 if (!IS_ERR_OR_NULL(pgpath)) { in choose_pgpath()
457 return pgpath; in choose_pgpath()
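
The three IS_ERR_OR_NULL() checks at lines 427, 436 and 451 are three selection tiers: a group the user explicitly bypassed to, then the group already in use, then every group in order. What makes the tiering cheap is the kernel's pointer-encoded error convention: one return value can be a valid path, NULL (try the next tier), or an ERR_PTR (hard failure, stop). A self-contained userspace demo of that encoding (macro bodies mirror include/linux/err.h):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

    static inline int IS_ERR(const void *ptr)
    {
        return IS_ERR_VALUE((unsigned long)ptr);
    }

    static inline int IS_ERR_OR_NULL(const void *ptr)
    {
        return !ptr || IS_ERR(ptr);
    }

    int main(void)
    {
        int dummy;
        void *ok = &dummy;              /* a usable path */
        void *none = NULL;              /* "keep looking in the next tier" */
        void *err = ERR_PTR(-EAGAIN);   /* "stop: hard failure" */

        printf("ok:   skip=%d\n", IS_ERR_OR_NULL(ok));   /* 0 */
        printf("none: skip=%d\n", IS_ERR_OR_NULL(none)); /* 1 */
        printf("err:  skip=%d errno=%ld\n",
               IS_ERR_OR_NULL(err), PTR_ERR(err));       /* 1, -11 on Linux */
        return 0;
    }
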
513 struct pgpath *pgpath; in multipath_clone_and_map() local
520 pgpath = READ_ONCE(m->current_pgpath); in multipath_clone_and_map()
521 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) in multipath_clone_and_map()
522 pgpath = choose_pgpath(m, nr_bytes); in multipath_clone_and_map()
524 if (!pgpath) { in multipath_clone_and_map()
535 mpio->pgpath = pgpath; in multipath_clone_and_map()
538 bdev = pgpath->path.dev->bdev; in multipath_clone_and_map()
546 activate_or_offline_path(pgpath); in multipath_clone_and_map()
563 if (pgpath->pg->ps.type->start_io) in multipath_clone_and_map()
564 pgpath->pg->ps.type->start_io(&pgpath->pg->ps, in multipath_clone_and_map()
565 &pgpath->path, in multipath_clone_and_map()
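
multipath_clone_and_map() (lines 513-565) opens with the driver's lockless fast path, which recurs verbatim in __map_bio() (line 618). The cached current path is used as-is only while MPATHF_QUEUE_IO is set (hardware-handler init in flight, so the I/O will be queued anyway); otherwise choose_pgpath() runs and may simply return the cached path again. The two lines, with their intent spelled out:

    /* One tolerated-racy read of the cache; a stale or NULL value is
     * harmless because choose_pgpath() redoes the selection safely. */
    pgpath = READ_ONCE(m->current_pgpath);
    if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
        pgpath = choose_pgpath(m, nr_bytes);
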
579 struct pgpath *pgpath = mpio->pgpath; in multipath_release_clone() local
581 if (pgpath && pgpath->pg->ps.type->end_io) in multipath_release_clone()
582 pgpath->pg->ps.type->end_io(&pgpath->pg->ps, in multipath_release_clone()
583 &pgpath->path, in multipath_release_clone()
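
Lines 563-583 show the path-selector hook pairing: start_io when a request is sent down a path, end_io when its clone is released, which is how selectors like service-time track in-flight load. Both hooks are optional, hence the guard-then-call shape (the fourth end_io argument is the I/O start time in recent kernels; its name here is an assumption):

    /* Dispatch side (line 563), from multipath_clone_and_map(). */
    if (pgpath->pg->ps.type->start_io)
        pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
                                      nr_bytes);

    /* Completion side (line 581), from multipath_release_clone(). */
    if (pgpath && pgpath->pg->ps.type->end_io)
        pgpath->pg->ps.type->end_io(&pgpath->pg->ps, &pgpath->path,
                                    mpio->nr_bytes, io_start_time);
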
612 static struct pgpath *__map_bio(struct multipath *m, struct bio *bio) in __map_bio()
614 struct pgpath *pgpath; in __map_bio() local
618 pgpath = READ_ONCE(m->current_pgpath); in __map_bio()
619 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) in __map_bio()
620 pgpath = choose_pgpath(m, bio->bi_iter.bi_size); in __map_bio()
622 if (!pgpath) { in __map_bio()
626 pgpath = ERR_PTR(-EAGAIN); in __map_bio()
637 return pgpath; in __map_bio()
643 struct pgpath *pgpath = __map_bio(m, bio); in __multipath_map_bio() local
645 if (IS_ERR(pgpath)) in __multipath_map_bio()
648 if (!pgpath) { in __multipath_map_bio()
655 mpio->pgpath = pgpath; in __multipath_map_bio()
657 if (dm_ps_use_hr_timer(pgpath->pg->ps.type)) in __multipath_map_bio()
661 bio_set_dev(bio, pgpath->path.dev->bdev); in __multipath_map_bio()
664 if (pgpath->pg->ps.type->start_io) in __multipath_map_bio()
665 pgpath->pg->ps.type->start_io(&pgpath->pg->ps, in __multipath_map_bio()
666 &pgpath->path, in __multipath_map_bio()
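
The bio-based mapping pair at lines 612-666 separates policy from mechanics: __map_bio() returns a path, NULL, or ERR_PTR(-EAGAIN) (line 626, after the bio has been queued internally), and __multipath_map_bio() turns that three-way result into a device-mapper verdict. A condensed sketch of the caller (the real function further splits the no-path case depending on queue_if_no_path state; this collapses it):

    struct pgpath *pgpath = __map_bio(m, bio);

    if (IS_ERR(pgpath))
        return DM_MAPIO_SUBMITTED;  /* bio already queued inside __map_bio() */

    if (!pgpath)
        return DM_MAPIO_KILL;       /* sketch: real code may requeue instead */

    mpio->pgpath = pgpath;
    bio_set_dev(bio, pgpath->path.dev->bdev);   /* retarget at the chosen leg */
    return DM_MAPIO_REMAPPED;
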
935 static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps, in parse_path()
939 struct pgpath *p; in parse_path()
1028 struct pgpath *pgpath; in parse_priority_group() local
1040 pgpath = parse_path(&path_args, &pg->ps, ti); in parse_priority_group()
1041 if (IS_ERR(pgpath)) { in parse_priority_group()
1042 r = PTR_ERR(pgpath); in parse_priority_group()
1046 pgpath->pg = pg; in parse_priority_group()
1047 list_add_tail(&pgpath->list, &pg->pgpaths); in parse_priority_group()
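
parse_priority_group() (lines 1028-1047) is the constructor side of the same ERR_PTR protocol, plus the attach step everything later relies on: the pg back-pointer is what lets fail_path() and friends reach the owning multipath through pgpath->pg->m. The loop body, reassembled (the cleanup label is elided in the listing and assumed here):

    pgpath = parse_path(&path_args, &pg->ps, ti);
    if (IS_ERR(pgpath)) {
        r = PTR_ERR(pgpath);    /* unwrap the errno from the pointer */
        goto bad;               /* 'bad' is an assumed cleanup label */
    }

    pgpath->pg = pg;    /* back-pointer: pgpath -> priority group -> multipath */
    list_add_tail(&pgpath->list, &pg->pgpaths);
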
1332 static int fail_path(struct pgpath *pgpath) in fail_path() argument
1335 struct multipath *m = pgpath->pg->m; in fail_path()
1339 if (!pgpath->is_active) in fail_path()
1344 pgpath->path.dev->name); in fail_path()
1346 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); in fail_path()
1347 pgpath->is_active = false; in fail_path()
1348 pgpath->fail_count++; in fail_path()
1352 if (pgpath == m->current_pgpath) in fail_path()
1356 pgpath->path.dev->name, atomic_read(&m->nr_valid_paths)); in fail_path()
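
fail_path() (lines 1332-1356) is the one place a path leaves service, and the order of operations matters: the selector hears first, then the flag and counter flip, then the cached current_pgpath is invalidated so the next I/O reselects. A sketch with locking, logging, and event notification elided (the valid-path accounting follows the atomic_read at line 1356):

    static int fail_path(struct pgpath *pgpath)
    {
        struct multipath *m = pgpath->pg->m;

        if (!pgpath->is_active)
            return 0;       /* already failed: idempotent */

        pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
        pgpath->is_active = false;
        pgpath->fail_count++;

        atomic_dec(&m->nr_valid_paths);

        if (pgpath == m->current_pgpath)
            m->current_pgpath = NULL;   /* force reselection on the next I/O */

        return 0;
    }
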
1371 static int reinstate_path(struct pgpath *pgpath) in reinstate_path() argument
1375 struct multipath *m = pgpath->pg->m; in reinstate_path()
1380 if (pgpath->is_active) in reinstate_path()
1385 pgpath->path.dev->name); in reinstate_path()
1387 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path); in reinstate_path()
1391 pgpath->is_active = true; in reinstate_path()
1397 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { in reinstate_path()
1398 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) in reinstate_path()
1403 pgpath->path.dev->name, nr_valid_paths); in reinstate_path()
1414 if (pgpath->is_active) in reinstate_path()
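
reinstate_path() (lines 1371-1414) is the mirror image, with one twist at lines 1397-1398: when a hardware handler is configured and the revived path sits in the current group, the path is routed back through activation rather than trusted immediately. The core sequence, locking elided:

    r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
    if (r)
        goto out;   /* selector refused; path stays failed ('out' assumed) */

    pgpath->is_active = true;

    if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
        /* Re-run hardware-handler activation before using the path. */
        if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
            atomic_inc(&m->pg_init_in_progress);
    }
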
1427 struct pgpath *pgpath; in action_dev() local
1431 list_for_each_entry(pgpath, &pg->pgpaths, list) { in action_dev()
1432 if (pgpath->path.dev == dev) in action_dev()
1433 r = action(pgpath); in action_dev()
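
The action_fn typedef at line 114 exists for this one call site: action_dev() walks a group looking for the pgpath wrapping a given device and applies whichever operation was passed in, so fail_path and reinstate_path share a single lookup routine. Reassembled:

    typedef int (*action_fn)(struct pgpath *pgpath);    /* line 114 */

    list_for_each_entry(pgpath, &pg->pgpaths, list) {
        if (pgpath->path.dev == dev)
            r = action(pgpath); /* action is fail_path or reinstate_path */
    }
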
1519 static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath) in pg_init_limit_reached() argument
1539 struct pgpath *pgpath = data; in pg_init_done() local
1540 struct priority_group *pg = pgpath->pg; in pg_init_done()
1559 fail_path(pgpath); in pg_init_done()
1574 if (pg_init_limit_reached(m, pgpath)) in pg_init_done()
1575 fail_path(pgpath); in pg_init_done()
1585 fail_path(pgpath); in pg_init_done()
1590 if (pgpath == m->current_pgpath) { in pg_init_done()
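
pg_init_done() (lines 1539-1590) is the completion callback for the activations queued earlier, and the listing shows its three fail_path() call sites: hard handler errors fail the path outright, while transient ones are gated by a retry budget. A condensed sketch of that decision (the predicates are assumed names; the real code switches on the scsi_dh status value):

    if (status_is_transient) {          /* assumed predicate */
        if (pg_init_limit_reached(m, pgpath))
            fail_path(pgpath);          /* retry budget exhausted */
    } else if (status_is_fatal) {       /* assumed predicate */
        fail_path(pgpath);              /* no point retrying */
    }
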
1624 static void activate_or_offline_path(struct pgpath *pgpath) in activate_or_offline_path() argument
1626 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in activate_or_offline_path()
1628 if (pgpath->is_active && !blk_queue_dying(q)) in activate_or_offline_path()
1629 scsi_dh_activate(q, pg_init_done, pgpath); in activate_or_offline_path()
1631 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED); in activate_or_offline_path()
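
activate_or_offline_path() (lines 1624-1631) reassembles whole from the listing, and its shape is the point: both the asynchronous SCSI device-handler activation and the synchronous offline short-circuit funnel into the same pg_init_done() completion, so the pg_init state machine has a single exit:

    static void activate_or_offline_path(struct pgpath *pgpath)
    {
        struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

        if (pgpath->is_active && !blk_queue_dying(q))
            scsi_dh_activate(q, pg_init_done, pgpath);  /* async callback */
        else
            pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED); /* complete now */
    }
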
1636 struct pgpath *pgpath = in activate_path_work() local
1637 container_of(work, struct pgpath, activate_path.work); in activate_path_work()
1639 activate_or_offline_path(pgpath); in activate_path_work()
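
activate_path_work() (lines 1636-1639) is the delayed-work trampoline tying the queueing above to that function: container_of() again, this time walking back from the embedded work_struct:

    static void activate_path_work(struct work_struct *work)
    {
        struct pgpath *pgpath =
            container_of(work, struct pgpath, activate_path.work);

        activate_or_offline_path(pgpath);
    }
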
1646 struct pgpath *pgpath = mpio->pgpath; in multipath_end_io() local
1668 if (pgpath) in multipath_end_io()
1669 fail_path(pgpath); in multipath_end_io()
1680 if (pgpath) { in multipath_end_io()
1681 struct path_selector *ps = &pgpath->pg->ps; in multipath_end_io()
1684 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes, in multipath_end_io()
1696 struct pgpath *pgpath = mpio->pgpath; in multipath_end_io_bio() local
1703 if (pgpath) in multipath_end_io_bio()
1704 fail_path(pgpath); in multipath_end_io_bio()
1724 if (pgpath) { in multipath_end_io_bio()
1725 struct path_selector *ps = &pgpath->pg->ps; in multipath_end_io_bio()
1728 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes, in multipath_end_io_bio()
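
The two completion handlers (lines 1646-1728) share one skeleton across the request and bio paths: if the error points at the path rather than the target, fail the path that carried the I/O, and in every case report completed bytes back to the selector so load balancing stays honest. The common shape (the failure predicate is a simplification; the real code consults blk_path_error() and queueing state):

    if (path_error && pgpath)   /* 'path_error': stand-in predicate */
        fail_path(pgpath);      /* take the offending path out of rotation */

    if (pgpath) {
        struct path_selector *ps = &pgpath->pg->ps;

        if (ps->type->end_io)
            ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
                             io_start_time);    /* 4th arg name assumed */
    }
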
1806 struct pgpath *p; in multipath_status()
2032 struct pgpath *pgpath; in multipath_prepare_ioctl() local
2036 pgpath = READ_ONCE(m->current_pgpath); in multipath_prepare_ioctl()
2037 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) in multipath_prepare_ioctl()
2038 pgpath = choose_pgpath(m, 0); in multipath_prepare_ioctl()
2040 if (pgpath) { in multipath_prepare_ioctl()
2042 *bdev = pgpath->path.dev->bdev; in multipath_prepare_ioctl()
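
multipath_prepare_ioctl() (lines 2032-2042) reuses the mapping fast path with one nuance: nr_bytes is 0 because an ioctl moves nothing the path selector should weigh; a path is chosen only to surface its underlying block device:

    pgpath = READ_ONCE(m->current_pgpath);
    if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
        pgpath = choose_pgpath(m, 0);   /* 0 bytes: nothing to account */

    if (pgpath)
        *bdev = pgpath->path.dev->bdev; /* hand the ioctl the raw device */
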
2083 struct pgpath *p; in multipath_iterate_devices()
2098 static int pgpath_busy(struct pgpath *pgpath) in pgpath_busy() argument
2100 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in pgpath_busy()
2118 struct pgpath *pgpath; in multipath_busy() local
2158 list_for_each_entry(pgpath, &pg->pgpaths, list) { in multipath_busy()
2159 if (pgpath->is_active) { in multipath_busy()
2161 if (!pgpath_busy(pgpath)) { in multipath_busy()
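
Finally, multipath_busy() (lines 2118-2161) answers back-pressure queries with all-or-nothing semantics per group: the group is busy only if every active path is busy, so one idle active path keeps I/O flowing. The inner loop, reassembled (flag names follow my reading of the surrounding code):

    busy = true;
    list_for_each_entry(pgpath, &pg->pgpaths, list) {
        if (pgpath->is_active) {
            has_active = true;
            if (!pgpath_busy(pgpath)) {
                busy = false;   /* one idle active path clears the verdict */
                break;
            }
        }
    }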