dm-mpath.c — side-by-side diff between revisions cfae5c9bb66325cd32d5f2ee41f14749f062a53c (old) and bab7cfc733f4453a502b7491b9ee37b091440ec4 (new: move hardware-handler activation onto a dedicated workqueue)
1/*
2 * Copyright (C) 2003 Sistina Software Limited.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include "dm.h"

--- 49 unchanged lines hidden (view full) ---

58/* Multipath context */
59struct multipath {
60 struct list_head list;
61 struct dm_target *ti;
62
63 spinlock_t lock;
64
65 const char *hw_handler_name;
1/*
2 * Copyright (C) 2003 Sistina Software Limited.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include "dm.h"

--- 49 unchanged lines hidden (view full) ---

58/* Multipath context */
59struct multipath {
60 struct list_head list;
61 struct dm_target *ti;
62
63 spinlock_t lock;
64
65 const char *hw_handler_name;
66 struct work_struct activate_path;
66 unsigned nr_priority_groups;
67 struct list_head priority_groups;
68 unsigned pg_init_required; /* pg_init needs calling? */
69 unsigned pg_init_in_progress; /* Only one pg_init allowed at once */
70
71 unsigned nr_valid_paths; /* Total number of usable paths */
72 struct pgpath *current_pgpath;
73 struct priority_group *current_pg;

--- 28 unchanged lines hidden (view full) ---

102};
103
104typedef int (*action_fn) (struct pgpath *pgpath);
105
106#define MIN_IOS 256 /* Mempool size */
107
108static struct kmem_cache *_mpio_cache;
109
67 unsigned nr_priority_groups;
68 struct list_head priority_groups;
69 unsigned pg_init_required; /* pg_init needs calling? */
70 unsigned pg_init_in_progress; /* Only one pg_init allowed at once */
71
72 unsigned nr_valid_paths; /* Total number of usable paths */
73 struct pgpath *current_pgpath;
74 struct priority_group *current_pg;

--- 28 unchanged lines hidden (view full) ---

103};
104
105typedef int (*action_fn) (struct pgpath *pgpath);
106
107#define MIN_IOS 256 /* Mempool size */
108
109static struct kmem_cache *_mpio_cache;
110
/*
 * Workqueues: kmultipathd services queued I/O and events; kmpath_handlerd is
 * a dedicated single-threaded queue for hardware-handler path activation.
 */
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;

/* Forward declarations for work items wired up in alloc_multipath(). */
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
/* NOTE(review): pg_init_done decl retained from the old revision; confirm the
 * final file still needs it before its definition point. */
static void pg_init_done(struct dm_path *, int);
static void activate_path(struct work_struct *work);
114
115
116/*-----------------------------------------------
117 * Allocation routines
118 *-----------------------------------------------*/
119
120static struct pgpath *alloc_pgpath(void)
121{

--- 53 unchanged lines hidden (view full) ---

175
176 m = kzalloc(sizeof(*m), GFP_KERNEL);
177 if (m) {
178 INIT_LIST_HEAD(&m->priority_groups);
179 spin_lock_init(&m->lock);
180 m->queue_io = 1;
181 INIT_WORK(&m->process_queued_ios, process_queued_ios);
182 INIT_WORK(&m->trigger_event, trigger_event);
115
116
117/*-----------------------------------------------
118 * Allocation routines
119 *-----------------------------------------------*/
120
121static struct pgpath *alloc_pgpath(void)
122{

--- 53 unchanged lines hidden (view full) ---

176
177 m = kzalloc(sizeof(*m), GFP_KERNEL);
178 if (m) {
179 INIT_LIST_HEAD(&m->priority_groups);
180 spin_lock_init(&m->lock);
181 m->queue_io = 1;
182 INIT_WORK(&m->process_queued_ios, process_queued_ios);
183 INIT_WORK(&m->trigger_event, trigger_event);
184 INIT_WORK(&m->activate_path, activate_path);
183 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
184 if (!m->mpio_pool) {
185 kfree(m);
186 return NULL;
187 }
188 m->ti = ti;
189 ti->private = m;
190 }

--- 236 unchanged lines hidden (view full) ---

427 m->pg_init_required = 0;
428 m->pg_init_in_progress = 1;
429 init_required = 1;
430 }
431
432out:
433 spin_unlock_irqrestore(&m->lock, flags);
434
185 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
186 if (!m->mpio_pool) {
187 kfree(m);
188 return NULL;
189 }
190 m->ti = ti;
191 ti->private = m;
192 }

--- 236 unchanged lines hidden (view full) ---

429 m->pg_init_required = 0;
430 m->pg_init_in_progress = 1;
431 init_required = 1;
432 }
433
434out:
435 spin_unlock_irqrestore(&m->lock, flags);
436
435 if (init_required) {
436 struct dm_path *path = &pgpath->path;
437 int ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
438 pg_init_done(path, ret);
439 }
437 if (init_required)
438 queue_work(kmpath_handlerd, &m->activate_path);
440
441 if (!must_queue)
442 dispatch_queued_ios(m);
443}
444
445/*
446 * An event is triggered whenever a path is taken out of use.
447 * Includes path failure and PG bypass.

--- 338 unchanged lines hidden (view full) ---

786 free_multipath(m);
787 return r;
788}
789
790static void multipath_dtr(struct dm_target *ti)
791{
792 struct multipath *m = (struct multipath *) ti->private;
793
439
440 if (!must_queue)
441 dispatch_queued_ios(m);
442}
443
444/*
445 * An event is triggered whenever a path is taken out of use.
446 * Includes path failure and PG bypass.

--- 338 unchanged lines hidden (view full) ---

785 free_multipath(m);
786 return r;
787}
788
789static void multipath_dtr(struct dm_target *ti)
790{
791 struct multipath *m = (struct multipath *) ti->private;
792
793 flush_workqueue(kmpath_handlerd);
794 flush_workqueue(kmultipathd);
795 free_multipath(m);
796}
797
798/*
799 * Map bios, recording original fields for later in case we have to resubmit
800 */
801static int multipath_map(struct dm_target *ti, struct bio *bio,

--- 301 unchanged lines hidden (view full) ---

1103 pg->bypassed = 0;
1104 }
1105
1106 m->pg_init_in_progress = 0;
1107 queue_work(kmultipathd, &m->process_queued_ios);
1108 spin_unlock_irqrestore(&m->lock, flags);
1109}
1110
794 flush_workqueue(kmultipathd);
795 free_multipath(m);
796}
797
798/*
799 * Map bios, recording original fields for later in case we have to resubmit
800 */
801static int multipath_map(struct dm_target *ti, struct bio *bio,

--- 301 unchanged lines hidden (view full) ---

1103 pg->bypassed = 0;
1104 }
1105
1106 m->pg_init_in_progress = 0;
1107 queue_work(kmultipathd, &m->process_queued_ios);
1108 spin_unlock_irqrestore(&m->lock, flags);
1109}
1110
1111static void activate_path(struct work_struct *work)
1112{
1113 int ret;
1114 struct multipath *m =
1115 container_of(work, struct multipath, activate_path);
1116 struct dm_path *path = &m->current_pgpath->path;
1117
1118 ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
1119 pg_init_done(path, ret);
1120}
1121
1111/*
1112 * end_io handling
1113 */
1114static int do_end_io(struct multipath *m, struct bio *bio,
1115 int error, struct dm_mpath_io *mpio)
1116{
1117 unsigned long flags;
1118

--- 327 unchanged lines hidden (view full) ---

1446 kmultipathd = create_workqueue("kmpathd");
1447 if (!kmultipathd) {
1448 DMERR("failed to create workqueue kmpathd");
1449 dm_unregister_target(&multipath_target);
1450 kmem_cache_destroy(_mpio_cache);
1451 return -ENOMEM;
1452 }
1453
1122/*
1123 * end_io handling
1124 */
1125static int do_end_io(struct multipath *m, struct bio *bio,
1126 int error, struct dm_mpath_io *mpio)
1127{
1128 unsigned long flags;
1129

--- 327 unchanged lines hidden (view full) ---

1457 kmultipathd = create_workqueue("kmpathd");
1458 if (!kmultipathd) {
1459 DMERR("failed to create workqueue kmpathd");
1460 dm_unregister_target(&multipath_target);
1461 kmem_cache_destroy(_mpio_cache);
1462 return -ENOMEM;
1463 }
1464
1465 /*
1466 * A separate workqueue is used to handle the device handlers
1467 * to avoid overloading existing workqueue. Overloading the
1468 * old workqueue would also create a bottleneck in the
1469 * path of the storage hardware device activation.
1470 */
1471 kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
1472 if (!kmpath_handlerd) {
1473 DMERR("failed to create workqueue kmpath_handlerd");
1474 destroy_workqueue(kmultipathd);
1475 dm_unregister_target(&multipath_target);
1476 kmem_cache_destroy(_mpio_cache);
1477 return -ENOMEM;
1478 }
1479
1454 DMINFO("version %u.%u.%u loaded",
1455 multipath_target.version[0], multipath_target.version[1],
1456 multipath_target.version[2]);
1457
1458 return r;
1459}
1460
1461static void __exit dm_multipath_exit(void)
1462{
1463 int r;
1464
1480 DMINFO("version %u.%u.%u loaded",
1481 multipath_target.version[0], multipath_target.version[1],
1482 multipath_target.version[2]);
1483
1484 return r;
1485}
1486
1487static void __exit dm_multipath_exit(void)
1488{
1489 int r;
1490
1491 destroy_workqueue(kmpath_handlerd);
1465 destroy_workqueue(kmultipathd);
1466
1467 r = dm_unregister_target(&multipath_target);
1468 if (r < 0)
1469 DMERR("target unregister failed %d", r);
1470 kmem_cache_destroy(_mpio_cache);
1471}
1472
1473EXPORT_SYMBOL_GPL(dm_pg_init_complete);
1474
1475module_init(dm_multipath_init);
1476module_exit(dm_multipath_exit);
1477
1478MODULE_DESCRIPTION(DM_NAME " multipath target");
1479MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
1480MODULE_LICENSE("GPL");
1492 destroy_workqueue(kmultipathd);
1493
1494 r = dm_unregister_target(&multipath_target);
1495 if (r < 0)
1496 DMERR("target unregister failed %d", r);
1497 kmem_cache_destroy(_mpio_cache);
1498}
1499
1500EXPORT_SYMBOL_GPL(dm_pg_init_complete);
1501
1502module_init(dm_multipath_init);
1503module_exit(dm_multipath_exit);
1504
1505MODULE_DESCRIPTION(DM_NAME " multipath target");
1506MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
1507MODULE_LICENSE("GPL");