dm.c (97ae2b5c17d6cc988c6d49ae0cf95befb6b7081c, older revision) vs. dm.c (2bec1f4a8832e74ebbe859f176d8a9cb20dd97f4, newer revision)
1/*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include "dm.h"

--- 6 unchanged lines hidden ---

15#include <linux/blkpg.h>
16#include <linux/bio.h>
17#include <linux/mempool.h>
18#include <linux/slab.h>
19#include <linux/idr.h>
20#include <linux/hdreg.h>
21#include <linux/delay.h>
22#include <linux/wait.h>
1/*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include "dm.h"

--- 6 unchanged lines hidden ---

15#include <linux/blkpg.h>
16#include <linux/bio.h>
17#include <linux/mempool.h>
18#include <linux/slab.h>
19#include <linux/idr.h>
20#include <linux/hdreg.h>
21#include <linux/delay.h>
22#include <linux/wait.h>
23#include <linux/kthread.h>
23
24#include <trace/events/block.h>
25
26#define DM_MSG_PREFIX "core"
27
28#ifdef CONFIG_PRINTK
29/*
30 * ratelimit state to be used in DMXXX_LIMIT().

--- 42 unchanged lines hidden ---

73
74/*
75 * For request-based dm.
76 * One of these is allocated per request.
77 */
78struct dm_rq_target_io {
79 struct mapped_device *md;
80 struct dm_target *ti;
24
25#include <trace/events/block.h>
26
27#define DM_MSG_PREFIX "core"
28
29#ifdef CONFIG_PRINTK
30/*
31 * ratelimit state to be used in DMXXX_LIMIT().

--- 42 unchanged lines hidden ---

74
75/*
76 * For request-based dm.
77 * One of these is allocated per request.
78 */
79struct dm_rq_target_io {
80 struct mapped_device *md;
81 struct dm_target *ti;
81 struct request *orig, clone;
82 struct request *orig, *clone;
83 struct kthread_work work;
82 int error;
83 union map_info info;
84};
85
86/*
87 * For request-based dm - the bio clones we allocate are embedded in these
88 * structs.
89 *

--- 84 unchanged lines hidden ---

174 * Processing queue (flush)
175 */
176 struct workqueue_struct *wq;
177
178 /*
179 * io objects are allocated from here.
180 */
181 mempool_t *io_pool;
84 int error;
85 union map_info info;
86};
87
88/*
89 * For request-based dm - the bio clones we allocate are embedded in these
90 * structs.
91 *

--- 84 unchanged lines hidden ---

176 * Processing queue (flush)
177 */
178 struct workqueue_struct *wq;
179
180 /*
181 * io objects are allocated from here.
182 */
183 mempool_t *io_pool;
184 mempool_t *rq_pool;
182
183 struct bio_set *bs;
184
185 /*
186 * Event handling.
187 */
188 atomic_t event_nr;
189 wait_queue_head_t eventq;

--- 15 unchanged lines hidden ---

205
206 /* zero-length flush that will be cloned and submitted to targets */
207 struct bio flush_bio;
208
209 /* the number of internal suspends */
210 unsigned internal_suspend_count;
211
212 struct dm_stats stats;
185
186 struct bio_set *bs;
187
188 /*
189 * Event handling.
190 */
191 atomic_t event_nr;
192 wait_queue_head_t eventq;

--- 15 unchanged lines hidden ---

208
209 /* zero-length flush that will be cloned and submitted to targets */
210 struct bio flush_bio;
211
212 /* the number of internal suspends */
213 unsigned internal_suspend_count;
214
215 struct dm_stats stats;
216
217 struct kthread_worker kworker;
218 struct task_struct *kworker_task;
213};
214
215/*
216 * For mempools pre-allocation at the table loading time.
217 */
218struct dm_md_mempools {
219 mempool_t *io_pool;
219};
220
221/*
222 * For mempools pre-allocation at the table loading time.
223 */
224struct dm_md_mempools {
225 mempool_t *io_pool;
226 mempool_t *rq_pool;
220 struct bio_set *bs;
221};
222
223struct table_device {
224 struct list_head list;
225 atomic_t count;
226 struct dm_dev dm_dev;
227};
228
229#define RESERVED_BIO_BASED_IOS 16
230#define RESERVED_REQUEST_BASED_IOS 256
231#define RESERVED_MAX_IOS 1024
232static struct kmem_cache *_io_cache;
233static struct kmem_cache *_rq_tio_cache;
227 struct bio_set *bs;
228};
229
230struct table_device {
231 struct list_head list;
232 atomic_t count;
233 struct dm_dev dm_dev;
234};
235
236#define RESERVED_BIO_BASED_IOS 16
237#define RESERVED_REQUEST_BASED_IOS 256
238#define RESERVED_MAX_IOS 1024
239static struct kmem_cache *_io_cache;
240static struct kmem_cache *_rq_tio_cache;
241static struct kmem_cache *_rq_cache;
234
235/*
236 * Bio-based DM's mempools' reserved IOs set by the user.
237 */
238static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
239
240/*
241 * Request-based DM's mempools' reserved IOs set by the user.

--- 41 unchanged lines hidden ---

283 _io_cache = KMEM_CACHE(dm_io, 0);
284 if (!_io_cache)
285 return r;
286
287 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
288 if (!_rq_tio_cache)
289 goto out_free_io_cache;
290
242
243/*
244 * Bio-based DM's mempools' reserved IOs set by the user.
245 */
246static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
247
248/*
249 * Request-based DM's mempools' reserved IOs set by the user.

--- 41 unchanged lines hidden ---

291 _io_cache = KMEM_CACHE(dm_io, 0);
292 if (!_io_cache)
293 return r;
294
295 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
296 if (!_rq_tio_cache)
297 goto out_free_io_cache;
298
299 _rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request),
300 __alignof__(struct request), 0, NULL);
301 if (!_rq_cache)
302 goto out_free_rq_tio_cache;
303
291 r = dm_uevent_init();
292 if (r)
304 r = dm_uevent_init();
305 if (r)
293 goto out_free_rq_tio_cache;
306 goto out_free_rq_cache;
294
295 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
296 if (!deferred_remove_workqueue) {
297 r = -ENOMEM;
298 goto out_uevent_exit;
299 }
300
301 _major = major;

--- 5 unchanged lines hidden ---

307 _major = r;
308
309 return 0;
310
311out_free_workqueue:
312 destroy_workqueue(deferred_remove_workqueue);
313out_uevent_exit:
314 dm_uevent_exit();
307
308 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
309 if (!deferred_remove_workqueue) {
310 r = -ENOMEM;
311 goto out_uevent_exit;
312 }
313
314 _major = major;

--- 5 unchanged lines hidden ---

320 _major = r;
321
322 return 0;
323
324out_free_workqueue:
325 destroy_workqueue(deferred_remove_workqueue);
326out_uevent_exit:
327 dm_uevent_exit();
328out_free_rq_cache:
329 kmem_cache_destroy(_rq_cache);
315out_free_rq_tio_cache:
316 kmem_cache_destroy(_rq_tio_cache);
317out_free_io_cache:
318 kmem_cache_destroy(_io_cache);
319
320 return r;
321}
322
323static void local_exit(void)
324{
325 flush_scheduled_work();
326 destroy_workqueue(deferred_remove_workqueue);
327
330out_free_rq_tio_cache:
331 kmem_cache_destroy(_rq_tio_cache);
332out_free_io_cache:
333 kmem_cache_destroy(_io_cache);
334
335 return r;
336}
337
338static void local_exit(void)
339{
340 flush_scheduled_work();
341 destroy_workqueue(deferred_remove_workqueue);
342
343 kmem_cache_destroy(_rq_cache);
328 kmem_cache_destroy(_rq_tio_cache);
329 kmem_cache_destroy(_io_cache);
330 unregister_blkdev(_major, _name);
331 dm_uevent_exit();
332
333 _major = 0;
334
335 DMINFO("cleaned up");

--- 236 unchanged lines hidden ---

572 return mempool_alloc(md->io_pool, gfp_mask);
573}
574
575static void free_rq_tio(struct dm_rq_target_io *tio)
576{
577 mempool_free(tio, tio->md->io_pool);
578}
579
344 kmem_cache_destroy(_rq_tio_cache);
345 kmem_cache_destroy(_io_cache);
346 unregister_blkdev(_major, _name);
347 dm_uevent_exit();
348
349 _major = 0;
350
351 DMINFO("cleaned up");

--- 236 unchanged lines hidden ---

588 return mempool_alloc(md->io_pool, gfp_mask);
589}
590
591static void free_rq_tio(struct dm_rq_target_io *tio)
592{
593 mempool_free(tio, tio->md->io_pool);
594}
595
596static struct request *alloc_clone_request(struct mapped_device *md,
597 gfp_t gfp_mask)
598{
599 return mempool_alloc(md->rq_pool, gfp_mask);
600}
601
602static void free_clone_request(struct mapped_device *md, struct request *rq)
603{
604 mempool_free(rq, md->rq_pool);
605}
606
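
[Illustrative aside, not part of either revision: the newer side draws clone requests from a dedicated slab-backed mempool (md->rq_pool over _rq_cache) instead of embedding the clone in the tio. A minimal self-contained sketch of that allocation pattern, using hypothetical demo_* names and only the stock kmem_cache/mempool API:

#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>

/* Sketch only: hypothetical stand-ins for _rq_cache and md->rq_pool. */
static struct kmem_cache *demo_rq_cache;
static mempool_t *demo_rq_pool;

static int demo_rq_pool_init(unsigned int min_objs)
{
	demo_rq_cache = kmem_cache_create("demo_clone_request",
					  sizeof(struct request),
					  __alignof__(struct request), 0, NULL);
	if (!demo_rq_cache)
		return -ENOMEM;

	/* keep at least min_objs preallocated so allocation can always make progress */
	demo_rq_pool = mempool_create_slab_pool(min_objs, demo_rq_cache);
	if (!demo_rq_pool) {
		kmem_cache_destroy(demo_rq_cache);
		return -ENOMEM;
	}
	return 0;
}

static void demo_rq_pool_exit(void)
{
	mempool_destroy(demo_rq_pool);
	kmem_cache_destroy(demo_rq_cache);
}

End of aside.]
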
580static int md_in_flight(struct mapped_device *md)
581{
582 return atomic_read(&md->pending[READ]) +
583 atomic_read(&md->pending[WRITE]);
584}
585
586static void start_io_acct(struct dm_io *io)
587{

--- 399 unchanged lines hidden ---

987 blk_update_request(tio->orig, 0, nr_bytes);
988}
989
990/*
991 * Don't touch any member of the md after calling this function because
992 * the md may be freed in dm_put() at the end of this function.
993 * Or do dm_get() before calling this function and dm_put() later.
994 */
607static int md_in_flight(struct mapped_device *md)
608{
609 return atomic_read(&md->pending[READ]) +
610 atomic_read(&md->pending[WRITE]);
611}
612
613static void start_io_acct(struct dm_io *io)
614{

--- 399 unchanged lines hidden ---

1014 blk_update_request(tio->orig, 0, nr_bytes);
1015}
1016
1017/*
1018 * Don't touch any member of the md after calling this function because
1019 * the md may be freed in dm_put() at the end of this function.
1020 * Or do dm_get() before calling this function and dm_put() later.
1021 */
995static void rq_completed(struct mapped_device *md, int rw, int run_queue)
1022static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
996{
997 atomic_dec(&md->pending[rw]);
998
999 /* nudge anyone waiting on suspend queue */
1000 if (!md_in_flight(md))
1001 wake_up(&md->wait);
1002
1003 /*

--- 11 unchanged lines hidden ---

1015 dm_put(md);
1016}
1017
1018static void free_rq_clone(struct request *clone)
1019{
1020 struct dm_rq_target_io *tio = clone->end_io_data;
1021
1022 blk_rq_unprep_clone(clone);
1023{
1024 atomic_dec(&md->pending[rw]);
1025
1026 /* nudge anyone waiting on suspend queue */
1027 if (!md_in_flight(md))
1028 wake_up(&md->wait);
1029
1030 /*

--- 11 unchanged lines hidden ---

1042 dm_put(md);
1043}
1044
1045static void free_rq_clone(struct request *clone)
1046{
1047 struct dm_rq_target_io *tio = clone->end_io_data;
1048
1049 blk_rq_unprep_clone(clone);
1050 if (clone->q && clone->q->mq_ops)
1051 tio->ti->type->release_clone_rq(clone);
1052 else
1053 free_clone_request(tio->md, clone);
1023 free_rq_tio(tio);
1024}
1025
1026/*
1027 * Complete the clone and the original request.
1054 free_rq_tio(tio);
1055}
1056
1057/*
1058 * Complete the clone and the original request.
1028 * Must be called without queue lock.
1059 * Must be called without clone's queue lock held,
1060 * see end_clone_request() for more details.
1029 */
1030static void dm_end_request(struct request *clone, int error)
1031{
1032 int rw = rq_data_dir(clone);
1033 struct dm_rq_target_io *tio = clone->end_io_data;
1034 struct mapped_device *md = tio->md;
1035 struct request *rq = tio->orig;
1036

--- 12 unchanged lines hidden ---

1049
1050 free_rq_clone(clone);
1051 blk_end_request_all(rq, error);
1052 rq_completed(md, rw, true);
1053}
1054
1055static void dm_unprep_request(struct request *rq)
1056{
1061 */
1062static void dm_end_request(struct request *clone, int error)
1063{
1064 int rw = rq_data_dir(clone);
1065 struct dm_rq_target_io *tio = clone->end_io_data;
1066 struct mapped_device *md = tio->md;
1067 struct request *rq = tio->orig;
1068

--- 12 unchanged lines hidden ---

1081
1082 free_rq_clone(clone);
1083 blk_end_request_all(rq, error);
1084 rq_completed(md, rw, true);
1085}
1086
1087static void dm_unprep_request(struct request *rq)
1088{
1057 struct request *clone = rq->special;
1089 struct dm_rq_target_io *tio = rq->special;
1090 struct request *clone = tio->clone;
1058
1059 rq->special = NULL;
1060 rq->cmd_flags &= ~REQ_DONTPREP;
1061
1091
1092 rq->special = NULL;
1093 rq->cmd_flags &= ~REQ_DONTPREP;
1094
1062 free_rq_clone(clone);
1095 if (clone)
1096 free_rq_clone(clone);
1063}
1064
1065/*
1066 * Requeue the original request of a clone.
1067 */
1097}
1098
1099/*
1100 * Requeue the original request of a clone.
1101 */
1068void dm_requeue_unmapped_request(struct request *clone)
1102static void dm_requeue_unmapped_original_request(struct mapped_device *md,
1103 struct request *rq)
1069{
1104{
1070 int rw = rq_data_dir(clone);
1071 struct dm_rq_target_io *tio = clone->end_io_data;
1072 struct mapped_device *md = tio->md;
1073 struct request *rq = tio->orig;
1105 int rw = rq_data_dir(rq);
1074 struct request_queue *q = rq->q;
1075 unsigned long flags;
1076
1077 dm_unprep_request(rq);
1078
1079 spin_lock_irqsave(q->queue_lock, flags);
1080 blk_requeue_request(q, rq);
1081 spin_unlock_irqrestore(q->queue_lock, flags);
1082
1106 struct request_queue *q = rq->q;
1107 unsigned long flags;
1108
1109 dm_unprep_request(rq);
1110
1111 spin_lock_irqsave(q->queue_lock, flags);
1112 blk_requeue_request(q, rq);
1113 spin_unlock_irqrestore(q->queue_lock, flags);
1114
1083 rq_completed(md, rw, 0);
1115 rq_completed(md, rw, false);
1084}
1116}
1085EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
1086
1117
1118static void dm_requeue_unmapped_request(struct request *clone)
1119{
1120 struct dm_rq_target_io *tio = clone->end_io_data;
1121
1122 dm_requeue_unmapped_original_request(tio->md, tio->orig);
1123}
1124
1087static void __stop_queue(struct request_queue *q)
1088{
1089 blk_stop_queue(q);
1090}
1091
1092static void stop_queue(struct request_queue *q)
1093{
1094 unsigned long flags;

--- 51 unchanged lines hidden ---

1146}
1147
1148/*
1149 * Request completion handler for request-based dm
1150 */
1151static void dm_softirq_done(struct request *rq)
1152{
1153 bool mapped = true;
1125static void __stop_queue(struct request_queue *q)
1126{
1127 blk_stop_queue(q);
1128}
1129
1130static void stop_queue(struct request_queue *q)
1131{
1132 unsigned long flags;

--- 51 unchanged lines hidden ---

1184}
1185
1186/*
1187 * Request completion handler for request-based dm
1188 */
1189static void dm_softirq_done(struct request *rq)
1190{
1191 bool mapped = true;
1154 struct request *clone = rq->completion_data;
1155 struct dm_rq_target_io *tio = clone->end_io_data;
1192 struct dm_rq_target_io *tio = rq->special;
1193 struct request *clone = tio->clone;
1156
1194
1195 if (!clone) {
1196 blk_end_request_all(rq, tio->error);
1197 rq_completed(tio->md, rq_data_dir(rq), false);
1198 free_rq_tio(tio);
1199 return;
1200 }
1201
1157 if (rq->cmd_flags & REQ_FAILED)
1158 mapped = false;
1159
1160 dm_done(clone, tio->error, mapped);
1161}
1162
1163/*
1164 * Complete the clone and the original request with the error status
1165 * through softirq context.
1166 */
1202 if (rq->cmd_flags & REQ_FAILED)
1203 mapped = false;
1204
1205 dm_done(clone, tio->error, mapped);
1206}
1207
1208/*
1209 * Complete the clone and the original request with the error status
1210 * through softirq context.
1211 */
1167static void dm_complete_request(struct request *clone, int error)
1212static void dm_complete_request(struct request *rq, int error)
1168{
1213{
1169 struct dm_rq_target_io *tio = clone->end_io_data;
1170 struct request *rq = tio->orig;
1214 struct dm_rq_target_io *tio = rq->special;
1171
1172 tio->error = error;
1215
1216 tio->error = error;
1173 rq->completion_data = clone;
1174 blk_complete_request(rq);
1175}
1176
1177/*
1178 * Complete the not-mapped clone and the original request with the error status
1179 * through softirq context.
1180 * Target's rq_end_io() function isn't called.
1217 blk_complete_request(rq);
1218}
1219
1220/*
1221 * Complete the not-mapped clone and the original request with the error status
1222 * through softirq context.
1223 * Target's rq_end_io() function isn't called.
1181 * This may be used when the target's map_rq() function fails.
1224 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
1182 */
1225 */
1183void dm_kill_unmapped_request(struct request *clone, int error)
1226static void dm_kill_unmapped_request(struct request *rq, int error)
1184{
1227{
1185 struct dm_rq_target_io *tio = clone->end_io_data;
1186 struct request *rq = tio->orig;
1187
1188 rq->cmd_flags |= REQ_FAILED;
1228 rq->cmd_flags |= REQ_FAILED;
1189 dm_complete_request(clone, error);
1229 dm_complete_request(rq, error);
1190}
1230}
1191EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
1192
1193/*
1231
1232/*
1194 * Called with the queue lock held
1233 * Called with the clone's queue lock held
1195 */
1196static void end_clone_request(struct request *clone, int error)
1197{
1234 */
1235static void end_clone_request(struct request *clone, int error)
1236{
1198 /*
1199 * For just cleaning up the information of the queue in which
1200 * the clone was dispatched.
1201 * The clone is *NOT* freed actually here because it is alloced from
1202 * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
1203 */
1204 __blk_put_request(clone->q, clone);
1237 struct dm_rq_target_io *tio = clone->end_io_data;
1205
1238
1239 if (!clone->q->mq_ops) {
1240 /*
1241 * For just cleaning up the information of the queue in which
1242 * the clone was dispatched.
1243 * The clone is *NOT* freed actually here because it is alloced
1244 * from dm own mempool (REQ_ALLOCED isn't set).
1245 */
1246 __blk_put_request(clone->q, clone);
1247 }
1248
1206 /*
1207 * Actual request completion is done in a softirq context which doesn't
1249 /*
1250 * Actual request completion is done in a softirq context which doesn't
1208 * hold the queue lock. Otherwise, deadlock could occur because:
1251 * hold the clone's queue lock. Otherwise, deadlock could occur because:
1209 * - another request may be submitted by the upper level driver
1210 * of the stacking during the completion
1211 * - the submission which requires queue lock may be done
1252 * - another request may be submitted by the upper level driver
1253 * of the stacking during the completion
1254 * - the submission which requires queue lock may be done
1212 * against this queue
1255 * against this clone's queue
1213 */
1256 */
1214 dm_complete_request(clone, error);
1257 dm_complete_request(tio->orig, error);
1215}
1216
1217/*
1218 * Return maximum size of I/O possible at the supplied sector up to the current
1219 * target boundary.
1220 */
1221static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
1222{

--- 461 unchanged lines hidden ---

1684 struct mapped_device *md = q->queuedata;
1685
1686 if (dm_request_based(md))
1687 blk_queue_bio(q, bio);
1688 else
1689 _dm_request(q, bio);
1690}
1691
1258}
1259
1260/*
1261 * Return maximum size of I/O possible at the supplied sector up to the current
1262 * target boundary.
1263 */
1264static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
1265{

--- 461 unchanged lines hidden ---

1727 struct mapped_device *md = q->queuedata;
1728
1729 if (dm_request_based(md))
1730 blk_queue_bio(q, bio);
1731 else
1732 _dm_request(q, bio);
1733}
1734
1692void dm_dispatch_request(struct request *rq)
1735static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
1693{
1694 int r;
1695
1736{
1737 int r;
1738
1696 if (blk_queue_io_stat(rq->q))
1697 rq->cmd_flags |= REQ_IO_STAT;
1739 if (blk_queue_io_stat(clone->q))
1740 clone->cmd_flags |= REQ_IO_STAT;
1698
1741
1699 rq->start_time = jiffies;
1700 r = blk_insert_cloned_request(rq->q, rq);
1742 clone->start_time = jiffies;
1743 r = blk_insert_cloned_request(clone->q, clone);
1701 if (r)
1744 if (r)
1745 /* must complete clone in terms of original request */
1702 dm_complete_request(rq, r);
1703}
1746 dm_complete_request(rq, r);
1747}
1704EXPORT_SYMBOL_GPL(dm_dispatch_request);
1705
1706static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1707 void *data)
1708{
1709 struct dm_rq_target_io *tio = data;
1710 struct dm_rq_clone_bio_info *info =
1711 container_of(bio, struct dm_rq_clone_bio_info, clone);
1712
1713 info->orig = bio_orig;
1714 info->tio = tio;
1715 bio->bi_end_io = end_clone_bio;
1716
1717 return 0;
1718}
1719
1720static int setup_clone(struct request *clone, struct request *rq,
1748
1749static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1750 void *data)
1751{
1752 struct dm_rq_target_io *tio = data;
1753 struct dm_rq_clone_bio_info *info =
1754 container_of(bio, struct dm_rq_clone_bio_info, clone);
1755
1756 info->orig = bio_orig;
1757 info->tio = tio;
1758 bio->bi_end_io = end_clone_bio;
1759
1760 return 0;
1761}
1762
1763static int setup_clone(struct request *clone, struct request *rq,
1721 struct dm_rq_target_io *tio)
1764 struct dm_rq_target_io *tio, gfp_t gfp_mask)
1722{
1723 int r;
1724
1765{
1766 int r;
1767
1725 r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1768 r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
1726 dm_rq_bio_constructor, tio);
1727 if (r)
1728 return r;
1729
1730 clone->cmd = rq->cmd;
1731 clone->cmd_len = rq->cmd_len;
1732 clone->sense = rq->sense;
1733 clone->end_io = end_clone_request;
1734 clone->end_io_data = tio;
1735
1769 dm_rq_bio_constructor, tio);
1770 if (r)
1771 return r;
1772
1773 clone->cmd = rq->cmd;
1774 clone->cmd_len = rq->cmd_len;
1775 clone->sense = rq->sense;
1776 clone->end_io = end_clone_request;
1777 clone->end_io_data = tio;
1778
1779 tio->clone = clone;
1780
1736 return 0;
1737}
1738
1739static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1781 return 0;
1782}
1783
1784static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1740 gfp_t gfp_mask)
1785 struct dm_rq_target_io *tio, gfp_t gfp_mask)
1741{
1786{
1742 struct request *clone;
1787 struct request *clone = alloc_clone_request(md, gfp_mask);
1788
1789 if (!clone)
1790 return NULL;
1791
1792 blk_rq_init(NULL, clone);
1793 if (setup_clone(clone, rq, tio, gfp_mask)) {
1794 /* -ENOMEM */
1795 free_clone_request(md, clone);
1796 return NULL;
1797 }
1798
1799 return clone;
1800}
1801
1802static void map_tio_request(struct kthread_work *work);
1803
1804static struct dm_rq_target_io *prep_tio(struct request *rq,
1805 struct mapped_device *md, gfp_t gfp_mask)
1806{
1743 struct dm_rq_target_io *tio;
1807 struct dm_rq_target_io *tio;
1808 int srcu_idx;
1809 struct dm_table *table;
1744
1745 tio = alloc_rq_tio(md, gfp_mask);
1746 if (!tio)
1747 return NULL;
1748
1749 tio->md = md;
1750 tio->ti = NULL;
1810
1811 tio = alloc_rq_tio(md, gfp_mask);
1812 if (!tio)
1813 return NULL;
1814
1815 tio->md = md;
1816 tio->ti = NULL;
1817 tio->clone = NULL;
1751 tio->orig = rq;
1752 tio->error = 0;
1753 memset(&tio->info, 0, sizeof(tio->info));
1818 tio->orig = rq;
1819 tio->error = 0;
1820 memset(&tio->info, 0, sizeof(tio->info));
1821 init_kthread_work(&tio->work, map_tio_request);
1754
1822
1755 clone = &tio->clone;
1756 if (setup_clone(clone, rq, tio)) {
1757 /* -ENOMEM */
1758 free_rq_tio(tio);
1759 return NULL;
1823 table = dm_get_live_table(md, &srcu_idx);
1824 if (!dm_table_mq_request_based(table)) {
1825 if (!clone_rq(rq, md, tio, gfp_mask)) {
1826 dm_put_live_table(md, srcu_idx);
1827 free_rq_tio(tio);
1828 return NULL;
1829 }
1760 }
1830 }
1831 dm_put_live_table(md, srcu_idx);
1761
1832
1762 return clone;
1833 return tio;
1763}
1764
1765/*
1766 * Called with the queue lock held.
1767 */
1768static int dm_prep_fn(struct request_queue *q, struct request *rq)
1769{
1770 struct mapped_device *md = q->queuedata;
1834}
1835
1836/*
1837 * Called with the queue lock held.
1838 */
1839static int dm_prep_fn(struct request_queue *q, struct request *rq)
1840{
1841 struct mapped_device *md = q->queuedata;
1771 struct request *clone;
1842 struct dm_rq_target_io *tio;
1772
1773 if (unlikely(rq->special)) {
1774 DMWARN("Already has something in rq->special.");
1775 return BLKPREP_KILL;
1776 }
1777
1843
1844 if (unlikely(rq->special)) {
1845 DMWARN("Already has something in rq->special.");
1846 return BLKPREP_KILL;
1847 }
1848
1778 clone = clone_rq(rq, md, GFP_ATOMIC);
1779 if (!clone)
1849 tio = prep_tio(rq, md, GFP_ATOMIC);
1850 if (!tio)
1780 return BLKPREP_DEFER;
1781
1851 return BLKPREP_DEFER;
1852
1782 rq->special = clone;
1853 rq->special = tio;
1783 rq->cmd_flags |= REQ_DONTPREP;
1784
1785 return BLKPREP_OK;
1786}
1787
1788/*
1789 * Returns:
1854 rq->cmd_flags |= REQ_DONTPREP;
1855
1856 return BLKPREP_OK;
1857}
1858
1859/*
1860 * Returns:
1790 * 0 : the request has been processed (not requeued)
1791 * !0 : the request has been requeued
1861 * 0 : the request has been processed
1862 * DM_MAPIO_REQUEUE : the original request needs to be requeued
1863 * < 0 : the request was completed due to failure
1792 */
1864 */
1793static int map_request(struct dm_target *ti, struct request *clone,
1865static int map_request(struct dm_target *ti, struct request *rq,
1794 struct mapped_device *md)
1795{
1866 struct mapped_device *md)
1867{
1796 int r, requeued = 0;
1797 struct dm_rq_target_io *tio = clone->end_io_data;
1868 int r;
1869 struct dm_rq_target_io *tio = rq->special;
1870 struct request *clone = NULL;
1798
1871
1799 tio->ti = ti;
1800 r = ti->type->map_rq(ti, clone, &tio->info);
1872 if (tio->clone) {
1873 clone = tio->clone;
1874 r = ti->type->map_rq(ti, clone, &tio->info);
1875 } else {
1876 r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
1877 if (r < 0) {
1878 /* The target wants to complete the I/O */
1879 dm_kill_unmapped_request(rq, r);
1880 return r;
1881 }
1882 if (IS_ERR(clone))
1883 return DM_MAPIO_REQUEUE;
1884 if (setup_clone(clone, rq, tio, GFP_KERNEL)) {
1885 /* -ENOMEM */
1886 ti->type->release_clone_rq(clone);
1887 return DM_MAPIO_REQUEUE;
1888 }
1889 }
1890
1801 switch (r) {
1802 case DM_MAPIO_SUBMITTED:
1803 /* The target has taken the I/O to submit by itself later */
1804 break;
1805 case DM_MAPIO_REMAPPED:
1806 /* The target has remapped the I/O so dispatch it */
1807 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1891 switch (r) {
1892 case DM_MAPIO_SUBMITTED:
1893 /* The target has taken the I/O to submit by itself later */
1894 break;
1895 case DM_MAPIO_REMAPPED:
1896 /* The target has remapped the I/O so dispatch it */
1897 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1808 blk_rq_pos(tio->orig));
1809 dm_dispatch_request(clone);
1898 blk_rq_pos(rq));
1899 dm_dispatch_clone_request(clone, rq);
1810 break;
1811 case DM_MAPIO_REQUEUE:
1812 /* The target wants to requeue the I/O */
1813 dm_requeue_unmapped_request(clone);
1900 break;
1901 case DM_MAPIO_REQUEUE:
1902 /* The target wants to requeue the I/O */
1903 dm_requeue_unmapped_request(clone);
1814 requeued = 1;
1815 break;
1816 default:
1817 if (r > 0) {
1818 DMWARN("unimplemented target map return value: %d", r);
1819 BUG();
1820 }
1821
1822 /* The target wants to complete the I/O */
1904 break;
1905 default:
1906 if (r > 0) {
1907 DMWARN("unimplemented target map return value: %d", r);
1908 BUG();
1909 }
1910
1911 /* The target wants to complete the I/O */
1823 dm_kill_unmapped_request(clone, r);
1824 break;
1912 dm_kill_unmapped_request(rq, r);
1913 return r;
1825 }
1826
1914 }
1915
1827 return requeued;
1916 return 0;
1828}
1829
1917}
1918
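
[Illustrative aside, not part of either revision: a sketch of how a request-based target might implement the new clone_and_map_rq/release_clone_rq hooks whose call sites appear above in map_request() and in free_rq_clone(), in the spirit of a pass-through target such as dm-multipath. The example_* identifiers and the single underlying ->dev field are hypothetical; the error handling simply mirrors the IS_ERR()/DM_MAPIO_REQUEUE convention visible in map_request():

#include <linux/device-mapper.h>
#include <linux/blkdev.h>

struct example_target {
	struct dm_dev *dev;	/* the one underlying device (hypothetical) */
};

static int example_clone_and_map_rq(struct dm_target *ti, struct request *rq,
				    union map_info *map_context,
				    struct request **__clone)
{
	struct example_target *et = ti->private;
	struct request_queue *q = bdev_get_queue(et->dev->bdev);
	struct request *clone;

	/* allocate the clone from the underlying queue instead of dm's rq_pool */
	clone = blk_get_request(q, rq_data_dir(rq), GFP_ATOMIC);
	if (IS_ERR(clone)) {
		*__clone = clone;	/* map_request() sees IS_ERR() and requeues */
		return DM_MAPIO_REQUEUE;
	}

	*__clone = clone;
	return DM_MAPIO_REMAPPED;	/* dm core sets up and dispatches the clone */
}

static void example_release_clone_rq(struct request *clone)
{
	blk_put_request(clone);
}

End of aside.]
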
1830static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
1919static void map_tio_request(struct kthread_work *work)
1831{
1920{
1832 struct request *clone;
1921 struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
1922 struct request *rq = tio->orig;
1923 struct mapped_device *md = tio->md;
1833
1924
1925 if (map_request(tio->ti, rq, md) == DM_MAPIO_REQUEUE)
1926 dm_requeue_unmapped_original_request(md, rq);
1927}
1928
1929static void dm_start_request(struct mapped_device *md, struct request *orig)
1930{
1834 blk_start_request(orig);
1931 blk_start_request(orig);
1835 clone = orig->special;
1836 atomic_inc(&md->pending[rq_data_dir(clone)]);
1932 atomic_inc(&md->pending[rq_data_dir(orig)]);
1837
1838 /*
1839 * Hold the md reference here for the in-flight I/O.
1840 * We can't rely on the reference count by device opener,
1841 * because the device may be closed during the request completion
1842 * when all bios are completed.
1843 * See the comment in rq_completed() too.
1844 */
1845 dm_get(md);
1933
1934 /*
1935 * Hold the md reference here for the in-flight I/O.
1936 * We can't rely on the reference count by device opener,
1937 * because the device may be closed during the request completion
1938 * when all bios are completed.
1939 * See the comment in rq_completed() too.
1940 */
1941 dm_get(md);
1846
1847 return clone;
1848}
1849
1850/*
1851 * q->request_fn for request-based dm.
1852 * Called with the queue lock held.
1853 */
1854static void dm_request_fn(struct request_queue *q)
1855{
1856 struct mapped_device *md = q->queuedata;
1857 int srcu_idx;
1858 struct dm_table *map = dm_get_live_table(md, &srcu_idx);
1859 struct dm_target *ti;
1942}
1943
1944/*
1945 * q->request_fn for request-based dm.
1946 * Called with the queue lock held.
1947 */
1948static void dm_request_fn(struct request_queue *q)
1949{
1950 struct mapped_device *md = q->queuedata;
1951 int srcu_idx;
1952 struct dm_table *map = dm_get_live_table(md, &srcu_idx);
1953 struct dm_target *ti;
1860 struct request *rq, *clone;
1954 struct request *rq;
1955 struct dm_rq_target_io *tio;
1861 sector_t pos;
1862
1863 /*
1864 * For suspend, check blk_queue_stopped() and increment
1865 * ->pending within a single queue_lock not to increment the
1866 * number of in-flight I/Os after the queue is stopped in
1867 * dm_suspend().
1868 */

--- 5 unchanged lines hidden ---

1874 /* always use block 0 to find the target for flushes for now */
1875 pos = 0;
1876 if (!(rq->cmd_flags & REQ_FLUSH))
1877 pos = blk_rq_pos(rq);
1878
1879 ti = dm_table_find_target(map, pos);
1880 if (!dm_target_is_valid(ti)) {
1881 /*
1956 sector_t pos;
1957
1958 /*
1959 * For suspend, check blk_queue_stopped() and increment
1960 * ->pending within a single queue_lock not to increment the
1961 * number of in-flight I/Os after the queue is stopped in
1962 * dm_suspend().
1963 */

--- 5 unchanged lines hidden ---

1969 /* always use block 0 to find the target for flushes for now */
1970 pos = 0;
1971 if (!(rq->cmd_flags & REQ_FLUSH))
1972 pos = blk_rq_pos(rq);
1973
1974 ti = dm_table_find_target(map, pos);
1975 if (!dm_target_is_valid(ti)) {
1976 /*
1882 * Must perform setup, that dm_done() requires,
1977 * Must perform setup, that rq_completed() requires,
1883 * before calling dm_kill_unmapped_request
1884 */
1885 DMERR_LIMIT("request attempted access beyond the end of device");
1978 * before calling dm_kill_unmapped_request
1979 */
1980 DMERR_LIMIT("request attempted access beyond the end of device");
1886 clone = dm_start_request(md, rq);
1887 dm_kill_unmapped_request(clone, -EIO);
1981 dm_start_request(md, rq);
1982 dm_kill_unmapped_request(rq, -EIO);
1888 continue;
1889 }
1890
1891 if (ti->type->busy && ti->type->busy(ti))
1892 goto delay_and_out;
1893
1983 continue;
1984 }
1985
1986 if (ti->type->busy && ti->type->busy(ti))
1987 goto delay_and_out;
1988
1894 clone = dm_start_request(md, rq);
1989 dm_start_request(md, rq);
1895
1990
1896 spin_unlock(q->queue_lock);
1897 if (map_request(ti, clone, md))
1898 goto requeued;
1899
1991 tio = rq->special;
1992 /* Establish tio->ti before queuing work (map_tio_request) */
1993 tio->ti = ti;
1994 queue_kthread_work(&md->kworker, &tio->work);
1900 BUG_ON(!irqs_disabled());
1995 BUG_ON(!irqs_disabled());
1901 spin_lock(q->queue_lock);
1902 }
1903
1904 goto out;
1905
1996 }
1997
1998 goto out;
1999
1906requeued:
1907 BUG_ON(!irqs_disabled());
1908 spin_lock(q->queue_lock);
1909
1910delay_and_out:
1911 blk_delay_queue(q, HZ / 10);
1912out:
1913 dm_put_live_table(md, srcu_idx);
1914}
1915
1916int dm_underlying_device_busy(struct request_queue *q)
1917{

--- 169 unchanged lines hidden ---

2087 goto bad_disk;
2088
2089 atomic_set(&md->pending[0], 0);
2090 atomic_set(&md->pending[1], 0);
2091 init_waitqueue_head(&md->wait);
2092 INIT_WORK(&md->work, dm_wq_work);
2093 init_waitqueue_head(&md->eventq);
2094 init_completion(&md->kobj_holder.completion);
2000delay_and_out:
2001 blk_delay_queue(q, HZ / 10);
2002out:
2003 dm_put_live_table(md, srcu_idx);
2004}
2005
2006int dm_underlying_device_busy(struct request_queue *q)
2007{

--- 169 unchanged lines hidden ---

2177 goto bad_disk;
2178
2179 atomic_set(&md->pending[0], 0);
2180 atomic_set(&md->pending[1], 0);
2181 init_waitqueue_head(&md->wait);
2182 INIT_WORK(&md->work, dm_wq_work);
2183 init_waitqueue_head(&md->eventq);
2184 init_completion(&md->kobj_holder.completion);
2185 md->kworker_task = NULL;
2095
2096 md->disk->major = _major;
2097 md->disk->first_minor = minor;
2098 md->disk->fops = &dm_blk_dops;
2099 md->disk->queue = md->queue;
2100 md->disk->private_data = md;
2101 sprintf(md->disk->disk_name, "dm-%d", minor);
2102 add_disk(md->disk);

--- 44 unchanged lines hidden ---

2147
2148static void free_dev(struct mapped_device *md)
2149{
2150 int minor = MINOR(disk_devt(md->disk));
2151
2152 unlock_fs(md);
2153 bdput(md->bdev);
2154 destroy_workqueue(md->wq);
2186
2187 md->disk->major = _major;
2188 md->disk->first_minor = minor;
2189 md->disk->fops = &dm_blk_dops;
2190 md->disk->queue = md->queue;
2191 md->disk->private_data = md;
2192 sprintf(md->disk->disk_name, "dm-%d", minor);
2193 add_disk(md->disk);

--- 44 unchanged lines hidden ---

2238
2239static void free_dev(struct mapped_device *md)
2240{
2241 int minor = MINOR(disk_devt(md->disk));
2242
2243 unlock_fs(md);
2244 bdput(md->bdev);
2245 destroy_workqueue(md->wq);
2246
2247 if (md->kworker_task)
2248 kthread_stop(md->kworker_task);
2155 if (md->io_pool)
2156 mempool_destroy(md->io_pool);
2249 if (md->io_pool)
2250 mempool_destroy(md->io_pool);
2251 if (md->rq_pool)
2252 mempool_destroy(md->rq_pool);
2157 if (md->bs)
2158 bioset_free(md->bs);
2159 blk_integrity_unregister(md->disk);
2160 del_gendisk(md->disk);
2161 cleanup_srcu_struct(&md->io_barrier);
2162 free_table_devices(&md->table_devices);
2163 free_minor(minor);
2164

--- 17 unchanged lines hidden ---

2182 if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
2183 /*
2184 * Reload bioset because front_pad may have changed
2185 * because a different table was loaded.
2186 */
2187 bioset_free(md->bs);
2188 md->bs = p->bs;
2189 p->bs = NULL;
2253 if (md->bs)
2254 bioset_free(md->bs);
2255 blk_integrity_unregister(md->disk);
2256 del_gendisk(md->disk);
2257 cleanup_srcu_struct(&md->io_barrier);
2258 free_table_devices(&md->table_devices);
2259 free_minor(minor);
2260

--- 17 unchanged lines hidden ---

2278 if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
2279 /*
2280 * Reload bioset because front_pad may have changed
2281 * because a different table was loaded.
2282 */
2283 bioset_free(md->bs);
2284 md->bs = p->bs;
2285 p->bs = NULL;
2190 } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
2191 /*
2192 * There's no need to reload with request-based dm
2193 * because the size of front_pad doesn't change.
2194 * Note for future: If you are to reload bioset,
2195 * prep-ed requests in the queue may refer
2196 * to bio from the old bioset, so you must walk
2197 * through the queue to unprep.
2198 */
2199 }
2286 }
2287 /*
2288 * There's no need to reload with request-based dm
2289 * because the size of front_pad doesn't change.
2290 * Note for future: If you are to reload bioset,
2291 * prep-ed requests in the queue may refer
2292 * to bio from the old bioset, so you must walk
2293 * through the queue to unprep.
2294 */
2200 goto out;
2201 }
2202
2295 goto out;
2296 }
2297
2203 BUG_ON(!p || md->io_pool || md->bs);
2298 BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
2204
2205 md->io_pool = p->io_pool;
2206 p->io_pool = NULL;
2299
2300 md->io_pool = p->io_pool;
2301 p->io_pool = NULL;
2302 md->rq_pool = p->rq_pool;
2303 p->rq_pool = NULL;
2207 md->bs = p->bs;
2208 p->bs = NULL;
2209
2210out:
2211 /* mempool bind completed, now no need any mempools in the table */
2212 dm_table_free_md_mempools(t);
2213}
2214

--- 186 unchanged lines hidden ---

2401}
2402
2403unsigned dm_get_md_type(struct mapped_device *md)
2404{
2405 BUG_ON(!mutex_is_locked(&md->type_lock));
2406 return md->type;
2407}
2408
2304 md->bs = p->bs;
2305 p->bs = NULL;
2306
2307out:
2308 /* mempool bind completed, now no need any mempools in the table */
2309 dm_table_free_md_mempools(t);
2310}
2311

--- 186 unchanged lines hidden ---

2498}
2499
2500unsigned dm_get_md_type(struct mapped_device *md)
2501{
2502 BUG_ON(!mutex_is_locked(&md->type_lock));
2503 return md->type;
2504}
2505
2506static bool dm_md_type_request_based(struct mapped_device *md)
2507{
2508 unsigned table_type = dm_get_md_type(md);
2509
2510 return (table_type == DM_TYPE_REQUEST_BASED ||
2511 table_type == DM_TYPE_MQ_REQUEST_BASED);
2512}
2513
2409struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2410{
2411 return md->immutable_target_type;
2412}
2413
2414/*
2415 * The queue_limits are only valid as long as you have a reference
2416 * count on 'md'.

--- 21 unchanged lines hidden ---

2438 return 0;
2439
2440 md->queue = q;
2441 dm_init_md_queue(md);
2442 blk_queue_softirq_done(md->queue, dm_softirq_done);
2443 blk_queue_prep_rq(md->queue, dm_prep_fn);
2444 blk_queue_lld_busy(md->queue, dm_lld_busy);
2445
2514struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2515{
2516 return md->immutable_target_type;
2517}
2518
2519/*
2520 * The queue_limits are only valid as long as you have a reference
2521 * count on 'md'.

--- 21 unchanged lines hidden ---

2543 return 0;
2544
2545 md->queue = q;
2546 dm_init_md_queue(md);
2547 blk_queue_softirq_done(md->queue, dm_softirq_done);
2548 blk_queue_prep_rq(md->queue, dm_prep_fn);
2549 blk_queue_lld_busy(md->queue, dm_lld_busy);
2550
2551 /* Also initialize the request-based DM worker thread */
2552 init_kthread_worker(&md->kworker);
2553 md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
2554 "kdmwork-%s", dm_device_name(md));
2555
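
[Illustrative aside, not part of either revision: the worker created here follows the generic kthread_worker pattern from <linux/kthread.h> (pre-4.9 API names). A dedicated thread runs kthread_worker_fn() and executes queued kthread_work items in process context, which lets the request mapping done by map_tio_request() happen where it may block (e.g. the GFP_KERNEL clone setup in map_request()) rather than inside the request_fn. A minimal sketch of the same pattern with hypothetical demo_* names:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kthread.h>

struct demo_ctx {
	struct kthread_worker worker;
	struct task_struct *worker_task;
	struct kthread_work work;
};

static void demo_work_fn(struct kthread_work *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);

	/* runs in process context on the dedicated "demo-worker" thread */
	(void)ctx;
}

static int demo_start(struct demo_ctx *ctx)
{
	init_kthread_worker(&ctx->worker);
	ctx->worker_task = kthread_run(kthread_worker_fn, &ctx->worker,
				       "demo-worker");
	if (IS_ERR(ctx->worker_task))
		return PTR_ERR(ctx->worker_task);

	init_kthread_work(&ctx->work, demo_work_fn);
	queue_kthread_work(&ctx->worker, &ctx->work);	/* hand one item to the thread */
	return 0;
}

static void demo_stop(struct demo_ctx *ctx)
{
	flush_kthread_worker(&ctx->worker);	/* wait for queued work to finish */
	kthread_stop(ctx->worker_task);		/* kthread_worker_fn() then returns */
}

End of aside.]
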
2446 elv_register_queue(md->queue);
2447
2448 return 1;
2449}
2450
2451/*
2452 * Setup the DM device's queue based on md's type
2453 */
2454int dm_setup_md_queue(struct mapped_device *md)
2455{
2556 elv_register_queue(md->queue);
2557
2558 return 1;
2559}
2560
2561/*
2562 * Setup the DM device's queue based on md's type
2563 */
2564int dm_setup_md_queue(struct mapped_device *md)
2565{
2456 if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
2457 !dm_init_request_based_queue(md)) {
2566 if (dm_md_type_request_based(md) && !dm_init_request_based_queue(md)) {
2458 DMWARN("Cannot initialize queue for request-based mapped device");
2459 return -EINVAL;
2460 }
2461
2462 return 0;
2463}
2464
2567 DMWARN("Cannot initialize queue for request-based mapped device");
2568 return -EINVAL;
2569 }
2570
2571 return 0;
2572}
2573
2465static struct mapped_device *dm_find_md(dev_t dev)
2574struct mapped_device *dm_get_md(dev_t dev)
2466{
2467 struct mapped_device *md;
2468 unsigned minor = MINOR(dev);
2469
2470 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2471 return NULL;
2472
2473 spin_lock(&_minor_lock);
2474
2475 md = idr_find(&_minor_idr, minor);
2575{
2576 struct mapped_device *md;
2577 unsigned minor = MINOR(dev);
2578
2579 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2580 return NULL;
2581
2582 spin_lock(&_minor_lock);
2583
2584 md = idr_find(&_minor_idr, minor);
2476 if (md && (md == MINOR_ALLOCED ||
2477 (MINOR(disk_devt(dm_disk(md))) != minor) ||
2478 dm_deleting_md(md) ||
2479 test_bit(DMF_FREEING, &md->flags))) {
2480 md = NULL;
2481 goto out;
2585 if (md) {
2586 if ((md == MINOR_ALLOCED ||
2587 (MINOR(disk_devt(dm_disk(md))) != minor) ||
2588 dm_deleting_md(md) ||
2589 test_bit(DMF_FREEING, &md->flags))) {
2590 md = NULL;
2591 goto out;
2592 }
2593 dm_get(md);
2482 }
2483
2484out:
2485 spin_unlock(&_minor_lock);
2486
2487 return md;
2488}
2594 }
2595
2596out:
2597 spin_unlock(&_minor_lock);
2598
2599 return md;
2600}
2489
2490struct mapped_device *dm_get_md(dev_t dev)
2491{
2492 struct mapped_device *md = dm_find_md(dev);
2493
2494 if (md)
2495 dm_get(md);
2496
2497 return md;
2498}
2499EXPORT_SYMBOL_GPL(dm_get_md);
2500
2501void *dm_get_mdptr(struct mapped_device *md)
2502{
2503 return md->interface_ptr;
2504}
2505
2506void dm_set_mdptr(struct mapped_device *md, void *ptr)

--- 21 unchanged lines hidden ---

2528 might_sleep();
2529
2530 spin_lock(&_minor_lock);
2531 map = dm_get_live_table(md, &srcu_idx);
2532 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2533 set_bit(DMF_FREEING, &md->flags);
2534 spin_unlock(&_minor_lock);
2535
2601EXPORT_SYMBOL_GPL(dm_get_md);
2602
2603void *dm_get_mdptr(struct mapped_device *md)
2604{
2605 return md->interface_ptr;
2606}
2607
2608void dm_set_mdptr(struct mapped_device *md, void *ptr)

--- 21 unchanged lines hidden ---

2630 might_sleep();
2631
2632 spin_lock(&_minor_lock);
2633 map = dm_get_live_table(md, &srcu_idx);
2634 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2635 set_bit(DMF_FREEING, &md->flags);
2636 spin_unlock(&_minor_lock);
2637
2638 if (dm_request_based(md))
2639 flush_kthread_worker(&md->kworker);
2640
2536 if (!dm_suspended_md(md)) {
2537 dm_table_presuspend_targets(map);
2538 dm_table_postsuspend_targets(map);
2539 }
2540
2541 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2542 dm_put_live_table(md, srcu_idx);
2543

--- 227 unchanged lines hidden ---

2771 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2772 if (map)
2773 synchronize_srcu(&md->io_barrier);
2774
2775 /*
2776 * Stop md->queue before flushing md->wq in case request-based
2777 * dm defers requests to md->wq from md->queue.
2778 */
2641 if (!dm_suspended_md(md)) {
2642 dm_table_presuspend_targets(map);
2643 dm_table_postsuspend_targets(map);
2644 }
2645
2646 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2647 dm_put_live_table(md, srcu_idx);
2648

--- 227 unchanged lines hidden ---

2876 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2877 if (map)
2878 synchronize_srcu(&md->io_barrier);
2879
2880 /*
2881 * Stop md->queue before flushing md->wq in case request-based
2882 * dm defers requests to md->wq from md->queue.
2883 */
2779 if (dm_request_based(md))
2884 if (dm_request_based(md)) {
2780 stop_queue(md->queue);
2885 stop_queue(md->queue);
2886 flush_kthread_worker(&md->kworker);
2887 }
2781
2782 flush_workqueue(md->wq);
2783
2784 /*
2785 * At this point no more requests are entering target request routines.
2786 * We call dm_wait_for_completion to wait for all existing requests
2787 * to finish.
2788 */

--- 329 unchanged lines hidden ---

3118 return __noflush_suspending(dm_table_get_md(ti->table));
3119}
3120EXPORT_SYMBOL_GPL(dm_noflush_suspending);
3121
3122struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
3123{
3124 struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
3125 struct kmem_cache *cachep;
2888
2889 flush_workqueue(md->wq);
2890
2891 /*
2892 * At this point no more requests are entering target request routines.
2893 * We call dm_wait_for_completion to wait for all existing requests
2894 * to finish.
2895 */

--- 329 unchanged lines hidden ---

3225 return __noflush_suspending(dm_table_get_md(ti->table));
3226}
3227EXPORT_SYMBOL_GPL(dm_noflush_suspending);
3228
3229struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
3230{
3231 struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
3232 struct kmem_cache *cachep;
3126 unsigned int pool_size;
3233 unsigned int pool_size = 0;
3127 unsigned int front_pad;
3128
3129 if (!pools)
3130 return NULL;
3131
3234 unsigned int front_pad;
3235
3236 if (!pools)
3237 return NULL;
3238
3132 if (type == DM_TYPE_BIO_BASED) {
3239 switch (type) {
3240 case DM_TYPE_BIO_BASED:
3133 cachep = _io_cache;
3134 pool_size = dm_get_reserved_bio_based_ios();
3135 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
3241 cachep = _io_cache;
3242 pool_size = dm_get_reserved_bio_based_ios();
3243 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
3136 } else if (type == DM_TYPE_REQUEST_BASED) {
3137 cachep = _rq_tio_cache;
3244 break;
3245 case DM_TYPE_REQUEST_BASED:
3138 pool_size = dm_get_reserved_rq_based_ios();
3246 pool_size = dm_get_reserved_rq_based_ios();
3247 pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
3248 if (!pools->rq_pool)
3249 goto out;
3250 /* fall through to setup remaining rq-based pools */
3251 case DM_TYPE_MQ_REQUEST_BASED:
3252 cachep = _rq_tio_cache;
3253 if (!pool_size)
3254 pool_size = dm_get_reserved_rq_based_ios();
3139 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
3140 /* per_bio_data_size is not used. See __bind_mempools(). */
3141 WARN_ON(per_bio_data_size != 0);
3255 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
3256 /* per_bio_data_size is not used. See __bind_mempools(). */
3257 WARN_ON(per_bio_data_size != 0);
3142 } else
3258 break;
3259 default:
3143 goto out;
3260 goto out;
3261 }
3144
3145 pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
3146 if (!pools->io_pool)
3147 goto out;
3148
3149 pools->bs = bioset_create_nobvec(pool_size, front_pad);
3150 if (!pools->bs)
3151 goto out;

--- 12 unchanged lines hidden ---

3164void dm_free_md_mempools(struct dm_md_mempools *pools)
3165{
3166 if (!pools)
3167 return;
3168
3169 if (pools->io_pool)
3170 mempool_destroy(pools->io_pool);
3171
3262
3263 pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
3264 if (!pools->io_pool)
3265 goto out;
3266
3267 pools->bs = bioset_create_nobvec(pool_size, front_pad);
3268 if (!pools->bs)
3269 goto out;

--- 12 unchanged lines hidden ---

3282void dm_free_md_mempools(struct dm_md_mempools *pools)
3283{
3284 if (!pools)
3285 return;
3286
3287 if (pools->io_pool)
3288 mempool_destroy(pools->io_pool);
3289
3290 if (pools->rq_pool)
3291 mempool_destroy(pools->rq_pool);
3292
3172 if (pools->bs)
3173 bioset_free(pools->bs);
3174
3175 kfree(pools);
3176}
3177
3178static const struct block_device_operations dm_blk_dops = {
3179 .open = dm_blk_open,

--- 24 unchanged lines hidden ---
3293 if (pools->bs)
3294 bioset_free(pools->bs);
3295
3296 kfree(pools);
3297}
3298
3299static const struct block_device_operations dm_blk_dops = {
3300 .open = dm_blk_open,

--- 24 unchanged lines hidden ---