dm.c: c5248f79f39e5254977a3916b2149c3ccffa2722 (old) → eca7ee6dc01b21c669bce8c39d3d368509fb65e8 (new)
1/*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include "dm.h"

--- 319 unchanged lines hidden ---

328 _io_cache = KMEM_CACHE(dm_io, 0);
329 if (!_io_cache)
330 return r;
331
332 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
333 if (!_rq_tio_cache)
334 goto out_free_io_cache;
335
336 _rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request),
336 _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
337 __alignof__(struct request), 0, NULL);
338 if (!_rq_cache)
339 goto out_free_rq_tio_cache;
340
341 r = dm_uevent_init();
342 if (r)
343 goto out_free_rq_cache;
344

--- 302 unchanged lines hidden ---

647 mempool_free(io, md->io_pool);
648}
649
650static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
651{
652 bio_put(&tio->clone);
653}
654
655static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
656 gfp_t gfp_mask)
655static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
656 gfp_t gfp_mask)
657{
658 return mempool_alloc(md->io_pool, gfp_mask);
659}
660
661static void free_rq_tio(struct dm_rq_target_io *tio)
661static void free_old_rq_tio(struct dm_rq_target_io *tio)
662{
663 mempool_free(tio, tio->md->io_pool);
664}
665
666static struct request *alloc_clone_request(struct mapped_device *md,
667 gfp_t gfp_mask)
666static struct request *alloc_old_clone_request(struct mapped_device *md,
667 gfp_t gfp_mask)
668{
669 return mempool_alloc(md->rq_pool, gfp_mask);
670}
671
672static void free_clone_request(struct mapped_device *md, struct request *rq)
672static void free_old_clone_request(struct mapped_device *md, struct request *rq)
673{
674 mempool_free(rq, md->rq_pool);
675}
676
677static int md_in_flight(struct mapped_device *md)
678{
679 return atomic_read(&md->pending[READ]) +
680 atomic_read(&md->pending[WRITE]);

--- 454 unchanged lines hidden ---

1135
1136 blk_rq_unprep_clone(clone);
1137
1138 if (md->type == DM_TYPE_MQ_REQUEST_BASED)
1139 /* stacked on blk-mq queue(s) */
1140 tio->ti->type->release_clone_rq(clone);
1141 else if (!md->queue->mq_ops)
1142 /* request_fn queue stacked on request_fn queue(s) */
1143 free_clone_request(md, clone);
1143 free_old_clone_request(md, clone);
1144
1145 if (!md->queue->mq_ops)
1146 free_rq_tio(tio);
1146 free_old_rq_tio(tio);
1147}
1148
1149/*
1150 * Complete the clone and the original request.
1151 * Must be called without clone's queue lock held,
1152 * see end_clone_request() for more details.
1153 */
1154static void dm_end_request(struct request *clone, int error)

--- 33 unchanged lines hidden ---

1188 if (!rq->q->mq_ops) {
1189 rq->special = NULL;
1190 rq->cmd_flags &= ~REQ_DONTPREP;
1191 }
1192
1193 if (clone)
1194 free_rq_clone(clone);
1195 else if (!tio->md->queue->mq_ops)
1196 free_rq_tio(tio);
1196 free_old_rq_tio(tio);
1197}
1198
1199/*
1200 * Requeue the original request of a clone.
1201 */
1202static void old_requeue_request(struct request *rq)
1202static void dm_old_requeue_request(struct request *rq)
1203{
1204 struct request_queue *q = rq->q;
1205 unsigned long flags;
1206
1207 spin_lock_irqsave(q->queue_lock, flags);
1208 blk_requeue_request(q, rq);
1209 blk_run_queue_async(q);
1210 spin_unlock_irqrestore(q->queue_lock, flags);

--- 15 unchanged lines hidden ---

1226 struct request *rq)
1227{
1228 int rw = rq_data_dir(rq);
1229
1230 dm_unprep_request(rq);
1231
1232 rq_end_stats(md, rq);
1233 if (!rq->q->mq_ops)
1234 old_requeue_request(rq);
1234 dm_old_requeue_request(rq);
1235 else
1236 dm_mq_requeue_request(rq);
1237
1238 rq_completed(md, rw, false);
1239}
1240
1241static void old_stop_queue(struct request_queue *q)
1241static void dm_old_stop_queue(struct request_queue *q)
1242{
1243 unsigned long flags;
1244
1245 spin_lock_irqsave(q->queue_lock, flags);
1246 if (blk_queue_stopped(q)) {
1247 spin_unlock_irqrestore(q->queue_lock, flags);
1248 return;
1249 }
1250
1251 blk_stop_queue(q);
1252 spin_unlock_irqrestore(q->queue_lock, flags);
1253}
1254
1255static void stop_queue(struct request_queue *q)
1255static void dm_stop_queue(struct request_queue *q)
1256{
1257 if (!q->mq_ops)
1258 old_stop_queue(q);
1258 dm_old_stop_queue(q);
1259 else
1260 blk_mq_stop_hw_queues(q);
1261}
1262
1263static void old_start_queue(struct request_queue *q)
1263static void dm_old_start_queue(struct request_queue *q)
1264{
1265 unsigned long flags;
1266
1267 spin_lock_irqsave(q->queue_lock, flags);
1268 if (blk_queue_stopped(q))
1269 blk_start_queue(q);
1270 spin_unlock_irqrestore(q->queue_lock, flags);
1271}
1272
1273static void start_queue(struct request_queue *q)
1273static void dm_start_queue(struct request_queue *q)
1274{
1275 if (!q->mq_ops)
1276 old_start_queue(q);
1276 dm_old_start_queue(q);
1277 else {
1278 blk_mq_start_stopped_hw_queues(q, true);
1279 blk_mq_kick_requeue_list(q);
1280 }
1281}
1282
1283static void dm_done(struct request *clone, int error, bool mapped)
1284{

--- 38 unchanged lines hidden ---

1323 int rw;
1324
1325 if (!clone) {
1326 rq_end_stats(tio->md, rq);
1327 rw = rq_data_dir(rq);
1328 if (!rq->q->mq_ops) {
1329 blk_end_request_all(rq, tio->error);
1330 rq_completed(tio->md, rw, false);
1331 free_rq_tio(tio);
1331 free_old_rq_tio(tio);
1332 } else {
1333 blk_mq_end_request(rq, tio->error);
1334 rq_completed(tio->md, rw, false);
1335 }
1336 return;
1337 }
1338
1339 if (rq->cmd_flags & REQ_FAILED)

--- 25 unchanged lines hidden ---

1365 */
1366static void dm_kill_unmapped_request(struct request *rq, int error)
1367{
1368 rq->cmd_flags |= REQ_FAILED;
1369 dm_complete_request(rq, error);
1370}
1371
1372/*
1373 * Called with the clone's queue lock held (for non-blk-mq)
1373 * Called with the clone's queue lock held (in the case of .request_fn)
1374 */
1375static void end_clone_request(struct request *clone, int error)
1376{
1377 struct dm_rq_target_io *tio = clone->end_io_data;
1378
1379 if (!clone->q->mq_ops) {
1380 /*
1381 * For just cleaning up the information of the queue in which

--- 470 unchanged lines hidden ---

1852 clone->end_io = end_clone_request;
1853 clone->end_io_data = tio;
1854
1855 tio->clone = clone;
1856
1857 return 0;
1858}
1859
1860static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1861 struct dm_rq_target_io *tio, gfp_t gfp_mask)
1860static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
1861 struct dm_rq_target_io *tio, gfp_t gfp_mask)
1862{
1863 /*
1864 * Create clone for use with .request_fn request_queue
1865 */
1866 struct request *clone;
1867
1868 clone = alloc_clone_request(md, gfp_mask);
1868 clone = alloc_old_clone_request(md, gfp_mask);
1869 if (!clone)
1870 return NULL;
1871
1872 blk_rq_init(NULL, clone);
1873 if (setup_clone(clone, rq, tio, gfp_mask)) {
1874 /* -ENOMEM */
1875 free_clone_request(md, clone);
1875 free_old_clone_request(md, clone);
1876 return NULL;
1877 }
1878
1879 return clone;
1880}
1881
1882static void map_tio_request(struct kthread_work *work);
1883

--- 5 unchanged lines hidden ---

1889 tio->clone = NULL;
1890 tio->orig = rq;
1891 tio->error = 0;
1892 memset(&tio->info, 0, sizeof(tio->info));
1893 if (md->kworker_task)
1894 init_kthread_work(&tio->work, map_tio_request);
1895}
1896
1897static struct dm_rq_target_io *prep_tio(struct request *rq,
1898 struct mapped_device *md, gfp_t gfp_mask)
1897static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
1898 struct mapped_device *md,
1899 gfp_t gfp_mask)
1899{
1900 struct dm_rq_target_io *tio;
1901 int srcu_idx;
1902 struct dm_table *table;
1903
1904 tio = alloc_rq_tio(md, gfp_mask);
1905 tio = alloc_old_rq_tio(md, gfp_mask);
1905 if (!tio)
1906 return NULL;
1907
1908 init_tio(tio, rq, md);
1909
1910 table = dm_get_live_table(md, &srcu_idx);
1912 /*
1913 * Must clone a request if this .request_fn DM device
1914 * is stacked on .request_fn device(s).
1915 */
1911 if (!dm_table_mq_request_based(table)) {
1912 if (!clone_rq(rq, md, tio, gfp_mask)) {
1917 if (!clone_old_rq(rq, md, tio, gfp_mask)) {
1913 dm_put_live_table(md, srcu_idx);
1914 free_rq_tio(tio);
1919 free_old_rq_tio(tio);
1915 return NULL;
1916 }
1917 }
1918 dm_put_live_table(md, srcu_idx);
1919
1920 return tio;
1921}
1922
1923/*
1924 * Called with the queue lock held.
1925 */
1926static int dm_prep_fn(struct request_queue *q, struct request *rq)
1931static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
1927{
1928 struct mapped_device *md = q->queuedata;
1929 struct dm_rq_target_io *tio;
1930
1931 if (unlikely(rq->special)) {
1932 DMWARN("Already has something in rq->special.");
1933 return BLKPREP_KILL;
1934 }
1935
1936 tio = prep_tio(rq, md, GFP_ATOMIC);
1941 tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
1937 if (!tio)
1938 return BLKPREP_DEFER;
1939
1940 rq->special = tio;
1941 rq->cmd_flags |= REQ_DONTPREP;
1942
1943 return BLKPREP_OK;
1944}

--- 286 unchanged lines hidden ---

2231 /*
2232 * Initialize data that will only be used by a non-blk-mq DM queue
2233 * - must do so here (in alloc_dev callchain) before queue is used
2234 */
2235 md->queue->queuedata = md;
2236 md->queue->backing_dev_info.congested_data = md;
2237}
2238
2239static void dm_init_old_md_queue(struct mapped_device *md)
2244static void dm_init_normal_md_queue(struct mapped_device *md)
2240{
2241 md->use_blk_mq = false;
2242 dm_init_md_queue(md);
2243
2244 /*
2245 * Initialize aspects of queue that aren't relevant for blk-mq
2246 */
2247 md->queue->backing_dev_info.congested_fn = dm_any_congested;

--- 250 unchanged lines hidden ---

2498 /*
2499 * The queue hasn't been stopped yet, if the old table type wasn't
2500 * for request-based during suspension. So stop it to prevent
2501 * I/O mapping before resume.
2502 * This must be done before setting the queue restrictions,
2503 * because request-based dm may be run just after the setting.
2504 */
2505 if (dm_table_request_based(t)) {
2506 stop_queue(q);
2511 dm_stop_queue(q);
2507 /*
2508 * Leverage the fact that request-based DM targets are
2509 * immutable singletons and establish md->immutable_target
2510 * - used to optimize both dm_request_fn and dm_mq_queue_rq
2511 */
2512 md->immutable_target = dm_table_get_immutable_target(t);
2513 }
2514

--- 80 unchanged lines hidden ---

2595 */
2596struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2597{
2598 BUG_ON(!atomic_read(&md->holders));
2599 return &md->queue->limits;
2600}
2601EXPORT_SYMBOL_GPL(dm_get_queue_limits);
2602
2603static void init_rq_based_worker_thread(struct mapped_device *md)
2608static void dm_old_init_rq_based_worker_thread(struct mapped_device *md)
2604{
2605 /* Initialize the request-based DM worker thread */
2606 init_kthread_worker(&md->kworker);
2607 md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
2608 "kdmwork-%s", dm_device_name(md));
2609}
2610
2611/*
2612 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2617 * Fully initialize a .request_fn request-based queue.
2613 */
2614static int dm_init_request_based_queue(struct mapped_device *md)
2619static int dm_old_init_request_queue(struct mapped_device *md)
2615{
2616 struct request_queue *q = NULL;
2617
2618 /* Fully initialize the queue */
2619 q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2620 if (!q)
2621 return -EINVAL;
2622
2623 /* disable dm_request_fn's merge heuristic by default */
2624 md->seq_rq_merge_deadline_usecs = 0;
2625
2626 md->queue = q;
2627 dm_init_old_md_queue(md);
2632 dm_init_normal_md_queue(md);
2628 blk_queue_softirq_done(md->queue, dm_softirq_done);
2629 blk_queue_prep_rq(md->queue, dm_prep_fn);
2634 blk_queue_prep_rq(md->queue, dm_old_prep_fn);
2630
2631 init_rq_based_worker_thread(md);
2636 dm_old_init_rq_based_worker_thread(md);
2632
2633 elv_register_queue(md->queue);
2634
2635 return 0;
2636}
2637
2638static int dm_mq_init_request(void *data, struct request *rq,
2639 unsigned int hctx_idx, unsigned int request_idx,

--- 54 unchanged lines hidden ---

2694
2695static struct blk_mq_ops dm_mq_ops = {
2696 .queue_rq = dm_mq_queue_rq,
2697 .map_queue = blk_mq_map_queue,
2698 .complete = dm_softirq_done,
2699 .init_request = dm_mq_init_request,
2700};
2701
2702static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
2707static int dm_mq_init_request_queue(struct mapped_device *md)
2703{
2704 unsigned md_type = dm_get_md_type(md);
2705 struct request_queue *q;
2706 int err;
2707
2708 if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
2709 DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
2710 return -EINVAL;
2711 }
2712

--- 48 unchanged lines hidden ---

2761 */
2762int dm_setup_md_queue(struct mapped_device *md)
2763{
2764 int r;
2765 unsigned md_type = filter_md_type(dm_get_md_type(md), md);
2766
2767 switch (md_type) {
2768 case DM_TYPE_REQUEST_BASED:
2769 r = dm_init_request_based_queue(md);
2773 r = dm_old_init_request_queue(md);
2770 if (r) {
2771 DMWARN("Cannot initialize queue for request-based mapped device");
2775 DMERR("Cannot initialize queue for request-based mapped device");
2772 return r;
2773 }
2774 break;
2775 case DM_TYPE_MQ_REQUEST_BASED:
2776 r = dm_init_request_based_blk_mq_queue(md);
2780 r = dm_mq_init_request_queue(md);
2777 if (r) {
2778 DMWARN("Cannot initialize queue for request-based blk-mq mapped device");
2782 DMERR("Cannot initialize queue for request-based dm-mq mapped device");
2779 return r;
2780 }
2781 break;
2782 case DM_TYPE_BIO_BASED:
2783 dm_init_old_md_queue(md);
2787 dm_init_normal_md_queue(md);
2784 blk_queue_make_request(md->queue, dm_make_request);
2785 /*
2786 * DM handles splitting bios as needed. Free the bio_split bioset
2787 * since it won't be used (saves 1 process per bio-based DM device).
2788 */
2789 bioset_free(md->queue->bio_split);
2790 md->queue->bio_split = NULL;
2791 break;

--- 326 unchanged lines hidden ---

3118 if (map)
3119 synchronize_srcu(&md->io_barrier);
3120
3121 /*
3122 * Stop md->queue before flushing md->wq in case request-based
3123 * dm defers requests to md->wq from md->queue.
3124 */
3125 if (dm_request_based(md)) {
3126 stop_queue(md->queue);
3130 dm_stop_queue(md->queue);
3127 if (md->kworker_task)
3128 flush_kthread_worker(&md->kworker);
3129 }
3130
3131 flush_workqueue(md->wq);
3132
3133 /*
3134 * At this point no more requests are entering target request routines.

--- 7 unchanged lines hidden ---

3142 if (map)
3143 synchronize_srcu(&md->io_barrier);
3144
3145 /* were we interrupted ? */
3146 if (r < 0) {
3147 dm_queue_flush(md);
3148
3149 if (dm_request_based(md))
3150 start_queue(md->queue);
3154 dm_start_queue(md->queue);
3151
3152 unlock_fs(md);
3153 dm_table_presuspend_undo_targets(map);
3154 /* pushback list is already flushed, so skip flush */
3155 }
3156
3157 return r;
3158}

--- 62 unchanged lines hidden ---

3221 dm_queue_flush(md);
3222
3223 /*
3224 * Flushing deferred I/Os must be done after targets are resumed
3225 * so that mapping of targets can work correctly.
3226 * Request-based dm is queueing the deferred I/Os in its request_queue.
3227 */
3228 if (dm_request_based(md))
3229 start_queue(md->queue);
3233 dm_start_queue(md->queue);
3230
3231 unlock_fs(md);
3232
3233 return 0;
3234}
3235
3236int dm_resume(struct mapped_device *md)
3237{

--- 460 unchanged lines hidden ---