--- dm.c (5f1b670d0bef508a5554d92525f5f6d00d640b38)
+++ dm.c (4ae9944d132b160d444fa3aa875307eb0fa3eeec)
 /*
  * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */

 #include "dm.h"

--- 976 unchanged lines hidden ---

 	if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
 		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
 		disable_write_same(md);

 	free_tio(md, tio);
 	dec_pending(io, error);
 }

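An aside on the fragment above: when a WRITE SAME request fails with -EREMOTEIO and the underlying queue no longer advertises any WRITE SAME capacity, dm stops issuing WRITE SAME to this device rather than retrying. A minimal stand-alone C sketch of that disable-on-error pattern; the types and field names below are stand-ins, not the kernel's:

#include <errno.h>

struct queue_limits_sketch { unsigned int max_write_same_sectors; };
struct device_sketch {
	struct queue_limits_sketch limits;
	int write_same_enabled;
};

/* If a WRITE SAME came back -EREMOTEIO and the lower queue now reports
 * zero WRITE SAME capacity, turn the feature off for this device. */
static void maybe_disable_write_same(struct device_sketch *dev,
				     int error, int was_write_same)
{
	if (error == -EREMOTEIO && was_write_same &&
	    !dev->limits.max_write_same_sectors)
		dev->write_same_enabled = 0;
}
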
+/*
+ * Partial completion handling for request-based dm
+ */
+static void end_clone_bio(struct bio *clone, int error)
+{
+	struct dm_rq_clone_bio_info *info =
+		container_of(clone, struct dm_rq_clone_bio_info, clone);
+	struct dm_rq_target_io *tio = info->tio;
+	struct bio *bio = info->orig;
+	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
+
+	bio_put(clone);
+
+	if (tio->error)
+		/*
+		 * An error has already been detected on the request.
+		 * Once error occurred, just let clone->end_io() handle
+		 * the remainder.
+		 */
+		return;
+	else if (error) {
+		/*
+		 * Don't notice the error to the upper layer yet.
+		 * The error handling decision is made by the target driver,
+		 * when the request is completed.
+		 */
+		tio->error = error;
+		return;
+	}
+
+	/*
+	 * I/O for the bio successfully completed.
+	 * Notice the data completion to the upper layer.
+	 */
+
+	/*
+	 * bios are processed from the head of the list.
+	 * So the completing bio should always be rq->bio.
+	 * If it's not, something wrong is happening.
+	 */
+	if (tio->orig->bio != bio)
+		DMERR("bio completion is going in the middle of the request");
+
+	/*
+	 * Update the original request.
+	 * Do not use blk_end_request() here, because it may complete
+	 * the original request before the clone, and break the ordering.
+	 */
+	blk_update_request(tio->orig, 0, nr_bytes);
+}
+
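A hedged aside on the pattern the re-added end_clone_bio() relies on: the clone bio is embedded as a member of struct dm_rq_clone_bio_info, so the completion handler recovers its bookkeeping from the bio pointer alone via container_of(), with no lookup table. A minimal stand-alone C sketch of that recovery; the struct bio stand-in and names are illustrative, not the kernel definitions:

#include <stddef.h>
#include <stdio.h>

struct bio { int bi_size; };			/* stand-in, not the kernel's */

struct clone_bio_info {				/* cf. dm_rq_clone_bio_info */
	struct bio *orig;			/* original bio (info->orig) */
	void *tio;				/* per-request data (info->tio) */
	struct bio clone;			/* the embedded clone bio */
};

/* Userspace rendition of the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct clone_bio_info *info_from_clone(struct bio *clone)
{
	/* No allocation, no lookup: just subtract the member offset. */
	return container_of(clone, struct clone_bio_info, clone);
}

int main(void)
{
	struct bio orig = { 4096 };
	struct clone_bio_info info = { .orig = &orig };

	/* Given only &info.clone, recover the wrapper and its fields. */
	printf("orig bi_size = %d\n", info_from_clone(&info.clone)->orig->bi_size);
	return 0;
}
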
 static struct dm_rq_target_io *tio_from_request(struct request *rq)
 {
 	return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
 }

 /*
  * Don't touch any member of the md after calling this function because
  * the md may be freed in dm_put() at the end of this function.

--- 32 unchanged lines hidden ---

 static void free_rq_clone(struct request *clone, bool must_be_mapped)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;

 	WARN_ON_ONCE(must_be_mapped && !clone->q);

+	blk_rq_unprep_clone(clone);
+
 	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
 		/* stacked on blk-mq queue(s) */
 		tio->ti->type->release_clone_rq(clone);
 	else if (!md->queue->mq_ops)
 		/* request_fn queue stacked on request_fn queue(s) */
 		free_clone_request(md, clone);
 	/*
 	 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:

--- 57 unchanged lines hidden ---

  */
 static void old_requeue_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 	unsigned long flags;

 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_requeue_request(q, rq);
+	blk_run_queue_async(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }

 static void dm_requeue_unmapped_original_request(struct mapped_device *md,
 						 struct request *rq)
 {
 	int rw = rq_data_dir(rq);

--- 641 unchanged lines hidden ---

 	clone->start_time = jiffies;
 	r = blk_insert_cloned_request(clone->q, clone);
 	if (r)
 		/* must complete clone in terms of original request */
 		dm_complete_request(rq, r);
 }

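The fragment above is the dispatch step: the clone is inserted into the underlying device's queue, and an insertion error is reported by completing the original request, never the clone. A hedged sketch of that ownership rule with stand-in types (illustrative only):

#include <stdio.h>

struct req { int error; int completed; };

/* The rule encoded by dm_complete_request(rq, r) above: the outcome is
 * always reported on the original request; the clone is only a vehicle
 * and is torn down separately. */
static void complete_original(struct req *orig, int error)
{
	orig->error = error;
	orig->completed = 1;
}

static void dispatch_sketch(struct req *orig, int insert_error)
{
	if (insert_error)
		complete_original(orig, insert_error);
	/* on success, completion arrives later via the clone's end_io */
}

int main(void)
{
	struct req orig = { 0, 0 };

	dispatch_sketch(&orig, -5 /* e.g. -EIO */);
	printf("completed=%d error=%d\n", orig.completed, orig.error);
	return 0;
}
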
-static void setup_clone(struct request *clone, struct request *rq,
-			struct dm_rq_target_io *tio)
-{
-	blk_rq_prep_clone(clone, rq);
+static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
+				 void *data)
+{
+	struct dm_rq_target_io *tio = data;
+	struct dm_rq_clone_bio_info *info =
+		container_of(bio, struct dm_rq_clone_bio_info, clone);
+
+	info->orig = bio_orig;
+	info->tio = tio;
+	bio->bi_end_io = end_clone_bio;
+
+	return 0;
+}
+
+static int setup_clone(struct request *clone, struct request *rq,
+		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
+{
+	int r;
+
+	r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
+			      dm_rq_bio_constructor, tio);
+	if (r)
+		return r;
+
+	clone->cmd = rq->cmd;
+	clone->cmd_len = rq->cmd_len;
+	clone->sense = rq->sense;
 	clone->end_io = end_clone_request;
 	clone->end_io_data = tio;
+
 	tio->clone = clone;
+
+	return 0;
 }

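The rewritten setup_clone() hands blk_rq_prep_clone() a bio_set (md->bs, whose front_pad reserves a struct dm_rq_clone_bio_info ahead of each bio) and a constructor callback with the same shape as dm_rq_bio_constructor() above. A hedged sketch of that contract with stand-in types, not the block layer's implementation: allocate one clone per original bio, let the constructor wire it up, and treat a non-zero return as an abort the caller unwinds.

#include <stdlib.h>

struct bio { struct bio *bi_next; };	/* stand-in, chained via bi_next */

/* Same shape as dm_rq_bio_constructor(): (clone, original, private data). */
typedef int (*bio_constructor_t)(struct bio *clone, struct bio *orig,
				 void *data);

static int prep_clone_sketch(struct bio *orig_list, struct bio **clones_out,
			     bio_constructor_t ctor, void *data)
{
	struct bio *orig, *clone, **tail = clones_out;

	*clones_out = NULL;
	for (orig = orig_list; orig; orig = orig->bi_next) {
		clone = calloc(1, sizeof(*clone));	/* bioset alloc in dm */
		if (!clone || ctor(clone, orig, data)) {
			free(clone);
			return -1;	/* caller unwinds clones made so far */
		}
		*tail = clone;
		tail = &clone->bi_next;
	}
	return 0;
}
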
 static struct request *clone_rq(struct request *rq, struct mapped_device *md,
 				struct dm_rq_target_io *tio, gfp_t gfp_mask)
 {
 	/*
 	 * Do not allocate a clone if tio->clone was already set
 	 * (see: dm_mq_queue_rq).

--- 4 unchanged lines hidden ---

 	if (alloc_clone) {
 		clone = alloc_clone_request(md, gfp_mask);
 		if (!clone)
 			return NULL;
 	} else
 		clone = tio->clone;

 	blk_rq_init(NULL, clone);
-	setup_clone(clone, rq, tio);
+	if (setup_clone(clone, rq, tio, gfp_mask)) {
+		/* -ENOMEM */
+		if (alloc_clone)
+			free_clone_request(md, clone);
+		return NULL;
+	}

 	return clone;
 }

 static void map_tio_request(struct kthread_work *work);

 static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
 		     struct mapped_device *md)

--- 77 unchanged lines hidden ---

 	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
 	if (r < 0) {
 		/* The target wants to complete the I/O */
 		dm_kill_unmapped_request(rq, r);
 		return r;
 	}
 	if (IS_ERR(clone))
 		return DM_MAPIO_REQUEUE;
-	setup_clone(clone, rq, tio);
+	if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
+		/* -ENOMEM */
+		ti->type->release_clone_rq(clone);
+		return DM_MAPIO_REQUEUE;
+	}
 	}

 	switch (r) {
 	case DM_MAPIO_SUBMITTED:
 		/* The target has taken the I/O to submit by itself later */
 		break;
 	case DM_MAPIO_REMAPPED:
 		/* The target has remapped the I/O so dispatch it */

--- 437 unchanged lines hidden ---

 	 * Note for future: If you are to reload bioset,
 	 * prep-ed requests in the queue may refer
 	 * to bio from the old bioset, so you must walk
 	 * through the queue to unprep.
 	 */
 		goto out;
 	}

+	BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
+
 	md->io_pool = p->io_pool;
 	p->io_pool = NULL;
 	md->rq_pool = p->rq_pool;
 	p->rq_pool = NULL;
 	md->bs = p->bs;
 	p->bs = NULL;

 out:

--- 1087 unchanged lines hidden ---

 EXPORT_SYMBOL_GPL(dm_suspended);

 int dm_noflush_suspending(struct dm_target *ti)
 {
 	return __noflush_suspending(dm_table_get_md(ti->table));
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);

-struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity,
-					     unsigned per_bio_data_size)
+struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
+					    unsigned integrity, unsigned per_bio_data_size)
 {
-	struct dm_md_mempools *pools;
-	unsigned int pool_size = dm_get_reserved_bio_based_ios();
+	struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
+	struct kmem_cache *cachep = NULL;
+	unsigned int pool_size = 0;
 	unsigned int front_pad;

-	pools = kzalloc(sizeof(*pools), GFP_KERNEL);
 	if (!pools)
 		return NULL;

-	front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) +
-		offsetof(struct dm_target_io, clone);
+	type = filter_md_type(type, md);

-	pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
-	if (!pools->io_pool)
-		goto out;
+	switch (type) {
+	case DM_TYPE_BIO_BASED:
+		cachep = _io_cache;
+		pool_size = dm_get_reserved_bio_based_ios();
+		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
+		break;
+	case DM_TYPE_REQUEST_BASED:
+		cachep = _rq_tio_cache;
+		pool_size = dm_get_reserved_rq_based_ios();
+		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
+		if (!pools->rq_pool)
+			goto out;
+		/* fall through to setup remaining rq-based pools */
+	case DM_TYPE_MQ_REQUEST_BASED:
+		if (!pool_size)
+			pool_size = dm_get_reserved_rq_based_ios();
+		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
+		/* per_bio_data_size is not used. See __bind_mempools(). */
+		WARN_ON(per_bio_data_size != 0);
+		break;
+	default:
+		BUG();
+	}
+
+	if (cachep) {
+		pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
+		if (!pools->io_pool)
+			goto out;
+	}

 	pools->bs = bioset_create_nobvec(pool_size, front_pad);
 	if (!pools->bs)
 		goto out;

 	if (integrity && bioset_integrity_create(pools->bs, pool_size))
 		goto out;

 	return pools;
-out:
-	dm_free_md_mempools(pools);
-	return NULL;
-}
-
-struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md,
-					    unsigned type)
-{
-	unsigned int pool_size = dm_get_reserved_rq_based_ios();
-	struct dm_md_mempools *pools;
-
-	pools = kzalloc(sizeof(*pools), GFP_KERNEL);
-	if (!pools)
-		return NULL;
-
-	if (filter_md_type(type, md) == DM_TYPE_REQUEST_BASED) {
-		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
-		if (!pools->rq_pool)
-			goto out;
-	}
-
-	pools->io_pool = mempool_create_slab_pool(pool_size, _rq_tio_cache);
-	if (!pools->io_pool)
-		goto out;
-
-	return pools;
+
 out:
 	dm_free_md_mempools(pools);
+
 	return NULL;
 }

 void dm_free_md_mempools(struct dm_md_mempools *pools)
 {
 	if (!pools)
 		return;

--- 41 unchanged lines hidden ---
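
Both arms of the new dm_alloc_md_mempools() compute front_pad so that every bio allocated from pools->bs carries device-mapper's per-bio structure (struct dm_target_io or struct dm_rq_clone_bio_info) in the bytes immediately before the bio itself. A hedged userspace sketch of that front-pad idea; the allocator and types below are stand-ins for the bioset machinery, not the kernel API:

#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>

struct bio { void *bi_private; };	/* stand-in for the kernel's bio */

struct per_bio_data {			/* cf. dm_rq_clone_bio_info */
	void *tio;
	struct bio clone;		/* the bio sits at the tail */
};

/* A front-padded allocator: the returned bio lives front_pad bytes into
 * a larger allocation, so its owner is found by plain subtraction. */
static struct bio *alloc_bio_front_padded(size_t front_pad)
{
	char *p = calloc(1, front_pad + sizeof(struct bio));

	return p ? (struct bio *)(p + front_pad) : NULL;
}

int main(void)
{
	size_t front_pad = offsetof(struct per_bio_data, clone);
	struct bio *bio = alloc_bio_front_padded(front_pad);
	struct per_bio_data *pbd;

	if (!bio)
		return 1;
	pbd = (struct per_bio_data *)((char *)bio - front_pad);
	pbd->tio = NULL;		/* the pad is the caller's to use */
	printf("front_pad = %zu bytes\n", front_pad);
	free(pbd);
	return 0;
}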