dm.c (2eb6e1e3aa873f2bb62075bebe17fa108ee07374) vs. dm.c (466d89a6bcd500f64896b514f78b32e8d0b0303a)
1/*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include "dm.h"

--- 1002 unchanged lines hidden (view full) ---

1011 blk_update_request(tio->orig, 0, nr_bytes);
1012}
1013
1014/*
1015 * Don't touch any member of the md after calling this function because
1016 * the md may be freed in dm_put() at the end of this function.
1017 * Or do dm_get() before calling this function and dm_put() later.
1018 */
1019static void rq_completed(struct mapped_device *md, int rw, int run_queue)
1019static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
1020{
1021 atomic_dec(&md->pending[rw]);
1022
1023 /* nudge anyone waiting on suspend queue */
1024 if (!md_in_flight(md))
1025 wake_up(&md->wait);
1026
1027 /*

--- 17 unchanged lines hidden (view full) ---

1045
1046 blk_rq_unprep_clone(clone);
1047 free_clone_request(tio->md, clone);
1048 free_rq_tio(tio);
1049}
1050
1051/*
1052 * Complete the clone and the original request.
1053 * Must be called without queue lock.
1053 * Must be called without clone's queue lock held,
1054 * see end_clone_request() for more details.
1054 */
1055static void dm_end_request(struct request *clone, int error)
1056{
1057 int rw = rq_data_dir(clone);
1058 struct dm_rq_target_io *tio = clone->end_io_data;
1059 struct mapped_device *md = tio->md;
1060 struct request *rq = tio->orig;
1061

--- 12 unchanged lines hidden (view full) ---

1074
1075 free_rq_clone(clone);
1076 blk_end_request_all(rq, error);
1077 rq_completed(md, rw, true);
1078}
1079
1080static void dm_unprep_request(struct request *rq)
1081{
1082 struct request *clone = rq->special;
1083 struct dm_rq_target_io *tio = rq->special;
1084 struct request *clone = tio->clone;
1083
1084 rq->special = NULL;
1085 rq->cmd_flags &= ~REQ_DONTPREP;
1086
1087 free_rq_clone(clone);
1088}
1089
1090/*
1091 * Requeue the original request of a clone.
1092 */
1093static void dm_requeue_unmapped_request(struct request *clone)
1095static void dm_requeue_unmapped_original_request(struct mapped_device *md,
1096 struct request *rq)
1094{
1095 int rw = rq_data_dir(clone);
1096 struct dm_rq_target_io *tio = clone->end_io_data;
1097 struct mapped_device *md = tio->md;
1098 struct request *rq = tio->orig;
1098 int rw = rq_data_dir(rq);
1099 struct request_queue *q = rq->q;
1100 unsigned long flags;
1101
1102 dm_unprep_request(rq);
1103
1104 spin_lock_irqsave(q->queue_lock, flags);
1105 blk_requeue_request(q, rq);
1106 spin_unlock_irqrestore(q->queue_lock, flags);
1107
1108 rq_completed(md, rw, 0);
1108 rq_completed(md, rw, false);
1109}
1110
1111static void dm_requeue_unmapped_request(struct request *clone)
1112{
1113 struct dm_rq_target_io *tio = clone->end_io_data;
1114
1115 dm_requeue_unmapped_original_request(tio->md, tio->orig);
1116}
1117
1111static void __stop_queue(struct request_queue *q)
1112{
1113 blk_stop_queue(q);
1114}
1115
1116static void stop_queue(struct request_queue *q)
1117{
1118 unsigned long flags;

--- 51 unchanged lines hidden (view full) ---

1170}
1171
1172/*
1173 * Request completion handler for request-based dm
1174 */
1175static void dm_softirq_done(struct request *rq)
1176{
1177 bool mapped = true;
1178 struct request *clone = rq->completion_data;
1179 struct dm_rq_target_io *tio = clone->end_io_data;
1185 struct dm_rq_target_io *tio = rq->special;
1186 struct request *clone = tio->clone;
1180
1181 if (rq->cmd_flags & REQ_FAILED)
1182 mapped = false;
1183
1184 dm_done(clone, tio->error, mapped);
1185}
1186
1187/*
1188 * Complete the clone and the original request with the error status
1189 * through softirq context.
1190 */
1191static void dm_complete_request(struct request *clone, int error)
1198static void dm_complete_request(struct request *rq, int error)
1192{
1193 struct dm_rq_target_io *tio = clone->end_io_data;
1194 struct request *rq = tio->orig;
1200 struct dm_rq_target_io *tio = rq->special;
1195
1196 tio->error = error;
1197 rq->completion_data = clone;
1198 blk_complete_request(rq);
1199}
1200
1201/*
1202 * Complete the not-mapped clone and the original request with the error status
1203 * through softirq context.
1204 * Target's rq_end_io() function isn't called.
1205 * This may be used when the target's map_rq() function fails.
1206 */
1207static void dm_kill_unmapped_request(struct request *clone, int error)
1212static void dm_kill_unmapped_request(struct request *rq, int error)
1208{
1209 struct dm_rq_target_io *tio = clone->end_io_data;
1210 struct request *rq = tio->orig;
1211
1212 rq->cmd_flags |= REQ_FAILED;
1213 dm_complete_request(clone, error);
1215 dm_complete_request(rq, error);
1214}
1215
1216/*
1217 * Called with the queue lock held
1219 * Called with the clone's queue lock held
1218 */
1219static void end_clone_request(struct request *clone, int error)
1220{
1223 struct dm_rq_target_io *tio = clone->end_io_data;
1224
1221 /*
1222 * For just cleaning up the information of the queue in which
1223 * the clone was dispatched.
1224 * The clone is *NOT* freed actually here because it is alloced from
1225 * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
1226 */
1227 __blk_put_request(clone->q, clone);
1228
1229 /*
1230 * Actual request completion is done in a softirq context which doesn't
1231 * hold the queue lock. Otherwise, deadlock could occur because:
1235 * hold the clone's queue lock. Otherwise, deadlock could occur because:
1232 * - another request may be submitted by the upper level driver
1233 * of the stacking during the completion
1234 * - the submission which requires queue lock may be done
1235 * against this queue
1239 * against this clone's queue
1236 */
1237 dm_complete_request(clone, error);
1241 dm_complete_request(tio->orig, error);
1238}
1239
1240/*
1241 * Return maximum size of I/O possible at the supplied sector up to the current
1242 * target boundary.
1243 */
1244static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
1245{

--- 461 unchanged lines hidden (view full) ---

1707 struct mapped_device *md = q->queuedata;
1708
1709 if (dm_request_based(md))
1710 blk_queue_bio(q, bio);
1711 else
1712 _dm_request(q, bio);
1713}
1714
1715static void dm_dispatch_request(struct request *rq)
1719static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
1716{
1717 int r;
1718
1719 if (blk_queue_io_stat(rq->q))
1720 rq->cmd_flags |= REQ_IO_STAT;
1723 if (blk_queue_io_stat(clone->q))
1724 clone->cmd_flags |= REQ_IO_STAT;
1721
1722 rq->start_time = jiffies;
1723 r = blk_insert_cloned_request(rq->q, rq);
1726 clone->start_time = jiffies;
1727 r = blk_insert_cloned_request(clone->q, clone);
1724 if (r)
1729 /* must complete clone in terms of original request */
1725 dm_complete_request(rq, r);
1726}
1727
1728static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1729 void *data)
1730{
1731 struct dm_rq_target_io *tio = data;
1732 struct dm_rq_clone_bio_info *info =

--- 22 unchanged lines hidden (view full) ---

1755 clone->end_io = end_clone_request;
1756 clone->end_io_data = tio;
1757
1758 tio->clone = clone;
1759
1760 return 0;
1761}
1762
1763static struct request *__clone_rq(struct request *rq, struct mapped_device *md,
1764 struct dm_rq_target_io *tio, gfp_t gfp_mask)
1768static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1769 struct dm_rq_target_io *tio, gfp_t gfp_mask)
1765{
1766 struct request *clone = alloc_clone_request(md, gfp_mask);
1767
1768 if (!clone)
1769 return NULL;
1770
1771 blk_rq_init(NULL, clone);
1772 if (setup_clone(clone, rq, tio, gfp_mask)) {
1773 /* -ENOMEM */
1774 free_clone_request(md, clone);
1775 return NULL;
1776 }
1777
1778 return clone;
1779}
1780
1781static void map_tio_request(struct kthread_work *work);
1782
1783static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1784 gfp_t gfp_mask)
1788static struct dm_rq_target_io *prep_tio(struct request *rq,
1789 struct mapped_device *md, gfp_t gfp_mask)
1785{
1786 struct request *clone;
1787 struct dm_rq_target_io *tio;
1788
1789 tio = alloc_rq_tio(md, gfp_mask);
1790 if (!tio)
1791 return NULL;
1792
1793 tio->md = md;
1794 tio->ti = NULL;
1795 tio->clone = NULL;
1796 tio->orig = rq;
1797 tio->error = 0;
1798 memset(&tio->info, 0, sizeof(tio->info));
1799 init_kthread_work(&tio->work, map_tio_request);
1800
1801 clone = __clone_rq(rq, md, tio, GFP_ATOMIC);
1802 if (!clone) {
1805 if (!clone_rq(rq, md, tio, gfp_mask)) {
1803 free_rq_tio(tio);
1804 return NULL;
1805 }
1806
1807 return clone;
1810 return tio;
1808}
1809
1810/*
1811 * Called with the queue lock held.
1812 */
1813static int dm_prep_fn(struct request_queue *q, struct request *rq)
1814{
1815 struct mapped_device *md = q->queuedata;
1816 struct request *clone;
1819 struct dm_rq_target_io *tio;
1817
1818 if (unlikely(rq->special)) {
1819 DMWARN("Already has something in rq->special.");
1820 return BLKPREP_KILL;
1821 }
1822
1823 clone = clone_rq(rq, md, GFP_ATOMIC);
1824 if (!clone)
1826 tio = prep_tio(rq, md, GFP_ATOMIC);
1827 if (!tio)
1825 return BLKPREP_DEFER;
1826
1827 rq->special = clone;
1830 rq->special = tio;
1828 rq->cmd_flags |= REQ_DONTPREP;
1829
1830 return BLKPREP_OK;
1831}
1832
1833/*
1834 * Returns:
1835 * 0 : the request has been processed (not requeued)
1836 * !0 : the request has been requeued
1837 */
1838static int map_request(struct dm_target *ti, struct request *clone,
1841static int map_request(struct dm_target *ti, struct request *rq,
1839 struct mapped_device *md)
1840{
1841 int r, requeued = 0;
1842 struct dm_rq_target_io *tio = clone->end_io_data;
1845 struct dm_rq_target_io *tio = rq->special;
1846 struct request *clone = tio->clone;
1843
1844 r = ti->type->map_rq(ti, clone, &tio->info);
1845 switch (r) {
1846 case DM_MAPIO_SUBMITTED:
1847 /* The target has taken the I/O to submit by itself later */
1848 break;
1849 case DM_MAPIO_REMAPPED:
1850 /* The target has remapped the I/O so dispatch it */
1851 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1852 blk_rq_pos(tio->orig));
1853 dm_dispatch_request(clone);
1856 blk_rq_pos(rq));
1857 dm_dispatch_clone_request(clone, rq);
1854 break;
1855 case DM_MAPIO_REQUEUE:
1856 /* The target wants to requeue the I/O */
1857 dm_requeue_unmapped_request(clone);
1858 requeued = 1;
1859 break;
1860 default:
1861 if (r > 0) {
1862 DMWARN("unimplemented target map return value: %d", r);
1863 BUG();
1864 }
1865
1866 /* The target wants to complete the I/O */
1867 dm_kill_unmapped_request(clone, r);
1871 dm_kill_unmapped_request(rq, r);
1868 break;
1869 }
1870
1871 return requeued;
1872}
1873
1874static void map_tio_request(struct kthread_work *work)
1875{
1876 struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
1877
1878 map_request(tio->ti, tio->clone, tio->md);
1882 map_request(tio->ti, tio->orig, tio->md);
1879}
1880
1881static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
1885static void dm_start_request(struct mapped_device *md, struct request *orig)
1882{
1883 struct request *clone;
1884
1885 blk_start_request(orig);
1886 clone = orig->special;
1887 atomic_inc(&md->pending[rq_data_dir(clone)]);
1888 atomic_inc(&md->pending[rq_data_dir(orig)]);
1888
1889 /*
1890 * Hold the md reference here for the in-flight I/O.
1891 * We can't rely on the reference count by device opener,
1892 * because the device may be closed during the request completion
1893 * when all bios are completed.
1894 * See the comment in rq_completed() too.
1895 */
1896 dm_get(md);
1897
1898 return clone;
1899}
1900
1901/*
1902 * q->request_fn for request-based dm.
1903 * Called with the queue lock held.
1904 */
1905static void dm_request_fn(struct request_queue *q)
1906{
1907 struct mapped_device *md = q->queuedata;
1908 int srcu_idx;
1909 struct dm_table *map = dm_get_live_table(md, &srcu_idx);
1910 struct dm_target *ti;
1911 struct request *rq, *clone;
1910 struct request *rq;
1912 struct dm_rq_target_io *tio;
1913 sector_t pos;
1914
1915 /*
1916 * For suspend, check blk_queue_stopped() and increment
1917 * ->pending within a single queue_lock not to increment the
1918 * number of in-flight I/Os after the queue is stopped in
1919 * dm_suspend().

--- 6 unchanged lines hidden (view full) ---

1926 /* always use block 0 to find the target for flushes for now */
1927 pos = 0;
1928 if (!(rq->cmd_flags & REQ_FLUSH))
1929 pos = blk_rq_pos(rq);
1930
1931 ti = dm_table_find_target(map, pos);
1932 if (!dm_target_is_valid(ti)) {
1933 /*
1934 * Must perform setup, that dm_done() requires,
1933 * Must perform setup, that rq_completed() requires,
1935 * before calling dm_kill_unmapped_request
1936 */
1937 DMERR_LIMIT("request attempted access beyond the end of device");
1938 clone = dm_start_request(md, rq);
1939 dm_kill_unmapped_request(clone, -EIO);
1937 dm_start_request(md, rq);
1938 dm_kill_unmapped_request(rq, -EIO);
1940 continue;
1941 }
1942
1943 if (ti->type->busy && ti->type->busy(ti))
1944 goto delay_and_out;
1945
1946 clone = dm_start_request(md, rq);
1945 dm_start_request(md, rq);
1947
1948 tio = rq->special;
1949 /* Establish tio->ti before queuing work (map_tio_request) */
1950 tio->ti = ti;
1951 queue_kthread_work(&md->kworker, &tio->work);
1952 BUG_ON(!irqs_disabled());
1953 }
1954

--- 280 unchanged lines hidden (view full) ---

2235 if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
2236 /*
2237 * Reload bioset because front_pad may have changed
2238 * because a different table was loaded.
2239 */
2240 bioset_free(md->bs);
2241 md->bs = p->bs;
2242 p->bs = NULL;
2243 } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
2244 /*
2245 * There's no need to reload with request-based dm
2246 * because the size of front_pad doesn't change.
2247 * Note for future: If you are to reload bioset,
2248 * prep-ed requests in the queue may refer
2249 * to bio from the old bioset, so you must walk
2250 * through the queue to unprep.
2251 */
2252 }
2242 }
2243 /*
2244 * There's no need to reload with request-based dm
2245 * because the size of front_pad doesn't change.
2246 * Note for future: If you are to reload bioset,
2247 * prep-ed requests in the queue may refer
2248 * to bio from the old bioset, so you must walk
2249 * through the queue to unprep.
2250 */
2253 goto out;
2254 }
2255
2256 BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
2257
2258 md->io_pool = p->io_pool;
2259 p->io_pool = NULL;
2260 md->rq_pool = p->rq_pool;

--- 1012 unchanged lines hidden ---