--- dm.c (bfebd1cdb497a57757c83f5fbf1a29931591e2a4)
+++ dm.c (022333427a8aa4ccb318a9db90cea4e69ca1826b)
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

--- 1063 unchanged lines hidden ---

static void free_rq_clone(struct request *clone)
{
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;

        blk_rq_unprep_clone(clone);

-        if (clone->q && clone->q->mq_ops)
+        if (clone->q->mq_ops)
                tio->ti->type->release_clone_rq(clone);
-        else
+        else if (!md->queue->mq_ops)
+                /* request_fn queue stacked on request_fn queue(s) */
                free_clone_request(md, clone);

        if (!md->queue->mq_ops)
                free_rq_tio(tio);
}

/*
 * Complete the clone and the original request.

--- 742 unchanged lines hidden ---

        tio->clone = clone;

        return 0;
}

static struct request *clone_rq(struct request *rq, struct mapped_device *md,
                                struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
-        struct request *clone = alloc_clone_request(md, gfp_mask);
-
-        if (!clone)
-                return NULL;
+        /*
+         * Do not allocate a clone if tio->clone was already set
+         * (see: dm_mq_queue_rq).
+         */
+        bool alloc_clone = !tio->clone;
+        struct request *clone;
+
+        if (alloc_clone) {
+                clone = alloc_clone_request(md, gfp_mask);
+                if (!clone)
+                        return NULL;
+        } else
+                clone = tio->clone;

        blk_rq_init(NULL, clone);
        if (setup_clone(clone, rq, tio, gfp_mask)) {
                /* -ENOMEM */
-                free_clone_request(md, clone);
+                if (alloc_clone)
+                        free_clone_request(md, clone);
                return NULL;
        }

        return clone;
}

static void map_tio_request(struct kthread_work *work);

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
                     struct mapped_device *md)
{
        tio->md = md;
        tio->ti = NULL;
        tio->clone = NULL;
        tio->orig = rq;
        tio->error = 0;
        memset(&tio->info, 0, sizeof(tio->info));
-        init_kthread_work(&tio->work, map_tio_request);
+        if (md->kworker_task)
+                init_kthread_work(&tio->work, map_tio_request);
}

static struct dm_rq_target_io *prep_tio(struct request *rq,
                                        struct mapped_device *md, gfp_t gfp_mask)
{
        struct dm_rq_target_io *tio;
        int srcu_idx;
        struct dm_table *table;

--- 60 unchanged lines hidden ---

                r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
                if (r < 0) {
                        /* The target wants to complete the I/O */
                        dm_kill_unmapped_request(rq, r);
                        return r;
                }
                if (IS_ERR(clone))
                        return DM_MAPIO_REQUEUE;
-                if (setup_clone(clone, rq, tio, GFP_NOIO)) {
+                if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
                        ti->type->release_clone_rq(clone);
                        return DM_MAPIO_REQUEUE;
                }
        }

        switch (r) {
        case DM_MAPIO_SUBMITTED:

--- 450 unchanged lines hidden ---

        md->io_pool = p->io_pool;
        p->io_pool = NULL;
        md->rq_pool = p->rq_pool;
        p->rq_pool = NULL;
        md->bs = p->bs;
        p->bs = NULL;

out:
-        /* mempool bind completed, now no need any mempools in the table */
+        /* mempool bind completed, no longer need any mempools in the table */
        dm_table_free_md_mempools(t);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{

--- 288 unchanged lines hidden ---

        if (ti->type->busy && ti->type->busy(ti))
                return BLK_MQ_RQ_QUEUE_BUSY;

        dm_start_request(md, rq);

        /* Init tio using md established in .init_request */
        init_tio(tio, rq, md);

-        /* Establish tio->ti before queuing work (map_tio_request) */
+        /*
+         * Establish tio->ti before queuing work (map_tio_request)
+         * or making direct call to map_request().
+         */
        tio->ti = ti;
-        queue_kthread_work(&md->kworker, &tio->work);

+        /* Clone the request if underlying devices aren't blk-mq */
+        if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
+                /* clone request is allocated at the end of the pdu */
+                tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
+                if (!clone_rq(rq, md, tio, GFP_ATOMIC))
+                        return BLK_MQ_RQ_QUEUE_BUSY;
+                queue_kthread_work(&md->kworker, &tio->work);
+        } else {
+                /* Direct call is fine since .queue_rq allows allocations */
+                if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
+                        dm_requeue_unmapped_original_request(md, rq);
+        }
+
        return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops dm_mq_ops = {
        .queue_rq = dm_mq_queue_rq,
        .map_queue = blk_mq_map_queue,
        .complete = dm_softirq_done,
        .init_request = dm_mq_init_request,
};

static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
{
+        unsigned md_type = dm_get_md_type(md);
        struct request_queue *q;
        int err;

        memset(&md->tag_set, 0, sizeof(md->tag_set));
        md->tag_set.ops = &dm_mq_ops;
        md->tag_set.queue_depth = BLKDEV_MAX_RQ;
        md->tag_set.numa_node = NUMA_NO_NODE;
        md->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
        md->tag_set.nr_hw_queues = 1;
-        md->tag_set.cmd_size = sizeof(struct dm_rq_target_io);
+        if (md_type == DM_TYPE_REQUEST_BASED) {
+                /* make the memory for non-blk-mq clone part of the pdu */
+                md->tag_set.cmd_size = sizeof(struct dm_rq_target_io) + sizeof(struct request);
+        } else
+                md->tag_set.cmd_size = sizeof(struct dm_rq_target_io);
        md->tag_set.driver_data = md;

        err = blk_mq_alloc_tag_set(&md->tag_set);
        if (err)
                return err;

        q = blk_mq_init_allocated_queue(&md->tag_set, md->queue);
        if (IS_ERR(q)) {
                err = PTR_ERR(q);
                goto out_tag_set;
        }
        md->queue = q;
        dm_init_md_queue(md);

        /* backfill 'mq' sysfs registration normally done in blk_register_queue */
        blk_mq_register_disk(md->disk);

-        init_rq_based_worker_thread(md);
+        if (md_type == DM_TYPE_REQUEST_BASED)
+                init_rq_based_worker_thread(md);

        return 0;

out_tag_set:
        blk_mq_free_tag_set(&md->tag_set);
        return err;
}

--- 103 unchanged lines hidden ---

        map = dm_get_live_table(md, &srcu_idx);

        spin_lock(&_minor_lock);
        idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
        set_bit(DMF_FREEING, &md->flags);
        spin_unlock(&_minor_lock);

-        if (dm_request_based(md))
+        if (dm_request_based(md) && md->kworker_task)
                flush_kthread_worker(&md->kworker);

        /*
         * Take suspend_lock so that presuspend and postsuspend methods
         * do not race with internal suspend.
         */
        mutex_lock(&md->suspend_lock);
        if (!dm_suspended_md(md)) {

--- 237 unchanged lines hidden ---

        synchronize_srcu(&md->io_barrier);

        /*
         * Stop md->queue before flushing md->wq in case request-based
         * dm defers requests to md->wq from md->queue.
         */
        if (dm_request_based(md)) {
                stop_queue(md->queue);
-                flush_kthread_worker(&md->kworker);
+                if (md->kworker_task)
+                        flush_kthread_worker(&md->kworker);
        }

        flush_workqueue(md->wq);

        /*
         * At this point no more requests are entering target request routines.
         * We call dm_wait_for_completion to wait for all existing requests
         * to finish.

--- 432 unchanged lines hidden ---
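Taken together, the hunks above make two changes to dm_mq_queue_rq: when the device stacks on blk-mq paths, the request is mapped directly from .queue_rq instead of being handed to the kthread worker, and when it stacks on request_fn devices, the clone request is carved out of the end of the per-request blk-mq pdu (hence the larger tag_set.cmd_size), so clone_rq() no longer has to allocate one in the dispatch path. This also lines up with setup_clone() switching from GFP_NOIO to GFP_ATOMIC in the map path. The sketch below is illustration only, not kernel code: the toy_* types and the pdu_size()/queue_rq() helpers are invented stand-ins for the real dm/blk-mq structures, used just to show the pdu layout trick and the dispatch decision.

/*
 * Illustrative userspace sketch -- NOT kernel code.  All types and
 * helpers here are simplified stand-ins invented for this example.
 *
 *   1. For DM_TYPE_REQUEST_BASED (request_fn stacked devices) the clone
 *      request lives at the end of the per-request pdu, so no separate
 *      allocation is needed from the dispatch path.
 *   2. For blk-mq stacked devices the request is mapped directly from
 *      .queue_rq instead of being bounced through the kthread worker.
 */
#include <stdio.h>
#include <stdlib.h>

enum dm_queue_type { DM_TYPE_REQUEST_BASED, DM_TYPE_MQ_REQUEST_BASED };

struct toy_request { char pad[64]; };          /* stand-in for struct request */
struct toy_tio {                               /* stand-in for dm_rq_target_io */
        struct toy_request *clone;
        struct toy_request *orig;
};

/* mirrors the conditional tag_set.cmd_size sizing in the diff */
static size_t pdu_size(enum dm_queue_type type)
{
        return type == DM_TYPE_REQUEST_BASED
                ? sizeof(struct toy_tio) + sizeof(struct toy_request)
                : sizeof(struct toy_tio);
}

/* mirrors the dispatch decision the reworked .queue_rq makes */
static void queue_rq(enum dm_queue_type type, void *pdu, struct toy_request *rq)
{
        struct toy_tio *tio = pdu;

        tio->orig = rq;
        if (type == DM_TYPE_REQUEST_BASED) {
                /* clone request is carved out of the end of the pdu */
                tio->clone = (struct toy_request *)((char *)pdu + sizeof(struct toy_tio));
                printf("request_fn path: clone at pdu+%zu, defer to kworker\n",
                       sizeof(struct toy_tio));
        } else {
                tio->clone = NULL;
                printf("blk-mq path: map the request directly from .queue_rq\n");
        }
}

int main(void)
{
        struct toy_request rq;
        void *pdu = calloc(1, pdu_size(DM_TYPE_REQUEST_BASED));

        queue_rq(DM_TYPE_REQUEST_BASED, pdu, &rq);
        free(pdu);

        pdu = calloc(1, pdu_size(DM_TYPE_MQ_REQUEST_BASED));
        queue_rq(DM_TYPE_MQ_REQUEST_BASED, pdu, &rq);
        free(pdu);
        return 0;
}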