--- blk-mq.c (a7e7388dced47a10ca13ae95ca975ea2830f196b)
+++ blk-mq.c (63064be150e4b1ba1e4af594ef5aa81adf21a52d)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Block multiqueue core code
  *
  * Copyright (C) 2013-2014 Jens Axboe
  * Copyright (C) 2013-2014 Christoph Hellwig
  */
 #include <linux/kernel.h>
--- 2378 unchanged lines hidden ---
         kfree(tags->rqs);
         tags->rqs = NULL;
         kfree(tags->static_rqs);
         tags->static_rqs = NULL;

         blk_mq_free_tags(tags, flags);
 }
-struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
-                                        unsigned int hctx_idx,
-                                        unsigned int nr_tags,
-                                        unsigned int reserved_tags,
-                                        unsigned int flags)
+static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
+                                               unsigned int hctx_idx,
+                                               unsigned int nr_tags,
+                                               unsigned int reserved_tags,
+                                               unsigned int flags)
 {
         struct blk_mq_tags *tags;
         int node;

         node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
         if (node == NUMA_NO_NODE)
                 node = set->numa_node;
--- 31 unchanged lines hidden ---
                 if (ret)
                         return ret;
         }

         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
         return 0;
 }
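
The context above ends inside blk_mq_init_request(): once the driver's optional ->init_request() callback succeeds, the freshly allocated request is parked in MQ_RQ_IDLE. For orientation, a minimal sketch of the driver side of that callback might look as follows; the my_* names and the pdu contents are invented for illustration, while the callback signature and blk_mq_rq_to_pdu() are the real API:

    #include <linux/blk-mq.h>

    struct my_rq_pdu {
            int tag_cookie;         /* invented per-request driver state */
    };

    /* Matches blk_mq_ops.init_request; runs once per preallocated request. */
    static int my_init_request(struct blk_mq_tag_set *set, struct request *rq,
                               unsigned int hctx_idx, unsigned int numa_node)
    {
            struct my_rq_pdu *pdu = blk_mq_rq_to_pdu(rq);

            pdu->tag_cookie = -1;   /* real drivers map DMA, init locks, etc. */
            return 0;
    }
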
-int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
-                     unsigned int hctx_idx, unsigned int depth)
+static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
+                            struct blk_mq_tags *tags,
+                            unsigned int hctx_idx, unsigned int depth)
 {
         unsigned int i, j, entries_per_page, max_order = 4;
         size_t rq_size, left;
         int node;

         node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
         if (node == NUMA_NO_NODE)
                 node = set->numa_node;
--- 394 unchanged lines hidden ---
                 for (j = 0; j < set->nr_maps; j++) {
                         hctx = blk_mq_map_queue_type(q, j, i);
                         if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
                                 hctx->numa_node = cpu_to_node(i);
                 }
         }
 }
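
blk_mq_alloc_rqs() keeps its allocation loop in the hidden region, but the visible locals hint at the scheme: requests are carved out of physically contiguous chunks of at most 2^max_order pages (max_order = 4, so 16 pages), with rq_size rounded up to a cache line and entries_per_page requests packed per chunk. The following userspace paraphrase of that sizing logic is a sketch based on the surrounding kernel code, assuming 4 KiB pages and 64-byte cache lines; it is not the kernel loop itself:

    #include <stdio.h>

    #define PAGE_SIZE_B     4096u
    #define CACHE_LINE      64u

    static unsigned int order_to_size(unsigned int order)
    {
            return PAGE_SIZE_B << order;
    }

    int main(void)
    {
            unsigned int rq_size = 384;     /* sizeof(struct request) + cmd_size, say */
            unsigned int depth = 256, done = 0, max_order = 4;

            /* Round the per-request footprint up to a cache-line multiple. */
            rq_size = (rq_size + CACHE_LINE - 1) & ~(CACHE_LINE - 1);

            while (done < depth) {
                    unsigned int order = max_order;
                    unsigned int left = (depth - done) * rq_size;

                    /* Step the order down while a smaller chunk still covers the rest. */
                    while (order && left < order_to_size(order - 1))
                            order--;

                    unsigned int per_chunk = order_to_size(order) / rq_size;
                    if (per_chunk > depth - done)
                            per_chunk = depth - done;

                    printf("order-%u chunk -> %u requests\n", order, per_chunk);
                    done += per_chunk;
            }
            return 0;
    }
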
-static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set,
-                                           int hctx_idx)
+struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
+                                             unsigned int hctx_idx,
+                                             unsigned int depth)
 {
-        unsigned int flags = set->flags;
-        int ret = 0;
+        struct blk_mq_tags *tags;
+        int ret;

-        set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
-                                        set->queue_depth, set->reserved_tags, flags);
-        if (!set->tags[hctx_idx])
-                return false;
+        tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags,
+                                   set->flags);
+        if (!tags)
+                return NULL;

-        ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
-                               set->queue_depth);
-        if (!ret)
-                return true;
+        ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
+        if (ret) {
+                blk_mq_free_rq_map(tags, set->flags);
+                return NULL;
+        }

-        blk_mq_free_rq_map(set->tags[hctx_idx], flags);
-        set->tags[hctx_idx] = NULL;
-        return false;
+        return tags;
 }

+static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
+                                       int hctx_idx)
+{
+        set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
+                                                       set->queue_depth);
+
+        return set->tags[hctx_idx];
+}
+
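
The net effect of this hunk: the old __blk_mq_alloc_map_and_request(), which always wrote into set->tags[] at set->queue_depth, is split into blk_mq_alloc_map_and_rqs(), which hands the tags back to the caller at an arbitrary depth (and now frees the rq map itself when blk_mq_alloc_rqs() fails), plus a thin wrapper preserving the old behaviour. A hypothetical out-of-tree caller (names invented) would pair the new helper with the free routines already visible in this file, roughly like so:

    #include <linux/blk-mq.h>

    /* Hypothetical: allocate a private tag map at a caller-chosen depth. */
    static struct blk_mq_tags *my_alloc_private_tags(struct blk_mq_tag_set *set,
                                                     unsigned int hctx_idx,
                                                     unsigned int depth)
    {
            struct blk_mq_tags *tags;

            tags = blk_mq_alloc_map_and_rqs(set, hctx_idx, depth);
            if (!tags)
                    return NULL;    /* partial allocations already undone */

            /*
             * On later error paths the caller owns the teardown:
             *      blk_mq_free_rqs(set, tags, hctx_idx);
             *      blk_mq_free_rq_map(tags, set->flags);
             */
            return tags;
    }
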
 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
                                          unsigned int hctx_idx)
 {
         unsigned int flags = set->flags;

         if (set->tags && set->tags[hctx_idx]) {
                 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
                 blk_mq_free_rq_map(set->tags[hctx_idx], flags);
--- 26 unchanged lines hidden ---
                 if (!set->map[j].nr_queues) {
                         ctx->hctxs[j] = blk_mq_map_queue_type(q,
                                         HCTX_TYPE_DEFAULT, i);
                         continue;
                 }
                 hctx_idx = set->map[j].mq_map[i];
                 /* unmapped hw queue can be remapped after CPU topo changed */
                 if (!set->tags[hctx_idx] &&
-                    !__blk_mq_alloc_map_and_request(set, hctx_idx)) {
+                    !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
                         /*
                          * If tags initialization fail for some hctx,
                          * that hctx won't be brought online. In this
                          * case, remap the current ctx to hctx[0] which
                          * is guaranteed to always have tags allocated
                          */
                         set->map[j].mq_map[i] = 0;
                 }
--- 416 unchanged lines hidden ---
         blk_mq_del_queue_tag_set(q);
 }

 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 {
         int i;

         for (i = 0; i < set->nr_hw_queues; i++) {
-                if (!__blk_mq_alloc_map_and_request(set, i))
+                if (!__blk_mq_alloc_map_and_rqs(set, i))
                         goto out_unwind;
                 cond_resched();
         }

         return 0;

 out_unwind:
         while (--i >= 0)
                 blk_mq_free_map_and_requests(set, i);

         return -ENOMEM;
 }

 /*
  * Allocate the request maps associated with this tag_set. Note that this
  * may reduce the depth asked for, if memory is tight. set->queue_depth
  * will be updated to reflect the allocated depth.
  */
-static int blk_mq_alloc_map_and_requests(struct blk_mq_tag_set *set)
+static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
 {
         unsigned int depth;
         int err;

         depth = set->queue_depth;
         do {
                 err = __blk_mq_alloc_rq_maps(set);
                 if (!err)
--- 149 unchanged lines hidden ---
                         goto out_free_mq_map;
                 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
         }

         ret = blk_mq_update_queue_map(set);
         if (ret)
                 goto out_free_mq_map;
-        ret = blk_mq_alloc_map_and_requests(set);
+        ret = blk_mq_alloc_set_map_and_rqs(set);
         if (ret)
                 goto out_free_mq_map;

         if (blk_mq_is_sbitmap_shared(set->flags)) {
                 atomic_set(&set->active_queues_shared_sbitmap, 0);

                 if (blk_mq_init_shared_sbitmap(set)) {
                         ret = -ENOMEM;
--- 496 unchanged lines hidden ---
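
The renamed blk_mq_alloc_set_map_and_rqs() keeps its body in the hidden region, but the comment above its definition states the contract: retry at progressively halved depths when memory is tight, and publish the final depth through set->queue_depth. A standalone paraphrase of that back-off, with try_alloc() standing in for __blk_mq_alloc_rq_maps() and MIN_DEPTH for the reserved-tags floor (both stand-ins, not the kernel symbols), might read:

    #include <stdbool.h>
    #include <errno.h>

    #define MIN_DEPTH       4       /* stand-in for reserved_tags + BLK_MQ_TAG_MIN */

    /* Halve the depth and retry until the per-hctx maps fit in memory. */
    static int alloc_with_backoff(unsigned int *queue_depth,
                                  bool (*try_alloc)(unsigned int depth))
    {
            while (*queue_depth >= MIN_DEPTH) {
                    if (try_alloc(*queue_depth))
                            return 0;       /* *queue_depth holds the depth we got */
                    *queue_depth >>= 1;     /* memory is tight: halve and retry */
            }
            return -ENOMEM;
    }
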