xref: /openbmc/linux/block/blk-mq-cpumap.c (revision 6189f1b0)
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

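/*
 * Spread CPUs evenly over the hardware queues: CPU 'cpu' out of 'nr_cpus'
 * is assigned queue cpu * nr_queues / nr_cpus.  For example, with 8 CPUs
 * and 3 queues, CPUs 0-2 map to queue 0, CPUs 3-5 to queue 1 and CPUs 6-7
 * to queue 2.
 */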
static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
			      const int cpu)
{
	return cpu * nr_queues / nr_cpus;
}

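/*
 * Return the first CPU in 'cpu's thread-sibling mask, or 'cpu' itself if
 * the topology reports no sibling below nr_cpu_ids.  All hyperthreads of
 * a core thus resolve to the same representative CPU.
 */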
static int get_first_sibling(unsigned int cpu)
{
	unsigned int ret;

	ret = cpumask_first(topology_sibling_cpumask(cpu));
	if (ret < nr_cpu_ids)
		return ret;

	return cpu;
}

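/*
 * Fill 'map' with a hardware queue index for every possible CPU.  Offline
 * CPUs are mapped to queue 0.  With at least as many queues as online
 * CPUs, or with no thread siblings to collapse, online CPUs are spread
 * directly across the queues; otherwise sibling threads share the queue
 * of their first sibling.  Returns 0 on success, 1 if the temporary
 * cpumask allocation fails.
 */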
int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
{
	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
	cpumask_var_t cpus;

	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
		return 1;

	cpumask_clear(cpus);
	nr_cpus = nr_uniq_cpus = 0;
	for_each_online_cpu(i) {
		nr_cpus++;
		first_sibling = get_first_sibling(i);
		if (!cpumask_test_cpu(first_sibling, cpus))
			nr_uniq_cpus++;
		cpumask_set_cpu(i, cpus);
	}

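	/*
	 * nr_cpus now counts every online CPU, nr_uniq_cpus only the first
	 * thread of each core.  For example, a 4-core machine with 2-way
	 * SMT and all CPUs online gives nr_cpus = 8 and nr_uniq_cpus = 4.
	 */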
	queue = 0;
	for_each_possible_cpu(i) {
		if (!cpu_online(i)) {
			map[i] = 0;
			continue;
		}

		/*
		 * Easy case - we have equal or more hardware queues than
		 * CPUs, or there are no thread siblings to take into
		 * account: map 1:1 if there are enough queues, otherwise
		 * spread CPUs sequentially across the queues.
		 */
		if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
			map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
			queue++;
			continue;
		}

		/*
		 * Less than nr_cpus queues, and we have some number of
		 * threads per core. Map sibling threads to the same
		 * queue.
		 */
		first_sibling = get_first_sibling(i);
		if (first_sibling == i) {
			map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
							queue);
			queue++;
		} else
			map[i] = map[first_sibling];
	}

	free_cpumask_var(cpus);
	return 0;
}

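/*
 * Allocate and populate the CPU -> hardware queue map for a tag set.  The
 * array holds nr_cpu_ids entries and is allocated on the set's NUMA node;
 * the caller owns it and frees it with kfree().  Returns NULL if the
 * allocation or the mapping fails.
 */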
unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
{
	unsigned int *map;

	/* If cpus are offline, map them to first hctx */
	map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL,
				set->numa_node);
	if (!map)
		return NULL;

	if (!blk_mq_update_queue_map(map, set->nr_hw_queues))
		return map;

	kfree(map);
	return NULL;
}

/*
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == mq_map[i])
			return cpu_to_node(i);
	}

	return NUMA_NO_NODE;
}