1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
29a0ef98eSChristoph Hellwig /*
39a0ef98eSChristoph Hellwig * Copyright (C) 2016 Thomas Gleixner.
49a0ef98eSChristoph Hellwig * Copyright (C) 2016-2017 Christoph Hellwig.
59a0ef98eSChristoph Hellwig */
65e385a6eSChristoph Hellwig #include <linux/interrupt.h>
75e385a6eSChristoph Hellwig #include <linux/kernel.h>
85e385a6eSChristoph Hellwig #include <linux/slab.h>
95e385a6eSChristoph Hellwig #include <linux/cpu.h>
10*f7b3ea8cSMing Lei #include <linux/group_cpus.h>
115c903e10SMing Lei
default_calc_sets(struct irq_affinity * affd,unsigned int affvecs)12c66d4bd1SMing Lei static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
13c66d4bd1SMing Lei {
14c66d4bd1SMing Lei affd->nr_sets = 1;
15c66d4bd1SMing Lei affd->set_size[0] = affvecs;
16c66d4bd1SMing Lei }
17c66d4bd1SMing Lei
/**
 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
 * @nvecs:	The total number of vectors
 * @affd:	Description of the affinity requirements
 *
 * Returns the irq_affinity_desc pointer or NULL if allocation failed.
 *
 * The returned array has @nvecs entries; the caller owns it and is
 * responsible for freeing it (kfree).
 */
struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
{
	unsigned int affvecs, curvec, usedvecs, i;
	struct irq_affinity_desc *masks = NULL;

	/*
	 * Determine the number of vectors which need interrupt affinities
	 * assigned. If the pre/post request exhausts the available vectors
	 * then nothing to do here except for invoking the calc_sets()
	 * callback so the device driver can adjust to the situation.
	 */
	if (nvecs > affd->pre_vectors + affd->post_vectors)
		affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
	else
		affvecs = 0;

	/*
	 * Simple invocations do not provide a calc_sets() callback. Install
	 * the generic one.
	 */
	if (!affd->calc_sets)
		affd->calc_sets = default_calc_sets;

	/* Recalculate the sets */
	affd->calc_sets(affd, affvecs);

	/* Driver requested more sets than the core supports - refuse. */
	if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS))
		return NULL;

	/* Nothing to assign? */
	if (!affvecs)
		return NULL;

	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return NULL;

	/* Fill out vectors at the beginning that don't need affinity */
	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
		cpumask_copy(&masks[curvec].mask, irq_default_affinity);

	/*
	 * Spread on present CPUs starting from affd->pre_vectors. If we
	 * have multiple sets, build each sets affinity mask separately.
	 */
	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
		unsigned int this_vecs = affd->set_size[i];
		int j;
		/* group_cpus_evenly() returns a kcalloc'ed array of
		 * this_vecs cpumasks, or NULL on allocation failure. */
		struct cpumask *result = group_cpus_evenly(this_vecs);

		if (!result) {
			kfree(masks);
			return NULL;
		}

		for (j = 0; j < this_vecs; j++)
			cpumask_copy(&masks[curvec + j].mask, &result[j]);
		kfree(result);

		curvec += this_vecs;
		usedvecs += this_vecs;
	}

	/* Fill out vectors at the end that don't need affinity */
	if (usedvecs >= affvecs)
		curvec = affd->pre_vectors + affvecs;
	else
		curvec = affd->pre_vectors + usedvecs;
	for (; curvec < nvecs; curvec++)
		cpumask_copy(&masks[curvec].mask, irq_default_affinity);

	/* Mark the managed interrupts */
	for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
		masks[i].is_managed = 1;

	return masks;
}
10334c3d981SThomas Gleixner
10434c3d981SThomas Gleixner /**
105212bd846SChristoph Hellwig * irq_calc_affinity_vectors - Calculate the optimal number of vectors
1066f9a22bcSMichael Hernandez * @minvec: The minimum number of vectors available
10734c3d981SThomas Gleixner * @maxvec: The maximum number of vectors available
108212bd846SChristoph Hellwig * @affd: Description of the affinity requirements
10934c3d981SThomas Gleixner */
irq_calc_affinity_vectors(unsigned int minvec,unsigned int maxvec,const struct irq_affinity * affd)1100145c30eSThomas Gleixner unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
1110145c30eSThomas Gleixner const struct irq_affinity *affd)
11234c3d981SThomas Gleixner {
1130145c30eSThomas Gleixner unsigned int resv = affd->pre_vectors + affd->post_vectors;
1140145c30eSThomas Gleixner unsigned int set_vecs;
11534c3d981SThomas Gleixner
1166f9a22bcSMichael Hernandez if (resv > minvec)
1176f9a22bcSMichael Hernandez return 0;
1186f9a22bcSMichael Hernandez
119c66d4bd1SMing Lei if (affd->calc_sets) {
120c66d4bd1SMing Lei set_vecs = maxvec - resv;
1216da4b3abSJens Axboe } else {
122428e2116SSebastian Andrzej Siewior cpus_read_lock();
1236da4b3abSJens Axboe set_vecs = cpumask_weight(cpu_possible_mask);
124428e2116SSebastian Andrzej Siewior cpus_read_unlock();
1256da4b3abSJens Axboe }
1266da4b3abSJens Axboe
1270145c30eSThomas Gleixner return resv + min(set_vecs, maxvec - resv);
12834c3d981SThomas Gleixner }
129