/*
 * Copyright(c) 2015 - 2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/numa.h>

#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "trace.h"

struct hfi1_affinity_node_list node_affinity = {
	.list = LIST_HEAD_INIT(node_affinity.list),
	.lock = __MUTEX_INITIALIZER(node_affinity.lock)
};

/* Name of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
	"SDMA",
	"RCVCTXT",
	"NETDEVCTXT",
	"GENERAL",
	"OTHER",
};

/* Per NUMA node count of HFI devices */
static unsigned int *hfi1_per_node_cntr;

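/* Reset a CPU mask set: clear both masks and the generation counter */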
static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{
	cpumask_clear(&set->mask);
	cpumask_clear(&set->used);
	set->gen = 0;
}

/* Increment generation of CPU set if needed */
static void _cpu_mask_set_gen_inc(struct cpu_mask_set *set)
{
	if (cpumask_equal(&set->mask, &set->used)) {
		/*
		 * We've used up all the CPUs, bump up the generation
		 * and reset the 'used' map
		 */
		set->gen++;
		cpumask_clear(&set->used);
	}
}

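/*
 * Decrement the generation of a CPU set when its 'used' map has been
 * fully drained, refilling 'used' from 'mask' so the previous
 * generation's allocations are accounted for again.
 */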
static void _cpu_mask_set_gen_dec(struct cpu_mask_set *set)
{
	if (cpumask_empty(&set->used) && set->gen) {
		set->gen--;
		cpumask_copy(&set->used, &set->mask);
	}
}

/* Get the first CPU from the list of unused CPUs in a CPU set data structure */
static int cpu_mask_set_get_first(struct cpu_mask_set *set, cpumask_var_t diff)
{
	int cpu;

	if (!diff || !set)
		return -EINVAL;

	_cpu_mask_set_gen_inc(set);

	/* Find out CPUs left in CPU mask */
	cpumask_andnot(diff, &set->mask, &set->used);

	cpu = cpumask_first(diff);
	if (cpu >= nr_cpu_ids) /* empty */
		cpu = -EINVAL;
	else
		cpumask_set_cpu(cpu, &set->used);

	return cpu;
}

static void cpu_mask_set_put(struct cpu_mask_set *set, int cpu)
{
	if (!set)
		return;

	cpumask_clear_cpu(cpu, &set->used);
	_cpu_mask_set_gen_dec(set);
}

/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{
	int possible, curr_cpu, i, ht;

	cpumask_clear(&node_affinity.real_cpu_mask);

	/* Start with cpu online mask as the real cpu mask */
	cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);

	/*
	 * Remove HT cores from the real cpu mask.  Do this in two steps below.
	 */
	possible = cpumask_weight(&node_affinity.real_cpu_mask);
	ht = cpumask_weight(topology_sibling_cpumask(
				cpumask_first(&node_affinity.real_cpu_mask)));
	/*
	 * Step 1.  Skip over the first N HT siblings and use them as the
	 * "real" cores.  Assumes that HT cores are not enumerated in
	 * succession (except in the single core case).
	 */
	curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
	for (i = 0; i < possible / ht; i++)
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	/*
	 * Step 2.  Remove the remaining HT siblings.  Use cpumask_next() to
	 * skip any gaps.
	 */
	for (; i < possible; i++) {
		cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	}
}

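/*
 * Count the HFI devices on each NUMA node by walking the PCI device
 * table. The per-node counts are used later to divide CPUs among the
 * devices sharing a node.
 */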
int node_affinity_init(void)
{
	int node;
	struct pci_dev *dev = NULL;
	const struct pci_device_id *ids = hfi1_pci_tbl;

	cpumask_clear(&node_affinity.proc.used);
	cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);

	node_affinity.proc.gen = 0;
	node_affinity.num_core_siblings =
				cpumask_weight(topology_sibling_cpumask(
					cpumask_first(&node_affinity.proc.mask)
					));
	node_affinity.num_possible_nodes = num_possible_nodes();
	node_affinity.num_online_nodes = num_online_nodes();
	node_affinity.num_online_cpus = num_online_cpus();

	/*
	 * The real cpu mask is part of the affinity struct but it has to be
	 * initialized early. It is needed to calculate the number of user
	 * contexts in set_up_context_variables().
	 */
	init_real_cpu_mask();

	hfi1_per_node_cntr = kcalloc(node_affinity.num_possible_nodes,
				     sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
	if (!hfi1_per_node_cntr)
		return -ENOMEM;

	while (ids->vendor) {
		dev = NULL;
		while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
			node = pcibus_to_node(dev->bus);
			if (node < 0)
				goto out;

			hfi1_per_node_cntr[node]++;
		}
		ids++;
	}

	return 0;

out:
	/*
	 * Invalid PCI NUMA node information found, note it, and populate
	 * our database 1:1.
	 */
	pr_err("HFI: Invalid PCI NUMA node. Performance may be affected\n");
	pr_err("HFI: System BIOS may need to be upgraded\n");
	for (node = 0; node < node_affinity.num_possible_nodes; node++)
		hfi1_per_node_cntr[node] = 1;

	return 0;
}

static void node_affinity_destroy(struct hfi1_affinity_node *entry)
{
	free_percpu(entry->comp_vect_affinity);
	kfree(entry);
}

void node_affinity_destroy_all(void)
{
	struct list_head *pos, *q;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	list_for_each_safe(pos, q, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node,
				   list);
		list_del(pos);
		node_affinity_destroy(entry);
	}
	mutex_unlock(&node_affinity.lock);
	kfree(hfi1_per_node_cntr);
}

static struct hfi1_affinity_node *node_affinity_allocate(int node)
{
	struct hfi1_affinity_node *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;
	entry->node = node;
	entry->comp_vect_affinity = alloc_percpu(u16);
	INIT_LIST_HEAD(&entry->list);

	return entry;
}

/*
 * It appends an entry to the list.
 * It *must* be called with node_affinity.lock held.
 */
static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
{
	list_add_tail(&entry->list, &node_affinity.list);
}

/* It must be called with node_affinity.lock held */
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{
	struct list_head *pos;
	struct hfi1_affinity_node *entry;

	list_for_each(pos, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node, list);
		if (entry->node == node)
			return entry;
	}

	return NULL;
}

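/*
 * Pick the CPU in possible_cpumask with the lowest reference count in
 * comp_vect_affinity and increment that count, i.e. choose the least
 * loaded CPU for a new completion vector.
 */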
static int per_cpu_affinity_get(cpumask_var_t possible_cpumask,
				u16 __percpu *comp_vect_affinity)
{
	int curr_cpu;
	u16 cntr;
	u16 prev_cntr;
	int ret_cpu;

	if (!possible_cpumask) {
		ret_cpu = -EINVAL;
		goto fail;
	}

	if (!comp_vect_affinity) {
		ret_cpu = -EINVAL;
		goto fail;
	}

	ret_cpu = cpumask_first(possible_cpumask);
	if (ret_cpu >= nr_cpu_ids) {
		ret_cpu = -EINVAL;
		goto fail;
	}

	prev_cntr = *per_cpu_ptr(comp_vect_affinity, ret_cpu);
	for_each_cpu(curr_cpu, possible_cpumask) {
		cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu);

		if (cntr < prev_cntr) {
			ret_cpu = curr_cpu;
			prev_cntr = cntr;
		}
	}

	*per_cpu_ptr(comp_vect_affinity, ret_cpu) += 1;

fail:
	return ret_cpu;
}

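/*
 * Find the CPU in possible_cpumask with the highest reference count in
 * comp_vect_affinity and decrement that count, releasing one
 * per_cpu_affinity_get() reference.
 */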
static int per_cpu_affinity_put_max(cpumask_var_t possible_cpumask,
				    u16 __percpu *comp_vect_affinity)
{
	int curr_cpu;
	int max_cpu;
	u16 cntr;
	u16 prev_cntr;

	if (!possible_cpumask)
		return -EINVAL;

	if (!comp_vect_affinity)
		return -EINVAL;

	max_cpu = cpumask_first(possible_cpumask);
	if (max_cpu >= nr_cpu_ids)
		return -EINVAL;

	prev_cntr = *per_cpu_ptr(comp_vect_affinity, max_cpu);
	for_each_cpu(curr_cpu, possible_cpumask) {
		cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu);

		if (cntr > prev_cntr) {
			max_cpu = curr_cpu;
			prev_cntr = cntr;
		}
	}

	*per_cpu_ptr(comp_vect_affinity, max_cpu) -= 1;

	return max_cpu;
}

/*
 * Non-interrupt CPUs are used first, then interrupt CPUs.
 * Two already allocated cpu masks must be passed.
 */
static int _dev_comp_vect_cpu_get(struct hfi1_devdata *dd,
				  struct hfi1_affinity_node *entry,
				  cpumask_var_t non_intr_cpus,
				  cpumask_var_t available_cpus)
	__must_hold(&node_affinity.lock)
{
	int cpu;
	struct cpu_mask_set *set = dd->comp_vect;

	lockdep_assert_held(&node_affinity.lock);
	if (!non_intr_cpus) {
		cpu = -1;
		goto fail;
	}

	if (!available_cpus) {
		cpu = -1;
		goto fail;
	}

	/* Available CPUs for pinning completion vectors */
	_cpu_mask_set_gen_inc(set);
	cpumask_andnot(available_cpus, &set->mask, &set->used);

	/* Available CPUs without SDMA engine interrupts */
	cpumask_andnot(non_intr_cpus, available_cpus,
		       &entry->def_intr.used);

	/* If there are non-interrupt CPUs available, use them first */
	if (!cpumask_empty(non_intr_cpus))
		cpu = cpumask_first(non_intr_cpus);
	else /* Otherwise, use interrupt CPUs */
		cpu = cpumask_first(available_cpus);

	if (cpu >= nr_cpu_ids) { /* empty */
		cpu = -1;
		goto fail;
	}
	cpumask_set_cpu(cpu, &set->used);

fail:
	return cpu;
}

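/* Return a CPU reserved for a device completion vector to dd->comp_vect */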
static void _dev_comp_vect_cpu_put(struct hfi1_devdata *dd, int cpu)
{
	struct cpu_mask_set *set = dd->comp_vect;

	if (cpu < 0)
		return;

	cpu_mask_set_put(set, cpu);
}

/* _dev_comp_vect_mappings_destroy() is reentrant */
static void _dev_comp_vect_mappings_destroy(struct hfi1_devdata *dd)
{
	int i, cpu;

	if (!dd->comp_vect_mappings)
		return;

	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
		cpu = dd->comp_vect_mappings[i];
		_dev_comp_vect_cpu_put(dd, cpu);
		dd->comp_vect_mappings[i] = -1;
		hfi1_cdbg(AFFINITY,
			  "[%s] Release CPU %d from completion vector %d",
			  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), cpu, i);
	}

	kfree(dd->comp_vect_mappings);
	dd->comp_vect_mappings = NULL;
}

/*
 * This function creates the table for looking up CPUs for completion vectors.
 * num_comp_vectors needs to have been initialized before calling this function.
 */
static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
					  struct hfi1_affinity_node *entry)
	__must_hold(&node_affinity.lock)
{
	int i, cpu, ret;
	cpumask_var_t non_intr_cpus;
	cpumask_var_t available_cpus;

	lockdep_assert_held(&node_affinity.lock);

	if (!zalloc_cpumask_var(&non_intr_cpus, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(&available_cpus, GFP_KERNEL)) {
		free_cpumask_var(non_intr_cpus);
		return -ENOMEM;
	}

	dd->comp_vect_mappings = kcalloc(dd->comp_vect_possible_cpus,
					 sizeof(*dd->comp_vect_mappings),
					 GFP_KERNEL);
	if (!dd->comp_vect_mappings) {
		ret = -ENOMEM;
		goto fail;
	}
	for (i = 0; i < dd->comp_vect_possible_cpus; i++)
		dd->comp_vect_mappings[i] = -1;

	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
		cpu = _dev_comp_vect_cpu_get(dd, entry, non_intr_cpus,
					     available_cpus);
		if (cpu < 0) {
			ret = -EINVAL;
			goto fail;
		}

		dd->comp_vect_mappings[i] = cpu;
		hfi1_cdbg(AFFINITY,
			  "[%s] Completion Vector %d -> CPU %d",
			  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
	}

	free_cpumask_var(available_cpus);
	free_cpumask_var(non_intr_cpus);
	return 0;

fail:
	free_cpumask_var(available_cpus);
	free_cpumask_var(non_intr_cpus);
	_dev_comp_vect_mappings_destroy(dd);

	return ret;
}

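/*
 * Build the completion vector to CPU mapping table for a device using
 * the affinity entry of its NUMA node.
 */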
int hfi1_comp_vectors_set_up(struct hfi1_devdata *dd)
{
	int ret;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	if (!entry) {
		ret = -EINVAL;
		goto unlock;
	}
	ret = _dev_comp_vect_mappings_create(dd, entry);
unlock:
	mutex_unlock(&node_affinity.lock);

	return ret;
}

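/* Tear down the completion vector to CPU mapping table for a device */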
void hfi1_comp_vectors_clean_up(struct hfi1_devdata *dd)
{
	_dev_comp_vect_mappings_destroy(dd);
}

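/* Return the CPU mapped to a completion vector, or -EINVAL on an invalid lookup */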
int hfi1_comp_vect_mappings_lookup(struct rvt_dev_info *rdi, int comp_vect)
{
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);

	if (!dd->comp_vect_mappings)
		return -EINVAL;
	if (comp_vect >= dd->comp_vect_possible_cpus)
		return -EINVAL;

	return dd->comp_vect_mappings[comp_vect];
}

/*
 * It assumes dd->comp_vect_possible_cpus is available.
 */
static int _dev_comp_vect_cpu_mask_init(struct hfi1_devdata *dd,
					struct hfi1_affinity_node *entry,
					bool first_dev_init)
	__must_hold(&node_affinity.lock)
{
	int i, j, curr_cpu;
	int possible_cpus_comp_vect = 0;
	struct cpumask *dev_comp_vect_mask = &dd->comp_vect->mask;

	lockdep_assert_held(&node_affinity.lock);
	/*
	 * If there's only one CPU available for completion vectors, then
	 * there will only be one completion vector available. Otherwise,
	 * the number of completion vectors available will be the number of
	 * available CPUs divided by the number of devices in the
	 * local NUMA node.
	 */
	if (cpumask_weight(&entry->comp_vect_mask) == 1) {
		possible_cpus_comp_vect = 1;
		dd_dev_warn(dd,
			    "Number of kernel receive queues is too large for completion vector affinity to be effective\n");
	} else {
		possible_cpus_comp_vect +=
			cpumask_weight(&entry->comp_vect_mask) /
				       hfi1_per_node_cntr[dd->node];

		/*
		 * If the available completion vector CPUs don't divide
		 * evenly among devices, then the first device to be
		 * initialized gets an extra CPU.
		 */
		if (first_dev_init &&
		    cpumask_weight(&entry->comp_vect_mask) %
		    hfi1_per_node_cntr[dd->node] != 0)
			possible_cpus_comp_vect++;
	}

	dd->comp_vect_possible_cpus = possible_cpus_comp_vect;

	/* Reserving CPUs for device completion vector */
	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
		curr_cpu = per_cpu_affinity_get(&entry->comp_vect_mask,
						entry->comp_vect_affinity);
		if (curr_cpu < 0)
			goto fail;

		cpumask_set_cpu(curr_cpu, dev_comp_vect_mask);
	}

	hfi1_cdbg(AFFINITY,
		  "[%s] Completion vector affinity CPU set(s) %*pbl",
		  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi),
		  cpumask_pr_args(dev_comp_vect_mask));

	return 0;

fail:
	for (j = 0; j < i; j++)
		per_cpu_affinity_put_max(&entry->comp_vect_mask,
					 entry->comp_vect_affinity);

	return curr_cpu;
}

/*
 * It assumes dd->comp_vect_possible_cpus is available.
 */
static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
					     struct hfi1_affinity_node *entry)
	__must_hold(&node_affinity.lock)
{
	int i, cpu;

	lockdep_assert_held(&node_affinity.lock);
	if (!dd->comp_vect_possible_cpus)
		return;

	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
		cpu = per_cpu_affinity_put_max(&dd->comp_vect->mask,
					       entry->comp_vect_affinity);
		/* Clearing CPU in device completion vector cpu mask */
		if (cpu >= 0)
			cpumask_clear_cpu(cpu, &dd->comp_vect->mask);
	}

	dd->comp_vect_possible_cpus = 0;
}

/*
 * Interrupt affinity.
 *
 * non-rcv avail gets a default mask that
 * starts as possible cpus with threads reset
 * and each rcv avail reset.
 *
 * rcv avail gets node relative 1 wrapping back
 * to the node relative 1 as necessary.
 *
 */
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
	struct hfi1_affinity_node *entry;
	const struct cpumask *local_mask;
	int curr_cpu, possible, i, ret;
	bool new_entry = false;

	local_mask = cpumask_of_node(dd->node);
	if (cpumask_first(local_mask) >= nr_cpu_ids)
		local_mask = topology_core_cpumask(0);

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);

	/*
	 * If this is the first time this NUMA node's affinity is used,
	 * create an entry in the global affinity structure and initialize it.
	 */
	if (!entry) {
		entry = node_affinity_allocate(dd->node);
		if (!entry) {
			dd_dev_err(dd,
				   "Unable to allocate global affinity node\n");
			ret = -ENOMEM;
			goto fail;
		}
		new_entry = true;

		init_cpu_mask_set(&entry->def_intr);
		init_cpu_mask_set(&entry->rcv_intr);
		cpumask_clear(&entry->comp_vect_mask);
		cpumask_clear(&entry->general_intr_mask);
		/* Use the "real" cpu mask of this node as the default */
		cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
			    local_mask);

		/* fill in the receive list */
		possible = cpumask_weight(&entry->def_intr.mask);
		curr_cpu = cpumask_first(&entry->def_intr.mask);

		if (possible == 1) {
			/* only one CPU, everyone will use it */
			cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
		} else {
			/*
			 * The general/control context will be the first CPU in
			 * the default list, so it is removed from the default
			 * list and added to the general interrupt list.
			 */
			cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
			curr_cpu = cpumask_next(curr_cpu,
						&entry->def_intr.mask);

			/*
			 * Remove the remaining kernel receive queues from
			 * the default list and add them to the receive list.
			 */
			for (i = 0;
			     i < (dd->n_krcv_queues - 1) *
				  hfi1_per_node_cntr[dd->node];
			     i++) {
				cpumask_clear_cpu(curr_cpu,
						  &entry->def_intr.mask);
				cpumask_set_cpu(curr_cpu,
						&entry->rcv_intr.mask);
				curr_cpu = cpumask_next(curr_cpu,
							&entry->def_intr.mask);
				if (curr_cpu >= nr_cpu_ids)
					break;
			}

			/*
			 * If there ends up being 0 CPU cores leftover for SDMA
			 * engines, use the same CPU cores as general/control
			 * context.
			 */
			if (cpumask_weight(&entry->def_intr.mask) == 0)
				cpumask_copy(&entry->def_intr.mask,
					     &entry->general_intr_mask);
		}

		/* Determine completion vector CPUs for the entire node */
		cpumask_and(&entry->comp_vect_mask,
			    &node_affinity.real_cpu_mask, local_mask);
		cpumask_andnot(&entry->comp_vect_mask,
			       &entry->comp_vect_mask,
			       &entry->rcv_intr.mask);
		cpumask_andnot(&entry->comp_vect_mask,
			       &entry->comp_vect_mask,
			       &entry->general_intr_mask);

		/*
		 * If there ends up being 0 CPU cores leftover for completion
		 * vectors, use the same CPU core as the general/control
		 * context.
		 */
		if (cpumask_weight(&entry->comp_vect_mask) == 0)
			cpumask_copy(&entry->comp_vect_mask,
				     &entry->general_intr_mask);
	}

	ret = _dev_comp_vect_cpu_mask_init(dd, entry, new_entry);
	if (ret < 0)
		goto fail;

	if (new_entry)
		node_affinity_add_tail(entry);

	dd->affinity_entry = entry;
	mutex_unlock(&node_affinity.lock);

	return 0;

fail:
	if (new_entry)
		node_affinity_destroy(entry);
	mutex_unlock(&node_affinity.lock);
	return ret;
}

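/* Release the completion vector CPUs reserved by hfi1_dev_affinity_init() */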
void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
{
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	if (!dd->affinity_entry)
		goto unlock;
	entry = node_affinity_lookup(dd->node);
	if (!entry)
		goto unlock;

	/*
	 * Free device completion vector CPUs to be used by future
	 * completion vectors
	 */
	_dev_comp_vect_cpu_mask_clean_up(dd, entry);
unlock:
	dd->affinity_entry = NULL;
	mutex_unlock(&node_affinity.lock);
}

/*
 * Function updates the irq affinity hint for msix after it has been changed
 * by the user using the /proc/irq interface. This function only accepts
 * one cpu in the mask.
 */
static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
{
	struct sdma_engine *sde = msix->arg;
	struct hfi1_devdata *dd = sde->dd;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set;
	int i, old_cpu;

	if (cpu > num_online_cpus() || cpu == sde->cpu)
		return;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	if (!entry)
		goto unlock;

	old_cpu = sde->cpu;
	sde->cpu = cpu;
	cpumask_clear(&msix->mask);
	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_dbg(dd, "IRQ: %u, type %s engine %u -> cpu: %d\n",
		   msix->irq, irq_type_names[msix->type],
		   sde->this_idx, cpu);
	irq_set_affinity_hint(msix->irq, &msix->mask);

	/*
	 * Set the new cpu in the hfi1_affinity_node and clean
	 * the old cpu if it is not used by any other IRQ
	 */
	set = &entry->def_intr;
	cpumask_set_cpu(cpu, &set->mask);
	cpumask_set_cpu(cpu, &set->used);
	for (i = 0; i < dd->msix_info.max_requested; i++) {
		struct hfi1_msix_entry *other_msix;

		other_msix = &dd->msix_info.msix_entries[i];
		if (other_msix->type != IRQ_SDMA || other_msix == msix)
			continue;

		if (cpumask_test_cpu(old_cpu, &other_msix->mask))
			goto unlock;
	}
	cpumask_clear_cpu(old_cpu, &set->mask);
	cpumask_clear_cpu(old_cpu, &set->used);
unlock:
	mutex_unlock(&node_affinity.lock);
}

static void hfi1_irq_notifier_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	int cpu = cpumask_first(mask);
	struct hfi1_msix_entry *msix = container_of(notify,
						    struct hfi1_msix_entry,
						    notify);

	/* Only one CPU configuration supported currently */
	hfi1_update_sdma_affinity(msix, cpu);
}

static void hfi1_irq_notifier_release(struct kref *ref)
{
	/*
	 * This is required by affinity notifier. We don't have anything to
	 * free here.
	 */
}

static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
{
	struct irq_affinity_notify *notify = &msix->notify;

	notify->irq = msix->irq;
	notify->notify = hfi1_irq_notifier_notify;
	notify->release = hfi1_irq_notifier_release;

	if (irq_set_affinity_notifier(notify->irq, notify))
		pr_err("Failed to register sdma irq affinity notifier for irq %d\n",
		       notify->irq);
}

static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
{
	struct irq_affinity_notify *notify = &msix->notify;

	if (irq_set_affinity_notifier(notify->irq, NULL))
		pr_err("Failed to cleanup sdma irq affinity notifier for irq %d\n",
		       notify->irq);
}

/*
 * Function sets the irq affinity for msix.
 * It *must* be called with node_affinity.lock held.
 */
static int get_irq_affinity(struct hfi1_devdata *dd,
			    struct hfi1_msix_entry *msix)
{
	cpumask_var_t diff;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set = NULL;
	struct sdma_engine *sde = NULL;
	struct hfi1_ctxtdata *rcd = NULL;
	char extra[64];
	int cpu = -1;

	extra[0] = '\0';
	cpumask_clear(&msix->mask);

	entry = node_affinity_lookup(dd->node);

	switch (msix->type) {
	case IRQ_SDMA:
		sde = (struct sdma_engine *)msix->arg;
		scnprintf(extra, 64, "engine %u", sde->this_idx);
		set = &entry->def_intr;
		break;
	case IRQ_GENERAL:
		cpu = cpumask_first(&entry->general_intr_mask);
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			cpu = cpumask_first(&entry->general_intr_mask);
		else
			set = &entry->rcv_intr;
		scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
		break;
	case IRQ_NETDEVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		set = &entry->def_intr;
		scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
		break;
	default:
		dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
		return -EINVAL;
	}

	/*
	 * The general and control contexts are placed on a particular
	 * CPU, which is set above. Skip accounting for it. Everything else
	 * finds its CPU here.
	 */
	if (cpu == -1 && set) {
		if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
			return -ENOMEM;

		cpu = cpu_mask_set_get_first(set, diff);
		if (cpu < 0) {
			free_cpumask_var(diff);
			dd_dev_err(dd, "Failure to obtain CPU for IRQ\n");
			return cpu;
		}

		free_cpumask_var(diff);
	}

	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_info(dd, "IRQ: %u, type %s %s -> cpu: %d\n",
		    msix->irq, irq_type_names[msix->type],
		    extra, cpu);
	irq_set_affinity_hint(msix->irq, &msix->mask);

	if (msix->type == IRQ_SDMA) {
		sde->cpu = cpu;
		hfi1_setup_sdma_notifier(msix);
	}

	return 0;
}

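/* Locked wrapper around get_irq_affinity() */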
int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{
	int ret;

	mutex_lock(&node_affinity.lock);
	ret = get_irq_affinity(dd, msix);
	mutex_unlock(&node_affinity.lock);
	return ret;
}

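/*
 * Undo the CPU accounting done by get_irq_affinity() and clear the
 * affinity hint for the IRQ.
 */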
void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
			   struct hfi1_msix_entry *msix)
{
	struct cpu_mask_set *set = NULL;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);

	switch (msix->type) {
	case IRQ_SDMA:
		set = &entry->def_intr;
		hfi1_cleanup_sdma_notifier(msix);
		break;
	case IRQ_GENERAL:
		/* Don't do accounting for general contexts */
		break;
	case IRQ_RCVCTXT: {
		struct hfi1_ctxtdata *rcd = msix->arg;

		/* Don't do accounting for control contexts */
		if (rcd->ctxt != HFI1_CTRL_CTXT)
			set = &entry->rcv_intr;
		break;
	}
	case IRQ_NETDEVCTXT:
		set = &entry->def_intr;
		break;
	default:
		mutex_unlock(&node_affinity.lock);
		return;
	}

	if (set) {
		cpumask_andnot(&set->used, &set->used, &msix->mask);
		_cpu_mask_set_gen_dec(set);
	}

	irq_set_affinity_hint(msix->irq, NULL);
	cpumask_clear(&msix->mask);
	mutex_unlock(&node_affinity.lock);
}

/* This should be called with node_affinity.lock held */
static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
				struct hfi1_affinity_node_list *affinity)
{
	int possible, curr_cpu, i;
	uint num_cores_per_socket = node_affinity.num_online_cpus /
					affinity->num_core_siblings /
						node_affinity.num_online_nodes;

	cpumask_copy(hw_thread_mask, &affinity->proc.mask);
	if (affinity->num_core_siblings > 0) {
		/* Removing other siblings not needed for now */
		possible = cpumask_weight(hw_thread_mask);
		curr_cpu = cpumask_first(hw_thread_mask);
		for (i = 0;
		     i < num_cores_per_socket * node_affinity.num_online_nodes;
		     i++)
			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);

		for (; i < possible; i++) {
			cpumask_clear_cpu(curr_cpu, hw_thread_mask);
			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
		}

		/* Identifying correct HW threads within physical cores */
		cpumask_shift_left(hw_thread_mask, hw_thread_mask,
				   num_cores_per_socket *
				   node_affinity.num_online_nodes *
				   hw_thread_no);
	}
}

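/* Recommend and reserve a CPU for a user process on the given NUMA node */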
int hfi1_get_proc_affinity(int node)
{
	int cpu = -1, ret, i;
	struct hfi1_affinity_node *entry;
	cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
	const struct cpumask *node_mask,
		*proc_mask = current->cpus_ptr;
	struct hfi1_affinity_node_list *affinity = &node_affinity;
	struct cpu_mask_set *set = &affinity->proc;

	/*
	 * check whether process/context affinity has already
	 * been set
	 */
	if (current->nr_cpus_allowed == 1) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		/*
		 * Mark the pre-set CPU as used. This is atomic so we don't
		 * need the lock
		 */
		cpu = cpumask_first(proc_mask);
		cpumask_set_cpu(cpu, &set->used);
		goto done;
	} else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		goto done;
	}

	/*
	 * The process does not have a preset CPU affinity so find one to
	 * recommend using the following algorithm:
	 *
	 * For each user process that is opening a context on HFI Y:
	 *  a) If all cores are filled, reinitialize the bitmask
	 *  b) Fill real cores first, then HT cores (First set of HT
	 *     cores on all physical cores, then second set of HT core,
	 *     and, so on) in the following order:
	 *
	 *     1. Same NUMA node as HFI Y and not running an IRQ
	 *        handler
	 *     2. Same NUMA node as HFI Y and running an IRQ handler
	 *     3. Different NUMA node to HFI Y and not running an IRQ
	 *        handler
	 *     4. Different NUMA node to HFI Y and running an IRQ
	 *        handler
	 *  c) Mark core as filled in the bitmask. As user processes are
	 *     done, clear cores from the bitmask.
	 */

	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
	if (!ret)
		goto done;
	ret = zalloc_cpumask_var(&hw_thread_mask, GFP_KERNEL);
	if (!ret)
		goto free_diff;
	ret = zalloc_cpumask_var(&available_mask, GFP_KERNEL);
	if (!ret)
		goto free_hw_thread_mask;
	ret = zalloc_cpumask_var(&intrs_mask, GFP_KERNEL);
	if (!ret)
		goto free_available_mask;

	mutex_lock(&affinity->lock);
	/*
	 * If we've used all available HW threads, clear the mask and start
	 * overloading.
	 */
	_cpu_mask_set_gen_inc(set);

	/*
	 * If NUMA node has CPUs used by interrupt handlers, include them in the
	 * interrupt handler mask.
	 */
	entry = node_affinity_lookup(node);
	if (entry) {
		cpumask_copy(intrs_mask, (entry->def_intr.gen ?
					  &entry->def_intr.mask :
					  &entry->def_intr.used));
		cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ?
						    &entry->rcv_intr.mask :
						    &entry->rcv_intr.used));
		cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask);
	}
	hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
		  cpumask_pr_args(intrs_mask));

	cpumask_copy(hw_thread_mask, &set->mask);

	/*
	 * If HT cores are enabled, identify which HW threads within the
	 * physical cores should be used.
	 */
	if (affinity->num_core_siblings > 0) {
		for (i = 0; i < affinity->num_core_siblings; i++) {
			find_hw_thread_mask(i, hw_thread_mask, affinity);

			/*
			 * If there's at least one available core for this HW
			 * thread number, stop looking for a core.
			 *
			 * diff will be non-empty for at least one iteration of
			 * this loop because the 'used' mask is reset when
			 * (set->mask == set->used) just before this loop.
			 */
			cpumask_andnot(diff, hw_thread_mask, &set->used);
			if (!cpumask_empty(diff))
				break;
		}
	}
	hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
		  cpumask_pr_args(hw_thread_mask));

	node_mask = cpumask_of_node(node);
	hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node,
		  cpumask_pr_args(node_mask));

	/* Get cpumask of available CPUs on preferred NUMA */
	cpumask_and(available_mask, hw_thread_mask, node_mask);
	cpumask_andnot(available_mask, available_mask, &set->used);
	hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node,
		  cpumask_pr_args(available_mask));

	/*
	 * At first, we don't want to place processes on the same
	 * CPUs as interrupt handlers. Then, CPUs running interrupt
	 * handlers are used.
	 *
	 * 1) If diff is not empty, then there are CPUs not running
	 *    non-interrupt handlers available, so diff gets copied
	 *    over to available_mask.
	 * 2) If diff is empty, then all CPUs not running interrupt
	 *    handlers are taken, so available_mask contains all
	 *    available CPUs running interrupt handlers.
	 * 3) If available_mask is empty, then all CPUs on the
	 *    preferred NUMA node are taken, so other NUMA nodes are
	 *    used for process assignments using the same method as
	 *    the preferred NUMA node.
	 */
	cpumask_andnot(diff, available_mask, intrs_mask);
	if (!cpumask_empty(diff))
		cpumask_copy(available_mask, diff);

	/* If we don't have CPUs on the preferred node, use other NUMA nodes */
	if (cpumask_empty(available_mask)) {
		cpumask_andnot(available_mask, hw_thread_mask, &set->used);
		/* Excluding preferred NUMA cores */
		cpumask_andnot(available_mask, available_mask, node_mask);
		hfi1_cdbg(PROC,
			  "Preferred NUMA node cores are taken, cores available in other NUMA nodes: %*pbl",
			  cpumask_pr_args(available_mask));

		/*
		 * At first, we don't want to place processes on the same
		 * CPUs as interrupt handlers.
		 */
		cpumask_andnot(diff, available_mask, intrs_mask);
		if (!cpumask_empty(diff))
			cpumask_copy(available_mask, diff);
	}
	hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
		  cpumask_pr_args(available_mask));

	cpu = cpumask_first(available_mask);
	if (cpu >= nr_cpu_ids) /* empty */
		cpu = -1;
	else
		cpumask_set_cpu(cpu, &set->used);

	mutex_unlock(&affinity->lock);
	hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);

	free_cpumask_var(intrs_mask);
free_available_mask:
	free_cpumask_var(available_mask);
free_hw_thread_mask:
	free_cpumask_var(hw_thread_mask);
free_diff:
	free_cpumask_var(diff);
done:
	return cpu;
}

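/* Return a CPU reserved by hfi1_get_proc_affinity() to the process pool */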
void hfi1_put_proc_affinity(int cpu)
{
	struct hfi1_affinity_node_list *affinity = &node_affinity;
	struct cpu_mask_set *set = &affinity->proc;

	if (cpu < 0)
		return;

	mutex_lock(&affinity->lock);
	cpu_mask_set_put(set, cpu);
	hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
	mutex_unlock(&affinity->lock);
}