/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/interrupt.h>

#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "trace.h"

struct hfi1_affinity_node_list node_affinity = {
	.list = LIST_HEAD_INIT(node_affinity.list),
	.lock = __MUTEX_INITIALIZER(node_affinity.lock)
};

/* Name of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
	"SDMA",
	"RCVCTXT",
	"GENERAL",
	"OTHER",
};

/* Per NUMA node count of HFI devices */
static unsigned int *hfi1_per_node_cntr;

static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{
	cpumask_clear(&set->mask);
	cpumask_clear(&set->used);
	set->gen = 0;
}

/* Increment generation of CPU set if needed */
static void _cpu_mask_set_gen_inc(struct cpu_mask_set *set)
{
	if (cpumask_equal(&set->mask, &set->used)) {
		/*
		 * We've used up all the CPUs, bump up the generation
		 * and reset the 'used' map
		 */
		set->gen++;
		cpumask_clear(&set->used);
	}
}

static void _cpu_mask_set_gen_dec(struct cpu_mask_set *set)
{
	if (cpumask_empty(&set->used) && set->gen) {
		set->gen--;
		cpumask_copy(&set->used, &set->mask);
	}
}

/* Get the first CPU from the list of unused CPUs in a CPU set data structure */
static int cpu_mask_set_get_first(struct cpu_mask_set *set, cpumask_var_t diff)
{
	int cpu;

	if (!diff || !set)
		return -EINVAL;

	_cpu_mask_set_gen_inc(set);

	/* Find out CPUs left in CPU mask */
	cpumask_andnot(diff, &set->mask, &set->used);

	cpu = cpumask_first(diff);
	if (cpu >= nr_cpu_ids) /* empty */
		cpu = -EINVAL;
	else
		cpumask_set_cpu(cpu, &set->used);

	return cpu;
}

static void cpu_mask_set_put(struct cpu_mask_set *set, int cpu)
{
	if (!set)
		return;

	cpumask_clear_cpu(cpu, &set->used);
	_cpu_mask_set_gen_dec(set);
}

/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{
	int possible, curr_cpu, i, ht;

	cpumask_clear(&node_affinity.real_cpu_mask);

	/* Start with cpu online mask as the real cpu mask */
	cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);

	/*
	 * Remove HT cores from the real cpu mask. Do this in two steps below.
	 */
	possible = cpumask_weight(&node_affinity.real_cpu_mask);
	ht = cpumask_weight(topology_sibling_cpumask(
				cpumask_first(&node_affinity.real_cpu_mask)));
	/*
	 * Step 1. Skip over the first N HT siblings and use them as the
	 * "real" cores. Assumes that HT cores are not enumerated in
	 * succession (except in the single core case).
	 */
	curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
	for (i = 0; i < possible / ht; i++)
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	/*
	 * Step 2. Remove the remaining HT siblings. Use cpumask_next() to
	 * skip any gaps.
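	 *
	 * For example (illustrative only): with 8 online CPUs and 2 HT
	 * siblings per core, enumerated as CPUs 0-3 (first threads) and
	 * CPUs 4-7 (their siblings), step 1 walks past CPUs 0-3 and step 2
	 * clears CPUs 4-7, leaving one "real" CPU per physical core.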
	 */
	for (; i < possible; i++) {
		cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	}
}

int node_affinity_init(void)
{
	int node;
	struct pci_dev *dev = NULL;
	const struct pci_device_id *ids = hfi1_pci_tbl;

	cpumask_clear(&node_affinity.proc.used);
	cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);

	node_affinity.proc.gen = 0;
	node_affinity.num_core_siblings =
		cpumask_weight(topology_sibling_cpumask(
			cpumask_first(&node_affinity.proc.mask)
			));
	node_affinity.num_possible_nodes = num_possible_nodes();
	node_affinity.num_online_nodes = num_online_nodes();
	node_affinity.num_online_cpus = num_online_cpus();

	/*
	 * The real cpu mask is part of the affinity struct but it has to be
	 * initialized early. It is needed to calculate the number of user
	 * contexts in set_up_context_variables().
	 */
	init_real_cpu_mask();

	hfi1_per_node_cntr = kcalloc(node_affinity.num_possible_nodes,
				     sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
	if (!hfi1_per_node_cntr)
		return -ENOMEM;

	while (ids->vendor) {
		dev = NULL;
		while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
			node = pcibus_to_node(dev->bus);
			if (node < 0)
				node = numa_node_id();

			hfi1_per_node_cntr[node]++;
		}
		ids++;
	}

	return 0;
}

static void node_affinity_destroy(struct hfi1_affinity_node *entry)
{
	free_percpu(entry->comp_vect_affinity);
	kfree(entry);
}

void node_affinity_destroy_all(void)
{
	struct list_head *pos, *q;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	list_for_each_safe(pos, q, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node,
				   list);
		list_del(pos);
		node_affinity_destroy(entry);
	}
	mutex_unlock(&node_affinity.lock);
	kfree(hfi1_per_node_cntr);
}

static struct hfi1_affinity_node *node_affinity_allocate(int node)
{
	struct hfi1_affinity_node *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;
	entry->node = node;
	entry->comp_vect_affinity = alloc_percpu(u16);
	INIT_LIST_HEAD(&entry->list);

	return entry;
}

/*
 * It appends an entry to the list.
 * It *must* be called with node_affinity.lock held.
 */
static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
{
	list_add_tail(&entry->list, &node_affinity.list);
}

/* It must be called with node_affinity.lock held */
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{
	struct list_head *pos;
	struct hfi1_affinity_node *entry;

	list_for_each(pos, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node, list);
		if (entry->node == node)
			return entry;
	}

	return NULL;
}

static int per_cpu_affinity_get(cpumask_var_t possible_cpumask,
				u16 __percpu *comp_vect_affinity)
{
	int curr_cpu;
	u16 cntr;
	u16 prev_cntr;
	int ret_cpu;

	if (!possible_cpumask) {
		ret_cpu = -EINVAL;
		goto fail;
	}

	if (!comp_vect_affinity) {
		ret_cpu = -EINVAL;
		goto fail;
	}

	ret_cpu = cpumask_first(possible_cpumask);
	if (ret_cpu >= nr_cpu_ids) {
		ret_cpu = -EINVAL;
		goto fail;
	}

	prev_cntr = *per_cpu_ptr(comp_vect_affinity, ret_cpu);
	for_each_cpu(curr_cpu, possible_cpumask) {
		cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu);

		if (cntr < prev_cntr) {
			ret_cpu = curr_cpu;
			prev_cntr = cntr;
		}
	}

	*per_cpu_ptr(comp_vect_affinity, ret_cpu) += 1;

fail:
	return ret_cpu;
}

static int per_cpu_affinity_put_max(cpumask_var_t possible_cpumask,
				    u16 __percpu *comp_vect_affinity)
{
	int curr_cpu;
	int max_cpu;
	u16 cntr;
	u16 prev_cntr;

	if (!possible_cpumask)
		return -EINVAL;

	if (!comp_vect_affinity)
		return -EINVAL;

	max_cpu = cpumask_first(possible_cpumask);
	if (max_cpu >= nr_cpu_ids)
		return -EINVAL;

	prev_cntr = *per_cpu_ptr(comp_vect_affinity, max_cpu);
	for_each_cpu(curr_cpu, possible_cpumask) {
		cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu);

		if (cntr > prev_cntr) {
			max_cpu = curr_cpu;
			prev_cntr = cntr;
		}
	}

	*per_cpu_ptr(comp_vect_affinity, max_cpu) -= 1;

	return max_cpu;
}

/*
 * Non-interrupt CPUs are used first, then interrupt CPUs.
 * Two already allocated cpu masks must be passed.
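 * Returns the chosen CPU, or -1 if no CPU in the device's completion
 * vector set is available.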
 */
static int _dev_comp_vect_cpu_get(struct hfi1_devdata *dd,
				  struct hfi1_affinity_node *entry,
				  cpumask_var_t non_intr_cpus,
				  cpumask_var_t available_cpus)
	__must_hold(&node_affinity.lock)
{
	int cpu;
	struct cpu_mask_set *set = dd->comp_vect;

	lockdep_assert_held(&node_affinity.lock);
	if (!non_intr_cpus) {
		cpu = -1;
		goto fail;
	}

	if (!available_cpus) {
		cpu = -1;
		goto fail;
	}

	/* Available CPUs for pinning completion vectors */
	_cpu_mask_set_gen_inc(set);
	cpumask_andnot(available_cpus, &set->mask, &set->used);

	/* Available CPUs without SDMA engine interrupts */
	cpumask_andnot(non_intr_cpus, available_cpus,
		       &entry->def_intr.used);

	/* If there are non-interrupt CPUs available, use them first */
	if (!cpumask_empty(non_intr_cpus))
		cpu = cpumask_first(non_intr_cpus);
	else /* Otherwise, use interrupt CPUs */
		cpu = cpumask_first(available_cpus);

	if (cpu >= nr_cpu_ids) { /* empty */
		cpu = -1;
		goto fail;
	}
	cpumask_set_cpu(cpu, &set->used);

fail:
	return cpu;
}

static void _dev_comp_vect_cpu_put(struct hfi1_devdata *dd, int cpu)
{
	struct cpu_mask_set *set = dd->comp_vect;

	if (cpu < 0)
		return;

	cpu_mask_set_put(set, cpu);
}

/* _dev_comp_vect_mappings_destroy() is reentrant */
static void _dev_comp_vect_mappings_destroy(struct hfi1_devdata *dd)
{
	int i, cpu;

	if (!dd->comp_vect_mappings)
		return;

	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
		cpu = dd->comp_vect_mappings[i];
		_dev_comp_vect_cpu_put(dd, cpu);
		dd->comp_vect_mappings[i] = -1;
		hfi1_cdbg(AFFINITY,
			  "[%s] Release CPU %d from completion vector %d",
			  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), cpu, i);
	}

	kfree(dd->comp_vect_mappings);
	dd->comp_vect_mappings = NULL;
}

/*
 * This function creates the table for looking up CPUs for completion vectors.
 * num_comp_vectors needs to have been initialized before calling this function.
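 * Returns 0 on success, -ENOMEM if the lookup table or scratch cpumasks
 * cannot be allocated, or -EINVAL if no CPU can be found for a vector.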
 */
static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
					  struct hfi1_affinity_node *entry)
	__must_hold(&node_affinity.lock)
{
	int i, cpu, ret;
	cpumask_var_t non_intr_cpus;
	cpumask_var_t available_cpus;

	lockdep_assert_held(&node_affinity.lock);

	if (!zalloc_cpumask_var(&non_intr_cpus, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(&available_cpus, GFP_KERNEL)) {
		free_cpumask_var(non_intr_cpus);
		return -ENOMEM;
	}

	dd->comp_vect_mappings = kcalloc(dd->comp_vect_possible_cpus,
					 sizeof(*dd->comp_vect_mappings),
					 GFP_KERNEL);
	if (!dd->comp_vect_mappings) {
		ret = -ENOMEM;
		goto fail;
	}
	for (i = 0; i < dd->comp_vect_possible_cpus; i++)
		dd->comp_vect_mappings[i] = -1;

	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
		cpu = _dev_comp_vect_cpu_get(dd, entry, non_intr_cpus,
					     available_cpus);
		if (cpu < 0) {
			ret = -EINVAL;
			goto fail;
		}

		dd->comp_vect_mappings[i] = cpu;
		hfi1_cdbg(AFFINITY,
			  "[%s] Completion Vector %d -> CPU %d",
			  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
	}

	return 0;

fail:
	free_cpumask_var(available_cpus);
	free_cpumask_var(non_intr_cpus);
	_dev_comp_vect_mappings_destroy(dd);

	return ret;
}

int hfi1_comp_vectors_set_up(struct hfi1_devdata *dd)
{
	int ret;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	if (!entry) {
		ret = -EINVAL;
		goto unlock;
	}
	ret = _dev_comp_vect_mappings_create(dd, entry);
unlock:
	mutex_unlock(&node_affinity.lock);

	return ret;
}

void hfi1_comp_vectors_clean_up(struct hfi1_devdata *dd)
{
	_dev_comp_vect_mappings_destroy(dd);
}

int hfi1_comp_vect_mappings_lookup(struct rvt_dev_info *rdi, int comp_vect)
{
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);

	if (!dd->comp_vect_mappings)
		return -EINVAL;
	if (comp_vect >= dd->comp_vect_possible_cpus)
		return -EINVAL;

	return dd->comp_vect_mappings[comp_vect];
}

/*
 * It assumes dd->comp_vect_possible_cpus is available.
 */
static int _dev_comp_vect_cpu_mask_init(struct hfi1_devdata *dd,
					struct hfi1_affinity_node *entry,
					bool first_dev_init)
	__must_hold(&node_affinity.lock)
{
	int i, j, curr_cpu;
	int possible_cpus_comp_vect = 0;
	struct cpumask *dev_comp_vect_mask = &dd->comp_vect->mask;

	lockdep_assert_held(&node_affinity.lock);
	/*
	 * If there's only one CPU available for completion vectors, then
	 * there will only be one completion vector available. Otherwise,
	 * the number of completion vectors available will be the number of
	 * available CPUs divided by the number of devices in the
	 * local NUMA node.
	 */
	if (cpumask_weight(&entry->comp_vect_mask) == 1) {
		possible_cpus_comp_vect = 1;
		dd_dev_warn(dd,
			    "Number of kernel receive queues is too large for completion vector affinity to be effective\n");
	} else {
		possible_cpus_comp_vect +=
			cpumask_weight(&entry->comp_vect_mask) /
			hfi1_per_node_cntr[dd->node];

		/*
		 * If the available completion vector CPUs don't divide
		 * evenly among devices, then the first device to be
		 * initialized gets an extra CPU.
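		 *
		 * For example (illustrative only): with 7 completion vector
		 * CPUs on the node and 2 devices, the first device gets
		 * 7 / 2 + 1 = 4 CPUs and the second gets 3.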
		 */
		if (first_dev_init &&
		    cpumask_weight(&entry->comp_vect_mask) %
		    hfi1_per_node_cntr[dd->node] != 0)
			possible_cpus_comp_vect++;
	}

	dd->comp_vect_possible_cpus = possible_cpus_comp_vect;

	/* Reserving CPUs for device completion vector */
	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
		curr_cpu = per_cpu_affinity_get(&entry->comp_vect_mask,
						entry->comp_vect_affinity);
		if (curr_cpu < 0)
			goto fail;

		cpumask_set_cpu(curr_cpu, dev_comp_vect_mask);
	}

	hfi1_cdbg(AFFINITY,
		  "[%s] Completion vector affinity CPU set(s) %*pbl",
		  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi),
		  cpumask_pr_args(dev_comp_vect_mask));

	return 0;

fail:
	for (j = 0; j < i; j++)
		per_cpu_affinity_put_max(&entry->comp_vect_mask,
					 entry->comp_vect_affinity);

	return curr_cpu;
}

/*
 * It assumes dd->comp_vect_possible_cpus is available.
 */
static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
					     struct hfi1_affinity_node *entry)
	__must_hold(&node_affinity.lock)
{
	int i, cpu;

	lockdep_assert_held(&node_affinity.lock);
	if (!dd->comp_vect_possible_cpus)
		return;

	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
		cpu = per_cpu_affinity_put_max(&dd->comp_vect->mask,
					       entry->comp_vect_affinity);
		/* Clearing CPU in device completion vector cpu mask */
		if (cpu >= 0)
			cpumask_clear_cpu(cpu, &dd->comp_vect->mask);
	}

	dd->comp_vect_possible_cpus = 0;
}

/*
 * Interrupt affinity.
 *
 * non-rcv avail gets a default mask that
 * starts as possible cpus with threads reset
 * and each rcv avail reset.
 *
 * rcv avail gets node relative 1 wrapping back
 * to the node relative 1 as necessary.
 *
 */
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
	int node = pcibus_to_node(dd->pcidev->bus);
	struct hfi1_affinity_node *entry;
	const struct cpumask *local_mask;
	int curr_cpu, possible, i, ret;
	bool new_entry = false;

	if (node < 0)
		node = numa_node_id();
	dd->node = node;

	local_mask = cpumask_of_node(dd->node);
	if (cpumask_first(local_mask) >= nr_cpu_ids)
		local_mask = topology_core_cpumask(0);

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);

	/*
	 * If this is the first time this NUMA node's affinity is used,
	 * create an entry in the global affinity structure and initialize it.
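	 * The entry is shared by all HFI devices on this node: it holds the
	 * per-node SDMA, receive, general interrupt and completion vector
	 * CPU masks that subsequent devices reuse.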
	 */
	if (!entry) {
		entry = node_affinity_allocate(node);
		if (!entry) {
			dd_dev_err(dd,
				   "Unable to allocate global affinity node\n");
			ret = -ENOMEM;
			goto fail;
		}
		new_entry = true;

		init_cpu_mask_set(&entry->def_intr);
		init_cpu_mask_set(&entry->rcv_intr);
		cpumask_clear(&entry->comp_vect_mask);
		cpumask_clear(&entry->general_intr_mask);
		/* Use the "real" cpu mask of this node as the default */
		cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
			    local_mask);

		/* fill in the receive list */
		possible = cpumask_weight(&entry->def_intr.mask);
		curr_cpu = cpumask_first(&entry->def_intr.mask);

		if (possible == 1) {
			/* only one CPU, everyone will use it */
			cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
		} else {
			/*
			 * The general/control context will be the first CPU in
			 * the default list, so it is removed from the default
			 * list and added to the general interrupt list.
			 */
			cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
			curr_cpu = cpumask_next(curr_cpu,
						&entry->def_intr.mask);

			/*
			 * Remove the remaining kernel receive queues from
			 * the default list and add them to the receive list.
			 */
			for (i = 0;
			     i < (dd->n_krcv_queues - 1) *
				  hfi1_per_node_cntr[dd->node];
			     i++) {
				cpumask_clear_cpu(curr_cpu,
						  &entry->def_intr.mask);
				cpumask_set_cpu(curr_cpu,
						&entry->rcv_intr.mask);
				curr_cpu = cpumask_next(curr_cpu,
							&entry->def_intr.mask);
				if (curr_cpu >= nr_cpu_ids)
					break;
			}

			/*
			 * If there ends up being 0 CPU cores leftover for SDMA
			 * engines, use the same CPU cores as general/control
			 * context.
			 */
			if (cpumask_weight(&entry->def_intr.mask) == 0)
				cpumask_copy(&entry->def_intr.mask,
					     &entry->general_intr_mask);
		}

		/* Determine completion vector CPUs for the entire node */
		cpumask_and(&entry->comp_vect_mask,
			    &node_affinity.real_cpu_mask, local_mask);
		cpumask_andnot(&entry->comp_vect_mask,
			       &entry->comp_vect_mask,
			       &entry->rcv_intr.mask);
		cpumask_andnot(&entry->comp_vect_mask,
			       &entry->comp_vect_mask,
			       &entry->general_intr_mask);

		/*
		 * If there ends up being 0 CPU cores leftover for completion
		 * vectors, use the same CPU core as the general/control
		 * context.
		 */
		if (cpumask_weight(&entry->comp_vect_mask) == 0)
			cpumask_copy(&entry->comp_vect_mask,
				     &entry->general_intr_mask);
	}

	ret = _dev_comp_vect_cpu_mask_init(dd, entry, new_entry);
	if (ret < 0)
		goto fail;

	if (new_entry)
		node_affinity_add_tail(entry);

	mutex_unlock(&node_affinity.lock);

	return 0;

fail:
	if (new_entry)
		node_affinity_destroy(entry);
	mutex_unlock(&node_affinity.lock);
	return ret;
}

void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
{
	struct hfi1_affinity_node *entry;

	if (dd->node < 0)
		return;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	if (!entry)
		goto unlock;

	/*
	 * Free device completion vector CPUs to be used by future
	 * completion vectors
	 */
	_dev_comp_vect_cpu_mask_clean_up(dd, entry);
unlock:
	mutex_unlock(&node_affinity.lock);
	dd->node = -1;
}

/*
 * Function updates the irq affinity hint for msix after it has been changed
 * by the user using the /proc/irq interface. This function only accepts
 * one cpu in the mask.
 */
static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
{
	struct sdma_engine *sde = msix->arg;
	struct hfi1_devdata *dd = sde->dd;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set;
	int i, old_cpu;

	if (cpu > num_online_cpus() || cpu == sde->cpu)
		return;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	if (!entry)
		goto unlock;

	old_cpu = sde->cpu;
	sde->cpu = cpu;
	cpumask_clear(&msix->mask);
	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_dbg(dd, "IRQ: %u, type %s engine %u -> cpu: %d\n",
		   msix->irq, irq_type_names[msix->type],
		   sde->this_idx, cpu);
	irq_set_affinity_hint(msix->irq, &msix->mask);

	/*
	 * Set the new cpu in the hfi1_affinity_node and clean
	 * the old cpu if it is not used by any other IRQ
	 */
	set = &entry->def_intr;
	cpumask_set_cpu(cpu, &set->mask);
	cpumask_set_cpu(cpu, &set->used);
	for (i = 0; i < dd->num_msix_entries; i++) {
		struct hfi1_msix_entry *other_msix;

		other_msix = &dd->msix_entries[i];
		if (other_msix->type != IRQ_SDMA || other_msix == msix)
			continue;

		if (cpumask_test_cpu(old_cpu, &other_msix->mask))
			goto unlock;
	}
	cpumask_clear_cpu(old_cpu, &set->mask);
	cpumask_clear_cpu(old_cpu, &set->used);
unlock:
	mutex_unlock(&node_affinity.lock);
}

static void hfi1_irq_notifier_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	int cpu = cpumask_first(mask);
	struct hfi1_msix_entry *msix = container_of(notify,
						    struct hfi1_msix_entry,
						    notify);

	/* Only one CPU configuration supported currently */
	hfi1_update_sdma_affinity(msix, cpu);
}

static void hfi1_irq_notifier_release(struct kref *ref)
{
	/*
	 * This is required by affinity notifier. We don't have anything to
	 * free here.
	 */
}

static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
{
	struct irq_affinity_notify *notify = &msix->notify;

	notify->irq = msix->irq;
	notify->notify = hfi1_irq_notifier_notify;
	notify->release = hfi1_irq_notifier_release;

	if (irq_set_affinity_notifier(notify->irq, notify))
		pr_err("Failed to register sdma irq affinity notifier for irq %d\n",
		       notify->irq);
}

static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
{
	struct irq_affinity_notify *notify = &msix->notify;

	if (irq_set_affinity_notifier(notify->irq, NULL))
		pr_err("Failed to cleanup sdma irq affinity notifier for irq %d\n",
		       notify->irq);
}

/*
 * Function sets the irq affinity for msix.
 * It *must* be called with node_affinity.lock held.
 */
static int get_irq_affinity(struct hfi1_devdata *dd,
			    struct hfi1_msix_entry *msix)
{
	cpumask_var_t diff;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set = NULL;
	struct sdma_engine *sde = NULL;
	struct hfi1_ctxtdata *rcd = NULL;
	char extra[64];
	int cpu = -1;

	extra[0] = '\0';
	cpumask_clear(&msix->mask);

	entry = node_affinity_lookup(dd->node);

	switch (msix->type) {
	case IRQ_SDMA:
		sde = (struct sdma_engine *)msix->arg;
		scnprintf(extra, 64, "engine %u", sde->this_idx);
		set = &entry->def_intr;
		break;
	case IRQ_GENERAL:
		cpu = cpumask_first(&entry->general_intr_mask);
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			cpu = cpumask_first(&entry->general_intr_mask);
		else
			set = &entry->rcv_intr;
		scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
		break;
	default:
		dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
		return -EINVAL;
	}

	/*
	 * The general and control contexts are placed on a particular
	 * CPU, which is set above. Skip accounting for it. Everything else
	 * finds its CPU here.
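	 * cpu_mask_set_get_first() records the chosen CPU in the set's
	 * 'used' mask so that later IRQs of the same type are spread
	 * across the remaining CPUs of the set.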
	 */
	if (cpu == -1 && set) {
		if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
			return -ENOMEM;

		cpu = cpu_mask_set_get_first(set, diff);
		if (cpu < 0) {
			free_cpumask_var(diff);
			dd_dev_err(dd, "Failure to obtain CPU for IRQ\n");
			return cpu;
		}

		free_cpumask_var(diff);
	}

	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_info(dd, "IRQ: %u, type %s %s -> cpu: %d\n",
		    msix->irq, irq_type_names[msix->type],
		    extra, cpu);
	irq_set_affinity_hint(msix->irq, &msix->mask);

	if (msix->type == IRQ_SDMA) {
		sde->cpu = cpu;
		hfi1_setup_sdma_notifier(msix);
	}

	return 0;
}

int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{
	int ret;

	mutex_lock(&node_affinity.lock);
	ret = get_irq_affinity(dd, msix);
	mutex_unlock(&node_affinity.lock);
	return ret;
}

void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
			   struct hfi1_msix_entry *msix)
{
	struct cpu_mask_set *set = NULL;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);

	switch (msix->type) {
	case IRQ_SDMA:
		set = &entry->def_intr;
		hfi1_cleanup_sdma_notifier(msix);
		break;
	case IRQ_GENERAL:
		/* Don't do accounting for general contexts */
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		/* Don't do accounting for control contexts */
		if (rcd->ctxt != HFI1_CTRL_CTXT)
			set = &entry->rcv_intr;
		break;
	default:
		mutex_unlock(&node_affinity.lock);
		return;
	}

	if (set) {
		cpumask_andnot(&set->used, &set->used, &msix->mask);
		_cpu_mask_set_gen_dec(set);
	}

	irq_set_affinity_hint(msix->irq, NULL);
	cpumask_clear(&msix->mask);
	mutex_unlock(&node_affinity.lock);
}

/* This should be called with node_affinity.lock held */
static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
				struct hfi1_affinity_node_list *affinity)
{
	int possible, curr_cpu, i;
	uint num_cores_per_socket = node_affinity.num_online_cpus /
					affinity->num_core_siblings /
						node_affinity.num_online_nodes;

	cpumask_copy(hw_thread_mask, &affinity->proc.mask);
	if (affinity->num_core_siblings > 0) {
		/* Removing other siblings not needed for now */
		possible = cpumask_weight(hw_thread_mask);
		curr_cpu = cpumask_first(hw_thread_mask);
		for (i = 0;
		     i < num_cores_per_socket * node_affinity.num_online_nodes;
		     i++)
			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);

		for (; i < possible; i++) {
			cpumask_clear_cpu(curr_cpu, hw_thread_mask);
			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
		}

		/* Identifying correct HW threads within physical cores */
		cpumask_shift_left(hw_thread_mask, hw_thread_mask,
				   num_cores_per_socket *
				   node_affinity.num_online_nodes *
				   hw_thread_no);
	}
}

int hfi1_get_proc_affinity(int node)
{
	int cpu = -1, ret, i;
	struct hfi1_affinity_node *entry;
	cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
	const struct cpumask *node_mask,
		*proc_mask = &current->cpus_allowed;
	struct hfi1_affinity_node_list *affinity = &node_affinity;
	struct cpu_mask_set *set = &affinity->proc;

	/*
	 * check whether process/context affinity has already
	 * been set
	 */
	if (cpumask_weight(proc_mask) == 1) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		/*
		 * Mark the pre-set CPU as used. This is atomic so we don't
		 * need the lock
		 */
		cpu = cpumask_first(proc_mask);
		cpumask_set_cpu(cpu, &set->used);
		goto done;
	} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		goto done;
	}

	/*
	 * The process does not have a preset CPU affinity so find one to
	 * recommend using the following algorithm:
	 *
	 * For each user process that is opening a context on HFI Y:
	 *  a) If all cores are filled, reinitialize the bitmask
	 *  b) Fill real cores first, then HT cores (first set of HT
	 *     cores on all physical cores, then second set of HT cores,
	 *     and so on) in the following order:
	 *
	 *     1. Same NUMA node as HFI Y and not running an IRQ
	 *        handler
	 *     2. Same NUMA node as HFI Y and running an IRQ handler
	 *     3. Different NUMA node to HFI Y and not running an IRQ
	 *        handler
	 *     4. Different NUMA node to HFI Y and running an IRQ
	 *        handler
	 *  c) Mark core as filled in the bitmask. As user processes are
	 *     done, clear cores from the bitmask.
	 */

	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
	if (!ret)
		goto done;
	ret = zalloc_cpumask_var(&hw_thread_mask, GFP_KERNEL);
	if (!ret)
		goto free_diff;
	ret = zalloc_cpumask_var(&available_mask, GFP_KERNEL);
	if (!ret)
		goto free_hw_thread_mask;
	ret = zalloc_cpumask_var(&intrs_mask, GFP_KERNEL);
	if (!ret)
		goto free_available_mask;

	mutex_lock(&affinity->lock);
	/*
	 * If we've used all available HW threads, clear the mask and start
	 * overloading.
	 */
	_cpu_mask_set_gen_inc(set);

	/*
	 * If the NUMA node has CPUs used by interrupt handlers, include them
	 * in the interrupt handler mask.
	 */
	entry = node_affinity_lookup(node);
	if (entry) {
		cpumask_copy(intrs_mask, (entry->def_intr.gen ?
					  &entry->def_intr.mask :
					  &entry->def_intr.used));
		cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ?
						    &entry->rcv_intr.mask :
						    &entry->rcv_intr.used));
		cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask);
	}
	hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
		  cpumask_pr_args(intrs_mask));

	cpumask_copy(hw_thread_mask, &set->mask);

	/*
	 * If HT cores are enabled, identify which HW threads within the
	 * physical cores should be used.
	 */
	if (affinity->num_core_siblings > 0) {
		for (i = 0; i < affinity->num_core_siblings; i++) {
			find_hw_thread_mask(i, hw_thread_mask, affinity);

			/*
			 * If there's at least one available core for this HW
			 * thread number, stop looking for a core.
			 *
			 * diff is guaranteed to be non-empty for at least one
			 * iteration of this loop, as the used mask gets reset
			 * when (set->mask == set->used) before this loop.
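			 *
			 * For example (illustrative only): with two HW
			 * threads per core, iteration 0 tries the mask of
			 * each core's first HW thread; only when that whole
			 * set is in use does iteration 1 fall back to the
			 * second HW threads.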
			 */
			cpumask_andnot(diff, hw_thread_mask, &set->used);
			if (!cpumask_empty(diff))
				break;
		}
	}
	hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
		  cpumask_pr_args(hw_thread_mask));

	node_mask = cpumask_of_node(node);
	hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node,
		  cpumask_pr_args(node_mask));

	/* Get cpumask of available CPUs on preferred NUMA */
	cpumask_and(available_mask, hw_thread_mask, node_mask);
	cpumask_andnot(available_mask, available_mask, &set->used);
	hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node,
		  cpumask_pr_args(available_mask));

	/*
	 * At first, we don't want to place processes on the same
	 * CPUs as interrupt handlers. Then, CPUs running interrupt
	 * handlers are used.
	 *
	 * 1) If diff is not empty, then there are CPUs not running
	 *    interrupt handlers available, so diff gets copied
	 *    over to available_mask.
	 * 2) If diff is empty, then all CPUs not running interrupt
	 *    handlers are taken, so available_mask contains all
	 *    available CPUs running interrupt handlers.
	 * 3) If available_mask is empty, then all CPUs on the
	 *    preferred NUMA node are taken, so other NUMA nodes are
	 *    used for process assignments using the same method as
	 *    the preferred NUMA node.
	 */
	cpumask_andnot(diff, available_mask, intrs_mask);
	if (!cpumask_empty(diff))
		cpumask_copy(available_mask, diff);

	/* If we don't have CPUs on the preferred node, use other NUMA nodes */
	if (cpumask_empty(available_mask)) {
		cpumask_andnot(available_mask, hw_thread_mask, &set->used);
		/* Excluding preferred NUMA cores */
		cpumask_andnot(available_mask, available_mask, node_mask);
		hfi1_cdbg(PROC,
			  "Preferred NUMA node cores are taken, cores available in other NUMA nodes: %*pbl",
			  cpumask_pr_args(available_mask));

		/*
		 * At first, we don't want to place processes on the same
		 * CPUs as interrupt handlers.
		 */
		cpumask_andnot(diff, available_mask, intrs_mask);
		if (!cpumask_empty(diff))
			cpumask_copy(available_mask, diff);
	}
	hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
		  cpumask_pr_args(available_mask));

	cpu = cpumask_first(available_mask);
	if (cpu >= nr_cpu_ids) /* empty */
		cpu = -1;
	else
		cpumask_set_cpu(cpu, &set->used);

	mutex_unlock(&affinity->lock);
	hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);

	free_cpumask_var(intrs_mask);
free_available_mask:
	free_cpumask_var(available_mask);
free_hw_thread_mask:
	free_cpumask_var(hw_thread_mask);
free_diff:
	free_cpumask_var(diff);
done:
	return cpu;
}

void hfi1_put_proc_affinity(int cpu)
{
	struct hfi1_affinity_node_list *affinity = &node_affinity;
	struct cpu_mask_set *set = &affinity->proc;

	if (cpu < 0)
		return;

	mutex_lock(&affinity->lock);
	cpu_mask_set_put(set, cpu);
	hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
	mutex_unlock(&affinity->lock);
}