// SPDX-License-Identifier: GPL-2.0-only
/*
 * Local APIC related interfaces to support IOAPIC, MSI, etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

#include <asm/trace/irq_vectors.h>

struct apic_chip_data {
	struct irq_cfg		hw_irq_cfg;
	unsigned int		vector;
	unsigned int		prev_vector;
	unsigned int		cpu;
	unsigned int		prev_cpu;
	unsigned int		irq;
	struct hlist_node	clist;
	unsigned int		move_in_progress	: 1,
				is_managed		: 1,
				can_reserve		: 1,
				has_reserved		: 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif

void lock_vector_lock(void)
{
	/* Used so that the online set of cpus does not change
	 * during assign_irq_vector().
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}

static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
	if (!irqd)
		return NULL;

	while (irqd->parent_data)
		irqd = irqd->parent_data;

	return irqd->chip_data;
}
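
/*
 * irqd_cfg() - Retrieve the hardware interrupt configuration for @irqd.
 *
 * Walks up the irqdomain hierarchy to the vector domain's irq_data and
 * returns a pointer to its struct irq_cfg (vector and destination APIC
 * ID), or NULL when no chip data is associated.
 */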
struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *apicd;

	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
	if (apicd)
		INIT_HLIST_NODE(&apicd->clist);
	return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	kfree(apicd);
}

static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
				unsigned int cpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	lockdep_assert_held(&vector_lock);

	apicd->hw_irq_cfg.vector = vector;
	apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
	trace_vector_config(irqd->irq, vector, cpu,
			    apicd->hw_irq_cfg.dest_apicid);
}
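
/*
 * apic_update_vector() - Install @newvec on @newcpu for @irqd.
 *
 * The old vector, if any, is either queued for cleanup (old target CPU
 * still online) or freed in the matrix allocator right away (old
 * target CPU offline); see the comments in the function body.
 */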
static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
			       unsigned int newcpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	struct irq_desc *desc = irq_data_to_desc(irqd);
	bool managed = irqd_affinity_is_managed(irqd);

	lockdep_assert_held(&vector_lock);

	trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
			    apicd->cpu);

	/*
	 * If there is no vector associated or if the associated vector is
	 * the shutdown vector, which is associated to make PCI/MSI
	 * shutdown mode work, then there is nothing to release. Clear out
	 * prev_vector for this and the offlined target case.
	 */
	apicd->prev_vector = 0;
	if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
		goto setnew;
	/*
	 * If the target CPU of the previous vector is online, then mark
	 * the vector as move in progress and store it for cleanup when the
	 * first interrupt on the new vector arrives. If the target CPU is
	 * offline then the regular release mechanism via the cleanup
	 * vector is not possible and the vector can be immediately freed
	 * in the underlying matrix allocator.
	 */
	if (cpu_online(apicd->cpu)) {
		apicd->move_in_progress = true;
		apicd->prev_vector = apicd->vector;
		apicd->prev_cpu = apicd->cpu;
	} else {
		irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
				managed);
	}

setnew:
	apicd->vector = newvec;
	apicd->cpu = newcpu;
	BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
	per_cpu(vector_irq, newcpu)[newvec] = desc;
}

static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
	unsigned int cpu = cpumask_first(cpu_online_mask);

	apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}

static int reserve_managed_vector(struct irq_data *irqd)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apicd->is_managed = true;
	ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	trace_vector_reserve_managed(irqd->irq, ret);
	return ret;
}

static void reserve_irq_vector_locked(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	irq_matrix_reserve(vector_matrix);
	apicd->can_reserve = true;
	apicd->has_reserved = true;
	irqd_set_can_reserve(irqd);
	trace_vector_reserve(irqd->irq, 0);
	vector_assign_managed_shutdown(irqd);
}

static int reserve_irq_vector(struct irq_data *irqd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	reserve_irq_vector_locked(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return 0;
}
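
/*
 * assign_vector_locked() - Allocate a vector for @irqd targeting @dest.
 *
 * Must be called with vector_lock held. Keeps the current vector when
 * the existing target CPU is online and part of @dest; refuses with
 * -EBUSY while a previous move is still pending cleanup.
 */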
static int
assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool resvd = apicd->has_reserved;
	unsigned int cpu = apicd->cpu;
	int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	/*
	 * If the current target CPU is online and in the new requested
	 * affinity mask, there is no point in moving the interrupt from
	 * one CPU to another.
	 */
	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
		return 0;

	/*
	 * Careful here. @apicd might either have move_in_progress set or
	 * be enqueued for cleanup. Assigning a new vector would either
	 * leave a stale vector on some CPU around or in case of a pending
	 * cleanup corrupt the hlist.
	 */
	if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
		return -EBUSY;

	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
	trace_vector_alloc(irqd->irq, vector, resvd, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);

	return 0;
}

static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	ret = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
	/* Get the affinity mask - either irq_default_affinity or (user) set */
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	int node = irq_data_get_node(irqd);

	if (node == NUMA_NO_NODE)
		goto all;
	/* Try the intersection of @affmsk and node mask */
	cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;
	/* Try the node mask */
	if (!assign_vector_locked(irqd, cpumask_of_node(node)))
		return 0;
all:
	/* Try the full affinity mask */
	cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;
	/* Try the full online mask */
	return assign_vector_locked(irqd, cpu_online_mask);
}
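
/*
 * assign_irq_vector_policy() - Pick the allocation strategy for a new irq.
 *
 * Managed interrupts get a guaranteed reservation on their affinity
 * mask, interrupts with an explicit target mask get a vector right
 * away, and everything else only gets a global reservation which is
 * turned into a real vector at activation time.
 */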
static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
	if (irqd_affinity_is_managed(irqd))
		return reserve_managed_vector(irqd);
	if (info->mask)
		return assign_irq_vector(irqd, info->mask);
	/*
	 * Make only a global reservation with no guarantee. A real vector
	 * is associated at activation time.
	 */
	return reserve_irq_vector(irqd);
}

static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector, cpu;

	cpumask_and(vector_searchmask, dest, affmsk);

	/* set_affinity might call here for nothing */
	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
		return 0;
	vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
					  &cpu);
	trace_vector_alloc_managed(irqd->irq, vector, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);
	return 0;
}

static void clear_irq_vector(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool managed = irqd_affinity_is_managed(irqd);
	unsigned int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	if (!vector)
		return;

	trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
			   apicd->prev_cpu);

	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
	irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
	apicd->vector = 0;

	/* Clean up move in progress */
	vector = apicd->prev_vector;
	if (!vector)
		return;

	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
	irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
	hlist_del_init(&apicd->clist);
}

static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	trace_vector_deactivate(irqd->irq, apicd->is_managed,
				apicd->can_reserve, false);

	/* Regular fixed assigned interrupt */
	if (!apicd->is_managed && !apicd->can_reserve)
		return;
	/* If the interrupt has a global reservation, nothing to do */
	if (apicd->has_reserved)
		return;

	raw_spin_lock_irqsave(&vector_lock, flags);
	clear_irq_vector(irqd);
	if (apicd->can_reserve)
		reserve_irq_vector_locked(irqd);
	else
		vector_assign_managed_shutdown(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}
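
/*
 * activate_reserved() - Convert a global reservation into a real vector.
 *
 * Called at first activation of an interrupt which only holds a
 * reservation. On success the reservation is dropped and the resulting
 * effective affinity is checked against the requested one.
 */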
static int activate_reserved(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int ret;

	ret = assign_irq_vector_any_locked(irqd);
	if (!ret) {
		apicd->has_reserved = false;
		/*
		 * Core might have disabled reservation mode after
		 * allocating the irq descriptor. Ideally this should
		 * happen before allocation time, but that would require
		 * completely convoluted ways of transporting that
		 * information.
		 */
		if (!irqd_can_reserve(irqd))
			apicd->can_reserve = false;
	}

	/*
	 * Check to ensure that the effective affinity mask is a subset
	 * of the user supplied affinity mask, and warn the user if it
	 * is not.
	 */
	if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
			    irq_data_get_affinity_mask(irqd))) {
		pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
			irqd->irq);
	}

	return ret;
}

static int activate_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	int ret;

	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
		/* Something in the core code broke! Survive gracefully */
		pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
		return -EINVAL;
	}

	ret = assign_managed_vector(irqd, vector_searchmask);
	/*
	 * This should not happen. The vector reservation got buggered.
	 * Handle it gracefully.
	 */
	if (WARN_ON_ONCE(ret < 0)) {
		pr_err("Managed startup irq %u, no vector available\n",
		       irqd->irq);
	}
	return ret;
}

static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
			       bool reserve)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret = 0;

	trace_vector_activate(irqd->irq, apicd->is_managed,
			      apicd->can_reserve, reserve);

	/* Nothing to do for fixed assigned vectors */
	if (!apicd->can_reserve && !apicd->is_managed)
		return 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	if (reserve || irqd_is_managed_and_shutdown(irqd))
		vector_assign_managed_shutdown(irqd);
	else if (apicd->is_managed)
		ret = activate_managed(irqd);
	else if (apicd->has_reserved)
		ret = activate_reserved(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	trace_vector_teardown(irqd->irq, apicd->is_managed,
			      apicd->has_reserved);

	if (apicd->has_reserved)
		irq_matrix_remove_reserved(vector_matrix);
	if (apicd->is_managed)
		irq_matrix_remove_managed(vector_matrix, dest);
}

static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irqd && irqd->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(irqd);
			vector_free_reserved_and_managed(irqd);
			apicd = irqd->chip_data;
			irq_domain_reset_irq_data(irqd);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apicd);
		}
	}
}
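
/*
 * vector_configure_legacy() - Set up a legacy PIC interrupt.
 *
 * Legacy interrupts come with a fixed ISA vector. Returns true when the
 * vector was released because the interrupt is not activated yet,
 * telling the caller to run the normal vector assignment instead.
 */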
static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
				    struct apic_chip_data *apicd)
{
	unsigned long flags;
	bool realloc = false;

	apicd->vector = ISA_IRQ_VECTOR(virq);
	apicd->cpu = 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	/*
	 * If the interrupt is activated, then it must stay at this vector
	 * position. That's usually the timer interrupt (0).
	 */
	if (irqd_is_activated(irqd)) {
		trace_vector_setup(virq, true, 0);
		apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
	} else {
		/* Release the vector */
		apicd->can_reserve = true;
		irqd_set_can_reserve(irqd);
		clear_irq_vector(irqd);
		realloc = true;
	}
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return realloc;
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irqd);
		node = irq_data_get_node(irqd);
		WARN_ON_ONCE(irqd->chip_data);
		apicd = alloc_apic_chip_data(node);
		if (!apicd) {
			err = -ENOMEM;
			goto error;
		}

		apicd->irq = virq + i;
		irqd->chip = &lapic_controller;
		irqd->chip_data = apicd;
		irqd->hwirq = virq + i;
		irqd_set_single_target(irqd);
		/*
		 * Prevent any of these interrupts from being invoked in
		 * non interrupt context via e.g. generic_handle_irq()
		 * as that can corrupt the affinity move state.
		 */
		irqd_set_handle_enforce_irqctx(irqd);
		/*
		 * Legacy vectors are already assigned when the IOAPIC
		 * takes them over. They stay on the same vector. This is
		 * required for check_timer() to work correctly as it might
		 * switch back to legacy mode. Only update the hardware
		 * config.
		 */
		if (info->flags & X86_IRQ_ALLOC_LEGACY) {
			if (!vector_configure_legacy(virq + i, irqd, apicd))
				continue;
		}

		err = assign_irq_vector_policy(irqd, info);
		trace_vector_setup(virq + i, false, err);
		if (err) {
			irqd->chip_data = NULL;
			free_apic_chip_data(apicd);
			goto error;
		}
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i);
	return err;
}
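
/*
 * The debugfs dump below prints one entry per interrupt, e.g. (values
 * are hypothetical):
 *
 *	Vector:	   34
 *	Target:	    2
 *	move_in_progress: 0
 *	is_managed: 0
 *	can_reserve: 1
 *	has_reserved: 0
 *	cleanup_pending: 0
 */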
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
				  struct irq_data *irqd, int ind)
{
	struct apic_chip_data apicd;
	unsigned long flags;
	int irq;

	if (!irqd) {
		irq_matrix_debug_show(m, vector_matrix, ind);
		return;
	}

	irq = irqd->irq;
	if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
		seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
		seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
		return;
	}

	if (!irqd->chip_data) {
		seq_printf(m, "%*sVector: Not assigned\n", ind, "");
		return;
	}

	raw_spin_lock_irqsave(&vector_lock, flags);
	memcpy(&apicd, irqd->chip_data, sizeof(apicd));
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
	seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
	if (apicd.prev_vector) {
		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
	}
	seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
	seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0);
	seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0);
	seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0);
	seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist));
}
#endif

static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc		= x86_vector_alloc_irqs,
	.free		= x86_vector_free_irqs,
	.activate	= x86_vector_activate,
	.deactivate	= x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show	= x86_vector_debug_show,
#endif
};

int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI)
	/*
	 * for MSI and HT dyn irq
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if PIC is present at this point so we need to do
	 * probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}

void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
	/*
	 * Use assign system here so it won't get accounted as allocated
	 * and movable in the cpu hotplug check and it prevents managed
	 * irq reservation from touching it.
	 */
	irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}

void __init lapic_assign_system_vectors(void)
{
	unsigned int i, vector = 0;

	for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
		irq_matrix_assign_system(vector_matrix, vector, false);

	if (nr_legacy_irqs() > 1)
		lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

	/* System vectors are reserved, so mark the matrix online */
	irq_matrix_online(vector_matrix);

	/* Mark the preallocated legacy interrupts */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		if (i != PIC_CASCADE_IR)
			irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
	}
}
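
/*
 * arch_early_irq_init() - Boot time setup of the vector domain.
 *
 * Creates the root "VECTOR" irqdomain, which the other x86 interrupt
 * domains (IOAPIC, MSI, etc.) are parented to, and allocates the
 * matrix allocator which tracks per-CPU vector usage.
 */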
int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_domain_free_fwnode(fn);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

	/*
	 * Allocate the vector matrix allocator data structure and limit
	 * the search area.
	 */
	vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
					 FIRST_SYSTEM_VECTOR);
	BUG_ON(!vector_matrix);

	return arch_early_ioapic_init();
}

#ifdef CONFIG_SMP

static struct irq_desc *__setup_vector_irq(int vector)
{
	int isairq = vector - ISA_IRQ_VECTOR(0);

	/* Check whether the irq is in the legacy space */
	if (isairq < 0 || isairq >= nr_legacy_irqs())
		return VECTOR_UNUSED;
	/* Check whether the irq is handled by the IOAPIC */
	if (test_bit(isairq, &io_apic_irqs))
		return VECTOR_UNUSED;
	return irq_to_desc(isairq);
}

/* Online the local APIC infrastructure and initialize the vectors */
void lapic_online(void)
{
	unsigned int vector;

	lockdep_assert_held(&vector_lock);

	/* Online the vector matrix array for this CPU */
	irq_matrix_online(vector_matrix);

	/*
	 * The interrupt affinity logic never targets interrupts to offline
	 * CPUs. The exception is the legacy PIC interrupts. In general
	 * they are only targeted to CPU0, but depending on the platform
	 * they can be distributed to any online CPU in hardware. The
	 * kernel has no influence on that. So all active legacy vectors
	 * must be installed on all CPUs. All non legacy interrupts can be
	 * cleared.
	 */
	for (vector = 0; vector < NR_VECTORS; vector++)
		this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}

void lapic_offline(void)
{
	lock_vector_lock();
	irq_matrix_offline(vector_matrix);
	unlock_vector_lock();
}
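
/*
 * apic_set_affinity() - irq_chip callback to retarget an interrupt.
 *
 * Returning IRQ_SET_MASK_OK tells the core code to copy the requested
 * mask into the irq descriptor's affinity mask.
 */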
static int apic_set_affinity(struct irq_data *irqd,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int err;

	/*
	 * Core code can call here for inactive interrupts. For inactive
	 * interrupts which use managed or reservation mode there is no
	 * point in going through the vector assignment right now as the
	 * activation will assign a vector which fits the destination
	 * cpumask. Let the core code store the destination mask and be
	 * done with it.
	 */
	if (!irqd_is_activated(irqd) &&
	    (apicd->is_managed || apicd->can_reserve))
		return IRQ_SET_MASK_OK;

	raw_spin_lock(&vector_lock);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (irqd_affinity_is_managed(irqd))
		err = assign_managed_vector(irqd, vector_searchmask);
	else
		err = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock(&vector_lock);
	return err ? err : IRQ_SET_MASK_OK;
}

#else
# define apic_set_affinity	NULL
#endif

static int apic_retrigger_irq(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apic->send_IPI(apicd->cpu, apicd->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

void apic_ack_irq(struct irq_data *irqd)
{
	irq_move_irq(irqd);
	ack_APIC_irq();
}

void apic_ack_edge(struct irq_data *irqd)
{
	irq_complete_move(irqd_cfg(irqd));
	apic_ack_irq(irqd);
}

static struct irq_chip lapic_controller = {
	.name			= "APIC",
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};

#ifdef CONFIG_SMP

static void free_moved_vector(struct apic_chip_data *apicd)
{
	unsigned int vector = apicd->prev_vector;
	unsigned int cpu = apicd->prev_cpu;
	bool managed = apicd->is_managed;

	/*
	 * Managed interrupts are usually not migrated away
	 * from an online CPU, but CPU isolation 'managed_irq'
	 * can make that happen.
	 * 1) Activation does not take the isolation into account
	 *    to keep the code simple
	 * 2) Migration away from an isolated CPU can happen when
	 *    a non-isolated CPU which is in the calculated
	 *    affinity mask comes online.
	 */
	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
	irq_matrix_free(vector_matrix, cpu, vector, managed);
	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	hlist_del_init(&apicd->clist);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
}
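
/*
 * Handler for IRQ_MOVE_CLEANUP_VECTOR.
 *
 * When a moved interrupt fires on its new vector for the first time,
 * the old target CPU receives this IPI and releases the old vector
 * here. Rough flow:
 *
 *	apic_ack_edge()
 *	  irq_complete_move()		<- first irq on the new vector
 *	    __send_cleanup_vector()	<- queue apicd on prev_cpu's
 *					   cleanup_list, send the IPI
 *	smp_irq_move_cleanup_interrupt() <- on prev_cpu: free old vector
 */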
asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
{
	struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
	struct apic_chip_data *apicd;
	struct hlist_node *tmp;

	entering_ack_irq();
	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
		unsigned int irr, vector = apicd->prev_vector;

		/*
		 * Paranoia: Check if the vector that needs to be cleaned
		 * up is registered in the APIC's IRR. If so, then this is
		 * not the best time to clean it up. Clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to this CPU. IRQ_MOVE_CLEANUP_VECTOR is the lowest
		 * priority external vector, so on return from this
		 * interrupt the device interrupt will happen first.
		 */
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1U << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			continue;
		}
		free_moved_vector(apicd);
	}

	raw_spin_unlock(&vector_lock);
	exiting_irq();
}

static void __send_cleanup_vector(struct apic_chip_data *apicd)
{
	unsigned int cpu;

	raw_spin_lock(&vector_lock);
	apicd->move_in_progress = 0;
	cpu = apicd->prev_cpu;
	if (cpu_online(cpu)) {
		hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
		apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		apicd->prev_vector = 0;
	}
	raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (apicd->move_in_progress)
		__send_cleanup_vector(apicd);
}

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (likely(!apicd->move_in_progress))
		return;

	if (vector == apicd->vector && apicd->cpu == smp_processor_id())
		__send_cleanup_vector(apicd);
}
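
/*
 * The low level entry code stores the negated vector number in
 * orig_ax, so ~orig_ax recovers the vector this interrupt arrived on.
 */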
void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned int vector;

	/*
	 * The function is called for all descriptors regardless of which
	 * irqdomain they belong to. For example if an IRQ is provided by
	 * an irq_chip as part of a GPIO driver, the chip data for that
	 * descriptor is specific to the irq_chip in question.
	 *
	 * Check first that the chip_data is what we expect
	 * (apic_chip_data) before touching it any further.
	 */
	irqd = irq_domain_get_irq_data(x86_vector_domain,
				       irq_desc_get_irq(desc));
	if (!irqd)
		return;

	raw_spin_lock(&vector_lock);
	apicd = apic_chip_data(irqd);
	if (!apicd)
		goto unlock;

	/*
	 * If prev_vector is empty, no action required.
	 */
	vector = apicd->prev_vector;
	if (!vector)
		goto unlock;

	/*
	 * This is tricky. If the cleanup of the old vector has not been
	 * done yet, then the following set_affinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 *
	 * 1) The interrupt is in move_in_progress state. That means that we
	 *    have not seen an interrupt since the io_apic was reprogrammed to
	 *    the new vector.
	 *
	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
	 *    have not been processed yet.
	 */
	if (apicd->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before update
		 *			      is effective, i.e. it's raised on
		 *			      the old vector.
		 *
		 * So if the target cpu cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of cpu hotplug this should be a non issue
		 * because if the affinity update happens right before all
		 * cpus rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target cpu because all cpus
		 * loop first with interrupts enabled in stop machine, so the
		 * old vector is not yet cleaned up when the interrupt fires.
		 *
		 * So the only way to run into this issue is if the delivery
		 * of the interrupt on the apic/system bus would be delayed
		 * beyond the point where the target cpu disables interrupts
		 * in stop machine. I doubt that it can happen, but at least
		 * there is a theoretical chance. Virtualization might be
		 * able to expose this, but AFAICT the IOAPIC emulation is not
		 * as stupid as the real hardware.
		 *
		 * Anyway, there is nothing we can do about that at this point
		 * w/o refactoring the whole fixup_irq() business completely.
		 * We print at least the irq number and the old vector number,
		 * so we have the necessary information when a problem in that
		 * area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqd->irq, vector);
	}
	free_moved_vector(apicd);
unlock:
	raw_spin_unlock(&vector_lock);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Note, this is not accurate accounting, but at least good enough to
 * prevent the actual interrupt move from running out of vectors.
 */
int lapic_can_unplug_cpu(void)
{
	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
	int ret = 0;

	raw_spin_lock(&vector_lock);
	tomove = irq_matrix_allocated(vector_matrix);
	avl = irq_matrix_available(vector_matrix, true);
	if (avl < tomove) {
		pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
			cpu, tomove, avl);
		ret = -ENOSPC;
		goto out;
	}
	rsvd = irq_matrix_reserved(vector_matrix);
	if (avl < rsvd) {
		pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
			rsvd, avl);
	}
out:
	raw_spin_unlock(&vector_lock);
	return ret;
}
#endif /* HOTPLUG_CPU */
#endif /* SMP */
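
/*
 * ISR, TMR and IRR are 256 bit APIC registers, exposed as eight 32 bit
 * words spaced 0x10 apart in the register space. print_APIC_field()
 * dumps one such register as eight concatenated hex words.
 */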
static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}
static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);