--- vector.c (e3beca48a45b5e0e6e6a4e0124276b8248dcc9bb)
+++ vector.c (baedb87d1b53532f81b4bd0387f83b05d4f7eb9a)
@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Local APIC related interfaces to support IOAPIC, MSI, etc.
  *
  * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
  *	Moved from arch/x86/kernel/apic/io_apic.c.
  * Jiang Liu <jiang.liu@linux.intel.com>
  *	Enable support of hierarchical irqdomains

--- 432 unchanged lines hidden ---

@@ -441,22 +441,20 @@
 {
 	struct apic_chip_data *apicd = apic_chip_data(irqd);
 	unsigned long flags;
 	int ret = 0;
 
 	trace_vector_activate(irqd->irq, apicd->is_managed,
 			      apicd->can_reserve, reserve);
 
-	/* Nothing to do for fixed assigned vectors */
-	if (!apicd->can_reserve && !apicd->is_managed)
-		return 0;
-
 	raw_spin_lock_irqsave(&vector_lock, flags);
-	if (reserve || irqd_is_managed_and_shutdown(irqd))
+	if (!apicd->can_reserve && !apicd->is_managed)
+		assign_irq_vector_any_locked(irqd);
+	else if (reserve || irqd_is_managed_and_shutdown(irqd))
 		vector_assign_managed_shutdown(irqd);
 	else if (apicd->is_managed)
 		ret = activate_managed(irqd);
 	else if (apicd->has_reserved)
 		ret = activate_reserved(irqd);
 	raw_spin_unlock_irqrestore(&vector_lock, flags);
 	return ret;
 }

--- 306 unchanged lines hidden ---
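This first hunk (evidently the body of x86_vector_activate(), given the trace_vector_activate() call; the function name itself sits in the hidden region) changes how fixed vectors are handled at activation time: the old code returned early for interrupts that are neither reservable nor managed, leaving them without a vector assignment here, while the new code immediately assigns them a vector under vector_lock via assign_irq_vector_any_locked(). Below is a minimal, compilable userspace model of the new decision ladder; struct chip_data, activate_path(), and the returned strings are hypothetical stand-ins for struct apic_chip_data and the helpers named in the diff, not kernel API.

/*
 * Minimal userspace model of the new activation ladder; NOT kernel code.
 * The fields mirror the apic_chip_data flags tested in the diff.
 */
#include <stdbool.h>
#include <stdio.h>

struct chip_data {
	bool can_reserve;
	bool is_managed;
	bool has_reserved;
	bool managed_shutdown;	/* models irqd_is_managed_and_shutdown() */
};

static const char *activate_path(const struct chip_data *d, bool reserve)
{
	if (!d->can_reserve && !d->is_managed)
		return "assign_irq_vector_any_locked";	/* new: fixed vectors get a vector here */
	if (reserve || d->managed_shutdown)
		return "vector_assign_managed_shutdown";
	if (d->is_managed)
		return "activate_managed";
	if (d->has_reserved)
		return "activate_reserved";
	return "no-op";
}

int main(void)
{
	struct chip_data fixed   = { 0 };			/* !can_reserve && !is_managed */
	struct chip_data managed = { .is_managed = true };

	printf("fixed:   %s\n", activate_path(&fixed, false));
	printf("managed: %s\n", activate_path(&managed, false));
	return 0;
}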
@@ -769,30 +767,20 @@
 	lock_vector_lock();
 	irq_matrix_offline(vector_matrix);
 	unlock_vector_lock();
 }
 
 static int apic_set_affinity(struct irq_data *irqd,
 			     const struct cpumask *dest, bool force)
 {
-	struct apic_chip_data *apicd = apic_chip_data(irqd);
 	int err;
 
-	/*
-	 * Core code can call here for inactive interrupts. For inactive
-	 * interrupts which use managed or reservation mode there is no
-	 * point in going through the vector assignment right now as the
-	 * activation will assign a vector which fits the destination
-	 * cpumask. Let the core code store the destination mask and be
-	 * done with it.
-	 */
-	if (!irqd_is_activated(irqd) &&
-	    (apicd->is_managed || apicd->can_reserve))
-		return IRQ_SET_MASK_OK;
+	if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
+		return -EIO;
 
 	raw_spin_lock(&vector_lock);
 	cpumask_and(vector_searchmask, dest, cpu_online_mask);
 	if (irqd_affinity_is_managed(irqd))
 		err = assign_managed_vector(irqd, vector_searchmask);
 	else
 		err = assign_vector_locked(irqd, vector_searchmask);
 	raw_spin_unlock(&vector_lock);

--- 463 unchanged lines hidden ---
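This second hunk is the complement of the first: with a vector now guaranteed at activation time, apic_set_affinity() no longer needs the escape hatch that let the core code store the destination mask for inactive managed/reservation-mode interrupts. An affinity change on an inactive interrupt is instead treated as a caller bug: warn once, fail with -EIO (the generic irq core is presumably expected to record the requested mask itself before activation). A minimal compilable sketch of the new guard, where the bool parameter is a hypothetical stand-in for irqd_is_activated():

/* Userspace sketch of the new apic_set_affinity() guard; NOT kernel code. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* 'activated' models irqd_is_activated(); the real code takes an irq_data. */
static int set_affinity_model(bool activated)
{
	if (!activated) {
		/* models WARN_ON_ONCE(): complain loudly, then fail */
		fprintf(stderr, "WARN: affinity change on inactive interrupt\n");
		return -EIO;
	}
	/* here the real code performs vector assignment under vector_lock */
	return 0;
}

int main(void)
{
	printf("inactive -> %d\n", set_affinity_model(false));	/* -EIO == -5 */
	printf("active   -> %d\n", set_affinity_model(true));	/* 0 */
	return 0;
}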