Lines Matching refs:apicd

102 struct apic_chip_data *apicd = apic_chip_data(irqd); in irqd_cfg() local
104 return apicd ? &apicd->hw_irq_cfg : NULL; in irqd_cfg()
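The two irqd_cfg() references above make up the whole accessor; a minimal sketch reconstructed from the listed lines (the return type and the apic_chip_data() helper are assumed from context):

struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	/* Hand out the embedded hardware config, or NULL if no chip data is attached. */
	return apicd ? &apicd->hw_irq_cfg : NULL;
}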
115 struct apic_chip_data *apicd; in alloc_apic_chip_data() local
117 apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node); in alloc_apic_chip_data()
118 if (apicd) in alloc_apic_chip_data()
119 INIT_HLIST_NODE(&apicd->clist); in alloc_apic_chip_data()
120 return apicd; in alloc_apic_chip_data()
123 static void free_apic_chip_data(struct apic_chip_data *apicd) in free_apic_chip_data() argument
125 kfree(apicd); in free_apic_chip_data()
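Lines 115-125 cover the allocator and its counterpart almost completely; a sketch of how they fit together, with the GFP flags and hlist initialization taken directly from the listing:

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *apicd;

	/* Zeroed, node-local allocation; the cleanup list node must be usable afterwards. */
	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
	if (apicd)
		INIT_HLIST_NODE(&apicd->clist);
	return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	kfree(apicd);
}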
131 struct apic_chip_data *apicd = apic_chip_data(irqd); in apic_update_irq_cfg() local
135 apicd->hw_irq_cfg.vector = vector; in apic_update_irq_cfg()
136 apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu); in apic_update_irq_cfg()
139 apicd->hw_irq_cfg.dest_apicid); in apic_update_irq_cfg()
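The references at 131-139 set up the hardware configuration for a freshly assigned vector; a sketch of the surrounding update, assuming the usual calc_dest_apicid() callback and a vector_config tracepoint whose name and leading arguments are not visible in the listing:

static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
				unsigned int cpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	/* Record the vector and the destination APIC ID for the target CPU. */
	apicd->hw_irq_cfg.vector = vector;
	apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
	trace_vector_config(irqd->irq, vector, cpu,
			    apicd->hw_irq_cfg.dest_apicid);
}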
145 struct apic_chip_data *apicd = apic_chip_data(irqd); in apic_update_vector() local
151 trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector, in apic_update_vector()
152 apicd->cpu); in apic_update_vector()
160 apicd->prev_vector = 0; in apic_update_vector()
161 if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR) in apic_update_vector()
171 if (cpu_online(apicd->cpu)) { in apic_update_vector()
172 apicd->move_in_progress = true; in apic_update_vector()
173 apicd->prev_vector = apicd->vector; in apic_update_vector()
174 apicd->prev_cpu = apicd->cpu; in apic_update_vector()
175 WARN_ON_ONCE(apicd->cpu == newcpu); in apic_update_vector()
177 irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector, in apic_update_vector()
182 apicd->vector = newvec; in apic_update_vector()
183 apicd->cpu = newcpu; in apic_update_vector()
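The cluster of references at 145-183 is the heart of a vector move: the old vector is either queued for cleanup (old target CPU still online) or released immediately (old target CPU offline), and only then are the new vector and CPU recorded. A condensed sketch of that decision flow, reconstructed from the listed lines; the managed flag, the goto label and anything after the final assignments are assumptions not shown in the listing:

static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
			       unsigned int newcpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool managed = irqd_affinity_is_managed(irqd);

	trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
			    apicd->cpu);

	/* Nothing to release if no real vector was assigned yet. */
	apicd->prev_vector = 0;
	if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
		goto setnew;

	if (cpu_online(apicd->cpu)) {
		/* Old CPU still online: defer the release to the cleanup path. */
		apicd->move_in_progress = true;
		apicd->prev_vector = apicd->vector;
		apicd->prev_cpu = apicd->cpu;
		WARN_ON_ONCE(apicd->cpu == newcpu);
	} else {
		/* Old CPU is gone: hand the vector back to the matrix allocator now. */
		irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
				managed);
	}

setnew:
	apicd->vector = newvec;
	apicd->cpu = newcpu;
}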
198 struct apic_chip_data *apicd = apic_chip_data(irqd); in reserve_managed_vector() local
203 apicd->is_managed = true; in reserve_managed_vector()
212 struct apic_chip_data *apicd = apic_chip_data(irqd); in reserve_irq_vector_locked() local
215 apicd->can_reserve = true; in reserve_irq_vector_locked()
216 apicd->has_reserved = true; in reserve_irq_vector_locked()
235 struct apic_chip_data *apicd = apic_chip_data(irqd); in assign_vector_locked() local
236 bool resvd = apicd->has_reserved; in assign_vector_locked()
237 unsigned int cpu = apicd->cpu; in assign_vector_locked()
238 int vector = apicd->vector; in assign_vector_locked()
256 if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist)) in assign_vector_locked()
327 struct apic_chip_data *apicd = apic_chip_data(irqd); in assign_managed_vector() local
333 if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask)) in assign_managed_vector()
347 struct apic_chip_data *apicd = apic_chip_data(irqd); in clear_irq_vector() local
349 unsigned int vector = apicd->vector; in clear_irq_vector()
356 trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector, in clear_irq_vector()
357 apicd->prev_cpu); in clear_irq_vector()
359 per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN; in clear_irq_vector()
360 irq_matrix_free(vector_matrix, apicd->cpu, vector, managed); in clear_irq_vector()
361 apicd->vector = 0; in clear_irq_vector()
364 vector = apicd->prev_vector; in clear_irq_vector()
368 per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN; in clear_irq_vector()
369 irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed); in clear_irq_vector()
370 apicd->prev_vector = 0; in clear_irq_vector()
371 apicd->move_in_progress = 0; in clear_irq_vector()
372 hlist_del_init(&apicd->clist); in clear_irq_vector()
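The clear_irq_vector() references show both halves of a teardown: the currently programmed vector and, if a move was still pending, the previous one. A sketch assembled from lines 347-372; the early return on an unassigned vector and the managed flag are assumed from context:

static void clear_irq_vector(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool managed = irqd_affinity_is_managed(irqd);
	unsigned int vector = apicd->vector;

	if (!vector)
		return;

	trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
			   apicd->prev_cpu);

	/* Tear down the active vector on its target CPU. */
	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
	irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
	apicd->vector = 0;

	/* If a move was still in flight, release the previous vector as well. */
	vector = apicd->prev_vector;
	if (!vector)
		return;

	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
	irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
	hlist_del_init(&apicd->clist);
}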
377 struct apic_chip_data *apicd = apic_chip_data(irqd); in x86_vector_deactivate() local
380 trace_vector_deactivate(irqd->irq, apicd->is_managed, in x86_vector_deactivate()
381 apicd->can_reserve, false); in x86_vector_deactivate()
384 if (!apicd->is_managed && !apicd->can_reserve) in x86_vector_deactivate()
387 if (apicd->has_reserved) in x86_vector_deactivate()
392 if (apicd->can_reserve) in x86_vector_deactivate()
401 struct apic_chip_data *apicd = apic_chip_data(irqd); in activate_reserved() local
406 apicd->has_reserved = false; in activate_reserved()
415 apicd->can_reserve = false; in activate_reserved()
458 struct apic_chip_data *apicd = apic_chip_data(irqd); in x86_vector_activate() local
462 trace_vector_activate(irqd->irq, apicd->is_managed, in x86_vector_activate()
463 apicd->can_reserve, reserve); in x86_vector_activate()
466 if (!apicd->can_reserve && !apicd->is_managed) in x86_vector_activate()
470 else if (apicd->is_managed) in x86_vector_activate()
472 else if (apicd->has_reserved) in x86_vector_activate()
481 struct apic_chip_data *apicd = apic_chip_data(irqd); in vector_free_reserved_and_managed() local
483 trace_vector_teardown(irqd->irq, apicd->is_managed, in vector_free_reserved_and_managed()
484 apicd->has_reserved); in vector_free_reserved_and_managed()
486 if (apicd->has_reserved) in vector_free_reserved_and_managed()
488 if (apicd->is_managed) in vector_free_reserved_and_managed()
495 struct apic_chip_data *apicd; in x86_vector_free_irqs() local
506 apicd = irqd->chip_data; in x86_vector_free_irqs()
509 free_apic_chip_data(apicd); in x86_vector_free_irqs()
515 struct apic_chip_data *apicd) in vector_configure_legacy() argument
520 apicd->vector = ISA_IRQ_VECTOR(virq); in vector_configure_legacy()
521 apicd->cpu = 0; in vector_configure_legacy()
530 apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu); in vector_configure_legacy()
533 apicd->can_reserve = true; in vector_configure_legacy()
546 struct apic_chip_data *apicd; in x86_vector_alloc_irqs() local
566 apicd = alloc_apic_chip_data(node); in x86_vector_alloc_irqs()
567 if (!apicd) { in x86_vector_alloc_irqs()
572 apicd->irq = virq + i; in x86_vector_alloc_irqs()
574 irqd->chip_data = apicd; in x86_vector_alloc_irqs()
595 if (!vector_configure_legacy(virq + i, irqd, apicd)) in x86_vector_alloc_irqs()
603 free_apic_chip_data(apicd); in x86_vector_alloc_irqs()
619 struct apic_chip_data apicd; in x86_vector_debug_show() local
641 memcpy(&apicd, irqd->chip_data, sizeof(apicd)); in x86_vector_debug_show()
644 seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector); in x86_vector_debug_show()
645 seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu); in x86_vector_debug_show()
646 if (apicd.prev_vector) { in x86_vector_debug_show()
647 seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector); in x86_vector_debug_show()
648 seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu); in x86_vector_debug_show()
650 seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0); in x86_vector_debug_show()
651 seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0); in x86_vector_debug_show()
652 seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0); in x86_vector_debug_show()
653 seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0); in x86_vector_debug_show()
654 seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist)); in x86_vector_debug_show()
897 struct apic_chip_data *apicd = apic_chip_data(irqd); in apic_retrigger_irq() local
901 __apic_send_IPI(apicd->cpu, apicd->vector); in apic_retrigger_irq()
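The retrigger path at 897-901 simply re-sends the interrupt to wherever it is currently programmed; a sketch, with the vector_lock protection assumed:

static int apic_retrigger_irq(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	/* Resend the interrupt as an IPI to the current target CPU and vector. */
	raw_spin_lock_irqsave(&vector_lock, flags);
	__apic_send_IPI(apicd->cpu, apicd->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}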
935 static void free_moved_vector(struct apic_chip_data *apicd) in free_moved_vector() argument
937 unsigned int vector = apicd->prev_vector; in free_moved_vector()
938 unsigned int cpu = apicd->prev_cpu; in free_moved_vector()
939 bool managed = apicd->is_managed; in free_moved_vector()
951 trace_vector_free_moved(apicd->irq, cpu, vector, managed); in free_moved_vector()
954 hlist_del_init(&apicd->clist); in free_moved_vector()
955 apicd->prev_vector = 0; in free_moved_vector()
956 apicd->move_in_progress = 0; in free_moved_vector()
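free_moved_vector() (935-956) is the common tail of both cleanup paths: it releases the previous vector and unhooks the entry from the per-CPU cleanup list. A sketch built from the listed lines; the matrix release and per-CPU table reset between the trace and the hlist removal are assumptions:

static void free_moved_vector(struct apic_chip_data *apicd)
{
	unsigned int vector = apicd->prev_vector;
	unsigned int cpu = apicd->prev_cpu;
	bool managed = apicd->is_managed;

	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
	/* Return the old vector to the allocator and clear the per-CPU slot. */
	irq_matrix_free(vector_matrix, cpu, vector, managed);
	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	hlist_del_init(&apicd->clist);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
}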
961 struct apic_chip_data *apicd; in __vector_cleanup() local
967 hlist_for_each_entry_safe(apicd, tmp, &cl->head, clist) { in __vector_cleanup()
968 unsigned int irr, vector = apicd->prev_vector; in __vector_cleanup()
984 pr_warn_once("Moved interrupt pending in old target APIC %u\n", apicd->irq); in __vector_cleanup()
988 free_moved_vector(apicd); in __vector_cleanup()
1009 static void __vector_schedule_cleanup(struct apic_chip_data *apicd) in __vector_schedule_cleanup() argument
1011 unsigned int cpu = apicd->prev_cpu; in __vector_schedule_cleanup()
1014 apicd->move_in_progress = 0; in __vector_schedule_cleanup()
1018 hlist_add_head(&apicd->clist, &cl->head); in __vector_schedule_cleanup()
1039 pr_warn("IRQ %u schedule cleanup for offline CPU %u\n", apicd->irq, cpu); in __vector_schedule_cleanup()
1040 free_moved_vector(apicd); in __vector_schedule_cleanup()
1047 struct apic_chip_data *apicd; in vector_schedule_cleanup() local
1049 apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg); in vector_schedule_cleanup()
1050 if (apicd->move_in_progress) in vector_schedule_cleanup()
1051 __vector_schedule_cleanup(apicd); in vector_schedule_cleanup()
1056 struct apic_chip_data *apicd; in irq_complete_move() local
1058 apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg); in irq_complete_move()
1059 if (likely(!apicd->move_in_progress)) in irq_complete_move()
1068 if (apicd->cpu == smp_processor_id()) in irq_complete_move()
1069 __vector_schedule_cleanup(apicd); in irq_complete_move()
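The two groups just above recover the apic_chip_data from an embedded irq_cfg via container_of() and act only when a move is actually pending; irq_complete_move() additionally schedules cleanup only once the interrupt has been taken on the new target CPU. A sketch of both entry points, reconstructed from lines 1047-1069 (the exact signatures are assumed):

void vector_schedule_cleanup(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (apicd->move_in_progress)
		__vector_schedule_cleanup(apicd);
}

void irq_complete_move(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (likely(!apicd->move_in_progress))
		return;

	/*
	 * Only schedule cleanup of the old vector once the interrupt has
	 * actually arrived on the new target CPU.
	 */
	if (apicd->cpu == smp_processor_id())
		__vector_schedule_cleanup(apicd);
}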
1078 struct apic_chip_data *apicd; in irq_force_complete_move() local
1097 apicd = apic_chip_data(irqd); in irq_force_complete_move()
1098 if (!apicd) in irq_force_complete_move()
1105 vector = apicd->prev_vector; in irq_force_complete_move()
1106 if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu)) in irq_force_complete_move()
1124 if (apicd->move_in_progress) { in irq_force_complete_move()
1160 free_moved_vector(apicd); in irq_force_complete_move()