xref: /openbmc/linux/arch/x86/kernel/apic/vector.c (revision b181f7029bd71238ac2754ce7052dffd69432085)
1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
274afab7aSJiang Liu /*
3fd2fa6c1SBjorn Helgaas  * Local APIC related interfaces to support IOAPIC, MSI, etc.
474afab7aSJiang Liu  *
574afab7aSJiang Liu  * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
674afab7aSJiang Liu  *	Moved from arch/x86/kernel/apic/io_apic.c.
7b5dc8e6cSJiang Liu  * Jiang Liu <jiang.liu@linux.intel.com>
8b5dc8e6cSJiang Liu  *	Enable support of hierarchical irqdomains
974afab7aSJiang Liu  */
1074afab7aSJiang Liu #include <linux/interrupt.h>
11447ae316SNicolai Stange #include <linux/irq.h>
1265d7ed57SThomas Gleixner #include <linux/seq_file.h>
1374afab7aSJiang Liu #include <linux/init.h>
1474afab7aSJiang Liu #include <linux/compiler.h>
1574afab7aSJiang Liu #include <linux/slab.h>
16d746d1ebSJiang Liu #include <asm/irqdomain.h>
1774afab7aSJiang Liu #include <asm/hw_irq.h>
18ad3bc25aSBorislav Petkov #include <asm/traps.h>
1974afab7aSJiang Liu #include <asm/apic.h>
2074afab7aSJiang Liu #include <asm/i8259.h>
2174afab7aSJiang Liu #include <asm/desc.h>
2274afab7aSJiang Liu #include <asm/irq_remapping.h>
2374afab7aSJiang Liu 
248d1e3dcaSThomas Gleixner #include <asm/trace/irq_vectors.h>
258d1e3dcaSThomas Gleixner 
/*
 * Per-interrupt bookkeeping, stored as the chip data of the top-most
 * (vector domain) irq_data in the hierarchy.
 */
struct apic_chip_data {
	struct irq_cfg		hw_irq_cfg;	/* Vector/dest APIC id programmed into hardware */
	unsigned int		vector;		/* Currently assigned vector */
	unsigned int		prev_vector;	/* Previous vector while a move is pending */
	unsigned int		cpu;		/* CPU the current vector is allocated on */
	unsigned int		prev_cpu;	/* CPU of the previous vector */
	unsigned int		irq;		/* Linux interrupt number */
	struct hlist_node	clist;		/* Entry in the per-CPU cleanup list */
	unsigned int		move_in_progress	: 1,	/* Affinity move not yet cleaned up */
				is_managed		: 1,	/* Managed (kernel controlled) affinity */
				can_reserve		: 1,	/* May fall back to a global reservation */
				has_reserved		: 1;	/* Currently holds a global reservation */
};
397f3262edSJiang Liu 
struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
/* Serializes all vector allocation/assignment state below */
static DEFINE_RAW_SPINLOCK(vector_lock);
/* Scratch cpumask for vector searches; only valid under vector_lock */
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
/* Bitmap-matrix allocator managing the per-CPU vector space */
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP

static void vector_cleanup_callback(struct timer_list *tmr);

/* Per-CPU list of vectors pending release after an affinity move */
struct vector_cleanup {
	struct hlist_head	head;
	struct timer_list	timer;
};

static DEFINE_PER_CPU(struct vector_cleanup, vector_cleanup) = {
	.head	= HLIST_HEAD_INIT,
	.timer	= __TIMER_INITIALIZER(vector_cleanup_callback, TIMER_PINNED),
};
#endif
6074afab7aSJiang Liu 
void lock_vector_lock(void)
{
	/*
	 * Used so that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}
6874afab7aSJiang Liu 
/* Counterpart of lock_vector_lock() */
void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}
7374afab7aSJiang Liu 
/*
 * Zero @info completely (including any padding) and record the optional
 * affinity hint @mask in it.
 */
void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}
8099a1482dSThomas Gleixner 
copy_irq_alloc_info(struct irq_alloc_info * dst,struct irq_alloc_info * src)8199a1482dSThomas Gleixner void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
8299a1482dSThomas Gleixner {
8399a1482dSThomas Gleixner 	if (src)
8499a1482dSThomas Gleixner 		*dst = *src;
8599a1482dSThomas Gleixner 	else
8699a1482dSThomas Gleixner 		memset(dst, 0, sizeof(*dst));
8799a1482dSThomas Gleixner }
8899a1482dSThomas Gleixner 
apic_chip_data(struct irq_data * irqd)8986ba6551SThomas Gleixner static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
9074afab7aSJiang Liu {
9186ba6551SThomas Gleixner 	if (!irqd)
92b5dc8e6cSJiang Liu 		return NULL;
93b5dc8e6cSJiang Liu 
9486ba6551SThomas Gleixner 	while (irqd->parent_data)
9586ba6551SThomas Gleixner 		irqd = irqd->parent_data;
96b5dc8e6cSJiang Liu 
9786ba6551SThomas Gleixner 	return irqd->chip_data;
9874afab7aSJiang Liu }
9974afab7aSJiang Liu 
irqd_cfg(struct irq_data * irqd)10086ba6551SThomas Gleixner struct irq_cfg *irqd_cfg(struct irq_data *irqd)
10174afab7aSJiang Liu {
10286ba6551SThomas Gleixner 	struct apic_chip_data *apicd = apic_chip_data(irqd);
10374afab7aSJiang Liu 
104ba224feaSThomas Gleixner 	return apicd ? &apicd->hw_irq_cfg : NULL;
1057f3262edSJiang Liu }
106c8f3e518SJake Oshins EXPORT_SYMBOL_GPL(irqd_cfg);
1077f3262edSJiang Liu 
/* Look up the hardware interrupt configuration for a Linux irq number. */
struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}
1127f3262edSJiang Liu 
alloc_apic_chip_data(int node)1137f3262edSJiang Liu static struct apic_chip_data *alloc_apic_chip_data(int node)
1147f3262edSJiang Liu {
11586ba6551SThomas Gleixner 	struct apic_chip_data *apicd;
1167f3262edSJiang Liu 
11786ba6551SThomas Gleixner 	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
11869cde000SThomas Gleixner 	if (apicd)
119dccfe314SThomas Gleixner 		INIT_HLIST_NODE(&apicd->clist);
12086ba6551SThomas Gleixner 	return apicd;
12174afab7aSJiang Liu }
12274afab7aSJiang Liu 
/* Release chip data; kfree(NULL) is a no-op, so no check is needed. */
static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	kfree(apicd);
}
12774afab7aSJiang Liu 
/*
 * Update the hardware-visible configuration (vector and destination
 * APIC id) and the effective affinity of the interrupt.
 *
 * Must be called with vector_lock held.
 */
static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
				unsigned int cpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	lockdep_assert_held(&vector_lock);

	apicd->hw_irq_cfg.vector = vector;
	apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
	/* Effective affinity is the single target CPU */
	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
	trace_vector_config(irqd->irq, vector, cpu,
			    apicd->hw_irq_cfg.dest_apicid);
}
14174afab7aSJiang Liu 
/*
 * Install @newvec on @newcpu for the interrupt and deal with a
 * previously assigned vector, which either needs deferred cleanup on
 * its old CPU or can be freed immediately.
 *
 * Must be called with vector_lock held.
 */
static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
			       unsigned int newcpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	struct irq_desc *desc = irq_data_to_desc(irqd);
	bool managed = irqd_affinity_is_managed(irqd);

	lockdep_assert_held(&vector_lock);

	trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
			    apicd->cpu);

	/*
	 * If there is no vector associated or if the associated vector is
	 * the shutdown vector, which is associated to make PCI/MSI
	 * shutdown mode work, then there is nothing to release. Clear out
	 * prev_vector for this and the offlined target case.
	 */
	apicd->prev_vector = 0;
	if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
		goto setnew;
	/*
	 * If the target CPU of the previous vector is online, then mark
	 * the vector as move in progress and store it for cleanup when the
	 * first interrupt on the new vector arrives. If the target CPU is
	 * offline then the regular release mechanism via the cleanup
	 * vector is not possible and the vector can be immediately freed
	 * in the underlying matrix allocator.
	 */
	if (cpu_online(apicd->cpu)) {
		apicd->move_in_progress = true;
		apicd->prev_vector = apicd->vector;
		apicd->prev_cpu = apicd->cpu;
		/* A move within the same CPU makes no sense */
		WARN_ON_ONCE(apicd->cpu == newcpu);
	} else {
		irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
				managed);
	}

setnew:
	apicd->vector = newvec;
	apicd->cpu = newcpu;
	/* The new slot must not be occupied by a valid descriptor */
	BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
	per_cpu(vector_irq, newcpu)[newvec] = desc;
}
18795ffeb4bSThomas Gleixner 
/*
 * Park the interrupt on the managed shutdown vector. The first online
 * CPU is used as a placeholder destination for the hardware config.
 */
static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
	unsigned int cpu = cpumask_first(cpu_online_mask);

	apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}
1942db1f959SThomas Gleixner 
/*
 * Reserve vectors in the matrix for a managed interrupt across its
 * affinity mask. The real vector is assigned at activation time.
 *
 * Returns 0 on success or a negative error code from the matrix
 * allocator.
 */
static int reserve_managed_vector(struct irq_data *irqd)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apicd->is_managed = true;
	ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	trace_vector_reserve_managed(irqd->irq, ret);
	return ret;
}
2092db1f959SThomas Gleixner 
/*
 * Take a global (non CPU specific) reservation in the matrix and park
 * the interrupt on the managed shutdown vector until activation.
 *
 * Must be called with vector_lock held.
 */
static void reserve_irq_vector_locked(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	irq_matrix_reserve(vector_matrix);
	apicd->can_reserve = true;
	apicd->has_reserved = true;
	irqd_set_can_reserve(irqd);
	trace_vector_reserve(irqd->irq, 0);
	vector_assign_managed_shutdown(irqd);
}
2214900be83SThomas Gleixner 
/* Lock-taking wrapper around reserve_irq_vector_locked(). Always succeeds. */
static int reserve_irq_vector(struct irq_data *irqd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	reserve_irq_vector_locked(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return 0;
}
2314900be83SThomas Gleixner 
/*
 * Allocate a vector targeting one CPU in @dest and update chip data
 * and hardware configuration accordingly.
 *
 * Must be called with vector_lock held.
 *
 * Returns 0 on success (including the no-move fast path), -EBUSY when
 * a previous move is still pending, or a negative error from the
 * matrix allocator.
 */
static int
assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool resvd = apicd->has_reserved;
	unsigned int cpu = apicd->cpu;
	int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	/*
	 * If the current target CPU is online and in the new requested
	 * affinity mask, there is no point in moving the interrupt from
	 * one CPU to another.
	 */
	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
		return 0;

	/*
	 * Careful here. @apicd might either have move_in_progress set or
	 * be enqueued for cleanup. Assigning a new vector would either
	 * leave a stale vector on some CPU around or in case of a pending
	 * cleanup corrupt the hlist.
	 */
	if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
		return -EBUSY;

	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
	trace_vector_alloc(irqd->irq, vector, resvd, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);

	return 0;
}
26874afab7aSJiang Liu 
/*
 * Lock-taking wrapper around assign_vector_locked() which restricts
 * @dest to the currently online CPUs.
 */
static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	ret = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}
28074afab7aSJiang Liu 
/*
 * Assign a vector with decreasing preference: the intersection of the
 * affinity mask and the local node first, then the full affinity mask,
 * then the node mask, and finally any online CPU.
 *
 * Must be called with vector_lock held.
 */
static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
	/* Get the affinity mask - either irq_default_affinity or (user) set */
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	int node = irq_data_get_node(irqd);

	if (node != NUMA_NO_NODE) {
		/* Try the intersection of @affmsk and node mask */
		cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
		if (!assign_vector_locked(irqd, vector_searchmask))
			return 0;
	}

	/* Try the full affinity mask */
	cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;

	if (node != NUMA_NO_NODE) {
		/* Try the node mask */
		if (!assign_vector_locked(irqd, cpumask_of_node(node)))
			return 0;
	}

	/* Try the full online mask */
	return assign_vector_locked(irqd, cpu_online_mask);
}
3082db1f959SThomas Gleixner 
/*
 * Pick the allocation policy for a freshly allocated interrupt:
 * managed interrupts get a managed reservation, interrupts with a
 * caller supplied mask get a vector right away, everything else gets
 * only a global reservation.
 */
static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
	if (irqd_affinity_is_managed(irqd))
		return reserve_managed_vector(irqd);
	if (info->mask)
		return assign_irq_vector(irqd, info->mask);
	/*
	 * Make only a global reservation with no guarantee. A real vector
	 * is associated at activation time.
	 */
	return reserve_irq_vector(irqd);
}
3222db1f959SThomas Gleixner 
/*
 * Allocate a vector for a managed interrupt from its reserved set,
 * restricted to the CPUs in @dest.
 *
 * Must be called with vector_lock held (clobbers vector_searchmask).
 */
static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector, cpu;

	cpumask_and(vector_searchmask, dest, affmsk);

	/* set_affinity might call here for nothing */
	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
		return 0;
	vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
					  &cpu);
	trace_vector_alloc_managed(irqd->irq, vector, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);
	return 0;
}
344486ca539SJiang Liu 
/*
 * Release the current vector and, if an affinity move is pending, the
 * previous vector back to the matrix allocator and mark the per-CPU
 * vector table slots as shut down.
 *
 * Must be called with vector_lock held.
 */
static void clear_irq_vector(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool managed = irqd_affinity_is_managed(irqd);
	unsigned int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	/* Nothing assigned, nothing to release */
	if (!vector)
		return;

	trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
			   apicd->prev_cpu);

	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
	irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
	apicd->vector = 0;

	/* Clean up move in progress */
	vector = apicd->prev_vector;
	if (!vector)
		return;

	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
	irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
	/* Remove from the cleanup list if queued */
	hlist_del_init(&apicd->clist);
}
37474afab7aSJiang Liu 
x86_vector_deactivate(struct irq_domain * dom,struct irq_data * irqd)3752db1f959SThomas Gleixner static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
3762db1f959SThomas Gleixner {
3772db1f959SThomas Gleixner 	struct apic_chip_data *apicd = apic_chip_data(irqd);
3782db1f959SThomas Gleixner 	unsigned long flags;
3792db1f959SThomas Gleixner 
3802db1f959SThomas Gleixner 	trace_vector_deactivate(irqd->irq, apicd->is_managed,
3814900be83SThomas Gleixner 				apicd->can_reserve, false);
3822db1f959SThomas Gleixner 
3834900be83SThomas Gleixner 	/* Regular fixed assigned interrupt */
3844900be83SThomas Gleixner 	if (!apicd->is_managed && !apicd->can_reserve)
3854900be83SThomas Gleixner 		return;
3864900be83SThomas Gleixner 	/* If the interrupt has a global reservation, nothing to do */
3874900be83SThomas Gleixner 	if (apicd->has_reserved)
3882db1f959SThomas Gleixner 		return;
3892db1f959SThomas Gleixner 
3902db1f959SThomas Gleixner 	raw_spin_lock_irqsave(&vector_lock, flags);
3912db1f959SThomas Gleixner 	clear_irq_vector(irqd);
3924900be83SThomas Gleixner 	if (apicd->can_reserve)
3934900be83SThomas Gleixner 		reserve_irq_vector_locked(irqd);
3944900be83SThomas Gleixner 	else
3952db1f959SThomas Gleixner 		vector_assign_managed_shutdown(irqd);
3962db1f959SThomas Gleixner 	raw_spin_unlock_irqrestore(&vector_lock, flags);
3972db1f959SThomas Gleixner }
3982db1f959SThomas Gleixner 
activate_reserved(struct irq_data * irqd)3994900be83SThomas Gleixner static int activate_reserved(struct irq_data *irqd)
4004900be83SThomas Gleixner {
4014900be83SThomas Gleixner 	struct apic_chip_data *apicd = apic_chip_data(irqd);
4024900be83SThomas Gleixner 	int ret;
4034900be83SThomas Gleixner 
4044900be83SThomas Gleixner 	ret = assign_irq_vector_any_locked(irqd);
405bc976233SThomas Gleixner 	if (!ret) {
4064900be83SThomas Gleixner 		apicd->has_reserved = false;
407bc976233SThomas Gleixner 		/*
408bc976233SThomas Gleixner 		 * Core might have disabled reservation mode after
409bc976233SThomas Gleixner 		 * allocating the irq descriptor. Ideally this should
410bc976233SThomas Gleixner 		 * happen before allocation time, but that would require
411bc976233SThomas Gleixner 		 * completely convoluted ways of transporting that
412bc976233SThomas Gleixner 		 * information.
413bc976233SThomas Gleixner 		 */
414bc976233SThomas Gleixner 		if (!irqd_can_reserve(irqd))
415bc976233SThomas Gleixner 			apicd->can_reserve = false;
416bc976233SThomas Gleixner 	}
417743dac49SNeil Horman 
418743dac49SNeil Horman 	/*
419743dac49SNeil Horman 	 * Check to ensure that the effective affinity mask is a subset
420743dac49SNeil Horman 	 * the user supplied affinity mask, and warn the user if it is not
421743dac49SNeil Horman 	 */
422743dac49SNeil Horman 	if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
423743dac49SNeil Horman 			    irq_data_get_affinity_mask(irqd))) {
424743dac49SNeil Horman 		pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
425743dac49SNeil Horman 			irqd->irq);
426743dac49SNeil Horman 	}
427743dac49SNeil Horman 
4284900be83SThomas Gleixner 	return ret;
4294900be83SThomas Gleixner }
4304900be83SThomas Gleixner 
activate_managed(struct irq_data * irqd)4312db1f959SThomas Gleixner static int activate_managed(struct irq_data *irqd)
4322db1f959SThomas Gleixner {
4332db1f959SThomas Gleixner 	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
4342db1f959SThomas Gleixner 	int ret;
4352db1f959SThomas Gleixner 
4362db1f959SThomas Gleixner 	cpumask_and(vector_searchmask, dest, cpu_online_mask);
4372db1f959SThomas Gleixner 	if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
4382db1f959SThomas Gleixner 		/* Something in the core code broke! Survive gracefully */
4392db1f959SThomas Gleixner 		pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
44047b7360cSThomas Gleixner 		return -EINVAL;
4412db1f959SThomas Gleixner 	}
4422db1f959SThomas Gleixner 
4432db1f959SThomas Gleixner 	ret = assign_managed_vector(irqd, vector_searchmask);
4442db1f959SThomas Gleixner 	/*
4452db1f959SThomas Gleixner 	 * This should not happen. The vector reservation got buggered.  Handle
4462db1f959SThomas Gleixner 	 * it gracefully.
4472db1f959SThomas Gleixner 	 */
4482db1f959SThomas Gleixner 	if (WARN_ON_ONCE(ret < 0)) {
4492db1f959SThomas Gleixner 		pr_err("Managed startup irq %u, no vector available\n",
4502db1f959SThomas Gleixner 		       irqd->irq);
4512db1f959SThomas Gleixner 	}
4522db1f959SThomas Gleixner 	return ret;
4532db1f959SThomas Gleixner }
4542db1f959SThomas Gleixner 
x86_vector_activate(struct irq_domain * dom,struct irq_data * irqd,bool reserve)4552db1f959SThomas Gleixner static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
456702cb0a0SThomas Gleixner 			       bool reserve)
4572db1f959SThomas Gleixner {
4582db1f959SThomas Gleixner 	struct apic_chip_data *apicd = apic_chip_data(irqd);
4592db1f959SThomas Gleixner 	unsigned long flags;
4602db1f959SThomas Gleixner 	int ret = 0;
4612db1f959SThomas Gleixner 
4622db1f959SThomas Gleixner 	trace_vector_activate(irqd->irq, apicd->is_managed,
463702cb0a0SThomas Gleixner 			      apicd->can_reserve, reserve);
4642db1f959SThomas Gleixner 
4652db1f959SThomas Gleixner 	raw_spin_lock_irqsave(&vector_lock, flags);
466baedb87dSThomas Gleixner 	if (!apicd->can_reserve && !apicd->is_managed)
467baedb87dSThomas Gleixner 		assign_irq_vector_any_locked(irqd);
468baedb87dSThomas Gleixner 	else if (reserve || irqd_is_managed_and_shutdown(irqd))
4692db1f959SThomas Gleixner 		vector_assign_managed_shutdown(irqd);
4704900be83SThomas Gleixner 	else if (apicd->is_managed)
4712db1f959SThomas Gleixner 		ret = activate_managed(irqd);
4724900be83SThomas Gleixner 	else if (apicd->has_reserved)
4734900be83SThomas Gleixner 		ret = activate_reserved(irqd);
4742db1f959SThomas Gleixner 	raw_spin_unlock_irqrestore(&vector_lock, flags);
4752db1f959SThomas Gleixner 	return ret;
4762db1f959SThomas Gleixner }
4772db1f959SThomas Gleixner 
vector_free_reserved_and_managed(struct irq_data * irqd)4782db1f959SThomas Gleixner static void vector_free_reserved_and_managed(struct irq_data *irqd)
4792db1f959SThomas Gleixner {
4802db1f959SThomas Gleixner 	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
4812db1f959SThomas Gleixner 	struct apic_chip_data *apicd = apic_chip_data(irqd);
4822db1f959SThomas Gleixner 
4834900be83SThomas Gleixner 	trace_vector_teardown(irqd->irq, apicd->is_managed,
4844900be83SThomas Gleixner 			      apicd->has_reserved);
4852db1f959SThomas Gleixner 
4864900be83SThomas Gleixner 	if (apicd->has_reserved)
4874900be83SThomas Gleixner 		irq_matrix_remove_reserved(vector_matrix);
4882db1f959SThomas Gleixner 	if (apicd->is_managed)
4892db1f959SThomas Gleixner 		irq_matrix_remove_managed(vector_matrix, dest);
4902db1f959SThomas Gleixner }
4912db1f959SThomas Gleixner 
x86_vector_free_irqs(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs)492b5dc8e6cSJiang Liu static void x86_vector_free_irqs(struct irq_domain *domain,
493b5dc8e6cSJiang Liu 				 unsigned int virq, unsigned int nr_irqs)
494b5dc8e6cSJiang Liu {
49586ba6551SThomas Gleixner 	struct apic_chip_data *apicd;
49686ba6551SThomas Gleixner 	struct irq_data *irqd;
497111abebaSJiang Liu 	unsigned long flags;
498b5dc8e6cSJiang Liu 	int i;
499b5dc8e6cSJiang Liu 
500b5dc8e6cSJiang Liu 	for (i = 0; i < nr_irqs; i++) {
50186ba6551SThomas Gleixner 		irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
50286ba6551SThomas Gleixner 		if (irqd && irqd->chip_data) {
503111abebaSJiang Liu 			raw_spin_lock_irqsave(&vector_lock, flags);
50469cde000SThomas Gleixner 			clear_irq_vector(irqd);
5052db1f959SThomas Gleixner 			vector_free_reserved_and_managed(irqd);
50686ba6551SThomas Gleixner 			apicd = irqd->chip_data;
50786ba6551SThomas Gleixner 			irq_domain_reset_irq_data(irqd);
508111abebaSJiang Liu 			raw_spin_unlock_irqrestore(&vector_lock, flags);
50986ba6551SThomas Gleixner 			free_apic_chip_data(apicd);
510b5dc8e6cSJiang Liu 		}
511b5dc8e6cSJiang Liu 	}
512b5dc8e6cSJiang Liu }
513b5dc8e6cSJiang Liu 
vector_configure_legacy(unsigned int virq,struct irq_data * irqd,struct apic_chip_data * apicd)514464d1230SThomas Gleixner static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
515464d1230SThomas Gleixner 				    struct apic_chip_data *apicd)
516464d1230SThomas Gleixner {
517464d1230SThomas Gleixner 	unsigned long flags;
518464d1230SThomas Gleixner 	bool realloc = false;
519464d1230SThomas Gleixner 
520464d1230SThomas Gleixner 	apicd->vector = ISA_IRQ_VECTOR(virq);
521464d1230SThomas Gleixner 	apicd->cpu = 0;
522464d1230SThomas Gleixner 
523464d1230SThomas Gleixner 	raw_spin_lock_irqsave(&vector_lock, flags);
524464d1230SThomas Gleixner 	/*
525464d1230SThomas Gleixner 	 * If the interrupt is activated, then it must stay at this vector
526464d1230SThomas Gleixner 	 * position. That's usually the timer interrupt (0).
527464d1230SThomas Gleixner 	 */
528464d1230SThomas Gleixner 	if (irqd_is_activated(irqd)) {
529464d1230SThomas Gleixner 		trace_vector_setup(virq, true, 0);
530464d1230SThomas Gleixner 		apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
531464d1230SThomas Gleixner 	} else {
532464d1230SThomas Gleixner 		/* Release the vector */
533464d1230SThomas Gleixner 		apicd->can_reserve = true;
534945f50a5SThomas Gleixner 		irqd_set_can_reserve(irqd);
535464d1230SThomas Gleixner 		clear_irq_vector(irqd);
536464d1230SThomas Gleixner 		realloc = true;
537464d1230SThomas Gleixner 	}
538464d1230SThomas Gleixner 	raw_spin_unlock_irqrestore(&vector_lock, flags);
539464d1230SThomas Gleixner 	return realloc;
540464d1230SThomas Gleixner }
541464d1230SThomas Gleixner 
/*
 * Allocate APIC chip data and a vector for @nr_irqs interrupts starting
 * at @virq in the vector irqdomain.
 *
 * Returns 0 on success or a negative error code. On failure all
 * interrupts of the batch which were already set up are torn down again.
 */
static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	int i, err, node;

	/* Without a usable local APIC there is nothing to allocate from */
	if (apic_is_disabled)
		return -ENXIO;

	/*
	 * Catch any attempt to touch the cascade interrupt on a PIC
	 * equipped system.
	 */
	if (WARN_ON_ONCE(info->flags & X86_IRQ_ALLOC_LEGACY &&
			 virq == PIC_CASCADE_IR))
		return -EINVAL;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irqd);
		node = irq_data_get_node(irqd);
		WARN_ON_ONCE(irqd->chip_data);
		apicd = alloc_apic_chip_data(node);
		if (!apicd) {
			err = -ENOMEM;
			goto error;
		}

		apicd->irq = virq + i;
		irqd->chip = &lapic_controller;
		irqd->chip_data = apicd;
		irqd->hwirq = virq + i;
		irqd_set_single_target(irqd);
		/*
		 * Prevent that any of these interrupts is invoked in
		 * non interrupt context via e.g. generic_handle_irq()
		 * as that can corrupt the affinity move state.
		 */
		irqd_set_handle_enforce_irqctx(irqd);

		/* Don't invoke affinity setter on deactivated interrupts */
		irqd_set_affinity_on_activate(irqd);

		/*
		 * Legacy vectors are already assigned when the IOAPIC
		 * takes them over. They stay on the same vector. This is
		 * required for check_timer() to work correctly as it might
		 * switch back to legacy mode. Only update the hardware
		 * config.
		 */
		if (info->flags & X86_IRQ_ALLOC_LEGACY) {
			if (!vector_configure_legacy(virq + i, irqd, apicd))
				continue;
		}

		err = assign_irq_vector_policy(irqd, info);
		trace_vector_setup(virq + i, false, err);
		if (err) {
			/*
			 * Disconnect the chip data before freeing it so
			 * the cleanup below does not touch freed memory.
			 */
			irqd->chip_data = NULL;
			free_apic_chip_data(apicd);
			goto error;
		}
	}

	return 0;

error:
	/* Undo the partial allocation: @i interrupts were fully set up */
	x86_vector_free_irqs(domain, virq, i);
	return err;
}
614b5dc8e6cSJiang Liu 
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/*
 * Debugfs output for the vector domain. Without an irq_data this dumps
 * the global vector matrix state, otherwise the per interrupt
 * vector/target information.
 */
static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
				  struct irq_data *irqd, int ind)
{
	struct apic_chip_data apicd;
	unsigned long flags;
	int irq;

	if (!irqd) {
		irq_matrix_debug_show(m, vector_matrix, ind);
		return;
	}

	irq = irqd->irq;
	/* Legacy interrupts which are still handled by the PIC */
	if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
		seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
		seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
		return;
	}

	if (!irqd->chip_data) {
		seq_printf(m, "%*sVector: Not assigned\n", ind, "");
		return;
	}

	/* Take a consistent snapshot under the lock, print after dropping it */
	raw_spin_lock_irqsave(&vector_lock, flags);
	memcpy(&apicd, irqd->chip_data, sizeof(apicd));
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
	seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
	if (apicd.prev_vector) {
		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
	}
	seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
	seq_printf(m, "%*sis_managed:       %u\n", ind, "", apicd.is_managed ? 1 : 0);
	seq_printf(m, "%*scan_reserve:      %u\n", ind, "", apicd.can_reserve ? 1 : 0);
	seq_printf(m, "%*shas_reserved:     %u\n", ind, "", apicd.has_reserved ? 1 : 0);
	seq_printf(m, "%*scleanup_pending:  %u\n", ind, "", !hlist_unhashed(&apicd.clist));
}
#endif
65765d7ed57SThomas Gleixner 
x86_fwspec_is_ioapic(struct irq_fwspec * fwspec)6586452ea2aSDavid Woodhouse int x86_fwspec_is_ioapic(struct irq_fwspec *fwspec)
6596452ea2aSDavid Woodhouse {
6606452ea2aSDavid Woodhouse 	if (fwspec->param_count != 1)
6616452ea2aSDavid Woodhouse 		return 0;
6626452ea2aSDavid Woodhouse 
6636452ea2aSDavid Woodhouse 	if (is_fwnode_irqchip(fwspec->fwnode)) {
6646452ea2aSDavid Woodhouse 		const char *fwname = fwnode_get_name(fwspec->fwnode);
6656452ea2aSDavid Woodhouse 		return fwname && !strncmp(fwname, "IO-APIC-", 8) &&
6666452ea2aSDavid Woodhouse 			simple_strtol(fwname+8, NULL, 10) == fwspec->param[0];
6676452ea2aSDavid Woodhouse 	}
6686452ea2aSDavid Woodhouse 	return to_of_node(fwspec->fwnode) &&
6696452ea2aSDavid Woodhouse 		of_device_is_compatible(to_of_node(fwspec->fwnode),
6706452ea2aSDavid Woodhouse 					"intel,ce4100-ioapic");
6716452ea2aSDavid Woodhouse }
6726452ea2aSDavid Woodhouse 
x86_fwspec_is_hpet(struct irq_fwspec * fwspec)6736452ea2aSDavid Woodhouse int x86_fwspec_is_hpet(struct irq_fwspec *fwspec)
6746452ea2aSDavid Woodhouse {
6756452ea2aSDavid Woodhouse 	if (fwspec->param_count != 1)
6766452ea2aSDavid Woodhouse 		return 0;
6776452ea2aSDavid Woodhouse 
6786452ea2aSDavid Woodhouse 	if (is_fwnode_irqchip(fwspec->fwnode)) {
6796452ea2aSDavid Woodhouse 		const char *fwname = fwnode_get_name(fwspec->fwnode);
6806452ea2aSDavid Woodhouse 		return fwname && !strncmp(fwname, "HPET-MSI-", 9) &&
6816452ea2aSDavid Woodhouse 			simple_strtol(fwname+9, NULL, 10) == fwspec->param[0];
6826452ea2aSDavid Woodhouse 	}
6836452ea2aSDavid Woodhouse 	return 0;
6846452ea2aSDavid Woodhouse }
6856452ea2aSDavid Woodhouse 
static int x86_vector_select(struct irq_domain *d, struct irq_fwspec *fwspec,
			     enum irq_domain_bus_token bus_token)
{
	/*
	 * HPET and I/OAPIC cannot be parented in the vector domain
	 * if IRQ remapping is enabled. APIC IDs above 15 bits are
	 * only permitted if IRQ remapping is enabled, so check that.
	 */
	if (apic_id_valid(32768))
		return 0;

	if (x86_fwspec_is_ioapic(fwspec))
		return 1;

	return x86_fwspec_is_hpet(fwspec);
}
6996452ea2aSDavid Woodhouse 
/* Operations of the vector irqdomain, the root of the x86 IRQ hierarchy */
static const struct irq_domain_ops x86_vector_domain_ops = {
	.select		= x86_vector_select,
	.alloc		= x86_vector_alloc_irqs,
	.free		= x86_vector_free_irqs,
	.activate	= x86_vector_activate,
	.deactivate	= x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show	= x86_vector_debug_show,
#endif
};
710b5dc8e6cSJiang Liu 
arch_probe_nr_irqs(void)71111d686e9SJiang Liu int __init arch_probe_nr_irqs(void)
71211d686e9SJiang Liu {
71311d686e9SJiang Liu 	int nr;
71411d686e9SJiang Liu 
71511d686e9SJiang Liu 	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
71611d686e9SJiang Liu 		nr_irqs = NR_VECTORS * nr_cpu_ids;
71711d686e9SJiang Liu 
71811d686e9SJiang Liu 	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
719fd2fa6c1SBjorn Helgaas #if defined(CONFIG_PCI_MSI)
72011d686e9SJiang Liu 	/*
72111d686e9SJiang Liu 	 * for MSI and HT dyn irq
72211d686e9SJiang Liu 	 */
72311d686e9SJiang Liu 	if (gsi_top <= NR_IRQS_LEGACY)
72411d686e9SJiang Liu 		nr +=  8 * nr_cpu_ids;
72511d686e9SJiang Liu 	else
72611d686e9SJiang Liu 		nr += gsi_top * 16;
72711d686e9SJiang Liu #endif
72811d686e9SJiang Liu 	if (nr < nr_irqs)
72911d686e9SJiang Liu 		nr_irqs = nr;
73011d686e9SJiang Liu 
7318c058b0bSVitaly Kuznetsov 	/*
7328c058b0bSVitaly Kuznetsov 	 * We don't know if PIC is present at this point so we need to do
7338c058b0bSVitaly Kuznetsov 	 * probe() to get the right number of legacy IRQs.
7348c058b0bSVitaly Kuznetsov 	 */
7358c058b0bSVitaly Kuznetsov 	return legacy_pic->probe();
73611d686e9SJiang Liu }
73711d686e9SJiang Liu 
/*
 * Mark the vector of a legacy (ISA) interrupt as a system vector in the
 * matrix allocator.
 */
void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
	/*
	 * Use assign system here so it wont get accounted as allocated
	 * and moveable in the cpu hotplug check and it prevents managed
	 * irq reservation from touching it.
	 */
	irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}
7470fa115daSThomas Gleixner 
lapic_update_legacy_vectors(void)7487d65f9e8SThomas Gleixner void __init lapic_update_legacy_vectors(void)
7497d65f9e8SThomas Gleixner {
7507d65f9e8SThomas Gleixner 	unsigned int i;
7517d65f9e8SThomas Gleixner 
7527d65f9e8SThomas Gleixner 	if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
7537d65f9e8SThomas Gleixner 		return;
7547d65f9e8SThomas Gleixner 
7557d65f9e8SThomas Gleixner 	/*
7567d65f9e8SThomas Gleixner 	 * If the IO/APIC is disabled via config, kernel command line or
7577d65f9e8SThomas Gleixner 	 * lack of enumeration then all legacy interrupts are routed
7587d65f9e8SThomas Gleixner 	 * through the PIC. Make sure that they are marked as legacy
7597d65f9e8SThomas Gleixner 	 * vectors. PIC_CASCADE_IRQ has already been marked in
7607d65f9e8SThomas Gleixner 	 * lapic_assign_system_vectors().
7617d65f9e8SThomas Gleixner 	 */
7627d65f9e8SThomas Gleixner 	for (i = 0; i < nr_legacy_irqs(); i++) {
7637d65f9e8SThomas Gleixner 		if (i != PIC_CASCADE_IR)
7647d65f9e8SThomas Gleixner 			lapic_assign_legacy_vector(i, true);
7657d65f9e8SThomas Gleixner 	}
7667d65f9e8SThomas Gleixner }
7677d65f9e8SThomas Gleixner 
/*
 * Reserve all vectors which the kernel uses internally (system vectors)
 * in the matrix allocator and bring the boot CPU's vector matrix online.
 */
void __init lapic_assign_system_vectors(void)
{
	unsigned int i, vector;

	/* Every vector set in system_vectors is owned by the kernel */
	for_each_set_bit(vector, system_vectors, NR_VECTORS)
		irq_matrix_assign_system(vector_matrix, vector, false);

	if (nr_legacy_irqs() > 1)
		lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

	/* System vectors are reserved, online it */
	irq_matrix_online(vector_matrix);

	/* Mark the preallocated legacy interrupts */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		/*
		 * Don't touch the cascade interrupt. It's unusable
		 * on PIC equipped machines. See the large comment
		 * in the IO/APIC code.
		 */
		if (i != PIC_CASCADE_IR)
			irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
	}
}
7920fa115daSThomas Gleixner 
/*
 * Early boot setup of the vector irqdomain and the vector matrix
 * allocator. Failures here are fatal (BUG_ON) as the interrupt
 * subsystem cannot work without them.
 */
int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	/* Make the vector domain the default parent for allocations */
	irq_set_default_host(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

	/*
	 * Allocate the vector matrix allocator data structure and limit the
	 * search area.
	 */
	vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
					 FIRST_SYSTEM_VECTOR);
	BUG_ON(!vector_matrix);

	return arch_early_ioapic_init();
}
81611d686e9SJiang Liu 
817ba801640SThomas Gleixner #ifdef CONFIG_SMP
81874afab7aSJiang Liu 
__setup_vector_irq(int vector)819f0cc6ccaSThomas Gleixner static struct irq_desc *__setup_vector_irq(int vector)
820f0cc6ccaSThomas Gleixner {
821f0cc6ccaSThomas Gleixner 	int isairq = vector - ISA_IRQ_VECTOR(0);
822f0cc6ccaSThomas Gleixner 
823f0cc6ccaSThomas Gleixner 	/* Check whether the irq is in the legacy space */
824f0cc6ccaSThomas Gleixner 	if (isairq < 0 || isairq >= nr_legacy_irqs())
825f0cc6ccaSThomas Gleixner 		return VECTOR_UNUSED;
826f0cc6ccaSThomas Gleixner 	/* Check whether the irq is handled by the IOAPIC */
827f0cc6ccaSThomas Gleixner 	if (test_bit(isairq, &io_apic_irqs))
828f0cc6ccaSThomas Gleixner 		return VECTOR_UNUSED;
829f0cc6ccaSThomas Gleixner 	return irq_to_desc(isairq);
83074afab7aSJiang Liu }
83174afab7aSJiang Liu 
/*
 * Online the local APIC infrastructure and initialize the vectors.
 * Runs on the incoming CPU; the caller holds vector_lock.
 */
void lapic_online(void)
{
	unsigned int vector;

	lockdep_assert_held(&vector_lock);

	/* Online the vector matrix array for this CPU */
	irq_matrix_online(vector_matrix);

	/*
	 * The interrupt affinity logic never targets interrupts to offline
	 * CPUs. The exception are the legacy PIC interrupts. In general
	 * they are only targeted to CPU0, but depending on the platform
	 * they can be distributed to any online CPU in hardware. The
	 * kernel has no influence on that. So all active legacy vectors
	 * must be installed on all CPUs. All non legacy interrupts can be
	 * cleared.
	 */
	for (vector = 0; vector < NR_VECTORS; vector++)
		this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}
85474afab7aSJiang Liu 
855bdc1dad2SThomas Gleixner static void __vector_cleanup(struct vector_cleanup *cl, bool check_irr);
856bdc1dad2SThomas Gleixner 
/*
 * Take the local APIC vector bookkeeping offline on CPU unplug: flush
 * any pending moved-vector cleanups, mark the matrix offline and make
 * sure the cleanup timer is not armed anymore.
 */
void lapic_offline(void)
{
	struct vector_cleanup *cl = this_cpu_ptr(&vector_cleanup);

	lock_vector_lock();

	/* In case the vector cleanup timer has not expired */
	__vector_cleanup(cl, false);

	irq_matrix_offline(vector_matrix);
	/* The cleanup above must have emptied the list and idled the timer */
	WARN_ON_ONCE(try_to_del_timer_sync(&cl->timer) < 0);
	WARN_ON_ONCE(!hlist_empty(&cl->head));

	unlock_vector_lock();
}
8720fa115daSThomas Gleixner 
/*
 * irq_chip affinity setter: assign a vector on one of the online CPUs
 * in @dest. Managed interrupts are allocated from their reserved
 * per CPU slots, regular ones from the general vector space.
 */
static int apic_set_affinity(struct irq_data *irqd,
			     const struct cpumask *dest, bool force)
{
	int err;

	/* Deactivated interrupts must not reach this (see irqd flags above) */
	if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
		return -EIO;

	raw_spin_lock(&vector_lock);
	/* Only online CPUs are valid targets */
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (irqd_affinity_is_managed(irqd))
		err = assign_managed_vector(irqd, vector_searchmask);
	else
		err = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock(&vector_lock);
	return err ? err : IRQ_SET_MASK_OK;
}
890ba801640SThomas Gleixner 
891ba801640SThomas Gleixner #else
892ba801640SThomas Gleixner # define apic_set_affinity	NULL
893ba801640SThomas Gleixner #endif
894ba801640SThomas Gleixner 
/*
 * Retrigger an interrupt by sending an IPI with its vector to the CPU
 * it is currently targeted at. Always reports success (returns 1).
 */
static int apic_retrigger_irq(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	/* vector_lock keeps apicd->cpu/vector stable while sending */
	raw_spin_lock_irqsave(&vector_lock, flags);
	__apic_send_IPI(apicd->cpu, apicd->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
90674afab7aSJiang Liu 
/* Handle a pending interrupt move (if any), then EOI the local APIC */
void apic_ack_irq(struct irq_data *irqd)
{
	irq_move_irq(irqd);
	apic_eoi();
}
912c0255770SThomas Gleixner 
/* Edge ack: finish an in-flight vector move before acknowledging */
void apic_ack_edge(struct irq_data *irqd)
{
	irq_complete_move(irqd_cfg(irqd));
	apic_ack_irq(irqd);
}
91874afab7aSJiang Liu 
x86_vector_msi_compose_msg(struct irq_data * data,struct msi_msg * msg)919f598181aSDavid Woodhouse static void x86_vector_msi_compose_msg(struct irq_data *data,
920f598181aSDavid Woodhouse 				       struct msi_msg *msg)
921f598181aSDavid Woodhouse {
922f598181aSDavid Woodhouse        __irq_msi_compose_msg(irqd_cfg(data), msg, false);
923f598181aSDavid Woodhouse }
924f598181aSDavid Woodhouse 
/* The irq chip installed for all interrupts allocated from the vector domain */
static struct irq_chip lapic_controller = {
	.name			= "APIC",
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_compose_msi_msg	= x86_vector_msi_compose_msg,
	.irq_retrigger		= apic_retrigger_irq,
};
932b5dc8e6cSJiang Liu 
93374afab7aSJiang Liu #ifdef CONFIG_SMP
934dccfe314SThomas Gleixner 
/*
 * Release the previous vector of an interrupt whose affinity move
 * completed: return it to the matrix allocator, clear the per CPU
 * vector_irq entry and reset the move state.
 *
 * Callers hold vector_lock (see __vector_cleanup() and
 * __vector_schedule_cleanup()).
 */
static void free_moved_vector(struct apic_chip_data *apicd)
{
	unsigned int vector = apicd->prev_vector;
	unsigned int cpu = apicd->prev_cpu;
	bool managed = apicd->is_managed;

	/*
	 * Managed interrupts are usually not migrated away
	 * from an online CPU, but CPU isolation 'managed_irq'
	 * can make that happen.
	 * 1) Activation does not take the isolation into account
	 *    to keep the code simple
	 * 2) Migration away from an isolated CPU can happen when
	 *    a non-isolated CPU which is in the calculated
	 *    affinity mask comes online.
	 */
	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
	irq_matrix_free(vector_matrix, cpu, vector, managed);
	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	hlist_del_init(&apicd->clist);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
}
95869cde000SThomas Gleixner 
/*
 * Walk the cleanup list of a CPU and free every moved vector which is
 * no longer pending in the APIC IRR. If a vector is still pending the
 * entry stays on the list and the cleanup timer is rearmed.
 *
 * Caller must hold vector_lock.
 */
static void __vector_cleanup(struct vector_cleanup *cl, bool check_irr)
{
	struct apic_chip_data *apicd;
	struct hlist_node *tmp;
	bool rearm = false;

	lockdep_assert_held(&vector_lock);

	hlist_for_each_entry_safe(apicd, tmp, &cl->head, clist) {
		unsigned int irr, vector = apicd->prev_vector;

		/*
		 * Paranoia: Check if the vector that needs to be cleaned
		 * up is registered at the APICs IRR. That's clearly a
		 * hardware issue if the vector arrived on the old target
		 * _after_ interrupts were disabled above. Keep @apicd
		 * on the list and schedule the timer again to give the CPU
		 * a chance to handle the pending interrupt.
		 *
		 * Do not check IRR when called from lapic_offline(), because
		 * fixup_irqs() was just called to scan IRR for set bits and
		 * forward them to new destination CPUs via IPIs.
		 */
		irr = check_irr ? apic_read(APIC_IRR + (vector / 32 * 0x10)) : 0;
		if (irr & (1U << (vector % 32))) {
			pr_warn_once("Moved interrupt pending in old target APIC %u\n", apicd->irq);
			rearm = true;
			continue;
		}
		free_moved_vector(apicd);
	}

	/*
	 * Must happen under vector_lock to make the timer_pending() check
	 * in __vector_schedule_cleanup() race free against the rearm here.
	 */
	if (rearm)
		mod_timer(&cl->timer, jiffies + 1);
}
998bdc1dad2SThomas Gleixner 
vector_cleanup_callback(struct timer_list * tmr)999bdc1dad2SThomas Gleixner static void vector_cleanup_callback(struct timer_list *tmr)
1000bdc1dad2SThomas Gleixner {
1001bdc1dad2SThomas Gleixner 	struct vector_cleanup *cl = container_of(tmr, typeof(*cl), timer);
1002bdc1dad2SThomas Gleixner 
1003bdc1dad2SThomas Gleixner 	/* Prevent vectors vanishing under us */
1004bdc1dad2SThomas Gleixner 	raw_spin_lock_irq(&vector_lock);
1005bdc1dad2SThomas Gleixner 	__vector_cleanup(cl, true);
1006bdc1dad2SThomas Gleixner 	raw_spin_unlock_irq(&vector_lock);
1007dccfe314SThomas Gleixner }
1008dccfe314SThomas Gleixner 
/*
 * Queue the previous vector of @apicd for cleanup on its old target CPU
 * via the per CPU cleanup timer. If that CPU is offline the vector is
 * freed immediately.
 */
static void __vector_schedule_cleanup(struct apic_chip_data *apicd)
{
	unsigned int cpu = apicd->prev_cpu;

	raw_spin_lock(&vector_lock);
	apicd->move_in_progress = 0;
	if (cpu_online(cpu)) {
		struct vector_cleanup *cl = per_cpu_ptr(&vector_cleanup, cpu);

		hlist_add_head(&apicd->clist, &cl->head);

		/*
		 * The lockless timer_pending() check is safe here. If it
		 * returns true, then the callback will observe this new
		 * apic data in the hlist as everything is serialized by
		 * vector lock.
		 *
		 * If it returns false then the timer is either not armed
		 * or the other CPU executes the callback, which again
		 * would be blocked on vector lock. Rearming it in the
		 * latter case makes it fire for nothing.
		 *
		 * This is also safe against the callback rearming the timer
		 * because that's serialized via vector lock too.
		 */
		if (!timer_pending(&cl->timer)) {
			cl->timer.expires = jiffies + 1;
			add_timer_on(&cl->timer, cpu);
		}
	} else {
		pr_warn("IRQ %u schedule cleanup for offline CPU %u\n", apicd->irq, cpu);
		free_moved_vector(apicd);
	}
	raw_spin_unlock(&vector_lock);
}
104474afab7aSJiang Liu 
vector_schedule_cleanup(struct irq_cfg * cfg)1045a539cc86SThomas Gleixner void vector_schedule_cleanup(struct irq_cfg *cfg)
1046c6c2002bSJiang Liu {
104786ba6551SThomas Gleixner 	struct apic_chip_data *apicd;
10487f3262edSJiang Liu 
1049ba224feaSThomas Gleixner 	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
105086ba6551SThomas Gleixner 	if (apicd->move_in_progress)
1051a539cc86SThomas Gleixner 		__vector_schedule_cleanup(apicd);
1052c6c2002bSJiang Liu }
1053c6c2002bSJiang Liu 
/*
 * Called from the interrupt ack path. If an affinity move is in
 * progress and the interrupt arrived on the new target CPU, schedule
 * the cleanup of the vector on the old target CPU.
 */
void irq_complete_move(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (likely(!apicd->move_in_progress))
		return;

	/*
	 * If the interrupt arrived on the new target CPU, cleanup the
	 * vector on the old target CPU. A vector check is not required
	 * because an interrupt can never move from one vector to another
	 * on the same CPU.
	 */
	if (apicd->cpu == smp_processor_id())
		__vector_schedule_cleanup(apicd);
}
107174afab7aSJiang Liu 
107290a2282eSThomas Gleixner /*
1073551adc60SThomas Gleixner  * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
107490a2282eSThomas Gleixner  */
irq_force_complete_move(struct irq_desc * desc)107590a2282eSThomas Gleixner void irq_force_complete_move(struct irq_desc *desc)
107674afab7aSJiang Liu {
1077*59f86a29SDongli Zhang 	unsigned int cpu = smp_processor_id();
107886ba6551SThomas Gleixner 	struct apic_chip_data *apicd;
1079dccfe314SThomas Gleixner 	struct irq_data *irqd;
1080dccfe314SThomas Gleixner 	unsigned int vector;
108174afab7aSJiang Liu 
1082db91aa79SMika Westerberg 	/*
1083db91aa79SMika Westerberg 	 * The function is called for all descriptors regardless of which
1084db91aa79SMika Westerberg 	 * irqdomain they belong to. For example if an IRQ is provided by
1085db91aa79SMika Westerberg 	 * an irq_chip as part of a GPIO driver, the chip data for that
1086db91aa79SMika Westerberg 	 * descriptor is specific to the irq_chip in question.
1087db91aa79SMika Westerberg 	 *
1088db91aa79SMika Westerberg 	 * Check first that the chip_data is what we expect
1089db91aa79SMika Westerberg 	 * (apic_chip_data) before touching it any further.
1090db91aa79SMika Westerberg 	 */
109186ba6551SThomas Gleixner 	irqd = irq_domain_get_irq_data(x86_vector_domain,
1092db91aa79SMika Westerberg 				       irq_desc_get_irq(desc));
109386ba6551SThomas Gleixner 	if (!irqd)
1094db91aa79SMika Westerberg 		return;
1095db91aa79SMika Westerberg 
1096dccfe314SThomas Gleixner 	raw_spin_lock(&vector_lock);
109786ba6551SThomas Gleixner 	apicd = apic_chip_data(irqd);
1098dccfe314SThomas Gleixner 	if (!apicd)
1099dccfe314SThomas Gleixner 		goto unlock;
110056d7d2f4SThomas Gleixner 
110156d7d2f4SThomas Gleixner 	/*
1102*59f86a29SDongli Zhang 	 * If prev_vector is empty or the descriptor is neither currently
1103*59f86a29SDongli Zhang 	 * nor previously on the outgoing CPU no action required.
1104dccfe314SThomas Gleixner 	 */
1105ba224feaSThomas Gleixner 	vector = apicd->prev_vector;
1106*59f86a29SDongli Zhang 	if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu))
1107dccfe314SThomas Gleixner 		goto unlock;
1108dccfe314SThomas Gleixner 
1109dccfe314SThomas Gleixner 	/*
1110dccfe314SThomas Gleixner 	 * This is tricky. If the cleanup of the old vector has not been
111198229aa3SThomas Gleixner 	 * done yet, then the following setaffinity call will fail with
111298229aa3SThomas Gleixner 	 * -EBUSY. This can leave the interrupt in a stale state.
111398229aa3SThomas Gleixner 	 *
1114551adc60SThomas Gleixner 	 * All CPUs are stuck in stop machine with interrupts disabled so
1115551adc60SThomas Gleixner 	 * calling __irq_complete_move() would be completely pointless.
1116dccfe314SThomas Gleixner 	 *
1117551adc60SThomas Gleixner 	 * 1) The interrupt is in move_in_progress state. That means that we
1118551adc60SThomas Gleixner 	 *    have not seen an interrupt since the io_apic was reprogrammed to
1119551adc60SThomas Gleixner 	 *    the new vector.
1120551adc60SThomas Gleixner 	 *
1121551adc60SThomas Gleixner 	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
1122551adc60SThomas Gleixner 	 *    have not been processed yet.
1123551adc60SThomas Gleixner 	 */
112486ba6551SThomas Gleixner 	if (apicd->move_in_progress) {
1125551adc60SThomas Gleixner 		/*
1126551adc60SThomas Gleixner 		 * In theory there is a race:
1127551adc60SThomas Gleixner 		 *
1128551adc60SThomas Gleixner 		 * set_ioapic(new_vector) <-- Interrupt is raised before update
1129551adc60SThomas Gleixner 		 *			      is effective, i.e. it's raised on
1130551adc60SThomas Gleixner 		 *			      the old vector.
1131551adc60SThomas Gleixner 		 *
1132551adc60SThomas Gleixner 		 * So if the target cpu cannot handle that interrupt before
1133551adc60SThomas Gleixner 		 * the old vector is cleaned up, we get a spurious interrupt
1134551adc60SThomas Gleixner 		 * and in the worst case the ioapic irq line becomes stale.
1135551adc60SThomas Gleixner 		 *
1136551adc60SThomas Gleixner 		 * But in case of cpu hotplug this should be a non issue
1137551adc60SThomas Gleixner 		 * because if the affinity update happens right before all
1138d9f6e12fSIngo Molnar 		 * cpus rendezvous in stop machine, there is no way that the
1139551adc60SThomas Gleixner 		 * interrupt can be blocked on the target cpu because all cpus
1140551adc60SThomas Gleixner 		 * loops first with interrupts enabled in stop machine, so the
1141551adc60SThomas Gleixner 		 * old vector is not yet cleaned up when the interrupt fires.
1142551adc60SThomas Gleixner 		 *
1143551adc60SThomas Gleixner 		 * So the only way to run into this issue is if the delivery
1144551adc60SThomas Gleixner 		 * of the interrupt on the apic/system bus would be delayed
1145551adc60SThomas Gleixner 		 * beyond the point where the target cpu disables interrupts
1146551adc60SThomas Gleixner 		 * in stop machine. I doubt that it can happen, but at least
1147d9f6e12fSIngo Molnar 		 * there is a theoretical chance. Virtualization might be
1148551adc60SThomas Gleixner 		 * able to expose this, but AFAICT the IOAPIC emulation is not
1149551adc60SThomas Gleixner 		 * as stupid as the real hardware.
1150551adc60SThomas Gleixner 		 *
1151551adc60SThomas Gleixner 		 * Anyway, there is nothing we can do about that at this point
1152551adc60SThomas Gleixner 		 * w/o refactoring the whole fixup_irq() business completely.
1153551adc60SThomas Gleixner 		 * We print at least the irq number and the old vector number,
1154551adc60SThomas Gleixner 		 * so we have the necessary information when a problem in that
1155551adc60SThomas Gleixner 		 * area arises.
1156551adc60SThomas Gleixner 		 */
1157551adc60SThomas Gleixner 		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
1158dccfe314SThomas Gleixner 			irqd->irq, vector);
1159551adc60SThomas Gleixner 	}
116069cde000SThomas Gleixner 	free_moved_vector(apicd);
1161dccfe314SThomas Gleixner unlock:
116256d7d2f4SThomas Gleixner 	raw_spin_unlock(&vector_lock);
116374afab7aSJiang Liu }
11642cffad7bSThomas Gleixner 
11652cffad7bSThomas Gleixner #ifdef CONFIG_HOTPLUG_CPU
11662cffad7bSThomas Gleixner /*
11672cffad7bSThomas Gleixner  * Note, this is not accurate accounting, but at least good enough to
11682cffad7bSThomas Gleixner  * prevent that the actual interrupt move will run out of vectors.
11692cffad7bSThomas Gleixner  */
lapic_can_unplug_cpu(void)11702cffad7bSThomas Gleixner int lapic_can_unplug_cpu(void)
11712cffad7bSThomas Gleixner {
11722cffad7bSThomas Gleixner 	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
11732cffad7bSThomas Gleixner 	int ret = 0;
11742cffad7bSThomas Gleixner 
11752cffad7bSThomas Gleixner 	raw_spin_lock(&vector_lock);
11762cffad7bSThomas Gleixner 	tomove = irq_matrix_allocated(vector_matrix);
11772cffad7bSThomas Gleixner 	avl = irq_matrix_available(vector_matrix, true);
11782cffad7bSThomas Gleixner 	if (avl < tomove) {
11792cffad7bSThomas Gleixner 		pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
11802cffad7bSThomas Gleixner 			cpu, tomove, avl);
11812cffad7bSThomas Gleixner 		ret = -ENOSPC;
11822cffad7bSThomas Gleixner 		goto out;
11832cffad7bSThomas Gleixner 	}
11842cffad7bSThomas Gleixner 	rsvd = irq_matrix_reserved(vector_matrix);
11852cffad7bSThomas Gleixner 	if (avl < rsvd) {
11862cffad7bSThomas Gleixner 		pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
11872cffad7bSThomas Gleixner 			rsvd, avl);
11882cffad7bSThomas Gleixner 	}
11892cffad7bSThomas Gleixner out:
11902cffad7bSThomas Gleixner 	raw_spin_unlock(&vector_lock);
11912cffad7bSThomas Gleixner 	return ret;
11922cffad7bSThomas Gleixner }
11932cffad7bSThomas Gleixner #endif /* HOTPLUG_CPU */
11942cffad7bSThomas Gleixner #endif /* SMP */
119574afab7aSJiang Liu 
print_APIC_field(int base)119674afab7aSJiang Liu static void __init print_APIC_field(int base)
119774afab7aSJiang Liu {
119874afab7aSJiang Liu 	int i;
119974afab7aSJiang Liu 
120074afab7aSJiang Liu 	printk(KERN_DEBUG);
120174afab7aSJiang Liu 
120274afab7aSJiang Liu 	for (i = 0; i < 8; i++)
120374afab7aSJiang Liu 		pr_cont("%08x", apic_read(base + i*0x10));
120474afab7aSJiang Liu 
120574afab7aSJiang Liu 	pr_cont("\n");
120674afab7aSJiang Liu }
120774afab7aSJiang Liu 
/*
 * Dump the local APIC register state of the CPU this runs on.
 *
 * Invoked via smp_call_function_single() from print_local_APICs(), so it
 * executes on the target CPU itself. @dummy is unused (cross-call
 * signature requirement). The register read order follows the layout of
 * the APIC register space; some registers only exist on integrated
 * (non-82489DX) or pre-xAPIC parts and are gated accordingly.
 */
static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), read_apic_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX - these registers only exist on integrated APICs */
	if (APIC_INTEGRATED(ver)) {
		/* ARBPRI was dropped with the xAPIC architecture */
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	/* DFR does not exist in x2APIC mode */
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	/* The three 256-bit interrupt state banks */
	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP: write ESR before reading it */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	/* ICR is 64 bits wide; print low and high halves separately */
	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	/* APIC timer registers */
	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	/* AMD extended APIC feature space (EFEAT reports extended LVT count) */
	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}
131474afab7aSJiang Liu 
print_local_APICs(int maxcpu)131574afab7aSJiang Liu static void __init print_local_APICs(int maxcpu)
131674afab7aSJiang Liu {
131774afab7aSJiang Liu 	int cpu;
131874afab7aSJiang Liu 
131974afab7aSJiang Liu 	if (!maxcpu)
132074afab7aSJiang Liu 		return;
132174afab7aSJiang Liu 
132274afab7aSJiang Liu 	preempt_disable();
132374afab7aSJiang Liu 	for_each_online_cpu(cpu) {
132474afab7aSJiang Liu 		if (cpu >= maxcpu)
132574afab7aSJiang Liu 			break;
132674afab7aSJiang Liu 		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
132774afab7aSJiang Liu 	}
132874afab7aSJiang Liu 	preempt_enable();
132974afab7aSJiang Liu }
133074afab7aSJiang Liu 
/*
 * Dump the state of the legacy 8259A PIC pair (master at 0x20/0x21,
 * slave at 0xa0/0xa1) when legacy interrupts are in use.
 *
 * The register accesses are performed under i8259A_lock with interrupts
 * disabled, since reading ISR requires issuing OCW3 commands that
 * temporarily change which register the status port returns.
 */
static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	/* Nothing to dump when the PIC is not used for interrupt delivery */
	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	/* Interrupt mask registers: slave in the high byte, master in the low */
	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC  IMR: %04x\n", v);

	/* Status ports return IRR by default */
	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC  IRR: %04x\n", v);

	/* OCW3 0x0b: select ISR for the next status-port read */
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	/* OCW3 0x0a: restore the default IRR read-back selection */
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC  ISR: %04x\n", v);

	/* Edge/level control registers (ELCR) */
	v = inb(PIC_ELCR2) << 8 | inb(PIC_ELCR1);
	pr_debug("... PIC ELCR: %04x\n", v);
}
136274afab7aSJiang Liu 
136374afab7aSJiang Liu static int show_lapic __initdata = 1;
setup_show_lapic(char * arg)136474afab7aSJiang Liu static __init int setup_show_lapic(char *arg)
136574afab7aSJiang Liu {
136674afab7aSJiang Liu 	int num = -1;
136774afab7aSJiang Liu 
136874afab7aSJiang Liu 	if (strcmp(arg, "all") == 0) {
136974afab7aSJiang Liu 		show_lapic = CONFIG_NR_CPUS;
137074afab7aSJiang Liu 	} else {
137174afab7aSJiang Liu 		get_option(&arg, &num);
137274afab7aSJiang Liu 		if (num >= 0)
137374afab7aSJiang Liu 			show_lapic = num;
137474afab7aSJiang Liu 	}
137574afab7aSJiang Liu 
137674afab7aSJiang Liu 	return 1;
137774afab7aSJiang Liu }
137874afab7aSJiang Liu __setup("show_lapic=", setup_show_lapic);
137974afab7aSJiang Liu 
print_ICs(void)138074afab7aSJiang Liu static int __init print_ICs(void)
138174afab7aSJiang Liu {
138274afab7aSJiang Liu 	if (apic_verbosity == APIC_QUIET)
138374afab7aSJiang Liu 		return 0;
138474afab7aSJiang Liu 
138574afab7aSJiang Liu 	print_PIC();
138674afab7aSJiang Liu 
138774afab7aSJiang Liu 	/* don't print out if apic is not there */
138893984fbdSBorislav Petkov 	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
138974afab7aSJiang Liu 		return 0;
139074afab7aSJiang Liu 
139174afab7aSJiang Liu 	print_local_APICs(show_lapic);
139274afab7aSJiang Liu 	print_IO_APICs();
139374afab7aSJiang Liu 
139474afab7aSJiang Liu 	return 0;
139574afab7aSJiang Liu }
139674afab7aSJiang Liu 
139774afab7aSJiang Liu late_initcall(print_ICs);
1398