xref: /openbmc/linux/arch/x86/kernel/apic/x2apic_phys.c (revision f62bae50)
1 #include <linux/threads.h>
2 #include <linux/cpumask.h>
3 #include <linux/string.h>
4 #include <linux/kernel.h>
5 #include <linux/ctype.h>
6 #include <linux/init.h>
7 #include <linux/dmar.h>
8 
9 #include <asm/smp.h>
10 #include <asm/apic.h>
11 #include <asm/ipi.h>
12 
/* Non-zero when "x2apic_phys" was given on the kernel command line. */
static int x2apic_phys;

/*
 * Early-parameter handler for "x2apic_phys".  The option value (if any)
 * is ignored: mere presence of the option requests physical mode.
 * Returns 0 (success) per early_param() convention.
 */
static int set_x2apic_phys_mode(char *arg)
{
	x2apic_phys = 1;
	return 0;
}
early_param("x2apic_phys", set_x2apic_phys_mode);
21 
22 static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
23 {
24 	if (cpu_has_x2apic && x2apic_phys)
25 		return 1;
26 
27 	return 0;
28 }
29 
30 /* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
31 
/*
 * Default IRQ target mask: just the boot CPU.  IRQ balancing is
 * expected to redistribute interrupts afterwards.
 */
static const struct cpumask *x2apic_target_cpus(void)
{
	return cpumask_of(0);
}
36 
/*
 * Vector allocation domain for @cpu: in physical mode each vector
 * targets exactly one CPU, so the domain is the singleton {cpu}.
 */
static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_copy(retmask, cpumask_of(cpu));
}
42 
/*
 * Issue one IPI: build the ICR value for @vector/@dest with
 * __prepare_ICR() and hand it, together with the target @apicid,
 * to the x2APIC ICR write primitive.
 */
static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
				   unsigned int dest)
{
	native_x2apic_icr_write(__prepare_ICR(0, vector, dest), apicid);
}
55 
56 static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
57 {
58 	unsigned long query_cpu;
59 	unsigned long flags;
60 
61 	local_irq_save(flags);
62 	for_each_cpu(query_cpu, mask) {
63 		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
64 				       vector, APIC_DEST_PHYSICAL);
65 	}
66 	local_irq_restore(flags);
67 }
68 
69 static void
70  x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
71 {
72 	unsigned long this_cpu = smp_processor_id();
73 	unsigned long query_cpu;
74 	unsigned long flags;
75 
76 	local_irq_save(flags);
77 	for_each_cpu(query_cpu, mask) {
78 		if (query_cpu != this_cpu)
79 			__x2apic_send_IPI_dest(
80 				per_cpu(x86_cpu_to_apicid, query_cpu),
81 				vector, APIC_DEST_PHYSICAL);
82 	}
83 	local_irq_restore(flags);
84 }
85 
86 static void x2apic_send_IPI_allbutself(int vector)
87 {
88 	unsigned long this_cpu = smp_processor_id();
89 	unsigned long query_cpu;
90 	unsigned long flags;
91 
92 	local_irq_save(flags);
93 	for_each_online_cpu(query_cpu) {
94 		if (query_cpu == this_cpu)
95 			continue;
96 		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
97 				       vector, APIC_DEST_PHYSICAL);
98 	}
99 	local_irq_restore(flags);
100 }
101 
102 static void x2apic_send_IPI_all(int vector)
103 {
104 	x2apic_send_IPI_mask(cpu_online_mask, vector);
105 }
106 
/* Registration check stub: always report the local APIC as registered. */
static int x2apic_apic_id_registered(void)
{
	return 1;
}
111 
112 static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
113 {
114 	/*
115 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
116 	 * May as well be the first.
117 	 */
118 	int cpu = cpumask_first(cpumask);
119 
120 	if ((unsigned)cpu < nr_cpu_ids)
121 		return per_cpu(x86_cpu_to_apicid, cpu);
122 	else
123 		return BAD_APICID;
124 }
125 
126 static unsigned int
127 x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
128 			      const struct cpumask *andmask)
129 {
130 	int cpu;
131 
132 	/*
133 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
134 	 * May as well be the first.
135 	 */
136 	for_each_cpu_and(cpu, cpumask, andmask) {
137 		if (cpumask_test_cpu(cpu, cpu_online_mask))
138 			break;
139 	}
140 
141 	if (cpu < nr_cpu_ids)
142 		return per_cpu(x86_cpu_to_apicid, cpu);
143 
144 	return BAD_APICID;
145 }
146 
/* The raw ID register value is the APIC ID; no field extraction needed. */
static unsigned int x2apic_phys_get_apic_id(unsigned long x)
{
	return (unsigned int)x;
}
151 
/* Inverse of x2apic_phys_get_apic_id(): store the ID unmodified. */
static unsigned long set_apic_id(unsigned int id)
{
	return (unsigned long)id;
}
156 
/*
 * Package ID: the current CPU's initial APIC ID shifted right by
 * @index_msb.
 *
 * NOTE(review): the initial_apicid parameter is ignored here; the
 * value is taken from current_cpu_data instead — presumably the
 * callback is only invoked on the CPU being identified; confirm
 * against the callers of ->phys_pkg_id.
 */
static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
{
	return current_cpu_data.initial_apicid >> index_msb;
}
161 
/* Send @vector to the calling CPU via the dedicated SELF IPI register. */
static void x2apic_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}
166 
/*
 * Intentionally empty: physical destination mode does not use the
 * logical destination register, so there is nothing to set up.
 */
static void init_x2apic_ldr(void)
{
}
170 
/*
 * APIC driver for physical-destination x2APIC mode.  Selected via
 * x2apic_acpi_madt_oem_check() when the CPU has x2APIC and the user
 * passed "x2apic_phys".  Hooks not needed in this mode are NULL.
 */
struct apic apic_x2apic_phys = {

	.name				= "physical x2apic",
	.probe				= NULL,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_registered		= x2apic_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 0, /* physical */

	.target_cpus			= x2apic_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= 0,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= x2apic_vector_allocation_domain,
	.init_apic_ldr			= init_x2apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.apicid_to_node			= NULL,
	.cpu_to_logical_apicid		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= x2apic_phys_pkg_id,
	.mps_oem_check			= NULL,

	/* APIC IDs are full 32-bit values in x2APIC mode. */
	.get_apic_id			= x2apic_phys_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,

	/* IPI primitives implemented above in this file. */
	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.wakeup_cpu			= NULL,
	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	/* Register accessors: MSR-based native x2APIC helpers. */
	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};
230