xref: /openbmc/linux/arch/x86/kernel/apic/x2apic_phys.c (revision b627b4ed)
1 #include <linux/threads.h>
2 #include <linux/cpumask.h>
3 #include <linux/string.h>
4 #include <linux/kernel.h>
5 #include <linux/ctype.h>
6 #include <linux/init.h>
7 #include <linux/dmar.h>
8 
9 #include <asm/smp.h>
10 #include <asm/apic.h>
11 #include <asm/ipi.h>
12 
13 int x2apic_phys;
14 
15 static int set_x2apic_phys_mode(char *arg)
16 {
17 	x2apic_phys = 1;
18 	return 0;
19 }
20 early_param("x2apic_phys", set_x2apic_phys_mode);
21 
22 static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
23 {
24 	if (x2apic_phys)
25 		return x2apic_enabled();
26 	else
27 		return 0;
28 }
29 
/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */

static const struct cpumask *x2apic_target_cpus(void)
{
	/* Initial interrupt routing target: CPU 0 only. */
	return cpumask_of(0);
}
36 
static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	/*
	 * Vectors are allocated per individual CPU: the allocation domain
	 * is the single-CPU mask for @cpu.
	 */
	cpumask_copy(retmask, cpumask_of(cpu));
}
42 
static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
				   unsigned int dest)
{
	/*
	 * Build the ICR low word and send the IPI: in x2apic mode a single
	 * ICR MSR write carries both the command and the destination ID.
	 */
	native_x2apic_icr_write(__prepare_ICR(0, vector, dest), apicid);
}
55 
56 static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
57 {
58 	unsigned long query_cpu;
59 	unsigned long flags;
60 
61 	x2apic_wrmsr_fence();
62 
63 	local_irq_save(flags);
64 	for_each_cpu(query_cpu, mask) {
65 		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
66 				       vector, APIC_DEST_PHYSICAL);
67 	}
68 	local_irq_restore(flags);
69 }
70 
71 static void
72  x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
73 {
74 	unsigned long this_cpu = smp_processor_id();
75 	unsigned long query_cpu;
76 	unsigned long flags;
77 
78 	x2apic_wrmsr_fence();
79 
80 	local_irq_save(flags);
81 	for_each_cpu(query_cpu, mask) {
82 		if (query_cpu != this_cpu)
83 			__x2apic_send_IPI_dest(
84 				per_cpu(x86_cpu_to_apicid, query_cpu),
85 				vector, APIC_DEST_PHYSICAL);
86 	}
87 	local_irq_restore(flags);
88 }
89 
90 static void x2apic_send_IPI_allbutself(int vector)
91 {
92 	unsigned long this_cpu = smp_processor_id();
93 	unsigned long query_cpu;
94 	unsigned long flags;
95 
96 	x2apic_wrmsr_fence();
97 
98 	local_irq_save(flags);
99 	for_each_online_cpu(query_cpu) {
100 		if (query_cpu == this_cpu)
101 			continue;
102 		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
103 				       vector, APIC_DEST_PHYSICAL);
104 	}
105 	local_irq_restore(flags);
106 }
107 
/* Send @vector to every online CPU, including the calling CPU. */
static void x2apic_send_IPI_all(int vector)
{
	x2apic_send_IPI_mask(cpu_online_mask, vector);
}
112 
/*
 * Report the local APIC as registered.  Returns 1 unconditionally:
 * this driver is only selected when x2apic is enabled in hardware.
 */
static int x2apic_apic_id_registered(void)
{
	return 1;
}
117 
118 static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
119 {
120 	/*
121 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
122 	 * May as well be the first.
123 	 */
124 	int cpu = cpumask_first(cpumask);
125 
126 	if ((unsigned)cpu < nr_cpu_ids)
127 		return per_cpu(x86_cpu_to_apicid, cpu);
128 	else
129 		return BAD_APICID;
130 }
131 
132 static unsigned int
133 x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
134 			      const struct cpumask *andmask)
135 {
136 	int cpu;
137 
138 	/*
139 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
140 	 * May as well be the first.
141 	 */
142 	for_each_cpu_and(cpu, cpumask, andmask) {
143 		if (cpumask_test_cpu(cpu, cpu_online_mask))
144 			break;
145 	}
146 
147 	if (cpu < nr_cpu_ids)
148 		return per_cpu(x86_cpu_to_apicid, cpu);
149 
150 	return BAD_APICID;
151 }
152 
/* In physical x2apic mode the APIC ID register value is used verbatim. */
static unsigned int x2apic_phys_get_apic_id(unsigned long x)
{
	return x;
}
157 
/* Identity mapping: the x2apic ID is programmed exactly as given. */
static unsigned long set_apic_id(unsigned int id)
{
	return id;
}
162 
/*
 * Derive the physical package ID by shifting the low APIC ID bits out.
 * NOTE(review): this reads current_cpu_data.initial_apicid and ignores
 * the initial_apicid argument — presumably callers only invoke it for
 * the current CPU; confirm before relying on the parameter.
 */
static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
{
	return current_cpu_data.initial_apicid >> index_msb;
}
167 
/*
 * Send @vector to the calling CPU via the x2apic SELF IPI register —
 * a single register write, no ICR programming needed.
 */
static void x2apic_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}
172 
/* Physical destination mode does not use the LDR: nothing to initialize. */
static void init_x2apic_ldr(void)
{
}
176 
/*
 * APIC driver for physical-destination x2apic mode.  Callbacks that only
 * apply to legacy or logical-destination modes are left NULL.
 */
struct apic apic_x2apic_phys = {

	.name				= "physical x2apic",
	.probe				= NULL,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_registered		= x2apic_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 0, /* physical */

	.target_cpus			= x2apic_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= 0,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= x2apic_vector_allocation_domain,
	.init_apic_ldr			= init_x2apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.apicid_to_node			= NULL,
	.cpu_to_logical_apicid		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= x2apic_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= x2apic_phys_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu, /* x2apic IDs are full 32 bits */

	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,

	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	/* In x2apic mode all APIC registers are accessed via MSRs. */
	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};
234 };
235