1 #include <linux/threads.h>
2 #include <linux/cpumask.h>
3 #include <linux/string.h>
4 #include <linux/kernel.h>
5 #include <linux/ctype.h>
6 #include <linux/init.h>
7 #include <linux/dmar.h>
8 
9 #include <asm/smp.h>
10 #include <asm/apic.h>
11 #include <asm/ipi.h>
12 
/*
 * Per-CPU cache of the logical APIC ID; filled from APIC_LDR by
 * init_x2apic_ldr() and used as the IPI destination for each CPU.
 */
DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
14 
/*
 * Probe hook: this driver is usable exactly when the CPU is already
 * running in x2apic mode; the OEM id strings are not consulted.
 */
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	int usable;

	usable = x2apic_enabled();
	return usable;
}
19 
/*
 * Default IRQ destination: the boot CPU.  IRQ balancing is expected to
 * redistribute interrupts later.
 */
static const struct cpumask *x2apic_target_cpus(void)
{
	return cpumask_of(0);
}
26 
/*
 * Each logical CPU currently lives in its own vector allocation
 * domain: the returned mask holds exactly that single CPU.
 */
static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_copy(retmask, cpumask_of(cpu));
}
35 
/*
 * Low-level IPI send: build the ICR value for @vector/@dest and write
 * it to the x2apic ICR MSR; the write itself triggers the IPI.
 */
static void
__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
{
	native_x2apic_icr_write(__prepare_ICR(0, vector, dest), apicid);
}
48 
49 /*
50  * for now, we send the IPI's one by one in the cpumask.
51  * TBD: Based on the cpu mask, we can send the IPI's to the cluster group
52  * at once. We have 16 cpu's in a cluster. This will minimize IPI register
53  * writes.
54  */
55 static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
56 {
57 	unsigned long query_cpu;
58 	unsigned long flags;
59 
60 	local_irq_save(flags);
61 	for_each_cpu(query_cpu, mask) {
62 		__x2apic_send_IPI_dest(
63 			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
64 			vector, apic->dest_logical);
65 	}
66 	local_irq_restore(flags);
67 }
68 
69 static void
70  x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
71 {
72 	unsigned long this_cpu = smp_processor_id();
73 	unsigned long query_cpu;
74 	unsigned long flags;
75 
76 	local_irq_save(flags);
77 	for_each_cpu(query_cpu, mask) {
78 		if (query_cpu == this_cpu)
79 			continue;
80 		__x2apic_send_IPI_dest(
81 				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
82 				vector, apic->dest_logical);
83 	}
84 	local_irq_restore(flags);
85 }
86 
87 static void x2apic_send_IPI_allbutself(int vector)
88 {
89 	unsigned long this_cpu = smp_processor_id();
90 	unsigned long query_cpu;
91 	unsigned long flags;
92 
93 	local_irq_save(flags);
94 	for_each_online_cpu(query_cpu) {
95 		if (query_cpu == this_cpu)
96 			continue;
97 		__x2apic_send_IPI_dest(
98 				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
99 				vector, apic->dest_logical);
100 	}
101 	local_irq_restore(flags);
102 }
103 
/* Send @vector to every online CPU, including the sender. */
static void x2apic_send_IPI_all(int vector)
{
	x2apic_send_IPI_mask(cpu_online_mask, vector);
}
108 
/* Unconditionally report the APIC ID as registered in x2apic mode. */
static int x2apic_apic_id_registered(void)
{
	return 1;
}
113 
114 static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
115 {
116 	/*
117 	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
118 	 * May as well be the first.
119 	 */
120 	int cpu = cpumask_first(cpumask);
121 
122 	if ((unsigned)cpu < nr_cpu_ids)
123 		return per_cpu(x86_cpu_to_logical_apicid, cpu);
124 	else
125 		return BAD_APICID;
126 }
127 
128 static unsigned int
129 x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
130 			      const struct cpumask *andmask)
131 {
132 	int cpu;
133 
134 	/*
135 	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
136 	 * May as well be the first.
137 	 */
138 	for_each_cpu_and(cpu, cpumask, andmask) {
139 		if (cpumask_test_cpu(cpu, cpu_online_mask))
140 			break;
141 	}
142 
143 	if (cpu < nr_cpu_ids)
144 		return per_cpu(x86_cpu_to_logical_apicid, cpu);
145 
146 	return BAD_APICID;
147 }
148 
/* The x2apic ID is used as-is; just narrow the raw value to 32 bits. */
static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x)
{
	return (unsigned int)x;
}
156 
/* Inverse of the get_apic_id hook: widen the 32-bit APIC ID unchanged. */
static unsigned long set_apic_id(unsigned int id)
{
	return (unsigned long)id;
}
164 
/*
 * Derive the physical package ID by shifting the current CPU's initial
 * APIC ID right by index_msb.  Note the initial_apicid argument is
 * ignored; the value is taken from current_cpu_data instead.
 */
static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
{
	return current_cpu_data.initial_apicid >> index_msb;
}
169 
/* Send @vector to the current CPU via the x2apic SELF IPI register. */
static void x2apic_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}
174 
175 static void init_x2apic_ldr(void)
176 {
177 	int cpu = smp_processor_id();
178 
179 	per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
180 }
181 
/*
 * APIC driver instance for cluster-mode x2apic: logical destination
 * mode, lowest-priority delivery, MSR-based register access.  Fields
 * left NULL are callbacks this driver does not need.
 */
struct apic apic_x2apic_cluster = {

	.name				= "cluster x2apic",
	.probe				= NULL,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_registered		= x2apic_apic_id_registered,

	.irq_delivery_mode		= dest_LowestPrio,
	.irq_dest_mode			= 1, /* logical */

	.target_cpus			= x2apic_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= x2apic_vector_allocation_domain,
	.init_apic_ldr			= init_x2apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.apicid_to_node			= NULL,
	.cpu_to_logical_apicid		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= x2apic_cluster_phys_pkg_id,
	.mps_oem_check			= NULL,

	/* x2apic IDs are a full 32 bits wide. */
	.get_apic_id			= x2apic_cluster_phys_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,

	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.wakeup_cpu			= wakeup_secondary_cpu_via_init,
	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	/* In x2apic mode, APIC registers are accessed via MSRs. */
	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};
241