// SPDX-License-Identifier: GPL-2.0

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/apic.h>

#include "local.h"

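/*
 * In x2APIC cluster mode the 32-bit logical destination is split into a
 * 16-bit cluster ID (bits 31:16) and a 16-bit bitmask with one bit per
 * CPU (bits 15:0). A cluster therefore holds at most 16 CPUs, so the
 * cluster of a given APIC ID is simply the ID shifted right by four.
 */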
#define apic_cluster(apicid) ((apicid) >> 4)

/*
 * __x2apic_send_IPI_mask() possibly needs to read
 * x86_cpu_to_logical_apicid for all online CPUs sequentially. Using a
 * per-CPU variable would cost one cache line per CPU.
 */
static u32 *x86_cpu_to_logical_apicid __read_mostly;

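/*
 * ipi_mask is per-CPU scratch space for __x2apic_send_IPI_mask().
 * cluster_masks points each CPU at the mask of CPUs in its cluster;
 * the mask is shared by all members of that cluster.
 */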
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
static DEFINE_PER_CPU_READ_MOSTLY(struct cpumask *, cluster_masks);

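/*
 * No OEM-specific matching is required: this driver is usable whenever
 * the CPU is operating in x2APIC mode.
 */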
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}

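/* Send a single IPI, using the target CPU's cached logical APIC ID. */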
static void x2apic_send_IPI(int cpu, int vector)
{
	u32 dest = x86_cpu_to_logical_apicid[cpu];

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}

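/*
 * Send an IPI to every CPU in @mask with at most one ICR write per
 * cluster: the logical IDs of all targets sharing a cluster are OR'ed
 * together, so the single write carries the common cluster ID plus the
 * combined per-CPU bitmask of every target in that cluster.
 */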
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	unsigned int cpu, clustercpu;
	struct cpumask *tmpmsk;
	unsigned long flags;
	u32 dest;

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	local_irq_save(flags);

	tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
	cpumask_copy(tmpmsk, mask);
	/* If IPI should not be sent to self, clear current CPU */
	if (apic_dest != APIC_DEST_ALLINC)
		__cpumask_clear_cpu(smp_processor_id(), tmpmsk);

	/* Collapse cpus in a cluster so a single IPI per cluster is sent */
	for_each_cpu(cpu, tmpmsk) {
		struct cpumask *cmsk = per_cpu(cluster_masks, cpu);

		dest = 0;
		for_each_cpu_and(clustercpu, tmpmsk, cmsk)
			dest |= x86_cpu_to_logical_apicid[clustercpu];

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
		/* Remove this cluster's CPUs from tmpmsk */
		cpumask_andnot(tmpmsk, tmpmsk, cmsk);
	}

	local_irq_restore(flags);
}

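/* Wrappers controlling whether the sending CPU itself is a valid target. */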
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static u32 x2apic_calc_apicid(unsigned int cpu)
{
	return x86_cpu_to_logical_apicid[cpu];
}

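/*
 * In x2APIC mode the LDR is read-only and derived from the APIC ID by
 * hardware, so there is nothing to program here. Just record this CPU
 * in the cluster mask set up by x2apic_prepare_cpu().
 */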
static void init_x2apic_ldr(void)
{
	struct cpumask *cmsk = this_cpu_read(cluster_masks);

	BUG_ON(!cmsk);

	cpumask_set_cpu(smp_processor_id(), cmsk);
}

/*
 * As an optimisation during boot, set the cluster_mask for all present
 * CPUs at once, to prevent each of them having to iterate over the others
 * to find the existing cluster_mask.
 */
static void prefill_clustermask(struct cpumask *cmsk, unsigned int cpu, u32 cluster)
{
	int cpu_i;

	for_each_present_cpu(cpu_i) {
		struct cpumask **cpu_cmsk = &per_cpu(cluster_masks, cpu_i);
		u32 apicid = apic->cpu_present_to_apicid(cpu_i);

		if (apicid == BAD_APICID || cpu_i == cpu || apic_cluster(apicid) != cluster)
			continue;

		if (WARN_ON_ONCE(*cpu_cmsk == cmsk))
			continue;

		BUG_ON(*cpu_cmsk);
		*cpu_cmsk = cmsk;
	}
}

static int alloc_clustermask(unsigned int cpu, u32 cluster, int node)
{
	struct cpumask *cmsk = NULL;
	unsigned int cpu_i;

	/*
	 * At boot time, the CPU present mask is stable. The cluster mask is
	 * allocated for the first CPU in the cluster and propagated to all
	 * present siblings in the cluster. If the cluster mask is already set
	 * on entry to this function for a given CPU, there is nothing to do.
	 */
	if (per_cpu(cluster_masks, cpu))
		return 0;

	if (system_state < SYSTEM_RUNNING)
		goto alloc;

	/*
	 * On post boot hotplug for a CPU which was not present at boot time,
	 * iterate over all possible CPUs (even those which are not present
	 * any more) to find any existing cluster mask.
	 */
	for_each_possible_cpu(cpu_i) {
		u32 apicid = apic->cpu_present_to_apicid(cpu_i);

		if (apicid != BAD_APICID && apic_cluster(apicid) == cluster) {
			cmsk = per_cpu(cluster_masks, cpu_i);
			/*
			 * If the cluster is already initialized, just store
			 * the mask and return. There's no need to propagate.
			 */
			if (cmsk) {
				per_cpu(cluster_masks, cpu) = cmsk;
				return 0;
			}
		}
	}
	/*
	 * No CPU in the cluster has ever been initialized, so fall through to
	 * the boot time code which will also populate the cluster mask for any
	 * other CPU in the cluster which is (now) present.
	 */
alloc:
	cmsk = kzalloc_node(sizeof(*cmsk), GFP_KERNEL, node);
	if (!cmsk)
		return -ENOMEM;
	per_cpu(cluster_masks, cpu) = cmsk;
	prefill_clustermask(cmsk, cpu, cluster);

	return 0;
}

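/*
 * CPU hotplug prepare callback. The cached logical APIC ID mirrors the
 * hardware's read-only LDR layout: the cluster ID in the upper 16 bits
 * and a single bit, selected by the low four bits of the physical APIC
 * ID, in the lower 16 bits.
 */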
static int x2apic_prepare_cpu(unsigned int cpu)
{
	u32 phys_apicid = apic->cpu_present_to_apicid(cpu);
	u32 cluster = apic_cluster(phys_apicid);
	u32 logical_apicid = (cluster << 16) | (1 << (phys_apicid & 0xf));

	x86_cpu_to_logical_apicid[cpu] = logical_apicid;

	if (alloc_clustermask(cpu, cluster, cpu_to_node(cpu)) < 0)
		return -ENOMEM;
	if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
		return -ENOMEM;
	return 0;
}

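/*
 * Hotplug teardown: drop the dead CPU from its cluster mask and release
 * its IPI scratch mask. The cluster mask itself is never freed, as the
 * remaining CPUs in the cluster may still reference it.
 */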
static int x2apic_dead_cpu(unsigned int dead_cpu)
{
	struct cpumask *cmsk = per_cpu(cluster_masks, dead_cpu);

	if (cmsk)
		cpumask_clear_cpu(dead_cpu, cmsk);
	free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
	return 0;
}

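/*
 * Returns 1 if this driver should be used. The lookup array is sized to
 * at least one full cache line of entries, presumably so this
 * read-mostly data does not share its cache line with unrelated
 * allocations. Any failure here makes the probe decline (return 0)
 * rather than fail the boot.
 */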
static int x2apic_cluster_probe(void)
{
	u32 slots;

	if (!x2apic_mode)
		return 0;

	slots = max_t(u32, L1_CACHE_BYTES/sizeof(u32), nr_cpu_ids);
	x86_cpu_to_logical_apicid = kcalloc(slots, sizeof(u32), GFP_KERNEL);
	if (!x86_cpu_to_logical_apicid)
		return 0;

	if (cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
			      x2apic_prepare_cpu, x2apic_dead_cpu) < 0) {
		pr_err("Failed to register X2APIC_PREPARE\n");
		kfree(x86_cpu_to_logical_apicid);
		x86_cpu_to_logical_apicid = NULL;
		return 0;
	}
	init_x2apic_ldr();
	return 1;
}

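/* Callback table for the cluster-mode x2APIC driver, registered below. */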
static struct apic apic_x2apic_cluster __ro_after_init = {

	.name				= "cluster x2apic",
	.probe				= x2apic_cluster_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,

	.delivery_mode			= APIC_DELIVERY_MODE_FIXED,
	.dest_mode_logical		= true,

	.disable_esr			= 0,

	.check_apicid_used		= NULL,
	.init_apic_ldr			= init_x2apic_ldr,
	.ioapic_phys_id_map		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.phys_pkg_id			= x2apic_phys_pkg_id,

	.max_apic_id			= UINT_MAX,
	.x2apic_set_max_apicid		= true,
	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= x2apic_set_apic_id,

	.calc_dest_apicid		= x2apic_calc_apicid,

	.send_IPI			= x2apic_send_IPI,
	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi				= native_apic_msr_eoi,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
};

apic_driver(apic_x2apic_cluster);