xref: /openbmc/linux/arch/x86/kernel/apic/bigsmp_32.c (revision 2b6163bf)
/*
 * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
 *
 * Drives the local APIC with physical interrupt destinations (one CPU per
 * destination) instead of the default flat logical mode, which only
 * addresses up to 8 CPUs.
 */
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/smp.h>

#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/ipi.h>

static inline unsigned bigsmp_get_apic_id(unsigned long x)
{
	return (x >> 24) & 0xFF;
}

static inline int bigsmp_apic_id_registered(void)
{
	return 1;
}

static inline const cpumask_t *bigsmp_target_cpus(void)
{
#ifdef CONFIG_SMP
	return &cpu_online_map;
#else
	return &cpumask_of_cpu(0);
#endif
}

static inline unsigned long
bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return 0;
}

static inline unsigned long bigsmp_check_apicid_present(int bit)
{
	return 1;
}

static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long val, id;

	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	id = per_cpu(x86_bios_cpu_apicid, cpu);
	val |= SET_APIC_LOGICAL_ID(id);

	return val;
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends setting DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static inline void bigsmp_init_apic_ldr(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	apic_write(APIC_DFR, APIC_DFR_FLAT);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}
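
/*
 * Editorial note -- a worked example, not part of the original driver; the
 * APIC ID value 0x05 is made up for illustration.  For a CPU whose
 * x86_bios_cpu_apicid is 0x05, calculate_ldr() clears the old logical ID
 * with ~APIC_LDR_MASK and ORs in SET_APIC_LOGICAL_ID(0x05) == 0x05 << 24,
 * so the value written to APIC_LDR above is 0x05000000.  The logical ID
 * therefore just mirrors the physical APIC ID in bits 31:24.
 */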

static inline void bigsmp_setup_apic_routing(void)
{
	printk(KERN_INFO
		"Enabling APIC mode:  Physflat.  Using %d I/O APICs\n",
		nr_ioapics);
}

static inline int bigsmp_apicid_to_node(int logical_apicid)
{
	return apicid_2_node[hard_smp_processor_id()];
}

static inline int bigsmp_cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < nr_cpu_ids)
		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);

	return BAD_APICID;
}

static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid)
{
	return physid_mask_of_physid(phys_apicid);
}

/* Mapping from cpu number to logical apicid */
static inline int bigsmp_cpu_to_logical_apicid(int cpu)
{
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return cpu_physical_id(cpu);
}
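
/*
 * Editorial note: despite the "logical" in its name, this helper returns
 * the CPU's physical APIC ID -- cpu_physical_id(cpu) reads the
 * x86_cpu_to_apicid per-cpu variable.  bigsmp delivers interrupts with
 * physical destinations, so no separate logical numbering is kept.
 */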

static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
{
	/* For clustered we don't have a good way to do this yet - hack */
	return physids_promote(0xFFL);
}
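
/*
 * Editorial note: physids_promote(0xFFL) builds a physid_mask_t whose low
 * byte is set, i.e. it unconditionally reports physical APIC IDs 0-7 as
 * present regardless of the map passed in -- hence the "hack" remark.
 */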

static inline void bigsmp_setup_portio_remap(void)
{
}

static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return 1;
}

/* As we are using a single CPU as the destination, pick only one CPU here */
static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
{
	return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
}

static inline unsigned int
bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			      const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, so we can only return one phys
	 * APIC ID.  May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}
	if (cpu < nr_cpu_ids)
		return bigsmp_cpu_to_logical_apicid(cpu);

	return BAD_APICID;
}
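
/*
 * Editorial note -- a worked example with made-up CPU numbers: if cpumask
 * contains CPUs {2, 5}, andmask contains CPUs {1, 2, 5} and CPU 2 is
 * online, the loop above stops at CPU 2 and the function returns CPU 2's
 * physical APIC ID; only if no online CPU is found in the intersection
 * does it fall through to BAD_APICID.
 */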

static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}

static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
{
	default_send_IPI_mask_sequence_phys(mask, vector);
}

static inline void bigsmp_send_IPI_allbutself(int vector)
{
	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}

static inline void bigsmp_send_IPI_all(int vector)
{
	bigsmp_send_IPI_mask(cpu_online_mask, vector);
}
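
/*
 * Editorial note: as their names suggest, the default_send_IPI_*_phys()
 * helpers used above walk the destination mask and issue one ICR write per
 * target CPU in physical destination mode, rather than a single
 * logical-mode multicast -- consistent with the one-CPU-per-destination
 * model this driver uses everywhere else.
 */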

static int dmi_bigsmp; /* can be set by dmi scanners */

static int hp_ht_bigsmp(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
	dmi_bigsmp = 1;

	return 0;
}

static const struct dmi_system_id bigsmp_dmi_table[] = {
	{ hp_ht_bigsmp, "HP ProLiant DL760 G2",
		{	DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
			DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
		}
	},

	{ hp_ht_bigsmp, "HP ProLiant DL740",
		{	DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
			DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
		}
	},
	{ } /* NULL entry stops DMI scanning */
};

static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
{
	cpus_clear(*retmask);
	cpu_set(cpu, *retmask);
}
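
/*
 * Editorial note: the allocation domain here is just the single CPU.  With
 * dest_Fixed/physical delivery there is no lowest-priority arbitration
 * among a set of CPUs, so (as far as this editor can tell) a wider domain
 * such as the one the default flat driver uses would buy nothing here.
 */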

static int probe_bigsmp(void)
{
	if (def_to_bigsmp)
		dmi_bigsmp = 1;
	else
		dmi_check_system(bigsmp_dmi_table);

	return dmi_bigsmp;
}

struct apic apic_bigsmp = {

	.name				= "bigsmp",
	.probe				= probe_bigsmp,
	.acpi_madt_oem_check		= NULL,
	.apic_id_registered		= bigsmp_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	/* phys delivery to target CPU: */
	.irq_dest_mode			= 0,

	.target_cpus			= bigsmp_target_cpus,
	.disable_esr			= 1,
	.dest_logical			= 0,
	.check_apicid_used		= bigsmp_check_apicid_used,
	.check_apicid_present		= bigsmp_check_apicid_present,

	.vector_allocation_domain	= bigsmp_vector_allocation_domain,
	.init_apic_ldr			= bigsmp_init_apic_ldr,

	.ioapic_phys_id_map		= bigsmp_ioapic_phys_id_map,
	.setup_apic_routing		= bigsmp_setup_apic_routing,
	.multi_timer_check		= NULL,
	.apicid_to_node			= bigsmp_apicid_to_node,
	.cpu_to_logical_apicid		= bigsmp_cpu_to_logical_apicid,
	.cpu_present_to_apicid		= bigsmp_cpu_present_to_apicid,
	.apicid_to_cpu_present		= bigsmp_apicid_to_cpu_present,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= bigsmp_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= bigsmp_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= bigsmp_get_apic_id,
	.set_apic_id			= NULL,
	.apic_id_mask			= 0xFF << 24,

	.cpu_mask_to_apicid		= bigsmp_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= bigsmp_cpu_mask_to_apicid_and,

	.send_IPI_mask			= bigsmp_send_IPI_mask,
	.send_IPI_mask_allbutself	= NULL,
	.send_IPI_allbutself		= bigsmp_send_IPI_allbutself,
	.send_IPI_all			= bigsmp_send_IPI_all,
	.send_IPI_self			= default_send_IPI_self,

	.wakeup_cpu			= wakeup_secondary_cpu_via_init,
	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,

	.wait_for_init_deassert		= default_wait_for_init_deassert,

	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= default_inquire_remote_apic,

	.read				= native_apic_mem_read,
	.write				= native_apic_mem_write,
	.icr_read			= native_apic_icr_read,
	.icr_write			= native_apic_icr_write,
	.wait_icr_idle			= native_apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,
};