xref: /openbmc/linux/arch/x86/kernel/apic/bigsmp_32.c (revision 0da85d1e)
/*
 * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
 *
 * Drives the local APIC in "clustered mode".
 */
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/smp.h>

#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/ipi.h>

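/* Extract the physical APIC ID from bits 31:24 of the APIC ID register value. */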
static unsigned bigsmp_get_apic_id(unsigned long x)
{
	return (x >> 24) & 0xFF;
}

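/* Nothing to verify here: always report the APIC ID as registered. */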
static int bigsmp_apic_id_registered(void)
{
	return 1;
}

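/*
 * With physical destination mode there is no logical map of "used"
 * APIC IDs, so no ID is ever reported as already in use.
 */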
static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
{
	return 0;
}

static int bigsmp_early_logical_apicid(int cpu)
{
	/* on bigsmp, logical apicid is the same as physical */
	return early_per_cpu(x86_cpu_to_apicid, cpu);
}

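/*
 * Build the Logical Destination Register value for @cpu: keep the bits
 * outside the logical ID field and place the CPU's BIOS-reported APIC ID
 * in the logical ID field.
 */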
static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long val, id;

	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	id = per_cpu(x86_bios_cpu_apicid, cpu);
	val |= SET_APIC_LOGICAL_ID(id);

	return val;
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends setting DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static void bigsmp_init_apic_ldr(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	apic_write(APIC_DFR, APIC_DFR_FLAT);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}

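/* Nothing to configure here beyond announcing the routing mode. */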
static void bigsmp_setup_apic_routing(void)
{
	printk(KERN_INFO
		"Enabling APIC mode:  Physflat.  Using %d I/O APICs\n",
		nr_ioapics);
}

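/*
 * Map an MP-table CPU index to its BIOS-reported APIC ID, or BAD_APICID
 * when the index is out of range.
 */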
static int bigsmp_cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < nr_cpu_ids)
		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);

	return BAD_APICID;
}

static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
	/* For clustered we don't have a good way to do this yet - hack */
	physids_promote(0xFFL, retmap);
}

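/* Assume every physical APIC ID handed to us is present. */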
static int bigsmp_check_phys_apicid_present(int phys_apicid)
{
	return 1;
}

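/*
 * Derive the physical package ID by shifting the APIC ID past the
 * core/thread ID bits (index_msb).
 */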
static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}

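/*
 * IPIs are sent to one destination CPU at a time in physical mode;
 * flat logical shorthand cannot address more than 8 CPUs, which is
 * the whole point of bigsmp.
 */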
static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
{
	default_send_IPI_mask_sequence_phys(mask, vector);
}

static void bigsmp_send_IPI_allbutself(int vector)
{
	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}

static void bigsmp_send_IPI_all(int vector)
{
	bigsmp_send_IPI_mask(cpu_online_mask, vector);
}

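/* DMI quirks: force bigsmp on HP ProLiant systems known to need it. */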
static int dmi_bigsmp; /* can be set by dmi scanners */

static int hp_ht_bigsmp(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
	dmi_bigsmp = 1;

	return 0;
}

static const struct dmi_system_id bigsmp_dmi_table[] = {
	{ hp_ht_bigsmp, "HP ProLiant DL760 G2",
		{	DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
			DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
		}
	},

	{ hp_ht_bigsmp, "HP ProLiant DL740",
		{	DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
			DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
		}
	},
	{ } /* NULL entry stops DMI scanning */
};

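/*
 * Select bigsmp when it was forced (def_to_bigsmp) or when one of the
 * DMI quirks above matched.
 */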
static int probe_bigsmp(void)
{
	if (def_to_bigsmp)
		dmi_bigsmp = 1;
	else
		dmi_check_system(bigsmp_dmi_table);

	return dmi_bigsmp;
}

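/* The bigsmp APIC driver: physical destination mode, fixed delivery. */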
static struct apic apic_bigsmp = {

	.name				= "bigsmp",
	.probe				= probe_bigsmp,
	.acpi_madt_oem_check		= NULL,
	.apic_id_valid			= default_apic_id_valid,
	.apic_id_registered		= bigsmp_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	/* phys delivery to target CPU: */
	.irq_dest_mode			= 0,

	.target_cpus			= default_target_cpus,
	.disable_esr			= 1,
	.dest_logical			= 0,
	.check_apicid_used		= bigsmp_check_apicid_used,

	.vector_allocation_domain	= default_vector_allocation_domain,
	.init_apic_ldr			= bigsmp_init_apic_ldr,

	.ioapic_phys_id_map		= bigsmp_ioapic_phys_id_map,
	.setup_apic_routing		= bigsmp_setup_apic_routing,
	.cpu_present_to_apicid		= bigsmp_cpu_present_to_apicid,
	.apicid_to_cpu_present		= physid_set_mask_of_physid,
	.check_phys_apicid_present	= bigsmp_check_phys_apicid_present,
	.phys_pkg_id			= bigsmp_phys_pkg_id,

	.get_apic_id			= bigsmp_get_apic_id,
	.set_apic_id			= NULL,
	.apic_id_mask			= 0xFF << 24,

	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,

	.send_IPI_mask			= bigsmp_send_IPI_mask,
	.send_IPI_mask_allbutself	= NULL,
	.send_IPI_allbutself		= bigsmp_send_IPI_allbutself,
	.send_IPI_all			= bigsmp_send_IPI_all,
	.send_IPI_self			= default_send_IPI_self,

	.wait_for_init_deassert		= true,
	.inquire_remote_apic		= default_inquire_remote_apic,

	.read				= native_apic_mem_read,
	.write				= native_apic_mem_write,
	.eoi_write			= native_apic_mem_write,
	.icr_read			= native_apic_icr_read,
	.icr_write			= native_apic_icr_write,
	.wait_icr_idle			= native_apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,

	.x86_32_early_logical_apicid	= bigsmp_early_logical_apicid,
};

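/*
 * Switch to the bigsmp driver when the probe says so, and refresh the
 * early logical APIC IDs of all possible CPUs to match this driver's
 * "logical == physical" convention.
 */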
void __init generic_bigsmp_probe(void)
{
	unsigned int cpu;

	if (!probe_bigsmp())
		return;

	apic = &apic_bigsmp;

	for_each_possible_cpu(cpu) {
		if (early_per_cpu(x86_cpu_to_logical_apicid,
				  cpu) == BAD_APICID)
			continue;
		early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
			bigsmp_early_logical_apicid(cpu);
	}

	pr_info("Overriding APIC driver with %s\n", apic_bigsmp.name);
}

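/* Make the driver known to the generic APIC probing code. */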
apic_driver(apic_bigsmp);