xref: /openbmc/linux/drivers/acpi/processor_core.c (revision b34e08d5)
/*
 * Copyright (C) 2005 Intel Corporation
 * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
 *
 *	Alex Chiang <achiang@hp.com>
 *	- Unified x86/ia64 implementations
 *	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *	- Added _PDC for platforms with Intel CPUs
 */
#include <linux/export.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>

#include "internal.h"

#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");

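/*
 * Match a MADT Local APIC entry against the given ACPI processor id.
 * Disabled entries are skipped; on a match the hardware APIC id is
 * returned through @apic_id.
 */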
static int map_lapic_id(struct acpi_subtable_header *entry,
		 u32 acpi_id, int *apic_id)
{
	struct acpi_madt_local_apic *lapic =
		(struct acpi_madt_local_apic *)entry;

	if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
		return -ENODEV;

	if (lapic->processor_id != acpi_id)
		return -EINVAL;

	*apic_id = lapic->id;
	return 0;
}

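/*
 * Match a MADT Local x2APIC entry.  x2APIC entries are only matched for
 * Device() processor declarations, where @acpi_id is the _UID value.
 */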
static int map_x2apic_id(struct acpi_subtable_header *entry,
			 int device_declaration, u32 acpi_id, int *apic_id)
{
	struct acpi_madt_local_x2apic *apic =
		(struct acpi_madt_local_x2apic *)entry;

	if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
		return -ENODEV;

	if (device_declaration && (apic->uid == acpi_id)) {
		*apic_id = apic->local_apic_id;
		return 0;
	}

	return -EINVAL;
}

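/*
 * Match a MADT Local SAPIC entry (ia64).  For Device() declarations the
 * entry's uid is compared (and must be present, i.e. length >= 16);
 * otherwise the Processor() id is used.  The returned value combines the
 * SAPIC id and eid.
 */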
static int map_lsapic_id(struct acpi_subtable_header *entry,
		int device_declaration, u32 acpi_id, int *apic_id)
{
	struct acpi_madt_local_sapic *lsapic =
		(struct acpi_madt_local_sapic *)entry;

	if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
		return -ENODEV;

	if (device_declaration) {
		if ((entry->length < 16) || (lsapic->uid != acpi_id))
			return -EINVAL;
	} else if (lsapic->processor_id != acpi_id)
		return -EINVAL;

	*apic_id = (lsapic->id << 8) | lsapic->eid;
	return 0;
}

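/*
 * Walk the static MADT looking for an entry that matches @acpi_id.  The
 * table is fetched once and cached in the function-local statics; -1 is
 * returned if the MADT is absent or no entry matches.
 */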
static int map_madt_entry(int type, u32 acpi_id)
{
	unsigned long madt_end, entry;
	static struct acpi_table_madt *madt;
	static int read_madt;
	int apic_id = -1;

	if (!read_madt) {
		if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
					(struct acpi_table_header **)&madt)))
			madt = NULL;
		read_madt++;
	}

	if (!madt)
		return apic_id;

	entry = (unsigned long)madt;
	madt_end = entry + madt->header.length;

	/* Parse all entries looking for a match. */

	entry += sizeof(struct acpi_table_madt);
	while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
		struct acpi_subtable_header *header =
			(struct acpi_subtable_header *)entry;
		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
			if (!map_lapic_id(header, acpi_id, &apic_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
			if (!map_x2apic_id(header, type, acpi_id, &apic_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
			if (!map_lsapic_id(header, type, acpi_id, &apic_id))
				break;
		}
		entry += header->length;
	}
	return apic_id;
}

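/*
 * Evaluate the processor's _MAT method, which returns a buffer of
 * MADT-format entries; only the first entry is examined here and mapped
 * the same way as the static MADT entries above.
 */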
static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	struct acpi_subtable_header *header;
	int apic_id = -1;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		goto exit;

	if (!buffer.length || !buffer.pointer)
		goto exit;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(struct acpi_subtable_header)) {
		goto exit;
	}

	header = (struct acpi_subtable_header *)obj->buffer.pointer;
	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
		map_lapic_id(header, acpi_id, &apic_id);
	} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
		map_lsapic_id(header, type, acpi_id, &apic_id);
	}

exit:
	kfree(buffer.pointer);
	return apic_id;
}

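/*
 * Resolve an ACPI processor id to a physical (APIC/SAPIC) id, preferring
 * the per-processor _MAT method and falling back to the static MADT.
 * Returns -1 if no mapping is found.
 */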
int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
{
	int apic_id;

	apic_id = map_mat_entry(handle, type, acpi_id);
	if (apic_id == -1)
		apic_id = map_madt_entry(type, acpi_id);

	return apic_id;
}

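/*
 * Map a physical APIC id to a logical CPU number.  The apic_id == -1 case
 * handles uniprocessor systems without usable _MAT/MADT information, as
 * described in the comment below.
 */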
int acpi_map_cpuid(int apic_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
	int i;
#endif

	if (apic_id == -1) {
		/*
		 * On a UP processor, there is no _MAT method or MADT table,
		 * so the apic_id computed above is always -1.
		 *
		 * The BIOS may define multiple CPU handles even for a UP
		 * processor, for example:
		 *
		 * Scope (_PR)
		 * {
		 *     Processor (CPU0, 0x00, 0x00000410, 0x06) {}
		 *     Processor (CPU1, 0x01, 0x00000410, 0x06) {}
		 *     Processor (CPU2, 0x02, 0x00000410, 0x06) {}
		 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
		 * }
		 *
		 * Ignore apic_id and always return 0 for the processor
		 * handle with acpi id 0 if nr_cpu_ids is 1.  This should
		 * be the case if SMP tables are not found.
		 * Return -1 for the handles of the other CPUs.
		 */
		if (nr_cpu_ids <= 1 && acpi_id == 0)
			return acpi_id;
		else
			return apic_id;
	}

#ifdef CONFIG_SMP
	for_each_possible_cpu(i) {
		if (cpu_physical_id(i) == apic_id)
			return i;
	}
#else
	/* In a UP kernel, only processor 0 is valid */
	if (apic_id == 0)
		return apic_id;
#endif
	return -1;
}

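/*
 * Translate an ACPI processor id into a logical CPU number (or -1).
 * @type is 1 for Device() processor declarations (acpi_id is then _UID)
 * and 0 for Processor() objects (acpi_id is the processor id).
 *
 * Illustrative use only (not from this file): given a struct acpi_processor
 * *pr, a caller might do
 *
 *	pr->id = acpi_get_cpuid(pr->handle, device_declaration, pr->acpi_id);
 *
 * where device_declaration is assumed to reflect how the processor was
 * declared in the namespace.
 */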
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
	int apic_id;

	apic_id = acpi_get_apicid(handle, type, acpi_id);

	return acpi_map_cpuid(apic_id, acpi_id);
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);

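/*
 * Check whether the processor object behind @handle corresponds to a CPU
 * whose ACPI id can be mapped to a logical CPU.  Handles both Processor()
 * objects and Device() declarations (which are identified by _UID).
 */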
static bool __init processor_physically_present(acpi_handle handle)
{
	int cpuid, type;
	u32 acpi_id;
	acpi_status status;
	acpi_object_type acpi_type;
	unsigned long long tmp;
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };

	status = acpi_get_type(handle, &acpi_type);
	if (ACPI_FAILURE(status))
		return false;

	switch (acpi_type) {
	case ACPI_TYPE_PROCESSOR:
		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status))
			return false;
		acpi_id = object.processor.proc_id;
		break;
	case ACPI_TYPE_DEVICE:
		status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
		if (ACPI_FAILURE(status))
			return false;
		acpi_id = tmp;
		break;
	default:
		return false;
	}

	type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
	cpuid = acpi_get_cpuid(handle, type, acpi_id);

	if (cpuid == -1)
		return false;

	return true;
}

static void acpi_set_pdc_bits(u32 *buf)
{
	buf[0] = ACPI_PDC_REVISION_ID;
	buf[1] = 1;

	/* Enable coordination with firmware's _TSD info */
	buf[2] = ACPI_PDC_SMP_T_SWCOORD;

	/* Twiddle arch-specific bits needed for _PDC */
	arch_acpi_set_pdc_bits(buf);
}

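/*
 * Build the argument list for _PDC: a single ACPI buffer object holding
 * three u32 capability words (12 bytes), filled in by acpi_set_pdc_bits().
 * The caller frees everything after _PDC has been evaluated.
 */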
static struct acpi_object_list *acpi_processor_alloc_pdc(void)
{
	struct acpi_object_list *obj_list;
	union acpi_object *obj;
	u32 *buf;

	/* Allocate and initialize the _PDC argument list for later use. */
	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
	if (!obj_list) {
		printk(KERN_ERR "Memory allocation error\n");
		return NULL;
	}

	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
	if (!obj) {
		printk(KERN_ERR "Memory allocation error\n");
		kfree(obj_list);
		return NULL;
	}

	buf = kmalloc(12, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR "Memory allocation error\n");
		kfree(obj);
		kfree(obj_list);
		return NULL;
	}

	acpi_set_pdc_bits(buf);

	obj->type = ACPI_TYPE_BUFFER;
	obj->buffer.length = 12;
	obj->buffer.pointer = (u8 *) buf;
	obj_list->count = 1;
	obj_list->pointer = obj;

	return obj_list;
}

/*
 * _PDC is required for a BIOS-OS handshake for most of the newer
 * ACPI processor features.
 */
static acpi_status
acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
{
	acpi_status status = AE_OK;

	if (boot_option_idle_override == IDLE_NOMWAIT) {
		/*
		 * If mwait is disabled for CPU C-states, the C2C3_FFH access
		 * mode is cleared from the _PDC parameter buffer, and the
		 * C1_FFH access mode is cleared as well.
		 */
		union acpi_object *obj;
		u32 *buffer = NULL;

		obj = pdc_in->pointer;
		buffer = (u32 *)(obj->buffer.pointer);
		buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
	}
	status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);

	if (ACPI_FAILURE(status))
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "Could not evaluate _PDC, using legacy perf. control.\n"));

	return status;
}

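/*
 * Evaluate _PDC for the processor behind @handle, provided the architecture
 * supports it, and release the argument buffers afterwards.
 */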
void acpi_processor_set_pdc(acpi_handle handle)
{
	struct acpi_object_list *obj_list;

	if (!arch_has_acpi_pdc())
		return;

	obj_list = acpi_processor_alloc_pdc();
	if (!obj_list)
		return;

	acpi_processor_eval_pdc(handle, obj_list);

	kfree(obj_list->pointer->buffer.pointer);
	kfree(obj_list->pointer);
	kfree(obj_list);
}

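/*
 * Namespace-walk callback: evaluate _PDC for a processor handle if that
 * processor is physically present.
 */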
static acpi_status __init
early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
{
	if (!processor_physically_present(handle))
		return AE_OK;

	acpi_processor_set_pdc(handle);
	return AE_OK;
}

#if defined(CONFIG_X86) || defined(CONFIG_IA64)
static int __init set_no_mwait(const struct dmi_system_id *id)
{
	pr_notice(PREFIX "%s detected - disabling mwait for CPU C-states\n",
		  id->ident);
	boot_option_idle_override = IDLE_NOMWAIT;
	return 0;
}

static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
	{
	set_no_mwait, "Extensa 5220", {
	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
	DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
	DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
	{},
};

static void __init processor_dmi_check(void)
{
	/*
	 * Check whether the system matches an entry in the DMI table above.
	 * If it does, OSPM should not use mwait for CPU C-states.
	 */
	dmi_check_system(processor_idle_dmi_table);
}
#else
static inline void processor_dmi_check(void) {}
#endif

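/*
 * Evaluate _PDC early for all processors, covering both Processor()
 * objects in the namespace and processor Device() objects declared with
 * the processor device _HID (ACPI_PROCESSOR_DEVICE_HID).
 */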
void __init acpi_early_processor_set_pdc(void)
{
	processor_dmi_check();

	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    early_init_pdc, NULL, NULL, NULL);
	acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, early_init_pdc, NULL, NULL);
}