xref: /openbmc/linux/drivers/hwmon/coretemp.c (revision 75f25bd3)
1 /*
2  * coretemp.c - Linux kernel module for hardware monitoring
3  *
4  * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
5  *
6  * Inspired from many hwmon drivers
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; version 2 of the License.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20  * 02110-1301 USA.
21  */
22 
23 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24 
25 #include <linux/module.h>
26 #include <linux/init.h>
27 #include <linux/slab.h>
28 #include <linux/jiffies.h>
29 #include <linux/hwmon.h>
30 #include <linux/sysfs.h>
31 #include <linux/hwmon-sysfs.h>
32 #include <linux/err.h>
33 #include <linux/mutex.h>
34 #include <linux/list.h>
35 #include <linux/platform_device.h>
36 #include <linux/cpu.h>
37 #include <linux/pci.h>
38 #include <linux/smp.h>
39 #include <asm/msr.h>
40 #include <asm/processor.h>
41 
42 #define DRVNAME	"coretemp"
43 
44 #define BASE_SYSFS_ATTR_NO	2	/* Sysfs Base attr no for coretemp */
45 #define NUM_REAL_CORES		16	/* Number of Real cores per cpu */
46 #define CORETEMP_NAME_LENGTH	17	/* String Length of attrs */
47 #define MAX_CORE_ATTRS		4	/* Maximum no of basic attrs */
48 #define MAX_THRESH_ATTRS	3	/* Maximum no of Threshold attrs */
49 #define TOTAL_ATTRS		(MAX_CORE_ATTRS + MAX_THRESH_ATTRS)
50 #define MAX_CORE_DATA		(NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
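
/*
 * Sysfs attribute numbering: temp1_* is reserved for the package sensor (if
 * present); each core is exported as temp<core_id + BASE_SYSFS_ATTR_NO>_*.
 * MAX_CORE_DATA above sizes the per-package array indexed by that number.
 */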
51 
52 #ifdef CONFIG_SMP
53 #define TO_PHYS_ID(cpu)		cpu_data(cpu).phys_proc_id
54 #define TO_CORE_ID(cpu)		cpu_data(cpu).cpu_core_id
55 #define TO_ATTR_NO(cpu)		(TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
56 #define for_each_sibling(i, cpu)	for_each_cpu(i, cpu_sibling_mask(cpu))
57 #else
58 #define TO_PHYS_ID(cpu)		(cpu)
59 #define TO_CORE_ID(cpu)		(cpu)
60 #define TO_ATTR_NO(cpu)		(cpu)
61 #define for_each_sibling(i, cpu)	for (i = 0; false; )
62 #endif
63 
64 /*
65  * Per-Core Temperature Data
66  * @last_updated: The time, in jiffies, when the current temperature value
67  *		was last updated.
68  * @cpu_core_id: The core ID of the CPU whose temperature values are
69  *		exported; it is shown in the corresponding temp*_label.
70  * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
71  *		from where the temperature values should be read.
72  * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT,
73  *		from where the thresholds are read.
74  * @attr_size:  Total number of per-core attrs displayed in the sysfs.
75  * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
76  *		Otherwise, temp_data holds coretemp data.
77  * @valid: If this is 1, the current temperature is valid.
78  */
79 struct temp_data {
80 	int temp;
81 	int ttarget;
82 	int tmin;
83 	int tjmax;
84 	unsigned long last_updated;
85 	unsigned int cpu;
86 	u32 cpu_core_id;
87 	u32 status_reg;
88 	u32 intrpt_reg;
89 	int attr_size;
90 	bool is_pkg_data;
91 	bool valid;
92 	struct sensor_device_attribute sd_attrs[TOTAL_ATTRS];
93 	char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH];
94 	struct mutex update_lock;
95 };
96 
97 /* Platform Data per Physical CPU */
98 struct platform_data {
99 	struct device *hwmon_dev;
100 	u16 phys_proc_id;
101 	struct temp_data *core_data[MAX_CORE_DATA];
102 	struct device_attribute name_attr;
103 };
104 
105 struct pdev_entry {
106 	struct list_head list;
107 	struct platform_device *pdev;
108 	u16 phys_proc_id;
109 };
110 
111 static LIST_HEAD(pdev_list);
112 static DEFINE_MUTEX(pdev_list_mutex);
113 
114 static ssize_t show_name(struct device *dev,
115 			struct device_attribute *devattr, char *buf)
116 {
117 	return sprintf(buf, "%s\n", DRVNAME);
118 }
119 
120 static ssize_t show_label(struct device *dev,
121 				struct device_attribute *devattr, char *buf)
122 {
123 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
124 	struct platform_data *pdata = dev_get_drvdata(dev);
125 	struct temp_data *tdata = pdata->core_data[attr->index];
126 
127 	if (tdata->is_pkg_data)
128 		return sprintf(buf, "Physical id %u\n", pdata->phys_proc_id);
129 
130 	return sprintf(buf, "Core %u\n", tdata->cpu_core_id);
131 }
132 
133 static ssize_t show_crit_alarm(struct device *dev,
134 				struct device_attribute *devattr, char *buf)
135 {
136 	u32 eax, edx;
137 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
138 	struct platform_data *pdata = dev_get_drvdata(dev);
139 	struct temp_data *tdata = pdata->core_data[attr->index];
140 
141 	rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
142 
143 	return sprintf(buf, "%d\n", (eax >> 5) & 1);
144 }
145 
146 static ssize_t show_max_alarm(struct device *dev,
147 				struct device_attribute *devattr, char *buf)
148 {
149 	u32 eax, edx;
150 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
151 	struct platform_data *pdata = dev_get_drvdata(dev);
152 	struct temp_data *tdata = pdata->core_data[attr->index];
153 
154 	rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
155 
156 	return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1));
157 }
158 
159 static ssize_t show_tjmax(struct device *dev,
160 			struct device_attribute *devattr, char *buf)
161 {
162 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
163 	struct platform_data *pdata = dev_get_drvdata(dev);
164 
165 	return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tjmax);
166 }
167 
168 static ssize_t show_ttarget(struct device *dev,
169 				struct device_attribute *devattr, char *buf)
170 {
171 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
172 	struct platform_data *pdata = dev_get_drvdata(dev);
173 
174 	return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
175 }
176 
177 static ssize_t store_ttarget(struct device *dev,
178 				struct device_attribute *devattr,
179 				const char *buf, size_t count)
180 {
181 	struct platform_data *pdata = dev_get_drvdata(dev);
182 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
183 	struct temp_data *tdata = pdata->core_data[attr->index];
184 	u32 eax, edx;
185 	unsigned long val;
186 	int diff;
187 
188 	if (strict_strtoul(buf, 10, &val))
189 		return -EINVAL;
190 
191 	/*
192 	 * THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms
193 	 * of millidegrees Celsius. Hence don't accept val > (127 * 1000)
194 	 */
195 	if (val > tdata->tjmax || val > 127000)
196 		return -EINVAL;
197 
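	/*
	 * diff is the distance below TjMax in whole degrees C. For example,
	 * tjmax = 100000 and val = 85000 give diff = 15, which is written
	 * into the THERM_MASK_THRESHOLD1 field of the interrupt MSR below.
	 */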
198 	diff = (tdata->tjmax - val) / 1000;
199 
200 	mutex_lock(&tdata->update_lock);
201 	rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
202 	eax = (eax & ~THERM_MASK_THRESHOLD1) |
203 				(diff << THERM_SHIFT_THRESHOLD1);
204 	wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
205 	tdata->ttarget = val;
206 	mutex_unlock(&tdata->update_lock);
207 
208 	return count;
209 }
210 
211 static ssize_t show_tmin(struct device *dev,
212 			struct device_attribute *devattr, char *buf)
213 {
214 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
215 	struct platform_data *pdata = dev_get_drvdata(dev);
216 
217 	return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin);
218 }
219 
220 static ssize_t store_tmin(struct device *dev,
221 				struct device_attribute *devattr,
222 				const char *buf, size_t count)
223 {
224 	struct platform_data *pdata = dev_get_drvdata(dev);
225 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
226 	struct temp_data *tdata = pdata->core_data[attr->index];
227 	u32 eax, edx;
228 	unsigned long val;
229 	int diff;
230 
231 	if (strict_strtoul(buf, 10, &val))
232 		return -EINVAL;
233 
234 	/*
235 	 * THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms
236 	 * of millidegrees Celsius. Hence don't accept val > (127 * 1000)
237 	 */
238 	if (val > tdata->tjmax || val > 127000)
239 		return -EINVAL;
240 
241 	diff = (tdata->tjmax - val) / 1000;
242 
243 	mutex_lock(&tdata->update_lock);
244 	rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
245 	eax = (eax & ~THERM_MASK_THRESHOLD0) |
246 				(diff << THERM_SHIFT_THRESHOLD0);
247 	wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
248 	tdata->tmin = val;
249 	mutex_unlock(&tdata->update_lock);
250 
251 	return count;
252 }
253 
254 static ssize_t show_temp(struct device *dev,
255 			struct device_attribute *devattr, char *buf)
256 {
257 	u32 eax, edx;
258 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
259 	struct platform_data *pdata = dev_get_drvdata(dev);
260 	struct temp_data *tdata = pdata->core_data[attr->index];
261 
262 	mutex_lock(&tdata->update_lock);
263 
264 	/* Check whether the time interval has elapsed */
265 	if (!tdata->valid || time_after(jiffies, tdata->last_updated + HZ)) {
266 		rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
267 		tdata->valid = 0;
268 		/* Check whether the data is valid */
269 		if (eax & 0x80000000) {
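			/*
			 * Bits 22:16 of IA32_THERM_STATUS hold the digital
			 * readout as degrees below TjMax; bit 31 (tested
			 * above) is the "reading valid" flag. For example,
			 * with tjmax = 100000 and a readout of 40, the
			 * reported value is 100000 - 40 * 1000 = 60000
			 * millidegrees C, i.e. 60 C.
			 */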
270 			tdata->temp = tdata->tjmax -
271 					((eax >> 16) & 0x7f) * 1000;
272 			tdata->valid = 1;
273 		}
274 		tdata->last_updated = jiffies;
275 	}
276 
277 	mutex_unlock(&tdata->update_lock);
278 	return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
279 }
280 
281 static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
282 {
283 	/* 100 degrees C is the default for both mobile and non-mobile CPUs */
284 
285 	int tjmax = 100000;
286 	int tjmax_ee = 85000;
287 	int usemsr_ee = 1;
288 	int err;
289 	u32 eax, edx;
290 	struct pci_dev *host_bridge;
291 
292 	/* Early chips have no MSR for TjMax */
293 
294 	if (c->x86_model == 0xf && c->x86_mask < 4)
295 		usemsr_ee = 0;
296 
297 	/* Atom CPUs */
298 
299 	if (c->x86_model == 0x1c) {
300 		usemsr_ee = 0;
301 
302 		host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
303 
304 		if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL
305 		    && (host_bridge->device == 0xa000	/* NM10 based nettop */
306 		    || host_bridge->device == 0xa010))	/* NM10 based netbook */
307 			tjmax = 100000;
308 		else
309 			tjmax = 90000;
310 
311 		pci_dev_put(host_bridge);
312 	}
313 
314 	if (c->x86_model > 0xe && usemsr_ee) {
315 		u8 platform_id;
316 
317 		/*
318 		 * Now we can detect the mobile CPU using the Intel-provided table
319 		 * http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
320 		 * For Core2 cores, check MSR 0x17, bit 28: 1 = Mobile CPU
321 		 */
322 		err = rdmsr_safe_on_cpu(id, 0x17, &eax, &edx);
323 		if (err) {
324 			dev_warn(dev,
325 				 "Unable to access MSR 0x17, assuming desktop"
326 				 " CPU\n");
327 			usemsr_ee = 0;
328 		} else if (c->x86_model < 0x17 && !(eax & 0x10000000)) {
329 			/*
330 			 * Trust bit 28 up to Penryn, I could not find any
331 			 * documentation on that; if you happen to know
332 			 * someone at Intel please ask
333 			 */
334 			usemsr_ee = 0;
335 		} else {
336 			/* Platform ID bits 52:50 (EDX starts at bit 32) */
337 			platform_id = (edx >> 18) & 0x7;
338 
339 			/*
340 			 * Mobile Penryn CPU seems to be platform ID 7 or 5
341 			 * (guesswork)
342 			 */
343 			if (c->x86_model == 0x17 &&
344 			    (platform_id == 5 || platform_id == 7)) {
345 				/*
346 				 * If the MSR 0xEE bit is set, TjMax is 90 degrees C,
347 				 * otherwise it is 105 degrees C.
348 				 */
349 				tjmax_ee = 90000;
350 				tjmax = 105000;
351 			}
352 		}
353 	}
354 
355 	if (usemsr_ee) {
356 		err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx);
357 		if (err) {
358 			dev_warn(dev,
359 				 "Unable to access MSR 0xEE, for Tjmax, left"
360 				 " at default\n");
361 		} else if (eax & 0x40000000) {
362 			tjmax = tjmax_ee;
363 		}
364 	} else if (tjmax == 100000) {
365 		/*
366 		 * If we don't use MSR 0xEE, it means we are a desktop CPU
367 		 * (with the exception of Atom).
368 		 */
369 		dev_warn(dev, "Using relative temperature scale!\n");
370 	}
371 
372 	return tjmax;
373 }
374 
375 static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
376 {
377 	/* 100 degrees C is the default for both mobile and non-mobile CPUs */
378 	int err;
379 	u32 eax, edx;
380 	u32 val;
381 
382 	/*
383 	 * On current Intel(R) processors, the IA32_TEMPERATURE_TARGET MSR
384 	 * contains the TjMax value.
385 	 */
386 	err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
387 	if (err) {
388 		dev_warn(dev, "Unable to read TjMax from CPU.\n");
389 	} else {
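		/*
		 * Bits 23:16 of IA32_TEMPERATURE_TARGET hold TjMax in
		 * degrees C; e.g. a field value of 100 becomes 100000
		 * millidegrees below.
		 */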
390 		val = (eax >> 16) & 0xff;
391 		/*
392 		 * If the reported TjMax is not plausible (zero), fall back
393 		 * to the estimate from adjust_tjmax() below.
394 		 */
395 		if (val) {
396 			dev_info(dev, "TjMax is %d C.\n", val);
397 			return val * 1000;
398 		}
399 	}
400 
401 	/*
402 	 * For early CPUs, or when the MSR is unreadable, an estimate is used.
403 	 * NOTE: the calculated value may not be correct.
404 	 */
405 	return adjust_tjmax(c, id, dev);
406 }
407 
408 static void __devinit get_ucode_rev_on_cpu(void *edx)
409 {
410 	u32 eax;
411 
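	/*
	 * Documented sequence for reading the microcode update signature:
	 * write 0 to the MSR, execute CPUID (sync_core), then read it back;
	 * EDX then holds the revision.
	 */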
412 	wrmsr(MSR_IA32_UCODE_REV, 0, 0);
413 	sync_core();
414 	rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx);
415 }
416 
417 static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
418 {
419 	int err;
420 	u32 eax, edx, val;
421 
422 	err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
423 	if (!err) {
424 		val = (eax >> 16) & 0xff;
425 		if (val)
426 			return val * 1000;
427 	}
428 	dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);
429 	return 100000; /* Default TjMax: 100 degrees Celsius */
430 }
431 
432 static int create_name_attr(struct platform_data *pdata, struct device *dev)
433 {
434 	sysfs_attr_init(&pdata->name_attr.attr);
435 	pdata->name_attr.attr.name = "name";
436 	pdata->name_attr.attr.mode = S_IRUGO;
437 	pdata->name_attr.show = show_name;
438 	return device_create_file(dev, &pdata->name_attr);
439 }
440 
441 static int create_core_attrs(struct temp_data *tdata, struct device *dev,
442 				int attr_no)
443 {
444 	int err, i;
445 	static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev,
446 			struct device_attribute *devattr, char *buf) = {
447 			show_label, show_crit_alarm, show_temp, show_tjmax,
448 			show_max_alarm, show_ttarget, show_tmin };
449 	static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev,
450 			struct device_attribute *devattr, const char *buf,
451 			size_t count) = { NULL, NULL, NULL, NULL, NULL,
452 					store_ttarget, store_tmin };
453 	static const char *names[TOTAL_ATTRS] = {
454 					"temp%d_label", "temp%d_crit_alarm",
455 					"temp%d_input", "temp%d_crit",
456 					"temp%d_max_alarm", "temp%d_max",
457 					"temp%d_max_hyst" };
458 
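	/*
	 * For example, core 0 (attr_no 2) with threshold support exports
	 * temp2_label, temp2_crit_alarm, temp2_input, temp2_crit,
	 * temp2_max_alarm, temp2_max and temp2_max_hyst.
	 */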
459 	for (i = 0; i < tdata->attr_size; i++) {
460 		snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
461 			attr_no);
462 		sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
463 		tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
464 		tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
465 		if (rw_ptr[i]) {
466 			tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR;
467 			tdata->sd_attrs[i].dev_attr.store = rw_ptr[i];
468 		}
469 		tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
470 		tdata->sd_attrs[i].index = attr_no;
471 		err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
472 		if (err)
473 			goto exit_free;
474 	}
475 	return 0;
476 
477 exit_free:
478 	while (--i >= 0)
479 		device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
480 	return err;
481 }
482 
483 
484 static int __devinit chk_ucode_version(struct platform_device *pdev)
485 {
486 	struct cpuinfo_x86 *c = &cpu_data(pdev->id);
487 	int err;
488 	u32 edx;
489 
490 	/*
491 	 * Check if we are affected by erratum AE18 of Core processors:
492 	 * readings might stop updating after the processor has entered a
493 	 * deep sleep state; fixed in stepping D0 (6EC).
494 	 */
495 	if (c->x86_model == 0xe && c->x86_mask < 0xc) {
496 		/* check for microcode update */
497 		err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu,
498 					       &edx, 1);
499 		if (err) {
500 			dev_err(&pdev->dev,
501 				"Cannot determine microcode revision of "
502 				"CPU#%u (%d)!\n", pdev->id, err);
503 			return -ENODEV;
504 		} else if (edx < 0x39) {
505 			dev_err(&pdev->dev,
506 				"Errata AE18 not fixed, update BIOS or "
507 				"microcode of the CPU!\n");
508 			return -ENODEV;
509 		}
510 	}
511 	return 0;
512 }
513 
514 static struct platform_device *coretemp_get_pdev(unsigned int cpu)
515 {
516 	u16 phys_proc_id = TO_PHYS_ID(cpu);
517 	struct pdev_entry *p;
518 
519 	mutex_lock(&pdev_list_mutex);
520 
521 	list_for_each_entry(p, &pdev_list, list)
522 		if (p->phys_proc_id == phys_proc_id) {
523 			mutex_unlock(&pdev_list_mutex);
524 			return p->pdev;
525 		}
526 
527 	mutex_unlock(&pdev_list_mutex);
528 	return NULL;
529 }
530 
531 static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
532 {
533 	struct temp_data *tdata;
534 
535 	tdata = kzalloc(sizeof(struct temp_data), GFP_KERNEL);
536 	if (!tdata)
537 		return NULL;
538 
539 	tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
540 							MSR_IA32_THERM_STATUS;
541 	tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT :
542 						MSR_IA32_THERM_INTERRUPT;
543 	tdata->is_pkg_data = pkg_flag;
544 	tdata->cpu = cpu;
545 	tdata->cpu_core_id = TO_CORE_ID(cpu);
546 	tdata->attr_size = MAX_CORE_ATTRS;
547 	mutex_init(&tdata->update_lock);
548 	return tdata;
549 }
550 
551 static int create_core_data(struct platform_data *pdata,
552 				struct platform_device *pdev,
553 				unsigned int cpu, int pkg_flag)
554 {
555 	struct temp_data *tdata;
556 	struct cpuinfo_x86 *c = &cpu_data(cpu);
557 	u32 eax, edx;
558 	int err, attr_no;
559 
560 	/*
561 	 * Find attr number for sysfs:
562 	 * We map the attr number to the core ID of the CPU.
563 	 * The attr number is always core ID + 2.
564 	 * The pkgtemp will always show up as temp1_*, if available.
565 	 */
566 	attr_no = pkg_flag ? 1 : TO_ATTR_NO(cpu);
567 
568 	if (attr_no > MAX_CORE_DATA - 1)
569 		return -ERANGE;
570 
571 	/*
572 	 * Provide a single set of attributes for all HT siblings of a core
573 	 * to avoid duplicate sensors (the processor ID and core ID of all
574 	 * HT siblings of a core are the same).
575 	 * Skip if a HT sibling of this core is already registered.
576 	 * This is not an error.
577 	 */
578 	if (pdata->core_data[attr_no] != NULL)
579 		return 0;
580 
581 	tdata = init_temp_data(cpu, pkg_flag);
582 	if (!tdata)
583 		return -ENOMEM;
584 
585 	/* Test if we can access the status register */
586 	err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
587 	if (err)
588 		goto exit_free;
589 
590 	/* We can access the status register. Get the critical temperature. */
591 	if (pkg_flag)
592 		tdata->tjmax = get_pkg_tjmax(pdev->id, &pdev->dev);
593 	else
594 		tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
595 
596 	/*
597 	 * Test if we can access the intrpt register. If so, increase the
598 	 * 'size' enough to have ttarget/tmin/max_alarm interfaces.
599 	 * Initialize ttarget from bits 22:16 of MSR_IA32_THERM_INTERRUPT.
600 	 */
601 	err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx);
602 	if (!err) {
603 		tdata->attr_size += MAX_THRESH_ATTRS;
604 		tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
605 	}
606 
607 	pdata->core_data[attr_no] = tdata;
608 
609 	/* Create sysfs interfaces */
610 	err = create_core_attrs(tdata, &pdev->dev, attr_no);
611 	if (err)
612 		goto exit_free;
613 
614 	return 0;
615 exit_free:
616 	kfree(tdata);
617 	return err;
618 }
619 
620 static void coretemp_add_core(unsigned int cpu, int pkg_flag)
621 {
622 	struct platform_data *pdata;
623 	struct platform_device *pdev = coretemp_get_pdev(cpu);
624 	int err;
625 
626 	if (!pdev)
627 		return;
628 
629 	pdata = platform_get_drvdata(pdev);
630 
631 	err = create_core_data(pdata, pdev, cpu, pkg_flag);
632 	if (err)
633 		dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
634 }
635 
636 static void coretemp_remove_core(struct platform_data *pdata,
637 				struct device *dev, int indx)
638 {
639 	int i;
640 	struct temp_data *tdata = pdata->core_data[indx];
641 
642 	/* Remove the sysfs attributes */
643 	for (i = 0; i < tdata->attr_size; i++)
644 		device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
645 
646 	kfree(pdata->core_data[indx]);
647 	pdata->core_data[indx] = NULL;
648 }
649 
650 static int __devinit coretemp_probe(struct platform_device *pdev)
651 {
652 	struct platform_data *pdata;
653 	int err;
654 
655 	/* Check the microcode version of the CPU */
656 	err = chk_ucode_version(pdev);
657 	if (err)
658 		return err;
659 
660 	/* Initialize the per-package data structures */
661 	pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL);
662 	if (!pdata)
663 		return -ENOMEM;
664 
665 	err = create_name_attr(pdata, &pdev->dev);
666 	if (err)
667 		goto exit_free;
668 
669 	pdata->phys_proc_id = TO_PHYS_ID(pdev->id);
670 	platform_set_drvdata(pdev, pdata);
671 
672 	pdata->hwmon_dev = hwmon_device_register(&pdev->dev);
673 	if (IS_ERR(pdata->hwmon_dev)) {
674 		err = PTR_ERR(pdata->hwmon_dev);
675 		dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
676 		goto exit_name;
677 	}
678 	return 0;
679 
680 exit_name:
681 	device_remove_file(&pdev->dev, &pdata->name_attr);
682 	platform_set_drvdata(pdev, NULL);
683 exit_free:
684 	kfree(pdata);
685 	return err;
686 }
687 
688 static int __devexit coretemp_remove(struct platform_device *pdev)
689 {
690 	struct platform_data *pdata = platform_get_drvdata(pdev);
691 	int i;
692 
693 	for (i = MAX_CORE_DATA - 1; i >= 0; --i)
694 		if (pdata->core_data[i])
695 			coretemp_remove_core(pdata, &pdev->dev, i);
696 
697 	device_remove_file(&pdev->dev, &pdata->name_attr);
698 	hwmon_device_unregister(pdata->hwmon_dev);
699 	platform_set_drvdata(pdev, NULL);
700 	kfree(pdata);
701 	return 0;
702 }
703 
704 static struct platform_driver coretemp_driver = {
705 	.driver = {
706 		.owner = THIS_MODULE,
707 		.name = DRVNAME,
708 	},
709 	.probe = coretemp_probe,
710 	.remove = __devexit_p(coretemp_remove),
711 };
712 
713 static int __cpuinit coretemp_device_add(unsigned int cpu)
714 {
715 	int err;
716 	struct platform_device *pdev;
717 	struct pdev_entry *pdev_entry;
718 
719 	mutex_lock(&pdev_list_mutex);
720 
721 	pdev = platform_device_alloc(DRVNAME, cpu);
722 	if (!pdev) {
723 		err = -ENOMEM;
724 		pr_err("Device allocation failed\n");
725 		goto exit;
726 	}
727 
728 	pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
729 	if (!pdev_entry) {
730 		err = -ENOMEM;
731 		goto exit_device_put;
732 	}
733 
734 	err = platform_device_add(pdev);
735 	if (err) {
736 		pr_err("Device addition failed (%d)\n", err);
737 		goto exit_device_free;
738 	}
739 
740 	pdev_entry->pdev = pdev;
741 	pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);
742 
743 	list_add_tail(&pdev_entry->list, &pdev_list);
744 	mutex_unlock(&pdev_list_mutex);
745 
746 	return 0;
747 
748 exit_device_free:
749 	kfree(pdev_entry);
750 exit_device_put:
751 	platform_device_put(pdev);
752 exit:
753 	mutex_unlock(&pdev_list_mutex);
754 	return err;
755 }
756 
757 static void coretemp_device_remove(unsigned int cpu)
758 {
759 	struct pdev_entry *p, *n;
760 	u16 phys_proc_id = TO_PHYS_ID(cpu);
761 
762 	mutex_lock(&pdev_list_mutex);
763 	list_for_each_entry_safe(p, n, &pdev_list, list) {
764 		if (p->phys_proc_id != phys_proc_id)
765 			continue;
766 		platform_device_unregister(p->pdev);
767 		list_del(&p->list);
768 		kfree(p);
769 	}
770 	mutex_unlock(&pdev_list_mutex);
771 }
772 
773 static bool is_any_core_online(struct platform_data *pdata)
774 {
775 	int i;
776 
777 	/* Find online cores, except pkgtemp data */
778 	for (i = MAX_CORE_DATA - 1; i >= 0; --i) {
779 		if (pdata->core_data[i] &&
780 			!pdata->core_data[i]->is_pkg_data) {
781 			return true;
782 		}
783 	}
784 	return false;
785 }
786 
787 static void __cpuinit get_core_online(unsigned int cpu)
788 {
789 	struct cpuinfo_x86 *c = &cpu_data(cpu);
790 	struct platform_device *pdev = coretemp_get_pdev(cpu);
791 	int err;
792 
793 	/*
794 	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
795 	 * sensors. We check only this bit; all the early CPUs
796 	 * without thermal sensors are filtered out by it.
797 	 */
798 	if (!cpu_has(c, X86_FEATURE_DTS))
799 		return;
800 
801 	if (!pdev) {
802 		/*
803 		 * Alright, we have DTS support.
804 		 * We are bringing the _first_ core in this pkg
805 		 * online. So, initialize per-pkg data structures and
806 		 * then bring this core online.
807 		 */
808 		err = coretemp_device_add(cpu);
809 		if (err)
810 			return;
811 		/*
812 		 * Check whether pkgtemp support is available.
813 		 * If so, add interfaces for pkgtemp.
814 		 */
815 		if (cpu_has(c, X86_FEATURE_PTS))
816 			coretemp_add_core(cpu, 1);
817 	}
818 	/*
819 	 * The physical CPU device exists by now, whether it was found above
820 	 * or just created. Add the interfaces for this core.
821 	 */
822 	coretemp_add_core(cpu, 0);
823 }
824 
825 static void __cpuinit put_core_offline(unsigned int cpu)
826 {
827 	int i, indx;
828 	struct platform_data *pdata;
829 	struct platform_device *pdev = coretemp_get_pdev(cpu);
830 
831 	/* If the physical CPU device does not exist, just return */
832 	if (!pdev)
833 		return;
834 
835 	pdata = platform_get_drvdata(pdev);
836 
837 	indx = TO_ATTR_NO(cpu);
838 
839 	if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
840 		coretemp_remove_core(pdata, &pdev->dev, indx);
841 
842 	/*
843 	 * If a HT sibling of a core is taken offline, but another HT sibling
844 	 * of the same core is still online, register the alternate sibling.
845 	 * This ensures that exactly one set of attributes is provided as long
846 	 * as at least one HT sibling of a core is online.
847 	 */
848 	for_each_sibling(i, cpu) {
849 		if (i != cpu) {
850 			get_core_online(i);
851 			/*
852 			 * Display temperature sensor data for one HT sibling
853 			 * per core only, so abort the loop after one such
854 			 * sibling has been found.
855 			 */
856 			break;
857 		}
858 	}
859 	/*
860 	 * If all cores in this pkg are offline, remove the device.
861 	 * coretemp_device_remove() calls platform_device_unregister(),
862 	 * which in turn calls coretemp_remove(). This removes the
863 	 * pkgtemp entry and does other cleanups.
864 	 */
865 	if (!is_any_core_online(pdata))
866 		coretemp_device_remove(cpu);
867 }
868 
869 static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
870 				 unsigned long action, void *hcpu)
871 {
872 	unsigned int cpu = (unsigned long) hcpu;
873 
874 	switch (action) {
875 	case CPU_ONLINE:
876 	case CPU_DOWN_FAILED:
877 		get_core_online(cpu);
878 		break;
879 	case CPU_DOWN_PREPARE:
880 		put_core_offline(cpu);
881 		break;
882 	}
883 	return NOTIFY_OK;
884 }
885 
886 static struct notifier_block coretemp_cpu_notifier __refdata = {
887 	.notifier_call = coretemp_cpu_callback,
888 };
889 
890 static int __init coretemp_init(void)
891 {
892 	int i, err = -ENODEV;
893 
894 	/* quick check if we run Intel */
895 	if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
896 		goto exit;
897 
898 	err = platform_driver_register(&coretemp_driver);
899 	if (err)
900 		goto exit;
901 
902 	for_each_online_cpu(i)
903 		get_core_online(i);
904 
905 #ifndef CONFIG_HOTPLUG_CPU
906 	if (list_empty(&pdev_list)) {
907 		err = -ENODEV;
908 		goto exit_driver_unreg;
909 	}
910 #endif
911 
912 	register_hotcpu_notifier(&coretemp_cpu_notifier);
913 	return 0;
914 
915 #ifndef CONFIG_HOTPLUG_CPU
916 exit_driver_unreg:
917 	platform_driver_unregister(&coretemp_driver);
918 #endif
919 exit:
920 	return err;
921 }
922 
923 static void __exit coretemp_exit(void)
924 {
925 	struct pdev_entry *p, *n;
926 
927 	unregister_hotcpu_notifier(&coretemp_cpu_notifier);
928 	mutex_lock(&pdev_list_mutex);
929 	list_for_each_entry_safe(p, n, &pdev_list, list) {
930 		platform_device_unregister(p->pdev);
931 		list_del(&p->list);
932 		kfree(p);
933 	}
934 	mutex_unlock(&pdev_list_mutex);
935 	platform_driver_unregister(&coretemp_driver);
936 }
937 
938 MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
939 MODULE_DESCRIPTION("Intel Core temperature monitor");
940 MODULE_LICENSE("GPL");
941 
942 module_init(coretemp_init)
943 module_exit(coretemp_exit)
944