xref: /openbmc/linux/drivers/hwmon/k10temp.c (revision 0a73d21e)
/*
 * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h processor hardware monitoring
 *
 * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
 *
 *
 * This driver is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this driver; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/processor.h>

MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_LICENSE("GPL");

static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "force loading on processors with erratum 319");

/* Provide lock for writing to NB_SMU_IND_ADDR */
static DEFINE_MUTEX(nb_smu_ind_mutex);

#ifndef PCI_DEVICE_ID_AMD_17H_DF_F3
#define PCI_DEVICE_ID_AMD_17H_DF_F3	0x1463
#endif

/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK	0xf0000000
#define CPUID_PKGTYPE_F		0x00000000
#define CPUID_PKGTYPE_AM2R2_AM3	0x10000000

/* DRAM controller (PCI function 2) */
#define REG_DCT0_CONFIG_HIGH		0x094
#define  DDR3_MODE			0x00000100

/* miscellaneous (PCI function 3) */
#define REG_HARDWARE_THERMAL_CONTROL	0x64
#define  HTC_ENABLE			0x00000001

#define REG_REPORTED_TEMPERATURE	0xa4

#define REG_NORTHBRIDGE_CAPABILITIES	0xe8
#define  NB_CAP_HTC			0x00000400

/*
 * For F15h M60h, functionality of REG_REPORTED_TEMPERATURE
 * has been moved to D0F0xBC_xD820_0CA4 [Reported Temperature
 * Control]
 */
#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET	0xd8200ca4

/* F17h M01h Access through SMN */
#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET	0x00059800

struct k10temp_data {
	struct pci_dev *pdev;
	void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
	int temp_offset;
};

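/*
 * Some Ryzen and Threadripper parts report a control temperature (Tctl)
 * that runs a fixed amount above the die temperature.  The table below
 * lists per-model offsets that are subtracted from the reported value so
 * that temp1_input is closer to the real die temperature.
 */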
struct tctl_offset {
	u8 model;
	char const *id;
	int offset;
};

static const struct tctl_offset tctl_offset_table[] = {
	{ 0x17, "AMD Ryzen 5 1600X", 20000 },
	{ 0x17, "AMD Ryzen 7 1700X", 20000 },
	{ 0x17, "AMD Ryzen 7 1800X", 20000 },
	{ 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
	{ 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
	{ 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
	{ 0x17, "AMD Ryzen Threadripper 1950", 10000 },
	{ 0x17, "AMD Ryzen Threadripper 1920", 10000 },
	{ 0x17, "AMD Ryzen Threadripper 1910", 10000 },
};

static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
{
	pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
}

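/*
 * On CPUs that do not expose the reported temperature register directly in
 * PCI config space, it is reached through an index/data register pair: the
 * register offset is written to the index register at "base" and the value
 * is read back from the data register at "base + 4".  The mutex serializes
 * the two config-space accesses.
 */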
static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
			      unsigned int base, int offset, u32 *val)
{
	mutex_lock(&nb_smu_ind_mutex);
	pci_bus_write_config_dword(pdev->bus, devfn,
				   base, offset);
	pci_bus_read_config_dword(pdev->bus, devfn,
				  base + 4, val);
	mutex_unlock(&nb_smu_ind_mutex);
}

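/*
 * F15h M60h/M70h: the reported temperature control register sits behind the
 * NB SMU index/data pair at D0F0xB8/D0F0xBC.
 */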
static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
{
	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
			  F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
}

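/*
 * F17h: the reported temperature control register lives in SMN space and is
 * reached through the SMN index/data pair at D0F0x60/D0F0x64.
 */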
static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
{
	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60,
			  F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
}

static ssize_t temp1_input_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct k10temp_data *data = dev_get_drvdata(dev);
	u32 regval;
	unsigned int temp;

	data->read_tempreg(data->pdev, &regval);
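	/*
	 * CurTmp sits in bits 31:21 of the reported temperature register, in
	 * steps of 1/8 degree C; multiplying by 125 converts it to the
	 * millidegrees Celsius that hwmon expects.
	 */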
	temp = (regval >> 21) * 125;
	if (temp > data->temp_offset)
		temp -= data->temp_offset;
	else
		temp = 0;

	return sprintf(buf, "%u\n", temp);
}

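/* temp1_max is a constant: 70 degrees C for all supported CPUs */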
static ssize_t temp1_max_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 70 * 1000);
}

static ssize_t show_temp_crit(struct device *dev,
			      struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct k10temp_data *data = dev_get_drvdata(dev);
	int show_hyst = attr->index;
	u32 regval;
	int value;

	pci_read_config_dword(data->pdev,
			      REG_HARDWARE_THERMAL_CONTROL, &regval);
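	/*
	 * The HTC temperature limit is in bits 22:16, in steps of 0.5 degree C
	 * with a 52 degree C offset.  For the hysteresis value, bits 27:24
	 * give the amount (also in 0.5 degree C steps) to subtract from the
	 * limit.
	 */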
	value = ((regval >> 16) & 0x7f) * 500 + 52000;
	if (show_hyst)
		value -= ((regval >> 24) & 0xf) * 500;
	return sprintf(buf, "%d\n", value);
}

static DEVICE_ATTR_RO(temp1_input);
static DEVICE_ATTR_RO(temp1_max);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit, NULL, 1);

static umode_t k10temp_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct k10temp_data *data = dev_get_drvdata(dev);
	struct pci_dev *pdev = data->pdev;

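	/*
	 * Attributes 0 and 1 (temp1_input and temp1_max) are always visible;
	 * the crit and crit_hyst attributes are shown only when the
	 * northbridge supports HTC and HTC is enabled.
	 */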
	if (index >= 2) {
		u32 reg_caps, reg_htc;

		pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
				      &reg_caps);
		pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL,
				      &reg_htc);
		if (!(reg_caps & NB_CAP_HTC) || !(reg_htc & HTC_ENABLE))
			return 0;
	}
	return attr->mode;
}

static struct attribute *k10temp_attrs[] = {
	&dev_attr_temp1_input.attr,
	&dev_attr_temp1_max.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	NULL
};

static const struct attribute_group k10temp_group = {
	.attrs = k10temp_attrs,
	.is_visible = k10temp_is_visible,
};
__ATTRIBUTE_GROUPS(k10temp);

static bool has_erratum_319(struct pci_dev *pdev)
{
	u32 pkg_type, reg_dram_cfg;

	if (boot_cpu_data.x86 != 0x10)
		return false;

	/*
	 * Erratum 319: The thermal sensor of Socket F/AM2+ processors
	 *              may be unreliable.
	 */
	pkg_type = cpuid_ebx(0x80000001) & CPUID_PKGTYPE_MASK;
	if (pkg_type == CPUID_PKGTYPE_F)
		return true;
	if (pkg_type != CPUID_PKGTYPE_AM2R2_AM3)
		return false;

	/* DDR3 memory implies socket AM3, which is good */
	pci_bus_read_config_dword(pdev->bus,
				  PCI_DEVFN(PCI_SLOT(pdev->devfn), 2),
				  REG_DCT0_CONFIG_HIGH, &reg_dram_cfg);
	if (reg_dram_cfg & DDR3_MODE)
		return false;

	/*
	 * Unfortunately it is possible to run a socket AM3 CPU with DDR2
	 * memory. We blacklist all the cores which do exist in socket AM2+
	 * format. It still isn't perfect, as RB-C2 cores exist in both AM2+
	 * and AM3 formats, but that's the best we can do.
	 */
	return boot_cpu_data.x86_model < 4 ||
	       (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
}

static int k10temp_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	int unreliable = has_erratum_319(pdev);
	struct device *dev = &pdev->dev;
	struct k10temp_data *data;
	struct device *hwmon_dev;
	int i;

	if (unreliable) {
		if (!force) {
			dev_err(dev,
				"unreliable CPU thermal sensor; monitoring disabled\n");
			return -ENODEV;
		}
		dev_warn(dev,
			 "unreliable CPU thermal sensor; check erratum 319\n");
	}

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->pdev = pdev;

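	/*
	 * Select how the temperature register is read: F15h M60h/M70h use the
	 * NB indirect registers, F17h uses the SMN, and everything else reads
	 * REG_REPORTED_TEMPERATURE directly from PCI config space.
	 */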
	if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
					  boot_cpu_data.x86_model == 0x70))
		data->read_tempreg = read_tempreg_nb_f15;
	else if (boot_cpu_data.x86 == 0x17)
		data->read_tempreg = read_tempreg_nb_f17;
	else
		data->read_tempreg = read_tempreg_pci;

	for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
		const struct tctl_offset *entry = &tctl_offset_table[i];

		if (boot_cpu_data.x86 == entry->model &&
		    strstr(boot_cpu_data.x86_model_id, entry->id)) {
			data->temp_offset = entry->offset;
			break;
		}
	}

	hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data,
							   k10temp_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}

static const struct pci_device_id k10temp_id_table[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);

static struct pci_driver k10temp_driver = {
	.name = "k10temp",
	.id_table = k10temp_id_table,
	.probe = k10temp_probe,
};

module_pci_driver(k10temp_driver);