xref: /openbmc/linux/drivers/gpu/drm/i915/i915_hwmon.c (revision ff40b576)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include <linux/hwmon.h>
7 #include <linux/hwmon-sysfs.h>
8 #include <linux/types.h>
9 
10 #include "i915_drv.h"
11 #include "i915_hwmon.h"
12 #include "i915_reg.h"
13 #include "intel_mchbar_regs.h"
14 #include "intel_pcode.h"
15 #include "gt/intel_gt.h"
16 #include "gt/intel_gt_regs.h"
17 
18 /*
19  * SF_* - scale factors for particular quantities according to hwmon spec.
20  * - voltage  - millivolts
21  * - power  - microwatts
22  * - curr   - milliamperes
23  * - energy - microjoules
24  * - time   - milliseconds
25  */
26 #define SF_VOLTAGE	1000
27 #define SF_POWER	1000000
28 #define SF_CURR		1000
29 #define SF_ENERGY	1000000
30 #define SF_TIME		1000
31 
/*
 * Per-platform MMIO register addresses used by hwmon. Registers a platform
 * does not expose are set to INVALID_MMIO_REG (see
 * hwm_get_preregistration_info()), which attribute-visibility callbacks
 * test with i915_mmio_reg_valid().
 */
struct hwm_reg {
	i915_reg_t gt_perf_status;	/* voltage readback (in0_input) */
	i915_reg_t pkg_power_sku_unit;	/* power/energy/time scale shifts */
	i915_reg_t pkg_power_sku;	/* TDP and min/max PL1 bounds */
	i915_reg_t pkg_rapl_limit;	/* PL1 limit and time window */
	i915_reg_t energy_status_all;	/* package-level energy counter */
	i915_reg_t energy_status_tile;	/* per-tile/GT energy counter */
};
40 
/*
 * State for extending the 32-bit HW energy counter into a wider software
 * accumulator; see hwm_energy() for the overflow discussion.
 */
struct hwm_energy_info {
	u32 reg_val_prev;			/* last raw counter value read */
	long accum_energy;			/* Accumulated energy for energy1_input */
};
45 
/*
 * Per-hwmon-device data: one instance for the device-level hwmon and one
 * per GT for the per-tile hwmon devices.
 */
struct hwm_drvdata {
	struct i915_hwmon *hwmon;
	struct intel_uncore *uncore;
	struct device *hwmon_dev;
	struct hwm_energy_info ei;		/*  Energy info for energy1_input */
	char name[12];				/* "i915" or "i915_gt<n>" */
	int gt_n;				/* GT index, or -1 for the device-level instance */
};
54 
/* Top-level hwmon state, allocated in i915_hwmon_register() */
struct i915_hwmon {
	struct hwm_drvdata ddat;		/* device-level hwmon instance */
	struct hwm_drvdata ddat_gt[I915_MAX_GT];	/* per-GT hwmon instances */
	struct mutex hwmon_lock;		/* counter overflow logic and rmw */
	struct hwm_reg rg;
	/* scale shifts read once from rg.pkg_power_sku_unit */
	int scl_shift_power;
	int scl_shift_energy;
	int scl_shift_time;
};
64 
/*
 * Read-modify-write @reg under hwmon_lock with a runtime-pm wakeref held.
 * The lock serializes concurrent rmw's of the shared limit registers
 * (see hwmon_lock comment in struct i915_hwmon).
 */
static void
hwm_locked_with_pm_intel_uncore_rmw(struct hwm_drvdata *ddat,
				    i915_reg_t reg, u32 clear, u32 set)
{
	struct i915_hwmon *hwmon = ddat->hwmon;
	struct intel_uncore *uncore = ddat->uncore;
	intel_wakeref_t wakeref;

	mutex_lock(&hwmon->hwmon_lock);

	/* wakeref is acquired inside the lock; rmw runs with HW awake */
	with_intel_runtime_pm(uncore->rpm, wakeref)
		intel_uncore_rmw(uncore, reg, clear, set);

	mutex_unlock(&hwmon->hwmon_lock);
}
80 
81 /*
82  * This function's return type of u64 allows for the case where the scaling
83  * of the field taken from the 32-bit register value might cause a result to
84  * exceed 32 bits.
85  */
86 static u64
87 hwm_field_read_and_scale(struct hwm_drvdata *ddat, i915_reg_t rgadr,
88 			 u32 field_msk, int nshift, u32 scale_factor)
89 {
90 	struct intel_uncore *uncore = ddat->uncore;
91 	intel_wakeref_t wakeref;
92 	u32 reg_value;
93 
94 	with_intel_runtime_pm(uncore->rpm, wakeref)
95 		reg_value = intel_uncore_read(uncore, rgadr);
96 
97 	reg_value = REG_FIELD_GET(field_msk, reg_value);
98 
99 	return mul_u64_u32_shr(reg_value, scale_factor, nshift);
100 }
101 
102 /*
103  * hwm_energy - Obtain energy value
104  *
105  * The underlying energy hardware register is 32-bits and is subject to
106  * overflow. How long before overflow? For example, with an example
107  * scaling bit shift of 14 bits (see register *PACKAGE_POWER_SKU_UNIT) and
108  * a power draw of 1000 watts, the 32-bit counter will overflow in
109  * approximately 4.36 minutes.
110  *
111  * Examples:
112  *    1 watt:  (2^32 >> 14) /    1 W / (60 * 60 * 24) secs/day -> 3 days
113  * 1000 watts: (2^32 >> 14) / 1000 W / 60             secs/min -> 4.36 minutes
114  *
115  * The function significantly increases overflow duration (from 4.36
116  * minutes) by accumulating the energy register into a 'long' as allowed by
117  * the hwmon API. Using x86_64 128 bit arithmetic (see mul_u64_u32_shr()),
118  * a 'long' of 63 bits, SF_ENERGY of 1e6 (~20 bits) and
119  * hwmon->scl_shift_energy of 14 bits we have 57 (63 - 20 + 14) bits before
120  * energy1_input overflows. This at 1000 W is an overflow duration of 278 years.
121  */
122 static void
123 hwm_energy(struct hwm_drvdata *ddat, long *energy)
124 {
125 	struct intel_uncore *uncore = ddat->uncore;
126 	struct i915_hwmon *hwmon = ddat->hwmon;
127 	struct hwm_energy_info *ei = &ddat->ei;
128 	intel_wakeref_t wakeref;
129 	i915_reg_t rgaddr;
130 	u32 reg_val;
131 
132 	if (ddat->gt_n >= 0)
133 		rgaddr = hwmon->rg.energy_status_tile;
134 	else
135 		rgaddr = hwmon->rg.energy_status_all;
136 
137 	mutex_lock(&hwmon->hwmon_lock);
138 
139 	with_intel_runtime_pm(uncore->rpm, wakeref)
140 		reg_val = intel_uncore_read(uncore, rgaddr);
141 
142 	if (reg_val >= ei->reg_val_prev)
143 		ei->accum_energy += reg_val - ei->reg_val_prev;
144 	else
145 		ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
146 	ei->reg_val_prev = reg_val;
147 
148 	*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
149 				  hwmon->scl_shift_energy);
150 	mutex_unlock(&hwmon->hwmon_lock);
151 }
152 
153 static ssize_t
154 hwm_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
155 			     char *buf)
156 {
157 	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
158 	struct i915_hwmon *hwmon = ddat->hwmon;
159 	intel_wakeref_t wakeref;
160 	u32 r, x, y, x_w = 2; /* 2 bits */
161 	u64 tau4, out;
162 
163 	with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
164 		r = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_rapl_limit);
165 
166 	x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
167 	y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
168 	/*
169 	 * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
170 	 *     = (4 | x) << (y - 2)
171 	 * where (y - 2) ensures a 1.x fixed point representation of 1.x
172 	 * However because y can be < 2, we compute
173 	 *     tau4 = (4 | x) << y
174 	 * but add 2 when doing the final right shift to account for units
175 	 */
176 	tau4 = ((1 << x_w) | x) << y;
177 	/* val in hwmon interface units (millisec) */
178 	out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
179 
180 	return sysfs_emit(buf, "%llu\n", out);
181 }
182 
183 static ssize_t
184 hwm_power1_max_interval_store(struct device *dev,
185 			      struct device_attribute *attr,
186 			      const char *buf, size_t count)
187 {
188 	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
189 	struct i915_hwmon *hwmon = ddat->hwmon;
190 	u32 x, y, rxy, x_w = 2; /* 2 bits */
191 	u64 tau4, r, max_win;
192 	unsigned long val;
193 	int ret;
194 
195 	ret = kstrtoul(buf, 0, &val);
196 	if (ret)
197 		return ret;
198 
199 	/*
200 	 * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12
201 	 * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds
202 	 */
203 #define PKG_MAX_WIN_DEFAULT 0x12ull
204 
205 	/*
206 	 * val must be < max in hwmon interface units. The steps below are
207 	 * explained in i915_power1_max_interval_show()
208 	 */
209 	r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
210 	x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
211 	y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
212 	tau4 = ((1 << x_w) | x) << y;
213 	max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
214 
215 	if (val > max_win)
216 		return -EINVAL;
217 
218 	/* val in hw units */
219 	val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);
220 	/* Convert to 1.x * power(2,y) */
221 	if (!val) {
222 		/* Avoid ilog2(0) */
223 		y = 0;
224 		x = 0;
225 	} else {
226 		y = ilog2(val);
227 		/* x = (val - (1 << y)) >> (y - 2); */
228 		x = (val - (1ul << y)) << x_w >> y;
229 	}
230 
231 	rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
232 
233 	hwm_locked_with_pm_intel_uncore_rmw(ddat, hwmon->rg.pkg_rapl_limit,
234 					    PKG_PWR_LIM_1_TIME, rxy);
235 	return count;
236 }
237 
/* Custom (non hwmon-core) sysfs attribute: PL1 time window in milliseconds */
static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
			  hwm_power1_max_interval_show,
			  hwm_power1_max_interval_store, 0);

static struct attribute *hwm_attributes[] = {
	&sensor_dev_attr_power1_max_interval.dev_attr.attr,
	NULL
};
246 
247 static umode_t hwm_attributes_visible(struct kobject *kobj,
248 				      struct attribute *attr, int index)
249 {
250 	struct device *dev = kobj_to_dev(kobj);
251 	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
252 	struct i915_hwmon *hwmon = ddat->hwmon;
253 
254 	if (attr == &sensor_dev_attr_power1_max_interval.dev_attr.attr)
255 		return i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit) ? attr->mode : 0;
256 
257 	return 0;
258 }
259 
/* Extra attribute group passed to devm_hwmon_device_register_with_info() */
static const struct attribute_group hwm_attrgroup = {
	.attrs = hwm_attributes,
	.is_visible = hwm_attributes_visible,
};

static const struct attribute_group *hwm_groups[] = {
	&hwm_attrgroup,
	NULL
};
269 
/* Channels exposed on the device-level hwmon instance */
static const struct hwmon_channel_info *hwm_info[] = {
	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
	HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT),
	NULL
};

/* Per-GT instances expose only the energy counter */
static const struct hwmon_channel_info *hwm_gt_info[] = {
	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
	NULL
};
282 
/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
static int hwm_pcode_read_i1(struct drm_i915_private *i915, u32 *uval)
{
	/* Avoid ILLEGAL_SUBCOMMAND "mailbox access failed" warning in snb_pcode_read */
	if (IS_DG1(i915) || IS_DG2(i915))
		return -ENXIO;

	return snb_pcode_read_p(&i915->uncore, PCODE_POWER_SETUP,
				POWER_SETUP_SUBCOMMAND_READ_I1, 0, uval);
}
293 
294 static int hwm_pcode_write_i1(struct drm_i915_private *i915, u32 uval)
295 {
296 	return  snb_pcode_write_p(&i915->uncore, PCODE_POWER_SETUP,
297 				  POWER_SETUP_SUBCOMMAND_WRITE_I1, 0, uval);
298 }
299 
300 static umode_t
301 hwm_in_is_visible(const struct hwm_drvdata *ddat, u32 attr)
302 {
303 	struct drm_i915_private *i915 = ddat->uncore->i915;
304 
305 	switch (attr) {
306 	case hwmon_in_input:
307 		return IS_DG1(i915) || IS_DG2(i915) ? 0444 : 0;
308 	default:
309 		return 0;
310 	}
311 }
312 
313 static int
314 hwm_in_read(struct hwm_drvdata *ddat, u32 attr, long *val)
315 {
316 	struct i915_hwmon *hwmon = ddat->hwmon;
317 	intel_wakeref_t wakeref;
318 	u32 reg_value;
319 
320 	switch (attr) {
321 	case hwmon_in_input:
322 		with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
323 			reg_value = intel_uncore_read(ddat->uncore, hwmon->rg.gt_perf_status);
324 		/* HW register value in units of 2.5 millivolt */
325 		*val = DIV_ROUND_CLOSEST(REG_FIELD_GET(GEN12_VOLTAGE_MASK, reg_value) * 25, 10);
326 		return 0;
327 	default:
328 		return -EOPNOTSUPP;
329 	}
330 }
331 
332 static umode_t
333 hwm_power_is_visible(const struct hwm_drvdata *ddat, u32 attr, int chan)
334 {
335 	struct drm_i915_private *i915 = ddat->uncore->i915;
336 	struct i915_hwmon *hwmon = ddat->hwmon;
337 	u32 uval;
338 
339 	switch (attr) {
340 	case hwmon_power_max:
341 		return i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit) ? 0664 : 0;
342 	case hwmon_power_rated_max:
343 		return i915_mmio_reg_valid(hwmon->rg.pkg_power_sku) ? 0444 : 0;
344 	case hwmon_power_crit:
345 		return (hwm_pcode_read_i1(i915, &uval) ||
346 			!(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
347 	default:
348 		return 0;
349 	}
350 }
351 
/* Value read/written through power1_max to indicate PL1 is disabled */
#define PL1_DISABLE 0

/*
 * HW allows arbitrary PL1 limits to be set but silently clamps these values to
 * "typical but not guaranteed" min/max values in rg.pkg_power_sku. Follow the
 * same pattern for sysfs, allow arbitrary PL1 limits to be set but display
 * clamped values when read. Write/read I1 also follows the same pattern.
 */
/*
 * hwm_power_max_read - report the PL1 power limit in microwatts.
 * Returns PL1_DISABLE (0) when the limit enable bit is clear; otherwise
 * the scaled limit, clamped to the min/max advertised in rg.pkg_power_sku
 * when both are non-zero (mirroring the HW clamping described above).
 */
static int
hwm_power_max_read(struct hwm_drvdata *ddat, long *val)
{
	struct i915_hwmon *hwmon = ddat->hwmon;
	intel_wakeref_t wakeref;
	u64 r, min, max;

	/* Check if PL1 limit is disabled */
	with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
		r = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_rapl_limit);
	if (!(r & PKG_PWR_LIM_1_EN)) {
		*val = PL1_DISABLE;
		return 0;
	}

	*val = hwm_field_read_and_scale(ddat,
					hwmon->rg.pkg_rapl_limit,
					PKG_PWR_LIM_1,
					hwmon->scl_shift_power,
					SF_POWER);

	/* Clamp to the "typical but not guaranteed" SKU min/max, if present */
	with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
		r = intel_uncore_read64(ddat->uncore, hwmon->rg.pkg_power_sku);
	min = REG_FIELD_GET(PKG_MIN_PWR, r);
	min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
	max = REG_FIELD_GET(PKG_MAX_PWR, r);
	max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);

	if (min && max)
		*val = clamp_t(u64, *val, min, max);

	return 0;
}
393 
/*
 * hwm_power_max_write - set the PL1 power limit (microwatts), or disable
 * it when @val is PL1_DISABLE. The disable path reads the register back
 * and fails with -ENODEV if the enable bit stuck, since not every
 * platform allows disabling PL1.
 */
static int
hwm_power_max_write(struct hwm_drvdata *ddat, long val)
{
	struct i915_hwmon *hwmon = ddat->hwmon;
	intel_wakeref_t wakeref;
	u32 nval;

	/* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */
	if (val == PL1_DISABLE) {
		mutex_lock(&hwmon->hwmon_lock);
		with_intel_runtime_pm(ddat->uncore->rpm, wakeref) {
			intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
					 PKG_PWR_LIM_1_EN, 0);
			/* read back to verify the disable actually took effect */
			nval = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_rapl_limit);
		}
		mutex_unlock(&hwmon->hwmon_lock);

		if (nval & PKG_PWR_LIM_1_EN)
			return -ENODEV;
		return 0;
	}

	/* Computation in 64-bits to avoid overflow. Round to nearest. */
	nval = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_power, SF_POWER);
	nval = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, nval);

	hwm_locked_with_pm_intel_uncore_rmw(ddat, hwmon->rg.pkg_rapl_limit,
					    PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1,
					    nval);
	return 0;
}
425 
426 static int
427 hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val)
428 {
429 	struct i915_hwmon *hwmon = ddat->hwmon;
430 	int ret;
431 	u32 uval;
432 
433 	switch (attr) {
434 	case hwmon_power_max:
435 		return hwm_power_max_read(ddat, val);
436 	case hwmon_power_rated_max:
437 		*val = hwm_field_read_and_scale(ddat,
438 						hwmon->rg.pkg_power_sku,
439 						PKG_PKG_TDP,
440 						hwmon->scl_shift_power,
441 						SF_POWER);
442 		return 0;
443 	case hwmon_power_crit:
444 		ret = hwm_pcode_read_i1(ddat->uncore->i915, &uval);
445 		if (ret)
446 			return ret;
447 		if (!(uval & POWER_SETUP_I1_WATTS))
448 			return -ENODEV;
449 		*val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
450 				       SF_POWER, POWER_SETUP_I1_SHIFT);
451 		return 0;
452 	default:
453 		return -EOPNOTSUPP;
454 	}
455 }
456 
457 static int
458 hwm_power_write(struct hwm_drvdata *ddat, u32 attr, int chan, long val)
459 {
460 	u32 uval;
461 
462 	switch (attr) {
463 	case hwmon_power_max:
464 		return hwm_power_max_write(ddat, val);
465 	case hwmon_power_crit:
466 		uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_POWER);
467 		return hwm_pcode_write_i1(ddat->uncore->i915, uval);
468 	default:
469 		return -EOPNOTSUPP;
470 	}
471 }
472 
473 static umode_t
474 hwm_energy_is_visible(const struct hwm_drvdata *ddat, u32 attr)
475 {
476 	struct i915_hwmon *hwmon = ddat->hwmon;
477 	i915_reg_t rgaddr;
478 
479 	switch (attr) {
480 	case hwmon_energy_input:
481 		if (ddat->gt_n >= 0)
482 			rgaddr = hwmon->rg.energy_status_tile;
483 		else
484 			rgaddr = hwmon->rg.energy_status_all;
485 		return i915_mmio_reg_valid(rgaddr) ? 0444 : 0;
486 	default:
487 		return 0;
488 	}
489 }
490 
491 static int
492 hwm_energy_read(struct hwm_drvdata *ddat, u32 attr, long *val)
493 {
494 	switch (attr) {
495 	case hwmon_energy_input:
496 		hwm_energy(ddat, val);
497 		return 0;
498 	default:
499 		return -EOPNOTSUPP;
500 	}
501 }
502 
503 static umode_t
504 hwm_curr_is_visible(const struct hwm_drvdata *ddat, u32 attr)
505 {
506 	struct drm_i915_private *i915 = ddat->uncore->i915;
507 	u32 uval;
508 
509 	switch (attr) {
510 	case hwmon_curr_crit:
511 		return (hwm_pcode_read_i1(i915, &uval) ||
512 			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
513 	default:
514 		return 0;
515 	}
516 }
517 
518 static int
519 hwm_curr_read(struct hwm_drvdata *ddat, u32 attr, long *val)
520 {
521 	int ret;
522 	u32 uval;
523 
524 	switch (attr) {
525 	case hwmon_curr_crit:
526 		ret = hwm_pcode_read_i1(ddat->uncore->i915, &uval);
527 		if (ret)
528 			return ret;
529 		if (uval & POWER_SETUP_I1_WATTS)
530 			return -ENODEV;
531 		*val = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
532 				       SF_CURR, POWER_SETUP_I1_SHIFT);
533 		return 0;
534 	default:
535 		return -EOPNOTSUPP;
536 	}
537 }
538 
539 static int
540 hwm_curr_write(struct hwm_drvdata *ddat, u32 attr, long val)
541 {
542 	u32 uval;
543 
544 	switch (attr) {
545 	case hwmon_curr_crit:
546 		uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_CURR);
547 		return hwm_pcode_write_i1(ddat->uncore->i915, uval);
548 	default:
549 		return -EOPNOTSUPP;
550 	}
551 }
552 
553 static umode_t
554 hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type,
555 	       u32 attr, int channel)
556 {
557 	struct hwm_drvdata *ddat = (struct hwm_drvdata *)drvdata;
558 
559 	switch (type) {
560 	case hwmon_in:
561 		return hwm_in_is_visible(ddat, attr);
562 	case hwmon_power:
563 		return hwm_power_is_visible(ddat, attr, channel);
564 	case hwmon_energy:
565 		return hwm_energy_is_visible(ddat, attr);
566 	case hwmon_curr:
567 		return hwm_curr_is_visible(ddat, attr);
568 	default:
569 		return 0;
570 	}
571 }
572 
573 static int
574 hwm_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
575 	 int channel, long *val)
576 {
577 	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
578 
579 	switch (type) {
580 	case hwmon_in:
581 		return hwm_in_read(ddat, attr, val);
582 	case hwmon_power:
583 		return hwm_power_read(ddat, attr, channel, val);
584 	case hwmon_energy:
585 		return hwm_energy_read(ddat, attr, val);
586 	case hwmon_curr:
587 		return hwm_curr_read(ddat, attr, val);
588 	default:
589 		return -EOPNOTSUPP;
590 	}
591 }
592 
593 static int
594 hwm_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
595 	  int channel, long val)
596 {
597 	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
598 
599 	switch (type) {
600 	case hwmon_power:
601 		return hwm_power_write(ddat, attr, channel, val);
602 	case hwmon_curr:
603 		return hwm_curr_write(ddat, attr, val);
604 	default:
605 		return -EOPNOTSUPP;
606 	}
607 }
608 
/* Ops and chip info for the device-level hwmon instance */
static const struct hwmon_ops hwm_ops = {
	.is_visible = hwm_is_visible,
	.read = hwm_read,
	.write = hwm_write,
};

static const struct hwmon_chip_info hwm_chip_info = {
	.ops = &hwm_ops,
	.info = hwm_info,
};
619 
620 static umode_t
621 hwm_gt_is_visible(const void *drvdata, enum hwmon_sensor_types type,
622 		  u32 attr, int channel)
623 {
624 	struct hwm_drvdata *ddat = (struct hwm_drvdata *)drvdata;
625 
626 	switch (type) {
627 	case hwmon_energy:
628 		return hwm_energy_is_visible(ddat, attr);
629 	default:
630 		return 0;
631 	}
632 }
633 
634 static int
635 hwm_gt_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
636 	    int channel, long *val)
637 {
638 	struct hwm_drvdata *ddat = dev_get_drvdata(dev);
639 
640 	switch (type) {
641 	case hwmon_energy:
642 		return hwm_energy_read(ddat, attr, val);
643 	default:
644 		return -EOPNOTSUPP;
645 	}
646 }
647 
/* Ops and chip info for the per-GT hwmon instances (read-only energy) */
static const struct hwmon_ops hwm_gt_ops = {
	.is_visible = hwm_gt_is_visible,
	.read = hwm_gt_read,
};

static const struct hwmon_chip_info hwm_gt_chip_info = {
	.ops = &hwm_gt_ops,
	.info = hwm_gt_info,
};
657 
/*
 * hwm_get_preregistration_info - one-time setup before hwmon registration.
 * Selects the per-platform register set, caches the power/energy/time
 * scale shifts, and primes the energy accumulators with a first read.
 */
static void
hwm_get_preregistration_info(struct drm_i915_private *i915)
{
	struct i915_hwmon *hwmon = i915->hwmon;
	struct intel_uncore *uncore = &i915->uncore;
	struct hwm_drvdata *ddat = &hwmon->ddat;
	intel_wakeref_t wakeref;
	u32 val_sku_unit = 0;
	struct intel_gt *gt;
	long energy;
	int i;

	/* Available for all Gen12+/dGfx */
	hwmon->rg.gt_perf_status = GEN12_RPSTAT1;

	if (IS_DG1(i915) || IS_DG2(i915)) {
		hwmon->rg.pkg_power_sku_unit = PCU_PACKAGE_POWER_SKU_UNIT;
		hwmon->rg.pkg_power_sku = PCU_PACKAGE_POWER_SKU;
		hwmon->rg.pkg_rapl_limit = PCU_PACKAGE_RAPL_LIMIT;
		hwmon->rg.energy_status_all = PCU_PACKAGE_ENERGY_STATUS;
		hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
	} else if (IS_XEHPSDV(i915)) {
		hwmon->rg.pkg_power_sku_unit = GT0_PACKAGE_POWER_SKU_UNIT;
		hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
		hwmon->rg.pkg_rapl_limit = GT0_PACKAGE_RAPL_LIMIT;
		hwmon->rg.energy_status_all = GT0_PLATFORM_ENERGY_STATUS;
		hwmon->rg.energy_status_tile = GT0_PACKAGE_ENERGY_STATUS;
	} else {
		/* No hwmon registers on this platform */
		hwmon->rg.pkg_power_sku_unit = INVALID_MMIO_REG;
		hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
		hwmon->rg.pkg_rapl_limit = INVALID_MMIO_REG;
		hwmon->rg.energy_status_all = INVALID_MMIO_REG;
		hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
	}

	with_intel_runtime_pm(uncore->rpm, wakeref) {
		/*
		 * The contents of register hwmon->rg.pkg_power_sku_unit do not change,
		 * so read it once and store the shift values.
		 */
		if (i915_mmio_reg_valid(hwmon->rg.pkg_power_sku_unit))
			val_sku_unit = intel_uncore_read(uncore,
							 hwmon->rg.pkg_power_sku_unit);
	}

	/* val_sku_unit stays 0 (shifts of 0) when the register is invalid */
	hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
	hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
	hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);

	/*
	 * Initialize 'struct hwm_energy_info', i.e. set fields to the
	 * first value of the energy register read
	 */
	if (i915_mmio_reg_valid(hwmon->rg.energy_status_all))
		hwm_energy(ddat, &energy);
	if (i915_mmio_reg_valid(hwmon->rg.energy_status_tile)) {
		for_each_gt(gt, i915, i)
			hwm_energy(&hwmon->ddat_gt[i], &energy);
	}
}
718 
/*
 * i915_hwmon_register - create the device-level and per-GT hwmon devices.
 * dGfx only. All allocations and registrations are device-managed, so
 * failures here degrade gracefully (no hwmon) rather than failing probe.
 */
void i915_hwmon_register(struct drm_i915_private *i915)
{
	struct device *dev = i915->drm.dev;
	struct i915_hwmon *hwmon;
	struct device *hwmon_dev;
	struct hwm_drvdata *ddat;
	struct hwm_drvdata *ddat_gt;
	struct intel_gt *gt;
	int i;

	/* hwmon is available only for dGfx */
	if (!IS_DGFX(i915))
		return;

	hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
	if (!hwmon)
		return;

	i915->hwmon = hwmon;
	mutex_init(&hwmon->hwmon_lock);
	ddat = &hwmon->ddat;

	/* Device-level instance: gt_n = -1 selects the package registers */
	ddat->hwmon = hwmon;
	ddat->uncore = &i915->uncore;
	snprintf(ddat->name, sizeof(ddat->name), "i915");
	ddat->gt_n = -1;

	for_each_gt(gt, i915, i) {
		ddat_gt = hwmon->ddat_gt + i;

		ddat_gt->hwmon = hwmon;
		ddat_gt->uncore = gt->uncore;
		snprintf(ddat_gt->name, sizeof(ddat_gt->name), "i915_gt%u", i);
		ddat_gt->gt_n = i;
	}

	/* Must run before registration: visibility callbacks read hwmon->rg */
	hwm_get_preregistration_info(i915);

	/*  hwmon_dev points to device hwmon<i> */
	hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat->name,
							 ddat,
							 &hwm_chip_info,
							 hwm_groups);
	if (IS_ERR(hwmon_dev)) {
		i915->hwmon = NULL;
		return;
	}

	ddat->hwmon_dev = hwmon_dev;

	for_each_gt(gt, i915, i) {
		ddat_gt = hwmon->ddat_gt + i;
		/*
		 * Create per-gt directories only if a per-gt attribute is
		 * visible. Currently this is only energy
		 */
		if (!hwm_gt_is_visible(ddat_gt, hwmon_energy, hwmon_energy_input, 0))
			continue;

		hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat_gt->name,
								 ddat_gt,
								 &hwm_gt_chip_info,
								 NULL);
		/* per-GT registration failure is non-fatal; just skip it */
		if (!IS_ERR(hwmon_dev))
			ddat_gt->hwmon_dev = hwmon_dev;
	}
}
786 
/*
 * Devices and allocations are device-managed (devm), so teardown only
 * needs to clear the pointer.
 */
void i915_hwmon_unregister(struct drm_i915_private *i915)
{
	fetch_and_zero(&i915->hwmon);
}
791