1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * isst_tpmi.c: SST TPMI interface core
4  *
5  * Copyright (c) 2023, Intel Corporation.
6  * All Rights Reserved.
7  *
8  * The following information is useful for understanding the flows:
9  * In the current generation of platforms, TPMI is supported via an OOB
10  * PCI device. This PCI device has one instance per CPU package.
11  * There is a unique TPMI ID for SST. Each TPMI ID also has multiple
12  * entries, representing per power domain information.
13  *
14  * There is one dev file for complete SST information and control, the same as
15  * for the prior generation of hardware. User space doesn't need to know how
16  * the information is presented by the hardware. The TPMI core module
17  * implements the hardware mapping.
18  */
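
/*
 * Illustrative user-space sketch (not part of this driver): SST information
 * and controls are reached through ioctls on the character device exposed by
 * the ISST common module (assumed here to be /dev/isst_interface). Counting
 * the TPMI SST power domains on socket 0 could look roughly like:
 *
 *	int fd = open("/dev/isst_interface", O_RDWR);
 *	struct isst_tpmi_instance_count count = { .socket_id = 0 };
 *
 *	if (fd >= 0 && !ioctl(fd, ISST_IF_COUNT_TPMI_INSTANCES, &count))
 *		printf("power domains:%d valid mask:%x\n",
 *		       count.count, count.valid_mask);
 *
 * Structure and ioctl names follow <uapi/linux/isst_if.h> as used below;
 * treat this as a sketch rather than a complete program.
 */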
19 
20 #include <linux/auxiliary_bus.h>
21 #include <linux/delay.h>
22 #include <linux/intel_tpmi.h>
23 #include <linux/fs.h>
24 #include <linux/io.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <uapi/linux/isst_if.h>
28 
29 #include "isst_tpmi_core.h"
30 #include "isst_if_common.h"
31 
32 /* Supported SST hardware version by this driver */
33 #define ISST_HEADER_VERSION		1
34 
35 /*
36  * Multiplier applied to a value read from MMIO to convert it to a
37  * standard unit. A factor of 1 means no conversion is needed.
38  */
39 #define SST_MUL_FACTOR_NONE    1
40 
41 /* Scaling factor of 100 for frequency ratio to frequency (MHz) conversion */
42 #define SST_MUL_FACTOR_FREQ    100
43 
44 /* All SST registers are 64 bits in size */
45 #define SST_REG_SIZE   8
46 
47 /**
48  * struct sst_header -	SST main header
49  * @interface_version:	Version number for this interface
50  * @cap_mask:		Bitmask of the supported sub features. 1=the sub feature is enabled.
51  *			0=disabled.
52  *			Bit[8]= SST_CP enable (1), disable (0)
53  *			bit[9]= SST_PP enable (1), disable (0)
54  *			other bits are reserved for future use
55  * @cp_offset:		Qword (8 bytes) offset to the SST_CP register bank
56  * @pp_offset:		Qword (8 bytes) offset to the SST_PP register bank
57  * @reserved:		Reserved for future use
58  *
59  * This register allows SW to discover SST capability and the offsets to SST-CP
60  * and SST-PP register banks.
61  */
62 struct sst_header {
63 	u8 interface_version;
64 	u8 cap_mask;
65 	u8 cp_offset;
66 	u8 pp_offset;
67 	u32 reserved;
68 } __packed;
69 
70 /**
71  * struct cp_header -	SST-CP (core-power) header
72  * @feature_id:		0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF
73  * @feature_rev:	Interface Version number for this SST feature
74  * @ratio_unit:		Frequency ratio unit. 00: 100MHz. All others are reserved
75  * @reserved:		Reserved for future use
76  *
77  * This structure is used to store the SST-CP header. It is packed in the same
78  * format as defined in the specifications.
79  */
80 struct cp_header {
81 	u64 feature_id :4;
82 	u64 feature_rev :8;
83 	u64 ratio_unit :2;
84 	u64 reserved :50;
85 } __packed;
86 
87 /**
88  * struct pp_header -	SST-PP (Perf profile) header
89  * @feature_id:		0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF
90  * @feature_rev:	Interface Version number for this SST feature
91  * @level_en_mask:	SST-PP level enable/disable fuse mask
92  * @allowed_level_mask:	Allowed level mask used for dynamic config level switching
93  * @reserved0:		Reserved for future use
94  * @ratio_unit:		Frequency ratio unit. 00: 100MHz. All others are reserved
95  * @block_size:		Size of PP block in Qword unit (8 bytes)
96  * @dynamic_switch:	If set (1), dynamic switching of SST PP is supported
97  * @memory_ratio_unit:	Memory Controller frequency ratio unit. 00: 100MHz, others reserved
98  * @reserved1:		Reserved for future use
99  *
100  * This structure is used to store the SST-PP header. It is packed in the same
101  * format as defined in the specifications.
102  */
103 struct pp_header {
104 	u64 feature_id :4;
105 	u64 feature_rev :8;
106 	u64 level_en_mask :8;
107 	u64 allowed_level_mask :8;
108 	u64 reserved0 :4;
109 	u64 ratio_unit :2;
110 	u64 block_size :8;
111 	u64 dynamic_switch :1;
112 	u64 memory_ratio_unit :2;
113 	u64 reserved1 :19;
114 } __packed;
115 
116 /**
117  * struct feature_offset -	Offsets to SST-PP features
118  * @pp_offset:		Qword offset within PP level for the SST_PP register bank
119  * @bf_offset:		Qword offset within PP level for the SST_BF register bank
120  * @tf_offset:		Qword offset within PP level for the SST_TF register bank
121  * @reserved:		Reserved for future use
122  *
123  * This structure is used to store offsets for SST features in the register bank.
124  * This is packed to the same format as defined in the specifications.
125  */
126 struct feature_offset {
127 	u64 pp_offset :8;
128 	u64 bf_offset :8;
129 	u64 tf_offset :8;
130 	u64 reserved :40;
131 } __packed;
132 
133 /**
134  * struct levels_offset -	Offsets to each SST PP level
135  * @sst_pp_level0_offset:	Qword offset to the register block of PP level 0
136  * @sst_pp_level1_offset:	Qword offset to the register block of PP level 1
137  * @sst_pp_level2_offset:	Qword offset to the register block of PP level 2
138  * @sst_pp_level3_offset:	Qword offset to the register block of PP level 3
139  * @sst_pp_level4_offset:	Qword offset to the register block of PP level 4
140  * @reserved:			Reserved for future use
141  *
142  * This structure is used to store offsets of SST PP levels in the register bank.
143  * This is packed to the same format as defined in the specifications.
144  */
145 struct levels_offset {
146 	u64 sst_pp_level0_offset :8;
147 	u64 sst_pp_level1_offset :8;
148 	u64 sst_pp_level2_offset :8;
149 	u64 sst_pp_level3_offset :8;
150 	u64 sst_pp_level4_offset :8;
151 	u64 reserved :24;
152 } __packed;
153 
154 /**
155  * struct pp_control_offset -	Offsets for SST PP controls
156  * @perf_level:		A SST-PP level that SW intends to switch to
157  * @perf_level_lock:	SST-PP level select lock. 0 - unlocked. 1 - locked till next reset
158  * @resvd0:		Reserved for future use
159  * @current_state:	Bit mask to control the enable(1)/disable(0) state of each feature
160  *			of the current PP level, bit 0 = BF, bit 1 = TF, bit 2-7 = reserved
161  * @reserved:		Reserved for future use
162  *
163  * This structure is used to store offsets of SST PP controls in the register bank.
164  * This is packed to the same format as defined in the specifications.
165  */
166 struct pp_control_offset {
167 	u64 perf_level :3;
168 	u64 perf_level_lock :1;
169 	u64 resvd0 :4;
170 	u64 current_state :8;
171 	u64 reserved :48;
172 } __packed;
173 
174 /**
175  * struct pp_status_offset -	Offsets for SST PP status fields
176  * @sst_pp_level:	Returns the current SST-PP level
177  * @sst_pp_lock:	Returns the lock bit setting of perf_level_lock in pp_control_offset
178  * @error_type:		Returns last error of SST-PP level change request. 0: no error,
179  *			1: level change not allowed, others: reserved
180  * @feature_state:	Bit mask to indicate the enable(1)/disable(0) state of each feature of the
181  *			current PP level. bit 0 = BF, bit 1 = TF, bit 2-7 reserved
182  * @reserved0:		Reserved for future use
183  * @feature_error_type: Returns last error of the specific feature. Three error_type bits per
184  *			feature. i.e. ERROR_TYPE[2:0] for BF, ERROR_TYPE[5:3] for TF, etc.
185  *			0x0: no error, 0x1: The specific feature is not supported by the hardware.
186  *			0x2-0x6: Reserved. 0x7: feature state change is not allowed.
187  * @reserved1:		Reserved for future use
188  *
189  * This structure is used to store offsets of SST PP status in the register bank.
190  * This is packed to the same format as defined in the specifications.
191  */
192 struct pp_status_offset {
193 	u64 sst_pp_level :3;
194 	u64 sst_pp_lock :1;
195 	u64 error_type :4;
196 	u64 feature_state :8;
197 	u64 reserved0 :16;
198 	u64 feature_error_type : 24;
199 	u64 reserved1 :8;
200 } __packed;
201 
202 /**
203  * struct perf_level -	Used to store perf level and mmio offset
204  * @mmio_offset:	mmio offset for a perf level
205  * @level:		perf level for this offset
206  *
207  * This structure is used to store the final mmio offset of each perf level from the
208  * SST base mmio offset.
209  */
210 struct perf_level {
211 	int mmio_offset;
212 	int level;
213 };
214 
215 /**
216  * struct tpmi_per_power_domain_info -	Store per power_domain SST info
217  * @package_id:		Package id for this power_domain
218  * @power_domain_id:	Power domain id. Each entry from the SST-TPMI instance is a power_domain.
219  * @max_level:		Max PP level possible for this power_domain
220  * @ratio_unit:		Ratio unit for converting to MHz
221  * @avx_levels:		Number of AVX levels
222  * @pp_block_size:	Block size from PP header
223  * @sst_header:		Store SST header for this power_domain
224  * @cp_header:		Store SST-CP header for this power_domain
225  * @pp_header:		Store SST-PP header for this power_domain
226  * @perf_levels:	Pointer to each perf level to map level to mmio offset
227  * @feature_offsets:	Store feature offsets for each PP-level
228  * @control_offset:	Store the control offset for each PP-level
229  * @status_offset:	Store the status offset for each PP-level
230  * @sst_base:		Mapped SST base IO memory
231  * @auxdev:		Auxiliary device instance that enumerated this instance
232  * @saved_sst_cp_control: SST-CP control configuration saved on suspend and restored on resume
233  * @saved_clos_configs:	SST-CP CLOS configuration saved on suspend and restored on resume
234  * @saved_clos_assocs:	SST-CP CLOS association saved on suspend and restored on resume
235  * @saved_pp_control:	SST-PP control information saved on suspend and restored on resume
236  *
237  * This structure is used to store complete SST information for a power_domain. This information
238  * is used to process read/write requests for any SST IOCTL. Each physical CPU package can have
239  * multiple power_domains. Each power domain has its own SST information and controls.
240  */
241 struct tpmi_per_power_domain_info {
242 	int package_id;
243 	int power_domain_id;
244 	int max_level;
245 	int ratio_unit;
246 	int avx_levels;
247 	int pp_block_size;
248 	struct sst_header sst_header;
249 	struct cp_header cp_header;
250 	struct pp_header pp_header;
251 	struct perf_level *perf_levels;
252 	struct feature_offset feature_offsets;
253 	struct pp_control_offset control_offset;
254 	struct pp_status_offset status_offset;
255 	void __iomem *sst_base;
256 	struct auxiliary_device *auxdev;
257 	u64 saved_sst_cp_control;
258 	u64 saved_clos_configs[4];
259 	u64 saved_clos_assocs[4];
260 	u64 saved_pp_control;
261 };
262 
263 /**
264  * struct tpmi_sst_struct -	Store sst info for a package
265  * @package_id:			Package id for this aux device instance
266  * @number_of_power_domains:	Number of power_domains pointed by power_domain_info pointer
267  * @power_domain_info:		Pointer to power domains information
268  *
269  * This structure is used to store full SST information for a package.
270  * Each package has a unique OOB PCI device, which enumerates TPMI.
271  * Each package can have multiple power_domains.
272  */
273 struct tpmi_sst_struct {
274 	int package_id;
275 	int number_of_power_domains;
276 	struct tpmi_per_power_domain_info *power_domain_info;
277 };
278 
279 /**
280  * struct tpmi_sst_common_struct -	Store all SST instances
281  * @max_index:		Maximum package index currently present
282  * @sst_inst:		Pointer to per package instance
283  *
284  * Stores every SST Package instance.
285  */
286 struct tpmi_sst_common_struct {
287 	int max_index;
288 	struct tpmi_sst_struct **sst_inst;
289 };
290 
291 /*
292  * Each IOCTL request is processed under this lock. Also used to protect
293  * registration functions and common data structures.
294  */
295 static DEFINE_MUTEX(isst_tpmi_dev_lock);
296 
297 /* Usage count to track the number of TPMI SST instances registered to this core. */
298 static int isst_core_usage_count;
299 
300 /* Stores complete SST information for every package and power_domain */
301 static struct tpmi_sst_common_struct isst_common;
302 
303 #define SST_MAX_AVX_LEVELS	3
304 
305 #define SST_PP_OFFSET_0		8
306 #define SST_PP_OFFSET_1		16
307 #define SST_PP_OFFSET_SIZE	8
308 
309 static int sst_add_perf_profiles(struct auxiliary_device *auxdev,
310 				 struct tpmi_per_power_domain_info *pd_info,
311 				 int levels)
312 {
313 	u64 perf_level_offsets;
314 	int i;
315 
316 	pd_info->perf_levels = devm_kcalloc(&auxdev->dev, levels,
317 					    sizeof(struct perf_level),
318 					    GFP_KERNEL);
319 	if (!pd_info->perf_levels)
320 		return 0;
321 
322 	pd_info->ratio_unit = pd_info->pp_header.ratio_unit;
323 	pd_info->avx_levels = SST_MAX_AVX_LEVELS;
324 	pd_info->pp_block_size = pd_info->pp_header.block_size;
325 
326 	/* Read PP Offset 0: Get feature offsets within a PP level */
327 	*((u64 *)&pd_info->feature_offsets) = readq(pd_info->sst_base +
328 						    pd_info->sst_header.pp_offset +
329 						    SST_PP_OFFSET_0);
330 
331 	perf_level_offsets = readq(pd_info->sst_base + pd_info->sst_header.pp_offset +
332 				   SST_PP_OFFSET_1);
333 
334 	for (i = 0; i < levels; ++i) {
335 		u64 offset;
336 
337 		offset = perf_level_offsets & (0xffULL << (i * SST_PP_OFFSET_SIZE));
338 		offset >>= (i * 8);
339 		offset &= 0xff;
340 		offset *= 8; /* Convert to byte from QWORD offset */
341 		pd_info->perf_levels[i].mmio_offset = pd_info->sst_header.pp_offset + offset;
342 	}
343 
344 	return 0;
345 }
346 
347 static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domain_info *pd_info)
348 {
349 	int i, mask, levels;
350 
351 	*((u64 *)&pd_info->sst_header) = readq(pd_info->sst_base);
352 	pd_info->sst_header.cp_offset *= 8;
353 	pd_info->sst_header.pp_offset *= 8;
354 
355 	if (pd_info->sst_header.interface_version != ISST_HEADER_VERSION) {
356 		dev_err(&auxdev->dev, "SST: Unsupported version:%x\n",
357 			pd_info->sst_header.interface_version);
358 		return -ENODEV;
359 	}
360 
361 	/* Read SST CP Header */
362 	*((u64 *)&pd_info->cp_header) = readq(pd_info->sst_base + pd_info->sst_header.cp_offset);
363 
364 	/* Read PP header */
365 	*((u64 *)&pd_info->pp_header) = readq(pd_info->sst_base + pd_info->sst_header.pp_offset);
366 
367 	/* Force level_en_mask level 0 */
368 	pd_info->pp_header.level_en_mask |= 0x01;
369 
370 	mask = 0x01;
371 	levels = 0;
372 	for (i = 0; i < 8; ++i) {
373 		if (pd_info->pp_header.level_en_mask & mask)
374 			levels = i;
375 		mask <<= 1;
376 	}
377 	pd_info->max_level = levels;
378 	sst_add_perf_profiles(auxdev, pd_info, levels + 1);
379 
380 	return 0;
381 }
382 
383 /*
384  * Map a package and power_domain id to the SST information structure unique to a power_domain.
385  * The caller must hold isst_tpmi_dev_lock.
386  */
387 static struct tpmi_per_power_domain_info *get_instance(int pkg_id, int power_domain_id)
388 {
389 	struct tpmi_per_power_domain_info *power_domain_info;
390 	struct tpmi_sst_struct *sst_inst;
391 
392 	if (pkg_id < 0 || pkg_id > isst_common.max_index ||
393 	    pkg_id >= topology_max_packages())
394 		return NULL;
395 
396 	sst_inst = isst_common.sst_inst[pkg_id];
397 	if (!sst_inst)
398 		return NULL;
399 
400 	if (power_domain_id < 0 || power_domain_id >= sst_inst->number_of_power_domains)
401 		return NULL;
402 
403 	power_domain_info = &sst_inst->power_domain_info[power_domain_id];
404 
405 	if (power_domain_info && !power_domain_info->sst_base)
406 		return NULL;
407 
408 	return power_domain_info;
409 }
410 
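/*
 * Dynamic SST features can only be used when HWP is enabled. Bit 0 of
 * MSR_PM_ENABLE reflects the HWP enable state, so a cleared bit means the
 * dynamic SST features must be treated as disabled.
 */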
411 static bool disable_dynamic_sst_features(void)
412 {
413 	u64 value;
414 
415 	rdmsrl(MSR_PM_ENABLE, value);
416 	return !(value & 0x1);
417 }
418 
419 #define _read_cp_info(name_str, name, offset, start, width, mult_factor)\
420 {\
421 	u64 val, mask;\
422 	\
423 	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\
424 			(offset));\
425 	mask = GENMASK_ULL((start + width - 1), start);\
426 	val &= mask; \
427 	val >>= start;\
428 	name = (val * mult_factor);\
429 }
430 
431 #define _write_cp_info(name_str, name, offset, start, width, div_factor)\
432 {\
433 	u64 val, mask;\
434 	\
435 	val = readq(power_domain_info->sst_base +\
436 		    power_domain_info->sst_header.cp_offset + (offset));\
437 	mask = GENMASK_ULL((start + width - 1), start);\
438 	val &= ~mask;\
439 	val |= (name / div_factor) << start;\
440 	writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\
441 		(offset));\
442 }
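
/*
 * Worked example (illustrative): reading the CLOS 0 minimum ratio uses the
 * register at cp_offset + SST_CLOS_CONFIG_0_OFFSET (24) with start 8 and
 * width 8, so the macro masks with GENMASK_ULL(15, 8), shifts right by 8 and
 * multiplies by the caller supplied factor; a raw ratio of 8 read with
 * SST_MUL_FACTOR_FREQ (100) becomes 800 MHz. The write variant is the
 * inverse: divide by the factor, clear the field and OR the new value in
 * before writing the qword back.
 */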
443 
444 #define	SST_CP_CONTROL_OFFSET	8
445 #define	SST_CP_STATUS_OFFSET	16
446 
447 #define SST_CP_ENABLE_START		0
448 #define SST_CP_ENABLE_WIDTH		1
449 
450 #define SST_CP_PRIORITY_TYPE_START	1
451 #define SST_CP_PRIORITY_TYPE_WIDTH	1
452 
453 static long isst_if_core_power_state(void __user *argp)
454 {
455 	struct tpmi_per_power_domain_info *power_domain_info;
456 	struct isst_core_power core_power;
457 
458 	if (disable_dynamic_sst_features())
459 		return -EFAULT;
460 
461 	if (copy_from_user(&core_power, argp, sizeof(core_power)))
462 		return -EFAULT;
463 
464 	power_domain_info = get_instance(core_power.socket_id, core_power.power_domain_id);
465 	if (!power_domain_info)
466 		return -EINVAL;
467 
468 	if (core_power.get_set) {
469 		_write_cp_info("cp_enable", core_power.enable, SST_CP_CONTROL_OFFSET,
470 			       SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE)
471 		_write_cp_info("cp_prio_type", core_power.priority_type, SST_CP_CONTROL_OFFSET,
472 			       SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH,
473 			       SST_MUL_FACTOR_NONE)
474 	} else {
475 		/* get */
476 		_read_cp_info("cp_enable", core_power.enable, SST_CP_STATUS_OFFSET,
477 			      SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE)
478 		_read_cp_info("cp_prio_type", core_power.priority_type, SST_CP_STATUS_OFFSET,
479 			      SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH,
480 			      SST_MUL_FACTOR_NONE)
481 		core_power.supported = !!(power_domain_info->sst_header.cap_mask & BIT(0));
482 		if (copy_to_user(argp, &core_power, sizeof(core_power)))
483 			return -EFAULT;
484 	}
485 
486 	return 0;
487 }
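
/*
 * Illustrative user-space sketch (same assumptions as the sketch near the
 * top of this file): enabling SST-CP in ordered priority mode on socket 0,
 * power domain 0 could look roughly like:
 *
 *	struct isst_core_power core_power = {
 *		.get_set = 1,
 *		.socket_id = 0,
 *		.power_domain_id = 0,
 *		.enable = 1,
 *		.priority_type = 1,
 *	};
 *
 *	ioctl(fd, ISST_IF_CORE_POWER_STATE, &core_power);
 */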
488 
489 #define SST_CLOS_CONFIG_0_OFFSET	24
490 
491 #define SST_CLOS_CONFIG_PRIO_START	4
492 #define SST_CLOS_CONFIG_PRIO_WIDTH	4
493 
494 #define SST_CLOS_CONFIG_MIN_START	8
495 #define SST_CLOS_CONFIG_MIN_WIDTH	8
496 
497 #define SST_CLOS_CONFIG_MAX_START	16
498 #define SST_CLOS_CONFIG_MAX_WIDTH	8
499 
500 static long isst_if_clos_param(void __user *argp)
501 {
502 	struct tpmi_per_power_domain_info *power_domain_info;
503 	struct isst_clos_param clos_param;
504 
505 	if (copy_from_user(&clos_param, argp, sizeof(clos_param)))
506 		return -EFAULT;
507 
508 	power_domain_info = get_instance(clos_param.socket_id, clos_param.power_domain_id);
509 	if (!power_domain_info)
510 		return -EINVAL;
511 
512 	if (clos_param.get_set) {
513 		_write_cp_info("clos.min_freq", clos_param.min_freq_mhz,
514 			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
515 			       SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH,
516 			       SST_MUL_FACTOR_FREQ);
517 		_write_cp_info("clos.max_freq", clos_param.max_freq_mhz,
518 			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
519 			       SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH,
520 			       SST_MUL_FACTOR_FREQ);
521 		_write_cp_info("clos.prio", clos_param.prop_prio,
522 			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
523 			       SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH,
524 			       SST_MUL_FACTOR_NONE);
525 	} else {
526 		/* get */
527 		_read_cp_info("clos.min_freq", clos_param.min_freq_mhz,
528 				(SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
529 				SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH,
530 				SST_MUL_FACTOR_FREQ)
531 		_read_cp_info("clos.max_freq", clos_param.max_freq_mhz,
532 				(SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
533 				SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH,
534 				SST_MUL_FACTOR_FREQ)
535 		_read_cp_info("clos.prio", clos_param.prop_prio,
536 				(SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
537 				SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH,
538 				SST_MUL_FACTOR_NONE)
539 
540 		if (copy_to_user(argp, &clos_param, sizeof(clos_param)))
541 			return -EFAULT;
542 	}
543 
544 	return 0;
545 }
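
/*
 * Worked example (illustrative): for CLOS 2 the configuration register lives
 * at cp_offset + SST_CLOS_CONFIG_0_OFFSET + 2 * SST_REG_SIZE = cp_offset + 40.
 * A min_freq_mhz request of 800 is divided by SST_MUL_FACTOR_FREQ (100) and
 * the resulting ratio of 8 is written to bits [15:8] of that register.
 */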
546 
547 #define SST_CLOS_ASSOC_0_OFFSET		56
548 #define SST_CLOS_ASSOC_CPUS_PER_REG	16
549 #define SST_CLOS_ASSOC_BITS_PER_CPU	4
550 
551 static long isst_if_clos_assoc(void __user *argp)
552 {
553 	struct isst_if_clos_assoc_cmds assoc_cmds;
554 	unsigned char __user *ptr;
555 	int i;
556 
557 	/* Each multi command has u16 command count as the first field */
558 	if (copy_from_user(&assoc_cmds, argp, sizeof(assoc_cmds)))
559 		return -EFAULT;
560 
561 	if (!assoc_cmds.cmd_count || assoc_cmds.cmd_count > ISST_IF_CMD_LIMIT)
562 		return -EINVAL;
563 
564 	ptr = argp + offsetof(struct isst_if_clos_assoc_cmds, assoc_info);
565 	for (i = 0; i < assoc_cmds.cmd_count; ++i) {
566 		struct tpmi_per_power_domain_info *power_domain_info;
567 		struct isst_if_clos_assoc clos_assoc;
568 		int punit_id, punit_cpu_no, pkg_id;
569 		struct tpmi_sst_struct *sst_inst;
570 		int offset, shift, cpu;
571 		u64 val, mask, clos;
572 
573 		if (copy_from_user(&clos_assoc, ptr, sizeof(clos_assoc)))
574 			return -EFAULT;
575 
576 		if (clos_assoc.socket_id > topology_max_packages())
577 			return -EINVAL;
578 
579 		cpu = clos_assoc.logical_cpu;
580 		clos = clos_assoc.clos;
581 
582 		if (assoc_cmds.punit_cpu_map)
583 			punit_cpu_no = cpu;
584 		else
585 			return -EOPNOTSUPP;
586 
587 		if (punit_cpu_no < 0)
588 			return -EINVAL;
589 
590 		punit_id = clos_assoc.power_domain_id;
591 		pkg_id = clos_assoc.socket_id;
592 
593 		sst_inst = isst_common.sst_inst[pkg_id];
594 
595 		if (clos_assoc.power_domain_id > sst_inst->number_of_power_domains)
596 			return -EINVAL;
597 
598 		power_domain_info = &sst_inst->power_domain_info[punit_id];
599 
600 		offset = SST_CLOS_ASSOC_0_OFFSET +
601 				(punit_cpu_no / SST_CLOS_ASSOC_CPUS_PER_REG) * SST_REG_SIZE;
602 		shift = punit_cpu_no % SST_CLOS_ASSOC_CPUS_PER_REG;
603 		shift *= SST_CLOS_ASSOC_BITS_PER_CPU;
604 
605 		val = readq(power_domain_info->sst_base +
606 				power_domain_info->sst_header.cp_offset + offset);
607 		if (assoc_cmds.get_set) {
608 			mask = GENMASK_ULL((shift + SST_CLOS_ASSOC_BITS_PER_CPU - 1), shift);
609 			val &= ~mask;
610 			val |= (clos << shift);
611 			writeq(val, power_domain_info->sst_base +
612 					power_domain_info->sst_header.cp_offset + offset);
613 		} else {
614 			val >>= shift;
615 			clos_assoc.clos = val & GENMASK(SST_CLOS_ASSOC_BITS_PER_CPU - 1, 0);
616 			if (copy_to_user(ptr, &clos_assoc, sizeof(clos_assoc)))
617 				return -EFAULT;
618 		}
619 
620 		ptr += sizeof(clos_assoc);
621 	}
622 
623 	return 0;
624 }
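
/*
 * Worked example (illustrative): associating punit CPU 21 with a CLOS uses
 * the register at cp_offset + SST_CLOS_ASSOC_0_OFFSET + (21 / 16) *
 * SST_REG_SIZE = cp_offset + 64, and the 4 bit field at shift
 * (21 % 16) * 4 = 20, i.e. bits [23:20] of that qword.
 */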
625 
626 #define _read_pp_info(name_str, name, offset, start, width, mult_factor)\
627 {\
628 	u64 val, _mask;\
629 	\
630 	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
631 		    (offset));\
632 	_mask = GENMASK_ULL((start + width - 1), start);\
633 	val &= _mask;\
634 	val >>= start;\
635 	name = (val * mult_factor);\
636 }
637 
638 #define _write_pp_info(name_str, name, offset, start, width, div_factor)\
639 {\
640 	u64 val, _mask;\
641 	\
642 	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
643 		    (offset));\
644 	_mask = GENMASK_ULL((start + width - 1), start);\
645 	val &= ~_mask;\
646 	val |= (name / div_factor) << start;\
647 	writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
648 	      (offset));\
649 }
650 
651 #define _read_bf_level_info(name_str, name, level, offset, start, width, mult_factor)\
652 {\
653 	u64 val, _mask;\
654 	\
655 	val = readq(power_domain_info->sst_base +\
656 		    power_domain_info->perf_levels[level].mmio_offset +\
657 		(power_domain_info->feature_offsets.bf_offset * 8) + (offset));\
658 	_mask = GENMASK_ULL((start + width - 1), start);\
659 	val &= _mask; \
660 	val >>= start;\
661 	name = (val * mult_factor);\
662 }
663 
664 #define _read_tf_level_info(name_str, name, level, offset, start, width, mult_factor)\
665 {\
666 	u64 val, _mask;\
667 	\
668 	val = readq(power_domain_info->sst_base +\
669 		    power_domain_info->perf_levels[level].mmio_offset +\
670 		(power_domain_info->feature_offsets.tf_offset * 8) + (offset));\
671 	_mask = GENMASK_ULL((start + width - 1), start);\
672 	val &= _mask; \
673 	val >>= start;\
674 	name = (val * mult_factor);\
675 }
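
/*
 * For the per-level helpers above (and _read_pp_level_info() below), the
 * qword address is composed as:
 *
 *	sst_base + perf_levels[level].mmio_offset +
 *		   feature_offsets.{pp,bf,tf}_offset * 8 + offset
 *
 * i.e. the PP level block located by sst_add_perf_profiles(), plus the
 * per-feature bank offset converted from qwords to bytes, plus the register
 * offset within that bank.
 */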
676 
677 #define SST_PP_STATUS_OFFSET	32
678 
679 #define SST_PP_LEVEL_START	0
680 #define SST_PP_LEVEL_WIDTH	3
681 
682 #define SST_PP_LOCK_START	3
683 #define SST_PP_LOCK_WIDTH	1
684 
685 #define SST_PP_FEATURE_STATE_START	8
686 #define SST_PP_FEATURE_STATE_WIDTH	8
687 
688 #define SST_BF_FEATURE_SUPPORTED_START	12
689 #define SST_BF_FEATURE_SUPPORTED_WIDTH	1
690 
691 #define SST_TF_FEATURE_SUPPORTED_START	12
692 #define SST_TF_FEATURE_SUPPORTED_WIDTH	1
693 
694 static int isst_if_get_perf_level(void __user *argp)
695 {
696 	struct isst_perf_level_info perf_level;
697 	struct tpmi_per_power_domain_info *power_domain_info;
698 
699 	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
700 		return -EFAULT;
701 
702 	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
703 	if (!power_domain_info)
704 		return -EINVAL;
705 
706 	perf_level.max_level = power_domain_info->max_level;
707 	perf_level.level_mask = power_domain_info->pp_header.allowed_level_mask;
708 	perf_level.feature_rev = power_domain_info->pp_header.feature_rev;
709 	_read_pp_info("current_level", perf_level.current_level, SST_PP_STATUS_OFFSET,
710 		      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)
711 	_read_pp_info("locked", perf_level.locked, SST_PP_STATUS_OFFSET,
712 		      SST_PP_LOCK_START, SST_PP_LOCK_WIDTH, SST_MUL_FACTOR_NONE)
713 	_read_pp_info("feature_state", perf_level.feature_state, SST_PP_STATUS_OFFSET,
714 		      SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, SST_MUL_FACTOR_NONE)
715 	perf_level.enabled = !!(power_domain_info->sst_header.cap_mask & BIT(1));
716 
717 	_read_bf_level_info("bf_support", perf_level.sst_bf_support, 0, 0,
718 			    SST_BF_FEATURE_SUPPORTED_START, SST_BF_FEATURE_SUPPORTED_WIDTH,
719 			    SST_MUL_FACTOR_NONE);
720 	_read_tf_level_info("tf_support", perf_level.sst_tf_support, 0, 0,
721 			    SST_TF_FEATURE_SUPPORTED_START, SST_TF_FEATURE_SUPPORTED_WIDTH,
722 			    SST_MUL_FACTOR_NONE);
723 
724 	if (copy_to_user(argp, &perf_level, sizeof(perf_level)))
725 		return -EFAULT;
726 
727 	return 0;
728 }
729 
730 #define SST_PP_CONTROL_OFFSET		24
731 #define SST_PP_LEVEL_CHANGE_TIME_MS	5
732 #define SST_PP_LEVEL_CHANGE_RETRY_COUNT	3
733 
734 static int isst_if_set_perf_level(void __user *argp)
735 {
736 	struct isst_perf_level_control perf_level;
737 	struct tpmi_per_power_domain_info *power_domain_info;
738 	int level, retry = 0;
739 
740 	if (disable_dynamic_sst_features())
741 		return -EFAULT;
742 
743 	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
744 		return -EFAULT;
745 
746 	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
747 	if (!power_domain_info)
748 		return -EINVAL;
749 
750 	if (!(power_domain_info->pp_header.allowed_level_mask & BIT(perf_level.level)))
751 		return -EINVAL;
752 
753 	_read_pp_info("current_level", level, SST_PP_STATUS_OFFSET,
754 		      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)
755 
756 	/* If the requested new level is same as the current level, reject */
757 	if (perf_level.level == level)
758 		return -EINVAL;
759 
760 	_write_pp_info("perf_level", perf_level.level, SST_PP_CONTROL_OFFSET,
761 		       SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)
762 
763 	/* It is possible that firmware is busy (although unlikely), so retry */
764 	do {
765 		/* Give time to FW to process */
766 		msleep(SST_PP_LEVEL_CHANGE_TIME_MS);
767 
768 		_read_pp_info("current_level", level, SST_PP_STATUS_OFFSET,
769 			      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)
770 
771 		/* Check if the new level is active */
772 		if (perf_level.level == level)
773 			break;
774 
775 	} while (retry++ < SST_PP_LEVEL_CHANGE_RETRY_COUNT);
776 
777 	/* If the level change didn't happen, return fault */
778 	if (perf_level.level != level)
779 		return -EFAULT;
780 
781 	/* Reset the feature state on level change */
782 	_write_pp_info("perf_feature", 0, SST_PP_CONTROL_OFFSET,
783 		       SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH,
784 		       SST_MUL_FACTOR_NONE)
785 
786 	/* Give time to FW to process */
787 	msleep(SST_PP_LEVEL_CHANGE_TIME_MS);
788 
789 	return 0;
790 }
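
/*
 * Illustrative user-space sketch (same assumptions as above): switching
 * socket 0, power domain 0 to SST-PP level 3 could look roughly like:
 *
 *	struct isst_perf_level_control ctrl = {
 *		.socket_id = 0,
 *		.power_domain_id = 0,
 *		.level = 3,
 *	};
 *
 *	ioctl(fd, ISST_IF_PERF_SET_LEVEL, &ctrl);
 *
 * The request is rejected when the level is not present in
 * allowed_level_mask or when it matches the currently active level.
 */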
791 
792 static int isst_if_set_perf_feature(void __user *argp)
793 {
794 	struct isst_perf_feature_control perf_feature;
795 	struct tpmi_per_power_domain_info *power_domain_info;
796 
797 	if (disable_dynamic_sst_features())
798 		return -EFAULT;
799 
800 	if (copy_from_user(&perf_feature, argp, sizeof(perf_feature)))
801 		return -EFAULT;
802 
803 	power_domain_info = get_instance(perf_feature.socket_id, perf_feature.power_domain_id);
804 	if (!power_domain_info)
805 		return -EINVAL;
806 
807 	_write_pp_info("perf_feature", perf_feature.feature, SST_PP_CONTROL_OFFSET,
808 		       SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH,
809 		       SST_MUL_FACTOR_NONE)
810 
811 	return 0;
812 }
813 
814 #define _read_pp_level_info(name_str, name, level, offset, start, width, mult_factor)\
815 {\
816 	u64 val, _mask;\
817 	\
818 	val = readq(power_domain_info->sst_base +\
819 		    power_domain_info->perf_levels[level].mmio_offset +\
820 		(power_domain_info->feature_offsets.pp_offset * 8) + (offset));\
821 	_mask = GENMASK_ULL((start + width - 1), start);\
822 	val &= _mask; \
823 	val >>= start;\
824 	name = (val * mult_factor);\
825 }
826 
827 #define SST_PP_INFO_0_OFFSET	0
828 #define SST_PP_INFO_1_OFFSET	8
829 #define SST_PP_INFO_2_OFFSET	16
830 #define SST_PP_INFO_3_OFFSET	24
831 
832 /* SST_PP_INFO_4_OFFSET to SST_PP_INFO_9_OFFSET are trl levels */
833 #define SST_PP_INFO_4_OFFSET	32
834 
835 #define SST_PP_INFO_10_OFFSET	80
836 #define SST_PP_INFO_11_OFFSET	88
837 
838 #define SST_PP_P1_SSE_START	0
839 #define SST_PP_P1_SSE_WIDTH	8
840 
841 #define SST_PP_P1_AVX2_START	8
842 #define SST_PP_P1_AVX2_WIDTH	8
843 
844 #define SST_PP_P1_AVX512_START	16
845 #define SST_PP_P1_AVX512_WIDTH	8
846 
847 #define SST_PP_P1_AMX_START	24
848 #define SST_PP_P1_AMX_WIDTH	8
849 
850 #define SST_PP_TDP_START	32
851 #define SST_PP_TDP_WIDTH	15
852 
853 #define SST_PP_T_PROCHOT_START	47
854 #define SST_PP_T_PROCHOT_WIDTH	8
855 
856 #define SST_PP_MAX_MEMORY_FREQ_START	55
857 #define SST_PP_MAX_MEMORY_FREQ_WIDTH	7
858 
859 #define SST_PP_COOLING_TYPE_START	62
860 #define SST_PP_COOLING_TYPE_WIDTH	2
861 
862 #define SST_PP_TRL_0_RATIO_0_START	0
863 #define SST_PP_TRL_0_RATIO_0_WIDTH	8
864 
865 #define SST_PP_TRL_CORES_BUCKET_0_START	0
866 #define SST_PP_TRL_CORES_BUCKET_0_WIDTH	8
867 
868 #define SST_PP_CORE_RATIO_P0_START	0
869 #define SST_PP_CORE_RATIO_P0_WIDTH	8
870 
871 #define SST_PP_CORE_RATIO_P1_START	8
872 #define SST_PP_CORE_RATIO_P1_WIDTH	8
873 
874 #define SST_PP_CORE_RATIO_PN_START	16
875 #define SST_PP_CORE_RATIO_PN_WIDTH	8
876 
877 #define SST_PP_CORE_RATIO_PM_START	24
878 #define SST_PP_CORE_RATIO_PM_WIDTH	8
879 
880 #define SST_PP_CORE_RATIO_P0_FABRIC_START	32
881 #define SST_PP_CORE_RATIO_P0_FABRIC_WIDTH	8
882 
883 #define SST_PP_CORE_RATIO_P1_FABRIC_START	40
884 #define SST_PP_CORE_RATIO_P1_FABRIC_WIDTH	8
885 
886 #define SST_PP_CORE_RATIO_PM_FABRIC_START	48
887 #define SST_PP_CORE_RATIO_PM_FABRIC_WIDTH	8
888 
889 static int isst_if_get_perf_level_info(void __user *argp)
890 {
891 	struct isst_perf_level_data_info perf_level;
892 	struct tpmi_per_power_domain_info *power_domain_info;
893 	int i, j;
894 
895 	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
896 		return -EFAULT;
897 
898 	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
899 	if (!power_domain_info)
900 		return -EINVAL;
901 
902 	if (perf_level.level > power_domain_info->max_level)
903 		return -EINVAL;
904 
905 	if (!(power_domain_info->pp_header.level_en_mask & BIT(perf_level.level)))
906 		return -EINVAL;
907 
908 	_read_pp_level_info("tdp_ratio", perf_level.tdp_ratio, perf_level.level,
909 			    SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH,
910 			    SST_MUL_FACTOR_NONE)
911 	_read_pp_level_info("base_freq_mhz", perf_level.base_freq_mhz, perf_level.level,
912 			    SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH,
913 			    SST_MUL_FACTOR_FREQ)
914 	_read_pp_level_info("base_freq_avx2_mhz", perf_level.base_freq_avx2_mhz, perf_level.level,
915 			    SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX2_START, SST_PP_P1_AVX2_WIDTH,
916 			    SST_MUL_FACTOR_FREQ)
917 	_read_pp_level_info("base_freq_avx512_mhz", perf_level.base_freq_avx512_mhz,
918 			    perf_level.level, SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX512_START,
919 			    SST_PP_P1_AVX512_WIDTH, SST_MUL_FACTOR_FREQ)
920 	_read_pp_level_info("base_freq_amx_mhz", perf_level.base_freq_amx_mhz, perf_level.level,
921 			    SST_PP_INFO_0_OFFSET, SST_PP_P1_AMX_START, SST_PP_P1_AMX_WIDTH,
922 			    SST_MUL_FACTOR_FREQ)
923 
924 	_read_pp_level_info("thermal_design_power_w", perf_level.thermal_design_power_w,
925 			    perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_TDP_START,
926 			    SST_PP_TDP_WIDTH, SST_MUL_FACTOR_NONE)
927 	perf_level.thermal_design_power_w /= 8; /* units are in 1/8th watt */
928 	_read_pp_level_info("tjunction_max_c", perf_level.tjunction_max_c, perf_level.level,
929 			    SST_PP_INFO_1_OFFSET, SST_PP_T_PROCHOT_START, SST_PP_T_PROCHOT_WIDTH,
930 			    SST_MUL_FACTOR_NONE)
931 	_read_pp_level_info("max_memory_freq_mhz", perf_level.max_memory_freq_mhz,
932 			    perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_MAX_MEMORY_FREQ_START,
933 			    SST_PP_MAX_MEMORY_FREQ_WIDTH, SST_MUL_FACTOR_FREQ)
934 	_read_pp_level_info("cooling_type", perf_level.cooling_type, perf_level.level,
935 			    SST_PP_INFO_1_OFFSET, SST_PP_COOLING_TYPE_START,
936 			    SST_PP_COOLING_TYPE_WIDTH, SST_MUL_FACTOR_NONE)
937 
938 	for (i = 0; i < TRL_MAX_LEVELS; ++i) {
939 		for (j = 0; j < TRL_MAX_BUCKETS; ++j)
940 			_read_pp_level_info("trl*_bucket*_freq_mhz",
941 					    perf_level.trl_freq_mhz[i][j], perf_level.level,
942 					    SST_PP_INFO_4_OFFSET + (i * SST_PP_TRL_0_RATIO_0_WIDTH),
943 					    j * SST_PP_TRL_0_RATIO_0_WIDTH,
944 					    SST_PP_TRL_0_RATIO_0_WIDTH,
945 					    SST_MUL_FACTOR_FREQ);
946 	}
947 
948 	for (i = 0; i < TRL_MAX_BUCKETS; ++i)
949 		_read_pp_level_info("bucket*_core_count", perf_level.bucket_core_counts[i],
950 				    perf_level.level, SST_PP_INFO_10_OFFSET,
951 				    SST_PP_TRL_CORES_BUCKET_0_WIDTH * i,
952 				    SST_PP_TRL_CORES_BUCKET_0_WIDTH, SST_MUL_FACTOR_NONE)
953 
954 	perf_level.max_buckets = TRL_MAX_BUCKETS;
955 	perf_level.max_trl_levels = TRL_MAX_LEVELS;
956 
957 	_read_pp_level_info("p0_freq_mhz", perf_level.p0_freq_mhz, perf_level.level,
958 			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P0_START,
959 			    SST_PP_CORE_RATIO_P0_WIDTH, SST_MUL_FACTOR_FREQ)
960 	_read_pp_level_info("p1_freq_mhz", perf_level.p1_freq_mhz, perf_level.level,
961 			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P1_START,
962 			    SST_PP_CORE_RATIO_P1_WIDTH, SST_MUL_FACTOR_FREQ)
963 	_read_pp_level_info("pn_freq_mhz", perf_level.pn_freq_mhz, perf_level.level,
964 			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PN_START,
965 			    SST_PP_CORE_RATIO_PN_WIDTH, SST_MUL_FACTOR_FREQ)
966 	_read_pp_level_info("pm_freq_mhz", perf_level.pm_freq_mhz, perf_level.level,
967 			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PM_START,
968 			    SST_PP_CORE_RATIO_PM_WIDTH, SST_MUL_FACTOR_FREQ)
969 	_read_pp_level_info("p0_fabric_freq_mhz", perf_level.p0_fabric_freq_mhz,
970 			    perf_level.level, SST_PP_INFO_11_OFFSET,
971 			    SST_PP_CORE_RATIO_P0_FABRIC_START,
972 			    SST_PP_CORE_RATIO_P0_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
973 	_read_pp_level_info("p1_fabric_freq_mhz", perf_level.p1_fabric_freq_mhz,
974 			    perf_level.level, SST_PP_INFO_11_OFFSET,
975 			    SST_PP_CORE_RATIO_P1_FABRIC_START,
976 			    SST_PP_CORE_RATIO_P1_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
977 	_read_pp_level_info("pm_fabric_freq_mhz", perf_level.pm_fabric_freq_mhz,
978 			    perf_level.level, SST_PP_INFO_11_OFFSET,
979 			    SST_PP_CORE_RATIO_PM_FABRIC_START,
980 			    SST_PP_CORE_RATIO_PM_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
981 
982 	if (copy_to_user(argp, &perf_level, sizeof(perf_level)))
983 		return -EFAULT;
984 
985 	return 0;
986 }
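
/*
 * Worked decode example (illustrative): a raw P1 SSE ratio of 20 in
 * SST_PP_INFO_0 is reported as 20 * SST_MUL_FACTOR_FREQ = 2000 MHz, and a
 * raw TDP field of 2800 in SST_PP_INFO_1 is reported as 2800 / 8 = 350 W,
 * since the hardware encodes TDP in 1/8 W units.
 */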
987 
988 #define SST_PP_FUSED_CORE_COUNT_START	0
989 #define SST_PP_FUSED_CORE_COUNT_WIDTH	8
990 
991 #define SST_PP_RSLVD_CORE_COUNT_START	8
992 #define SST_PP_RSLVD_CORE_COUNT_WIDTH	8
993 
994 #define SST_PP_RSLVD_CORE_MASK_START	0
995 #define SST_PP_RSLVD_CORE_MASK_WIDTH	64
996 
997 static int isst_if_get_perf_level_mask(void __user *argp)
998 {
999 	static struct isst_perf_level_cpu_mask cpumask;
1000 	struct tpmi_per_power_domain_info *power_domain_info;
1001 	u64 mask;
1002 
1003 	if (copy_from_user(&cpumask, argp, sizeof(cpumask)))
1004 		return -EFAULT;
1005 
1006 	power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id);
1007 	if (!power_domain_info)
1008 		return -EINVAL;
1009 
1010 	_read_pp_level_info("mask", mask, cpumask.level, SST_PP_INFO_2_OFFSET,
1011 			    SST_PP_RSLVD_CORE_MASK_START, SST_PP_RSLVD_CORE_MASK_WIDTH,
1012 			    SST_MUL_FACTOR_NONE)
1013 
1014 	cpumask.mask = mask;
1015 
1016 	if (!cpumask.punit_cpu_map)
1017 		return -EOPNOTSUPP;
1018 
1019 	if (copy_to_user(argp, &cpumask, sizeof(cpumask)))
1020 		return -EFAULT;
1021 
1022 	return 0;
1023 }
1024 
1025 #define SST_BF_INFO_0_OFFSET	0
1026 #define SST_BF_INFO_1_OFFSET	8
1027 
1028 #define SST_BF_P1_HIGH_START	13
1029 #define SST_BF_P1_HIGH_WIDTH	8
1030 
1031 #define SST_BF_P1_LOW_START	21
1032 #define SST_BF_P1_LOW_WIDTH	8
1033 
1034 #define SST_BF_T_PROHOT_START	38
1035 #define SST_BF_T_PROHOT_WIDTH	8
1036 
1037 #define SST_BF_TDP_START	46
1038 #define SST_BF_TDP_WIDTH	15
1039 
1040 static int isst_if_get_base_freq_info(void __user *argp)
1041 {
1042 	static struct isst_base_freq_info base_freq;
1043 	struct tpmi_per_power_domain_info *power_domain_info;
1044 
1045 	if (copy_from_user(&base_freq, argp, sizeof(base_freq)))
1046 		return -EFAULT;
1047 
1048 	power_domain_info = get_instance(base_freq.socket_id, base_freq.power_domain_id);
1049 	if (!power_domain_info)
1050 		return -EINVAL;
1051 
1052 	if (base_freq.level > power_domain_info->max_level)
1053 		return -EINVAL;
1054 
1055 	_read_bf_level_info("p1_high", base_freq.high_base_freq_mhz, base_freq.level,
1056 			    SST_BF_INFO_0_OFFSET, SST_BF_P1_HIGH_START, SST_BF_P1_HIGH_WIDTH,
1057 			    SST_MUL_FACTOR_FREQ)
1058 	_read_bf_level_info("p1_low", base_freq.low_base_freq_mhz, base_freq.level,
1059 			    SST_BF_INFO_0_OFFSET, SST_BF_P1_LOW_START, SST_BF_P1_LOW_WIDTH,
1060 			    SST_MUL_FACTOR_FREQ)
1061 	_read_bf_level_info("BF-TJ", base_freq.tjunction_max_c, base_freq.level,
1062 			    SST_BF_INFO_0_OFFSET, SST_BF_T_PROHOT_START, SST_BF_T_PROHOT_WIDTH,
1063 			    SST_MUL_FACTOR_NONE)
1064 	_read_bf_level_info("BF-tdp", base_freq.thermal_design_power_w, base_freq.level,
1065 			    SST_BF_INFO_0_OFFSET, SST_BF_TDP_START, SST_BF_TDP_WIDTH,
1066 			    SST_MUL_FACTOR_NONE)
1067 	base_freq.thermal_design_power_w /= 8; /*unit = 1/8th watt*/
1068 
1069 	if (copy_to_user(argp, &base_freq, sizeof(base_freq)))
1070 		return -EFAULT;
1071 
1072 	return 0;
1073 }
1074 
1075 #define P1_HI_CORE_MASK_START	0
1076 #define P1_HI_CORE_MASK_WIDTH	64
1077 
1078 static int isst_if_get_base_freq_mask(void __user *argp)
1079 {
1080 	static struct isst_perf_level_cpu_mask cpumask;
1081 	struct tpmi_per_power_domain_info *power_domain_info;
1082 	u64 mask;
1083 
1084 	if (copy_from_user(&cpumask, argp, sizeof(cpumask)))
1085 		return -EFAULT;
1086 
1087 	power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id);
1088 	if (!power_domain_info)
1089 		return -EINVAL;
1090 
1091 	_read_bf_level_info("BF-cpumask", mask, cpumask.level, SST_BF_INFO_1_OFFSET,
1092 			    P1_HI_CORE_MASK_START, P1_HI_CORE_MASK_WIDTH,
1093 			    SST_MUL_FACTOR_NONE)
1094 
1095 	cpumask.mask = mask;
1096 
1097 	if (!cpumask.punit_cpu_map)
1098 		return -EOPNOTSUPP;
1099 
1100 	if (copy_to_user(argp, &cpumask, sizeof(cpumask)))
1101 		return -EFAULT;
1102 
1103 	return 0;
1104 }
1105 
1106 static int isst_if_get_tpmi_instance_count(void __user *argp)
1107 {
1108 	struct isst_tpmi_instance_count tpmi_inst;
1109 	struct tpmi_sst_struct *sst_inst;
1110 	int i;
1111 
1112 	if (copy_from_user(&tpmi_inst, argp, sizeof(tpmi_inst)))
1113 		return -EFAULT;
1114 
1115 	if (tpmi_inst.socket_id >= topology_max_packages())
1116 		return -EINVAL;
1117 
1118 	tpmi_inst.count = isst_common.sst_inst[tpmi_inst.socket_id]->number_of_power_domains;
1119 
1120 	sst_inst = isst_common.sst_inst[tpmi_inst.socket_id];
1121 	tpmi_inst.valid_mask = 0;
1122 	for (i = 0; i < sst_inst->number_of_power_domains; ++i) {
1123 		struct tpmi_per_power_domain_info *pd_info;
1124 
1125 		pd_info = &sst_inst->power_domain_info[i];
1126 		if (pd_info->sst_base)
1127 			tpmi_inst.valid_mask |= BIT(i);
1128 	}
1129 
1130 	if (copy_to_user(argp, &tpmi_inst, sizeof(tpmi_inst)))
1131 		return -EFAULT;
1132 
1133 	return 0;
1134 }
1135 
1136 #define SST_TF_INFO_0_OFFSET	0
1137 #define SST_TF_INFO_1_OFFSET	8
1138 #define SST_TF_INFO_2_OFFSET	16
1139 
1140 #define SST_TF_MAX_LP_CLIP_RATIOS	TRL_MAX_LEVELS
1141 
1142 #define SST_TF_LP_CLIP_RATIO_0_START	16
1143 #define SST_TF_LP_CLIP_RATIO_0_WIDTH	8
1144 
1145 #define SST_TF_RATIO_0_START	0
1146 #define SST_TF_RATIO_0_WIDTH	8
1147 
1148 #define SST_TF_NUM_CORE_0_START 0
1149 #define SST_TF_NUM_CORE_0_WIDTH 8
1150 
1151 static int isst_if_get_turbo_freq_info(void __user *argp)
1152 {
1153 	static struct isst_turbo_freq_info turbo_freq;
1154 	struct tpmi_per_power_domain_info *power_domain_info;
1155 	int i, j;
1156 
1157 	if (copy_from_user(&turbo_freq, argp, sizeof(turbo_freq)))
1158 		return -EFAULT;
1159 
1160 	power_domain_info = get_instance(turbo_freq.socket_id, turbo_freq.power_domain_id);
1161 	if (!power_domain_info)
1162 		return -EINVAL;
1163 
1164 	if (turbo_freq.level > power_domain_info->max_level)
1165 		return -EINVAL;
1166 
1167 	turbo_freq.max_buckets = TRL_MAX_BUCKETS;
1168 	turbo_freq.max_trl_levels = TRL_MAX_LEVELS;
1169 	turbo_freq.max_clip_freqs = SST_TF_MAX_LP_CLIP_RATIOS;
1170 
1171 	for (i = 0; i < turbo_freq.max_clip_freqs; ++i)
1172 		_read_tf_level_info("lp_clip*", turbo_freq.lp_clip_freq_mhz[i],
1173 				    turbo_freq.level, SST_TF_INFO_0_OFFSET,
1174 				    SST_TF_LP_CLIP_RATIO_0_START +
1175 				    (i * SST_TF_LP_CLIP_RATIO_0_WIDTH),
1176 				    SST_TF_LP_CLIP_RATIO_0_WIDTH, SST_MUL_FACTOR_FREQ)
1177 
1178 	for (i = 0; i < TRL_MAX_LEVELS; ++i) {
1179 		for (j = 0; j < TRL_MAX_BUCKETS; ++j)
1180 			_read_tf_level_info("cydn*_bucket_*_trl",
1181 					    turbo_freq.trl_freq_mhz[i][j], turbo_freq.level,
1182 					    SST_TF_INFO_2_OFFSET + (i * SST_TF_RATIO_0_WIDTH),
1183 					    j * SST_TF_RATIO_0_WIDTH, SST_TF_RATIO_0_WIDTH,
1184 					    SST_MUL_FACTOR_FREQ)
1185 	}
1186 
1187 	for (i = 0; i < TRL_MAX_BUCKETS; ++i)
1188 		_read_tf_level_info("bucket_*_core_count", turbo_freq.bucket_core_counts[i],
1189 				    turbo_freq.level, SST_TF_INFO_1_OFFSET,
1190 				    SST_TF_NUM_CORE_0_WIDTH * i, SST_TF_NUM_CORE_0_WIDTH,
1191 				    SST_MUL_FACTOR_NONE)
1192 
1193 	if (copy_to_user(argp, &turbo_freq, sizeof(turbo_freq)))
1194 		return -EFAULT;
1195 
1196 	return 0;
1197 }
1198 
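/*
 * Default ioctl handler registered with the ISST common module from
 * tpmi_sst_init(). Every request is serialized under isst_tpmi_dev_lock;
 * unknown commands return -ENOTTY.
 */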
1199 static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
1200 			      unsigned long arg)
1201 {
1202 	void __user *argp = (void __user *)arg;
1203 	long ret = -ENOTTY;
1204 
1205 	mutex_lock(&isst_tpmi_dev_lock);
1206 	switch (cmd) {
1207 	case ISST_IF_COUNT_TPMI_INSTANCES:
1208 		ret = isst_if_get_tpmi_instance_count(argp);
1209 		break;
1210 	case ISST_IF_CORE_POWER_STATE:
1211 		ret = isst_if_core_power_state(argp);
1212 		break;
1213 	case ISST_IF_CLOS_PARAM:
1214 		ret = isst_if_clos_param(argp);
1215 		break;
1216 	case ISST_IF_CLOS_ASSOC:
1217 		ret = isst_if_clos_assoc(argp);
1218 		break;
1219 	case ISST_IF_PERF_LEVELS:
1220 		ret = isst_if_get_perf_level(argp);
1221 		break;
1222 	case ISST_IF_PERF_SET_LEVEL:
1223 		ret = isst_if_set_perf_level(argp);
1224 		break;
1225 	case ISST_IF_PERF_SET_FEATURE:
1226 		ret = isst_if_set_perf_feature(argp);
1227 		break;
1228 	case ISST_IF_GET_PERF_LEVEL_INFO:
1229 		ret = isst_if_get_perf_level_info(argp);
1230 		break;
1231 	case ISST_IF_GET_PERF_LEVEL_CPU_MASK:
1232 		ret = isst_if_get_perf_level_mask(argp);
1233 		break;
1234 	case ISST_IF_GET_BASE_FREQ_INFO:
1235 		ret = isst_if_get_base_freq_info(argp);
1236 		break;
1237 	case ISST_IF_GET_BASE_FREQ_CPU_MASK:
1238 		ret = isst_if_get_base_freq_mask(argp);
1239 		break;
1240 	case ISST_IF_GET_TURBO_FREQ_INFO:
1241 		ret = isst_if_get_turbo_freq_info(argp);
1242 		break;
1243 	default:
1244 		break;
1245 	}
1246 	mutex_unlock(&isst_tpmi_dev_lock);
1247 
1248 	return ret;
1249 }
1250 
1251 #define TPMI_SST_AUTO_SUSPEND_DELAY_MS	2000
1252 
1253 int tpmi_sst_dev_add(struct auxiliary_device *auxdev)
1254 {
1255 	struct intel_tpmi_plat_info *plat_info;
1256 	struct tpmi_sst_struct *tpmi_sst;
1257 	int i, ret, pkg = 0, inst = 0;
1258 	int num_resources;
1259 
1260 	plat_info = tpmi_get_platform_data(auxdev);
1261 	if (!plat_info) {
1262 		dev_err(&auxdev->dev, "No platform info\n");
1263 		return -EINVAL;
1264 	}
1265 
1266 	pkg = plat_info->package_id;
1267 	if (pkg >= topology_max_packages()) {
1268 		dev_err(&auxdev->dev, "Invalid package id :%x\n", pkg);
1269 		return -EINVAL;
1270 	}
1271 
1272 	if (isst_common.sst_inst[pkg])
1273 		return -EEXIST;
1274 
1275 	num_resources = tpmi_get_resource_count(auxdev);
1276 
1277 	if (!num_resources)
1278 		return -EINVAL;
1279 
1280 	tpmi_sst = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_sst), GFP_KERNEL);
1281 	if (!tpmi_sst)
1282 		return -ENOMEM;
1283 
1284 	tpmi_sst->power_domain_info = devm_kcalloc(&auxdev->dev, num_resources,
1285 						   sizeof(*tpmi_sst->power_domain_info),
1286 						   GFP_KERNEL);
1287 	if (!tpmi_sst->power_domain_info)
1288 		return -ENOMEM;
1289 
1290 	tpmi_sst->number_of_power_domains = num_resources;
1291 
1292 	for (i = 0; i < num_resources; ++i) {
1293 		struct resource *res;
1294 
1295 		res = tpmi_get_resource_at_index(auxdev, i);
1296 		if (!res) {
1297 			tpmi_sst->power_domain_info[i].sst_base = NULL;
1298 			continue;
1299 		}
1300 
1301 		tpmi_sst->power_domain_info[i].package_id = pkg;
1302 		tpmi_sst->power_domain_info[i].power_domain_id = i;
1303 		tpmi_sst->power_domain_info[i].auxdev = auxdev;
1304 		tpmi_sst->power_domain_info[i].sst_base = devm_ioremap_resource(&auxdev->dev, res);
1305 		if (IS_ERR(tpmi_sst->power_domain_info[i].sst_base))
1306 			return PTR_ERR(tpmi_sst->power_domain_info[i].sst_base);
1307 
1308 		ret = sst_main(auxdev, &tpmi_sst->power_domain_info[i]);
1309 		if (ret) {
1310 			devm_iounmap(&auxdev->dev, tpmi_sst->power_domain_info[i].sst_base);
1311 			tpmi_sst->power_domain_info[i].sst_base =  NULL;
1312 			continue;
1313 		}
1314 
1315 		++inst;
1316 	}
1317 
1318 	if (!inst)
1319 		return -ENODEV;
1320 
1321 	tpmi_sst->package_id = pkg;
1322 	auxiliary_set_drvdata(auxdev, tpmi_sst);
1323 
1324 	mutex_lock(&isst_tpmi_dev_lock);
1325 	if (isst_common.max_index < pkg)
1326 		isst_common.max_index = pkg;
1327 	isst_common.sst_inst[pkg] = tpmi_sst;
1328 	mutex_unlock(&isst_tpmi_dev_lock);
1329 
1330 	return 0;
1331 }
1332 EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_add, INTEL_TPMI_SST);
1333 
1334 void tpmi_sst_dev_remove(struct auxiliary_device *auxdev)
1335 {
1336 	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
1337 
1338 	mutex_lock(&isst_tpmi_dev_lock);
1339 	isst_common.sst_inst[tpmi_sst->package_id] = NULL;
1340 	mutex_unlock(&isst_tpmi_dev_lock);
1341 }
1342 EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, INTEL_TPMI_SST);
1343 
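/*
 * The SST-CP and SST-PP control settings may not be preserved while the
 * device is suspended, so the control, CLOS configuration and CLOS
 * association registers are saved here and written back in
 * tpmi_sst_dev_resume().
 */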
1344 void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev)
1345 {
1346 	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
1347 	struct tpmi_per_power_domain_info *power_domain_info = tpmi_sst->power_domain_info;
1348 	void __iomem *cp_base;
1349 
1350 	cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset;
1351 	power_domain_info->saved_sst_cp_control = readq(cp_base + SST_CP_CONTROL_OFFSET);
1352 
1353 	memcpy_fromio(power_domain_info->saved_clos_configs, cp_base + SST_CLOS_CONFIG_0_OFFSET,
1354 		      sizeof(power_domain_info->saved_clos_configs));
1355 
1356 	memcpy_fromio(power_domain_info->saved_clos_assocs, cp_base + SST_CLOS_ASSOC_0_OFFSET,
1357 		      sizeof(power_domain_info->saved_clos_assocs));
1358 
1359 	power_domain_info->saved_pp_control = readq(power_domain_info->sst_base +
1360 						    power_domain_info->sst_header.pp_offset +
1361 						    SST_PP_CONTROL_OFFSET);
1362 }
1363 EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_suspend, INTEL_TPMI_SST);
1364 
1365 void tpmi_sst_dev_resume(struct auxiliary_device *auxdev)
1366 {
1367 	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
1368 	struct tpmi_per_power_domain_info *power_domain_info = tpmi_sst->power_domain_info;
1369 	void __iomem *cp_base;
1370 
1371 	cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset;
1372 	writeq(power_domain_info->saved_sst_cp_control, cp_base + SST_CP_CONTROL_OFFSET);
1373 
1374 	memcpy_toio(cp_base + SST_CLOS_CONFIG_0_OFFSET, power_domain_info->saved_clos_configs,
1375 		    sizeof(power_domain_info->saved_clos_configs));
1376 
1377 	memcpy_toio(cp_base + SST_CLOS_ASSOC_0_OFFSET, power_domain_info->saved_clos_assocs,
1378 		    sizeof(power_domain_info->saved_clos_assocs));
1379 
1380 	writeq(power_domain_info->saved_pp_control, power_domain_info->sst_base +
1381 				power_domain_info->sst_header.pp_offset + SST_PP_CONTROL_OFFSET);
1382 }
1383 EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_resume, INTEL_TPMI_SST);
1384 
1385 #define ISST_TPMI_API_VERSION	0x02
1386 
1387 int tpmi_sst_init(void)
1388 {
1389 	struct isst_if_cmd_cb cb;
1390 	int ret = 0;
1391 
1392 	mutex_lock(&isst_tpmi_dev_lock);
1393 
1394 	if (isst_core_usage_count) {
1395 		++isst_core_usage_count;
1396 		goto init_done;
1397 	}
1398 
1399 	isst_common.sst_inst = kcalloc(topology_max_packages(),
1400 				       sizeof(*isst_common.sst_inst),
1401 				       GFP_KERNEL);
1402 	if (!isst_common.sst_inst) {
1403 		ret = -ENOMEM;
1404 		goto init_done;
1405 	}
1406 
1407 	memset(&cb, 0, sizeof(cb));
1408 	cb.cmd_size = sizeof(struct isst_if_io_reg);
1409 	cb.offset = offsetof(struct isst_if_io_regs, io_reg);
1410 	cb.cmd_callback = NULL;
1411 	cb.api_version = ISST_TPMI_API_VERSION;
1412 	cb.def_ioctl = isst_if_def_ioctl;
1413 	cb.owner = THIS_MODULE;
1414 	ret = isst_if_cdev_register(ISST_IF_DEV_TPMI, &cb);
1415 	if (ret)
1416 		kfree(isst_common.sst_inst);
1417 	else
1418 		++isst_core_usage_count;
1419 init_done:
1420 	mutex_unlock(&isst_tpmi_dev_lock);
1421 	return ret;
1422 }
1423 EXPORT_SYMBOL_NS_GPL(tpmi_sst_init, INTEL_TPMI_SST);
1424 
1425 void tpmi_sst_exit(void)
1426 {
1427 	mutex_lock(&isst_tpmi_dev_lock);
1428 	if (isst_core_usage_count)
1429 		--isst_core_usage_count;
1430 
1431 	if (!isst_core_usage_count) {
1432 		isst_if_cdev_unregister(ISST_IF_DEV_TPMI);
1433 		kfree(isst_common.sst_inst);
1434 	}
1435 	mutex_unlock(&isst_tpmi_dev_lock);
1436 }
1437 EXPORT_SYMBOL_NS_GPL(tpmi_sst_exit, INTEL_TPMI_SST);
1438 
1439 MODULE_IMPORT_NS(INTEL_TPMI);
1440 MODULE_IMPORT_NS(INTEL_TPMI_POWER_DOMAIN);
1441 
1442 MODULE_LICENSE("GPL");
1443