xref: /openbmc/linux/drivers/platform/x86/intel/pmc/core.c (revision 36db6e8484ed455bbb320d89a119378897ae991c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Core SoC Power Management Controller Driver
 *
 * Copyright (c) 2016, Intel Corporation.
 * All Rights Reserved.
 *
 * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
 *          Vishwanath Somayaji <vishwanath.somayaji@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/suspend.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
#include <asm/tsc.h>

#include "core.h"

/* Maximum number of modes supported by platforms that have low power mode capability */
const char *pmc_lpm_modes[] = {
	"S0i2.0",
	"S0i2.1",
	"S0i2.2",
	"S0i3.0",
	"S0i3.1",
	"S0i3.2",
	"S0i3.3",
	"S0i3.4",
	NULL
};

/* PKGC MSRs are common across Intel Core SoCs */
const struct pmc_bit_map msr_map[] = {
	{"Package C2",                  MSR_PKG_C2_RESIDENCY},
	{"Package C3",                  MSR_PKG_C3_RESIDENCY},
	{"Package C6",                  MSR_PKG_C6_RESIDENCY},
	{"Package C7",                  MSR_PKG_C7_RESIDENCY},
	{"Package C8",                  MSR_PKG_C8_RESIDENCY},
	{"Package C9",                  MSR_PKG_C9_RESIDENCY},
	{"Package C10",                 MSR_PKG_C10_RESIDENCY},
	{}
};

static inline u32 pmc_core_reg_read(struct pmc *pmc, int reg_offset)
{
	return readl(pmc->regbase + reg_offset);
}

static inline void pmc_core_reg_write(struct pmc *pmc, int reg_offset,
				      u32 val)
{
	writel(val, pmc->regbase + reg_offset);
}

static inline u64 pmc_core_adjust_slp_s0_step(struct pmc *pmc, u32 value)
{
	/*
	 * ADL PCH does not have the SLP_S0 counter, so the LPM residency
	 * counters, which use a 30.5 usec tick, are used as a workaround.
	 * All other client platforms have the legacy SLP_S0 residency
	 * counter, which uses the 122 usec tick.
	 */
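	/*
	 * Editorial note (not from the source): the *_x2 field appears to
	 * store twice the counter step so the 30.5 us tick can be kept as
	 * the integer 61, with GET_X2_COUNTER() presumably halving the
	 * product again. For the legacy path, a raw counter value of 1000
	 * decodes to 1000 * 122 = 122000 us of SLP_S0 residency.
	 */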
	const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;

	if (pmc->map == &adl_reg_map)
		return (u64)value * GET_X2_COUNTER((u64)lpm_adj_x2);
	else
		return (u64)value * pmc->map->slp_s0_res_counter_step;
}

static int set_etr3(struct pmc_dev *pmcdev)
{
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 reg;
	int err;

	if (!map->etr3_offset)
		return -EOPNOTSUPP;

	mutex_lock(&pmcdev->lock);

	/* check if CF9 is locked */
	reg = pmc_core_reg_read(pmc, map->etr3_offset);
	if (reg & ETR3_CF9LOCK) {
		err = -EACCES;
		goto out_unlock;
	}

	/* write CF9 global reset bit */
	reg |= ETR3_CF9GR;
	pmc_core_reg_write(pmc, map->etr3_offset, reg);

	reg = pmc_core_reg_read(pmc, map->etr3_offset);
	if (!(reg & ETR3_CF9GR)) {
		err = -EIO;
		goto out_unlock;
	}

	err = 0;

out_unlock:
	mutex_unlock(&pmcdev->lock);
	return err;
}

static umode_t etr3_is_visible(struct kobject *kobj,
			       struct attribute *attr,
			       int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 reg;

	mutex_lock(&pmcdev->lock);
	reg = pmc_core_reg_read(pmc, map->etr3_offset);
	mutex_unlock(&pmcdev->lock);

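	/* When CF9 is locked, mask out the write bits and expose the attribute read-only. */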
	return reg & ETR3_CF9LOCK ? attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
}

static ssize_t etr3_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 reg;

	if (!map->etr3_offset)
		return -EOPNOTSUPP;

	mutex_lock(&pmcdev->lock);

	reg = pmc_core_reg_read(pmc, map->etr3_offset);
	reg &= ETR3_CF9GR | ETR3_CF9LOCK;

	mutex_unlock(&pmcdev->lock);

	return sysfs_emit(buf, "0x%08x", reg);
}

155  
etr3_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t len)156  static ssize_t etr3_store(struct device *dev,
157  				  struct device_attribute *attr,
158  				  const char *buf, size_t len)
159  {
160  	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
161  	int err;
162  	u32 reg;
163  
164  	err = kstrtouint(buf, 16, &reg);
165  	if (err)
166  		return err;
167  
168  	/* allow only CF9 writes */
169  	if (reg != ETR3_CF9GR)
170  		return -EINVAL;
171  
172  	err = set_etr3(pmcdev);
173  	if (err)
174  		return err;
175  
176  	return len;
177  }
178  static DEVICE_ATTR_RW(etr3);

static struct attribute *pmc_attrs[] = {
	&dev_attr_etr3.attr,
	NULL
};

static const struct attribute_group pmc_attr_group = {
	.attrs = pmc_attrs,
	.is_visible = etr3_is_visible,
};

static const struct attribute_group *pmc_dev_groups[] = {
	&pmc_attr_group,
	NULL
};

static int pmc_core_dev_state_get(void *data, u64 *val)
{
	struct pmc *pmc = data;
	const struct pmc_reg_map *map = pmc->map;
	u32 value;

	value = pmc_core_reg_read(pmc, map->slp_s0_offset);
	*val = pmc_core_adjust_slp_s0_step(pmc, value);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");

static int pmc_core_check_read_lock_bit(struct pmc *pmc)
{
	u32 value;

	value = pmc_core_reg_read(pmc, pmc->map->pm_cfg_offset);
	return value & BIT(pmc->map->pm_read_disable_bit);
}

static void pmc_core_slps0_display(struct pmc *pmc, struct device *dev,
				   struct seq_file *s)
{
	const struct pmc_bit_map **maps = pmc->map->slps0_dbg_maps;
	const struct pmc_bit_map *map;
	int offset = pmc->map->slps0_dbg_offset;
	u32 data;

	while (*maps) {
		map = *maps;
		data = pmc_core_reg_read(pmc, offset);
		offset += 4;
		while (map->name) {
			if (dev)
				dev_info(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
					 map->name,
					 data & map->bit_mask ? "Yes" : "No");
			if (s)
				seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
					   map->name,
					   data & map->bit_mask ? "Yes" : "No");
			++map;
		}
		++maps;
	}
}

static int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
{
	int idx;

	for (idx = 0; maps[idx]; idx++)
		; /* Nothing */

	return idx;
}

static void pmc_core_lpm_display(struct pmc *pmc, struct device *dev,
				 struct seq_file *s, u32 offset, int pmc_index,
				 const char *str,
				 const struct pmc_bit_map **maps)
{
	int index, idx, len = 32, bit_mask, arr_size;
	u32 *lpm_regs;

	arr_size = pmc_core_lpm_get_arr_size(maps);
	lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL);
	if (!lpm_regs)
		return;

	for (index = 0; index < arr_size; index++) {
		lpm_regs[index] = pmc_core_reg_read(pmc, offset);
		offset += 4;
	}

	for (idx = 0; idx < arr_size; idx++) {
		if (dev)
			dev_info(dev, "\nPMC%d:LPM_%s_%d:\t0x%x\n", pmc_index, str, idx,
				 lpm_regs[idx]);
		if (s)
			seq_printf(s, "\nPMC%d:LPM_%s_%d:\t0x%x\n", pmc_index, str, idx,
				   lpm_regs[idx]);
		for (index = 0; maps[idx][index].name && index < len; index++) {
			bit_mask = maps[idx][index].bit_mask;
			if (dev)
				dev_info(dev, "PMC%d:%-30s %-30d\n", pmc_index,
					 maps[idx][index].name,
					 lpm_regs[idx] & bit_mask ? 1 : 0);
			if (s)
				seq_printf(s, "PMC%d:%-30s %-30d\n", pmc_index,
					   maps[idx][index].name,
					   lpm_regs[idx] & bit_mask ? 1 : 0);
		}
	}

	kfree(lpm_regs);
}

static bool slps0_dbg_latch;

static inline u8 pmc_core_reg_read_byte(struct pmc *pmc, int offset)
{
	return readb(pmc->regbase + offset);
}

static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
				 int pmc_index, u8 pf_reg, const struct pmc_bit_map **pf_map)
{
	seq_printf(s, "PMC%d:PCH IP: %-2d - %-32s\tState: %s\n",
		   pmc_index, ip, pf_map[idx][index].name,
		   pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
}

static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
		struct pmc *pmc = pmcdev->pmcs[i];
		const struct pmc_bit_map **maps;
		u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
		int index, iter, idx, ip = 0;

		if (!pmc)
			continue;

		maps = pmc->map->pfear_sts;
		iter = pmc->map->ppfear0_offset;

		for (index = 0; index < pmc->map->ppfear_buckets &&
		     index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
			pf_regs[index] = pmc_core_reg_read_byte(pmc, iter);

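		/*
		 * Each PPFEAR byte carries one power-gating bit per IP, eight
		 * IPs per byte, so pf_regs[index / 8] is the byte covering
		 * IP number 'index'.
		 */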
		for (idx = 0; maps[idx]; idx++) {
			for (index = 0; maps[idx][index].name &&
			     index < pmc->map->ppfear_buckets * 8; ip++, index++)
				pmc_core_display_map(s, index, idx, ip, i,
						     pf_regs[index / 8], maps);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);

/* Return the MTPMC link status; 0 means ready. */
static int pmc_core_mtpmc_link_status(struct pmc *pmc)
{
	u32 value;

	value = pmc_core_reg_read(pmc, SPT_PMC_PM_STS_OFFSET);
	return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
}

static int pmc_core_send_msg(struct pmc *pmc, u32 *addr_xram)
{
	u32 dest;
	int timeout;

	for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
		if (pmc_core_mtpmc_link_status(pmc) == 0)
			break;
		msleep(5);
	}

	if (timeout <= 0 && pmc_core_mtpmc_link_status(pmc))
		return -EBUSY;

	dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
	pmc_core_reg_write(pmc, SPT_PMC_MTPMC_OFFSET, dest);
	return 0;
}

static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_bit_map *map = pmc->map->mphy_sts;
	u32 mphy_core_reg_low, mphy_core_reg_high;
	u32 val_low, val_high;
	int index, err = 0;

	if (pmcdev->pmc_xram_read_bit) {
		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
		return 0;
	}

	mphy_core_reg_low  = (SPT_PMC_MPHY_CORE_STS_0 << 16);
	mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);

	mutex_lock(&pmcdev->lock);

	if (pmc_core_send_msg(pmc, &mphy_core_reg_low) != 0) {
		err = -EBUSY;
		goto out_unlock;
	}

	msleep(10);
	val_low = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);

	if (pmc_core_send_msg(pmc, &mphy_core_reg_high) != 0) {
		err = -EBUSY;
		goto out_unlock;
	}

	msleep(10);
	val_high = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);

	for (index = 0; index < 8 && map[index].name; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val_low ? "Not power gated" :
			   "Power gated");
	}

	for (index = 8; map[index].name; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val_high ? "Not power gated" :
			   "Power gated");
	}

out_unlock:
	mutex_unlock(&pmcdev->lock);
	return err;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);

static int pmc_core_pll_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_bit_map *map = pmc->map->pll_sts;
	u32 mphy_common_reg, val;
	int index, err = 0;

	if (pmcdev->pmc_xram_read_bit) {
		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
		return 0;
	}

	mphy_common_reg  = (SPT_PMC_MPHY_COM_STS_0 << 16);
	mutex_lock(&pmcdev->lock);

	if (pmc_core_send_msg(pmc, &mphy_common_reg) != 0) {
		err = -EBUSY;
		goto out_unlock;
	}

	/* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
	msleep(10);
	val = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);

	for (index = 0; map[index].name; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val ? "Active" : "Idle");
	}

out_unlock:
	mutex_unlock(&pmcdev->lock);
	return err;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);

int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
{
	struct pmc *pmc;
	const struct pmc_reg_map *map;
	u32 reg;
	int pmc_index, ltr_index;

	ltr_index = value;
	/*
	 * For platforms with multiple PMCs, the LTR index value given by the
	 * user is based on the contiguous indexes from the ltr_show output.
	 * The PMC index and LTR index need to be calculated from it.
	 */
	for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_index++) {
		pmc = pmcdev->pmcs[pmc_index];

		if (!pmc)
			continue;

		map = pmc->map;
		if (ltr_index <= map->ltr_ignore_max)
			break;

		/*
		 * Along with IP names, the ltr_show map includes
		 * CURRENT_PLATFORM and AGGREGATED_SYSTEM values per PMC. Take
		 * these two extra indexes into account in the ltr_index
		 * calculation, and subtract 1 so that the LTR index starts
		 * from zero for the next PMC.
		 */
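		/*
		 * Illustration with hypothetical numbers: if the first PMC
		 * has ltr_ignore_max == 2, its ltr_show rows occupy indexes
		 * 0-4 (three IPs plus the two aggregate rows), so a user
		 * value of 5 becomes index 0 on the next PMC:
		 * 5 - (2 + 2) - 1 = 0.
		 */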
		ltr_index = ltr_index - (map->ltr_ignore_max + 2) - 1;
	}

	if (pmc_index >= ARRAY_SIZE(pmcdev->pmcs) || ltr_index < 0)
		return -EINVAL;

	pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_index, ltr_index);

	mutex_lock(&pmcdev->lock);

	reg = pmc_core_reg_read(pmc, map->ltr_ignore_offset);
	if (ignore)
		reg |= BIT(ltr_index);
	else
		reg &= ~BIT(ltr_index);
	pmc_core_reg_write(pmc, map->ltr_ignore_offset, reg);

	mutex_unlock(&pmcdev->lock);

	return 0;
}

static ssize_t pmc_core_ltr_ignore_write(struct file *file,
					 const char __user *userbuf,
					 size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct pmc_dev *pmcdev = s->private;
	u32 buf_size, value;
	int err;

	buf_size = min_t(u32, count, 64);

	err = kstrtou32_from_user(userbuf, buf_size, 10, &value);
	if (err)
		return err;

	err = pmc_core_send_ltr_ignore(pmcdev, value, 1);

	return err == 0 ? count : err;
}

static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
{
	return 0;
}

static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
}

static const struct file_operations pmc_core_ltr_ignore_ops = {
	.open           = pmc_core_ltr_ignore_open,
	.read           = seq_read,
	.write          = pmc_core_ltr_ignore_write,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
{
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 fd;

	mutex_lock(&pmcdev->lock);

	if (!reset && !slps0_dbg_latch)
		goto out_unlock;

	fd = pmc_core_reg_read(pmc, map->slps0_dbg_offset);
	if (reset)
		fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS;
	else
		fd |= CNP_PMC_LATCH_SLPS0_EVENTS;
	pmc_core_reg_write(pmc, map->slps0_dbg_offset, fd);

	slps0_dbg_latch = false;

out_unlock:
	mutex_unlock(&pmcdev->lock);
}

static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;

	pmc_core_slps0_dbg_latch(pmcdev, false);
	pmc_core_slps0_display(pmcdev->pmcs[PMC_IDX_MAIN], NULL, s);
	pmc_core_slps0_dbg_latch(pmcdev, true);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);

static u32 convert_ltr_scale(u32 val)
{
	/*
	 * As per the PCIe specification supporting document
	 * ECN_LatencyTolnReporting_14Aug08.pdf, the Latency
	 * Tolerance Reporting data payload is encoded in a
	 * 3-bit scale and a 10-bit value field. Values are
	 * multiplied by the indicated scale to yield an absolute time
	 * value, expressible in a range from 1 nanosecond to
	 * 2^25*(2^10-1) = 34,326,183,936 nanoseconds.
	 *
	 * scale encoding is as follows:
	 *
	 * ----------------------------------------------
	 * |scale factor	|	Multiplier (ns)	|
	 * ----------------------------------------------
	 * |	0		|	1		|
	 * |	1		|	32		|
	 * |	2		|	1024		|
	 * |	3		|	32768		|
	 * |	4		|	1048576		|
	 * |	5		|	33554432	|
	 * |	6		|	Invalid		|
	 * |	7		|	Invalid		|
	 * ----------------------------------------------
	 */
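	/*
	 * Each scale step multiplies by 32, i.e. 1U << (5 * val). For
	 * example, a decoded LTR value of 100 with scale 2 yields
	 * 100 * 1024 = 102400 ns.
	 */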
	if (val > 5) {
		pr_warn("Invalid LTR scale factor.\n");
		return 0;
	}

	return 1U << (5 * val);
}

static int pmc_core_ltr_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	u64 decoded_snoop_ltr, decoded_non_snoop_ltr, val;
	u32 ltr_raw_data, scale;
	u16 snoop_ltr, nonsnoop_ltr;
	int i, index, ltr_index = 0;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
		struct pmc *pmc = pmcdev->pmcs[i];
		const struct pmc_bit_map *map;

		if (!pmc)
			continue;

		map = pmc->map->ltr_show_sts;
		for (index = 0; map[index].name; index++) {
			decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
			ltr_raw_data = pmc_core_reg_read(pmc,
							 map[index].bit_mask);
			snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
			nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;

			if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
				scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
				val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
				decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
			}
			if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
				scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
				val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
				decoded_snoop_ltr = val * convert_ltr_scale(scale);
			}

			seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n",
				   ltr_index, i, map[index].name, ltr_raw_data,
				   decoded_non_snoop_ltr,
				   decoded_snoop_ltr);
			ltr_index++;
		}
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);

static inline u64 adjust_lpm_residency(struct pmc *pmc, u32 offset,
				       const int lpm_adj_x2)
{
	u64 lpm_res = pmc_core_reg_read(pmc, offset);

	return GET_X2_COUNTER((u64)lpm_adj_x2 * lpm_res);
}

static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
	u32 offset = pmc->map->lpm_residency_offset;
	int i, mode;

	seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");

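	/* One 32-bit residency counter per substate, spaced 4 bytes apart. */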
	pmc_for_each_mode(i, mode, pmcdev) {
		seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode],
			   adjust_lpm_residency(pmc, offset + (4 * mode), lpm_adj_x2));
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);

static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
		struct pmc *pmc = pmcdev->pmcs[i];
		const struct pmc_bit_map **maps;
		u32 offset;

		if (!pmc)
			continue;
		maps = pmc->map->lpm_sts;
		offset = pmc->map->lpm_status_offset;
		pmc_core_lpm_display(pmc, NULL, s, offset, i, "STATUS", maps);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);

static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
		struct pmc *pmc = pmcdev->pmcs[i];
		const struct pmc_bit_map **maps;
		u32 offset;

		if (!pmc)
			continue;
		maps = pmc->map->lpm_sts;
		offset = pmc->map->lpm_live_status_offset;
		pmc_core_lpm_display(pmc, NULL, s, offset, i, "LIVE_STATUS", maps);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);

static void pmc_core_substate_req_header_show(struct seq_file *s)
{
	struct pmc_dev *pmcdev = s->private;
	int i, mode;

	seq_printf(s, "%30s |", "Element");
	pmc_for_each_mode(i, mode, pmcdev)
		seq_printf(s, " %9s |", pmc_lpm_modes[mode]);

	seq_printf(s, " %9s |\n", "Status");
}

static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_bit_map **maps = pmc->map->lpm_sts;
	const struct pmc_bit_map *map;
	const int num_maps = pmc->map->lpm_num_maps;
	u32 sts_offset = pmc->map->lpm_status_offset;
	u32 *lpm_req_regs = pmc->lpm_req_regs;
	int mp;

	/* Display the header */
	pmc_core_substate_req_header_show(s);

	/* Loop over maps */
	for (mp = 0; mp < num_maps; mp++) {
		u32 req_mask = 0;
		u32 lpm_status;
		int mode, idx, i, len = 32;

		/*
		 * Capture the requirements and create a mask so that we only
		 * show an element if it's required for at least one of the
		 * enabled low power modes
		 */
		pmc_for_each_mode(idx, mode, pmcdev)
			req_mask |= lpm_req_regs[mp + (mode * num_maps)];

		/* Get the last latched status for this map */
		lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4));

		/* Loop over elements in this map */
		map = maps[mp];
		for (i = 0; map[i].name && i < len; i++) {
			u32 bit_mask = map[i].bit_mask;

			if (!(bit_mask & req_mask))
				/*
				 * Not required for any enabled states
				 * so don't display
				 */
				continue;

			/* Display the element name in the first column */
			seq_printf(s, "%30s |", map[i].name);

			/* Loop over the enabled states and display if required */
			pmc_for_each_mode(idx, mode, pmcdev) {
				if (lpm_req_regs[mp + (mode * num_maps)] & bit_mask)
					seq_printf(s, " %9s |",
						   "Required");
				else
					seq_printf(s, " %9s |", " ");
			}

			/* In the Status column, show the last captured state of this agent */
			if (lpm_status & bit_mask)
				seq_printf(s, " %9s |", "Yes");
			else
				seq_printf(s, " %9s |", " ");

			seq_puts(s, "\n");
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs);

static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	bool c10;
	u32 reg;
	int idx, mode;

	reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
	if (reg & LPM_STS_LATCH_MODE) {
		seq_puts(s, "c10");
		c10 = false;
	} else {
		seq_puts(s, "[c10]");
		c10 = true;
	}

	pmc_for_each_mode(idx, mode, pmcdev) {
		if ((BIT(mode) & reg) && !c10)
			seq_printf(s, " [%s]", pmc_lpm_modes[mode]);
		else
			seq_printf(s, " %s", pmc_lpm_modes[mode]);
	}

	seq_puts(s, " clear\n");

	return 0;
}

static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
					     const char __user *userbuf,
					     size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	bool clear = false, c10 = false;
	unsigned char buf[8];
	int idx, m, mode;
	u32 reg;

	if (count > sizeof(buf) - 1)
		return -EINVAL;
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;
	buf[count] = '\0';

	/*
	 * Allowed strings are:
	 *	Any enabled substate, e.g. 'S0i2.0'
	 *	'c10'
	 *	'clear'
	 */
	mode = sysfs_match_string(pmc_lpm_modes, buf);

	/* Check that the string matches an enabled mode */
	pmc_for_each_mode(idx, m, pmcdev)
		if (mode == m)
			break;

	if (mode != m || mode < 0) {
		if (sysfs_streq(buf, "clear"))
			clear = true;
		else if (sysfs_streq(buf, "c10"))
			c10 = true;
		else
			return -EINVAL;
	}

	if (clear) {
		mutex_lock(&pmcdev->lock);

		reg = pmc_core_reg_read(pmc, pmc->map->etr3_offset);
		reg |= ETR3_CLEAR_LPM_EVENTS;
		pmc_core_reg_write(pmc, pmc->map->etr3_offset, reg);

		mutex_unlock(&pmcdev->lock);

		return count;
	}

	if (c10) {
		mutex_lock(&pmcdev->lock);

		reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
		reg &= ~LPM_STS_LATCH_MODE;
		pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);

		mutex_unlock(&pmcdev->lock);

		return count;
	}

	/*
	 * For LPM mode latching we set the latch enable bit and selected mode
	 * and clear everything else.
	 */
	reg = LPM_STS_LATCH_MODE | BIT(mode);
	mutex_lock(&pmcdev->lock);
	pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);
	mutex_unlock(&pmcdev->lock);

	return count;
}
DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode);

static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
{
	struct pmc *pmc = s->private;
	const struct pmc_bit_map *map = pmc->map->msr_sts;
	u64 pcstate_count;
	int index;

	for (index = 0; map[index].name; index++) {
		if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
			continue;

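		/*
		 * The package C-state residency MSRs tick at the TSC rate,
		 * so count * 1000 / tsc_khz converts the raw count to
		 * microseconds.
		 */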
		pcstate_count *= 1000;
		do_div(pcstate_count, tsc_khz);
		seq_printf(s, "%-8s : %llu\n", map[index].name,
			   pcstate_count);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);

static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order)
{
	int i, j;

	if (!lpm_pri)
		return false;
	/*
	 * Each byte contains the priority level for 2 modes (7:4 and 3:0).
	 * In a 32 bit register this allows for describing 8 modes. Store the
	 * levels and look for values out of range.
	 */
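	/*
	 * For instance, lpm_pri == 0x76543210 assigns each mode n the
	 * priority level n and passes the uniqueness check below.
	 */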
	for (i = 0; i < 8; i++) {
		int level = lpm_pri & GENMASK(3, 0);

		if (level >= LPM_MAX_NUM_MODES)
			return false;

		mode_order[i] = level;
		lpm_pri >>= 4;
	}

	/* Check that we have unique values */
	for (i = 0; i < LPM_MAX_NUM_MODES - 1; i++)
		for (j = i + 1; j < LPM_MAX_NUM_MODES; j++)
			if (mode_order[i] == mode_order[j])
				return false;

	return true;
}

static void pmc_core_get_low_power_modes(struct platform_device *pdev)
{
	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	u8 pri_order[LPM_MAX_NUM_MODES] = LPM_DEFAULT_PRI;
	u8 mode_order[LPM_MAX_NUM_MODES];
	u32 lpm_pri;
	u32 lpm_en;
	int mode, i, p;

	/* Use LPM Maps to indicate support for substates */
	if (!pmc->map->lpm_num_maps)
		return;

	lpm_en = pmc_core_reg_read(pmc, pmc->map->lpm_en_offset);
	/*
	 * For MTL, bit 31 is not an LPM mode but an enable bit. The lower
	 * byte is enough to cover the number of LPM modes on all platforms,
	 * so mask off the upper three bytes.
	 */
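	/* e.g. lpm_en == 0x13 (modes 0, 1 and 4 enabled) gives num_lpm_modes == 3. */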
	pmcdev->num_lpm_modes = hweight32(lpm_en & 0xFF);

	/* Read 32 bit LPM_PRI register */
	lpm_pri = pmc_core_reg_read(pmc, pmc->map->lpm_priority_offset);

	/*
	 * If lpm_pri value passes verification, then override the default
	 * modes here. Otherwise stick with the default.
	 */
	if (pmc_core_pri_verify(lpm_pri, mode_order))
		/* Get list of modes in priority order */
		for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++)
			pri_order[mode_order[mode]] = mode;
	else
		dev_warn(&pdev->dev, "Assuming a default substate order for this platform\n");

	/*
	 * Loop through all modes from lowest to highest priority,
	 * and capture all enabled modes in order
	 */
	i = 0;
	for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) {
		int mode = pri_order[p];

		if (!(BIT(mode) & lpm_en))
			continue;

		pmcdev->lpm_en_modes[i++] = mode;
	}
}

int get_primary_reg_base(struct pmc *pmc)
{
	u64 slp_s0_addr;

	if (lpit_read_residency_count_address(&slp_s0_addr)) {
		pmc->base_addr = PMC_BASE_ADDR_DEFAULT;

		if (page_is_ram(PHYS_PFN(pmc->base_addr)))
			return -ENODEV;
	} else {
		pmc->base_addr = slp_s0_addr - pmc->map->slp_s0_offset;
	}

	pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length);
	if (!pmc->regbase)
		return -ENOMEM;
	return 0;
}

static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
	debugfs_remove_recursive(pmcdev->dbgfs_dir);
}

static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
{
	struct pmc *primary_pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	struct dentry *dir;

	dir = debugfs_create_dir("pmc_core", NULL);
	pmcdev->dbgfs_dir = dir;

	debugfs_create_file("slp_s0_residency_usec", 0444, dir, primary_pmc,
			    &pmc_core_dev_state);

	if (primary_pmc->map->pfear_sts)
		debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
				    pmcdev, &pmc_core_ppfear_fops);

	debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
			    &pmc_core_ltr_ignore_ops);

	debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);

	debugfs_create_file("package_cstate_show", 0444, dir, primary_pmc,
			    &pmc_core_pkgc_fops);

	if (primary_pmc->map->pll_sts)
		debugfs_create_file("pll_status", 0444, dir, pmcdev,
				    &pmc_core_pll_fops);

	if (primary_pmc->map->mphy_sts)
		debugfs_create_file("mphy_core_lanes_power_gating_status",
				    0444, dir, pmcdev,
				    &pmc_core_mphy_pg_fops);

	if (primary_pmc->map->slps0_dbg_maps) {
		debugfs_create_file("slp_s0_debug_status", 0444,
				    dir, pmcdev,
				    &pmc_core_slps0_dbg_fops);

		debugfs_create_bool("slp_s0_dbg_latch", 0644,
				    dir, &slps0_dbg_latch);
	}

	if (primary_pmc->map->lpm_en_offset) {
		debugfs_create_file("substate_residencies", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_res_fops);
	}

	if (primary_pmc->map->lpm_status_offset) {
		debugfs_create_file("substate_status_registers", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_sts_regs_fops);
		debugfs_create_file("substate_live_status_registers", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_l_sts_regs_fops);
		debugfs_create_file("lpm_latch_mode", 0644,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_lpm_latch_mode_fops);
	}

	if (primary_pmc->lpm_req_regs) {
		debugfs_create_file("substate_requirements", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_req_regs_fops);
	}
}

static const struct x86_cpu_id intel_pmc_core_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		spt_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		spt_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		spt_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		spt_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L,	cnp_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		icl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI,	icl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		cnp_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		cnp_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,	tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,	icl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,		tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT,	tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		adl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,	tgl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,		adl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,	adl_core_init),
	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L,	mtl_core_init),
	{}
};

MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);

static const struct pci_device_id pmc_pci_ids[] = {
	{ PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
	{ }
};

/*
 * This quirk can be used on platforms where the BIOS forces the 24 MHz
 * crystal to shut down before the PMC can assert SLP_S0#.
 */
static bool xtal_ignore;
static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
	xtal_ignore = true;
	return 0;
}

static void pmc_core_xtal_ignore(struct pmc *pmc)
{
	u32 value;

	value = pmc_core_reg_read(pmc, pmc->map->pm_vric1_offset);
	/* 24MHz Crystal Shutdown Qualification Disable */
	value |= SPT_PMC_VRIC1_XTALSDQDIS;
	/* Low Voltage Mode Enable */
	value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
	pmc_core_reg_write(pmc, pmc->map->pm_vric1_offset, value);
}

static const struct dmi_system_id pmc_core_dmi_table[] = {
	{
	.callback = quirk_xtal_ignore,
	.ident = "HP Elite x2 1013 G3",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "HP"),
		DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
		},
	},
	{}
};

static void pmc_core_do_dmi_quirks(struct pmc *pmc)
{
	dmi_check_system(pmc_core_dmi_table);

	if (xtal_ignore)
		pmc_core_xtal_ignore(pmc);
}

static void pmc_core_clean_structure(struct platform_device *pdev)
{
	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
		struct pmc *pmc = pmcdev->pmcs[i];

		if (pmc)
			iounmap(pmc->regbase);
	}

	if (pmcdev->ssram_pcidev) {
		pci_dev_put(pmcdev->ssram_pcidev);
		pci_disable_device(pmcdev->ssram_pcidev);
	}
	platform_set_drvdata(pdev, NULL);
	mutex_destroy(&pmcdev->lock);
}

static int pmc_core_probe(struct platform_device *pdev)
{
	static bool device_initialized;
	struct pmc_dev *pmcdev;
	const struct x86_cpu_id *cpu_id;
	int (*core_init)(struct pmc_dev *pmcdev);
	struct pmc *primary_pmc;
	int ret;

	if (device_initialized)
		return -ENODEV;

	pmcdev = devm_kzalloc(&pdev->dev, sizeof(*pmcdev), GFP_KERNEL);
	if (!pmcdev)
		return -ENOMEM;

	platform_set_drvdata(pdev, pmcdev);
	pmcdev->pdev = pdev;

	cpu_id = x86_match_cpu(intel_pmc_core_ids);
	if (!cpu_id)
		return -ENODEV;

	core_init = (int (*)(struct pmc_dev *))cpu_id->driver_data;

	/* Primary PMC */
	primary_pmc = devm_kzalloc(&pdev->dev, sizeof(*primary_pmc), GFP_KERNEL);
	if (!primary_pmc)
		return -ENOMEM;
	pmcdev->pmcs[PMC_IDX_MAIN] = primary_pmc;

	/*
	 * Coffee Lake has the CPU ID of Kaby Lake but is paired with the
	 * Cannon Lake PCH, so the Sunrisepoint PCH regmap can't be used
	 * here. Use the Cannon Lake PCH regmap in this case.
	 */
	if (core_init == spt_core_init && !pci_dev_present(pmc_pci_ids))
		core_init = cnp_core_init;

	mutex_init(&pmcdev->lock);
	ret = core_init(pmcdev);
	if (ret) {
		pmc_core_clean_structure(pdev);
		return ret;
	}

	pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(primary_pmc);
	pmc_core_get_low_power_modes(pdev);
	pmc_core_do_dmi_quirks(primary_pmc);

	pmc_core_dbgfs_register(pmcdev);
	pm_report_max_hw_sleep(FIELD_MAX(SLP_S0_RES_COUNTER_MASK) *
			       pmc_core_adjust_slp_s0_step(primary_pmc, 1));

	device_initialized = true;
	dev_info(&pdev->dev, " initialized\n");

	return 0;
}

static void pmc_core_remove(struct platform_device *pdev)
{
	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);

	pmc_core_dbgfs_unregister(pmcdev);
	pmc_core_clean_structure(pdev);
}

static bool warn_on_s0ix_failures;
module_param(warn_on_s0ix_failures, bool, 0644);
MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");

static __maybe_unused int pmc_core_suspend(struct device *dev)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];

	if (pmcdev->suspend)
		pmcdev->suspend(pmcdev);

	/* Check if the suspend will actually use S0ix */
	if (pm_suspend_via_firmware())
		return 0;

	/* Save PC10 residency for checking later */
	if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
		return -EIO;

	/* Save S0ix residency for checking later */
	if (pmc_core_dev_state_get(pmc, &pmcdev->s0ix_counter))
		return -EIO;

	return 0;
}

static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
{
	u64 pc10_counter;

	if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
		return false;

	if (pc10_counter == pmcdev->pc10_counter)
		return true;

	return false;
}

static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
{
	u64 s0ix_counter;

	if (pmc_core_dev_state_get(pmcdev->pmcs[PMC_IDX_MAIN], &s0ix_counter))
		return false;

	pm_report_hw_sleep_time((u32)(s0ix_counter - pmcdev->s0ix_counter));

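	/* A counter unchanged across the suspend window means SLP_S0 was never asserted. */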
	if (s0ix_counter == pmcdev->s0ix_counter)
		return true;

	return false;
}

int pmc_core_resume_common(struct pmc_dev *pmcdev)
{
	struct device *dev = &pmcdev->pdev->dev;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_bit_map **maps = pmc->map->lpm_sts;
	int offset = pmc->map->lpm_status_offset;
	int i;

	/* Check if the suspend used S0ix */
	if (pm_suspend_via_firmware())
		return 0;

	if (!pmc_core_is_s0ix_failed(pmcdev))
		return 0;

	if (!warn_on_s0ix_failures)
		return 0;

	if (pmc_core_is_pc10_failed(pmcdev)) {
		/* S0ix failed because of PC10 entry failure */
		dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
			 pmcdev->pc10_counter);
		return 0;
	}

	/* The really interesting case - S0ix failed - let's ask the PMC why. */
	dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
		 pmcdev->s0ix_counter);

	if (pmc->map->slps0_dbg_maps)
		pmc_core_slps0_display(pmc, dev, NULL);

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
		struct pmc *pmc = pmcdev->pmcs[i];

		if (!pmc)
			continue;
		if (pmc->map->lpm_sts)
			pmc_core_lpm_display(pmc, dev, NULL, offset, i, "STATUS", maps);
	}

	return 0;
}

static __maybe_unused int pmc_core_resume(struct device *dev)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);

	if (pmcdev->resume)
		return pmcdev->resume(pmcdev);

	return pmc_core_resume_common(pmcdev);
}

static const struct dev_pm_ops pmc_core_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
};

static const struct acpi_device_id pmc_core_acpi_ids[] = {
	{"INT33A1", 0}, /* _HID for Intel Power Engine, _CID PNP0D80 */
	{ }
};
MODULE_DEVICE_TABLE(acpi, pmc_core_acpi_ids);

static struct platform_driver pmc_core_driver = {
	.driver = {
		.name = "intel_pmc_core",
		.acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
		.pm = &pmc_core_pm_ops,
		.dev_groups = pmc_dev_groups,
	},
	.probe = pmc_core_probe,
	.remove_new = pmc_core_remove,
};

module_platform_driver(pmc_core_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel PMC Core Driver");