// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel-tpmi : Driver to enumerate TPMI features and create devices
 *
 * Copyright (c) 2023, Intel Corporation.
 * All Rights Reserved.
 *
 * The TPMI (Topology Aware Register and PM Capsule Interface) provides a
 * flexible, extendable and PCIe enumerable MMIO interface for PM features.
 *
 * For example, Intel RAPL (Running Average Power Limit) provides an MMIO
 * interface using TPMI. This has an advantage over the traditional MSR
 * (Model Specific Register) interface, where a thread needs to be scheduled
 * on the target CPU to read or write. Also, RAPL features vary between
 * CPU models, which requires a lot of model specific code. TPMI instead
 * provides an architectural interface, using hierarchical tables and
 * fields, which does not need any model specific implementation.
 *
 * The TPMI interface uses a PCI VSEC structure to expose the location of
 * the MMIO region.
 *
 * This VSEC structure is present in the PCI configuration space of the
 * Intel Out-of-Band (OOB) device, which is handled by the Intel VSEC
 * driver. The Intel VSEC driver parses VSEC structures present in the PCI
 * configuration space of the given device and creates an auxiliary device
 * object for each of them. In particular, it creates an auxiliary device
 * object representing TPMI that can be bound by an auxiliary driver.
 *
 * This TPMI driver binds to the TPMI auxiliary device object created
 * by the Intel VSEC driver.
 *
 * The TPMI specification defines a PFS (PM Feature Structure) table.
 * This table is present in the TPMI MMIO region. The starting address
 * of the PFS is derived from the tBIR (BAR Indicator Register) and "Address"
 * fields of the VSEC header.
 *
 * Each TPMI PM feature has one entry in the PFS with a unique TPMI
 * ID and its access details. The TPMI driver creates device nodes
 * for the supported PM features.
 *
 * The names of the devices created by the TPMI driver start with the
 * "intel_vsec.tpmi-" prefix, which is followed by the name of the
 * given PM feature (for example, "intel_vsec.tpmi-rapl.0").
 *
 * The device nodes are created using the "intel_vsec_add_aux()" interface
 * provided by the Intel VSEC driver.
 */

#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/intel_tpmi.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/security.h>
#include <linux/sizes.h>
#include <linux/string_helpers.h>

#include "vsec.h"

/**
 * struct intel_tpmi_pfs_entry - TPMI PM Feature Structure (PFS) entry
 * @tpmi_id: TPMI feature identifier (what the feature is and its data format).
 * @num_entries: Number of feature interface instances present in the PFS.
 *		 This represents the maximum number of power domains in the SoC.
 * @entry_size: Interface instance entry size in 32-bit words.
 * @cap_offset: Offset from the PM_Features base address to the base of the PM VSEC
 *		register bank in KB.
 * @attribute: Feature attribute: 0=BIOS. 1=OS. 2-3=Reserved.
 * @reserved: Bits for use in the future.
 *
 * Represents one TPMI feature entry data in the PFS retrieved as is
 * from the hardware.
 */
struct intel_tpmi_pfs_entry {
	u64 tpmi_id:8;
	u64 num_entries:8;
	u64 entry_size:16;
	u64 cap_offset:16;
	u64 attribute:2;
	u64 reserved:14;
} __packed;

/**
 * struct intel_tpmi_pm_feature - TPMI PM Feature information for a TPMI ID
 * @pfs_header: PFS header retrieved from the hardware.
 * @vsec_offset: Starting MMIO address for this feature in bytes. Essentially
 *		 this offset = "Address" from the VSEC header + PFS capability
 *		 offset for this feature entry.
 * @vsec_dev: Pointer to intel_vsec_device structure for this TPMI device
 *
 * Represents TPMI instance information for one TPMI ID.
 */
struct intel_tpmi_pm_feature {
	struct intel_tpmi_pfs_entry pfs_header;
	u64 vsec_offset;
	struct intel_vsec_device *vsec_dev;
};

/**
 * struct intel_tpmi_info - TPMI information for all IDs in an instance
 * @tpmi_features: Pointer to a list of TPMI feature instances
 * @vsec_dev: Pointer to intel_vsec_device structure for this TPMI device
 * @feature_count: Number of TPMI instances pointed to by tpmi_features
 * @pfs_start: Start of the PFS offset for the TPMI instances in this device
 * @plat_info: Stores platform info which can be used by the client drivers
 * @tpmi_control_mem: Memory mapped IO for getting control information
 * @dbgfs_dir: debugfs entry pointer
 *
 * Stores the information for all TPMI devices enumerated from a single PCI device.
 */
struct intel_tpmi_info {
	struct intel_tpmi_pm_feature *tpmi_features;
	struct intel_vsec_device *vsec_dev;
	int feature_count;
	u64 pfs_start;
	struct intel_tpmi_plat_info plat_info;
	void __iomem *tpmi_control_mem;
	struct dentry *dbgfs_dir;
};

/**
 * struct tpmi_info_header - CPU package ID to PCI device mapping information
 * @fn: PCI function number
 * @dev: PCI device number
 * @bus: PCI bus number
 * @pkg: CPU package ID
 * @reserved: Reserved for future use
 * @lock: When set to 1 the register is locked and becomes read-only
 *	  until the next reset. Not for use by the OS driver.
 *
 * The structure to read the hardware provided mapping information.
 */
struct tpmi_info_header {
	u64 fn:3;
	u64 dev:5;
	u64 bus:8;
	u64 pkg:8;
	u64 reserved:39;
	u64 lock:1;
} __packed;

/*
 * List of supported TPMI IDs.
 * Some TPMI IDs are not used by Linux, so the numbers are not consecutive.
 */
enum intel_tpmi_id {
	TPMI_ID_RAPL = 0,	/* Running Average Power Limit */
	TPMI_ID_PEM = 1,	/* Power and Perf excursion Monitor */
	TPMI_ID_UNCORE = 2,	/* Uncore Frequency Scaling */
	TPMI_ID_SST = 5,	/* Speed Select Technology */
	TPMI_CONTROL_ID = 0x80,	/* Special ID for getting feature status */
	TPMI_INFO_ID = 0x81,	/* Special ID for PCI BDF and Package ID information */
};

/*
 * The size reported by hardware is in u32 units. The hardware is trusted,
 * but it is better to verify the size on pre-silicon platforms. Set the
 * size to 0 when it is invalid.
 */
#define TPMI_GET_SINGLE_ENTRY_SIZE(pfs) \
({ \
	pfs->pfs_header.entry_size > SZ_1K ? 0 : pfs->pfs_header.entry_size << 2; \
})

/* Used during auxbus device creation */
static DEFINE_IDA(intel_vsec_tpmi_ida);

struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev)
{
	struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);

	return vsec_dev->priv_data;
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_platform_data, INTEL_TPMI);

int tpmi_get_resource_count(struct auxiliary_device *auxdev)
{
	struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);

	if (vsec_dev)
		return vsec_dev->num_resources;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_count, INTEL_TPMI);

struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index)
{
	struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);

	if (vsec_dev && index < vsec_dev->num_resources)
		return &vsec_dev->resource[index];

	return NULL;
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_at_index, INTEL_TPMI);
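
/*
 * A minimal sketch of how a TPMI feature driver is expected to consume the
 * helpers above. The "foo" feature name, function names and ID table below
 * are illustrative only and are not part of this driver:
 *
 *	static int tpmi_foo_probe(struct auxiliary_device *auxdev,
 *				  const struct auxiliary_device_id *id)
 *	{
 *		struct intel_tpmi_plat_info *plat_info;
 *		int i, num_resources;
 *
 *		plat_info = tpmi_get_platform_data(auxdev);
 *		num_resources = tpmi_get_resource_count(auxdev);
 *
 *		for (i = 0; i < num_resources; i++) {
 *			struct resource *res;
 *
 *			res = tpmi_get_resource_at_index(auxdev, i);
 *			if (!res)
 *				continue;
 *			... map the resource and program the feature ...
 *		}
 *
 *		return 0;
 *	}
 *
 *	static const struct auxiliary_device_id tpmi_foo_id_table[] = {
 *		{ .name = "intel_vsec.tpmi-foo" },
 *		{}
 *	};
 */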

/* TPMI Control Interface */

#define TPMI_CONTROL_STATUS_OFFSET	0x00
#define TPMI_COMMAND_OFFSET		0x08

/*
 * The spec calls for a maximum of 1 second to get ownership in the worst
 * case. Poll at 10 ms intervals and time out after 1 second.
 */
#define TPMI_CONTROL_TIMEOUT_US		(10 * USEC_PER_MSEC)
#define TPMI_CONTROL_TIMEOUT_MAX_US	(1 * USEC_PER_SEC)

#define TPMI_RB_TIMEOUT_US		(10 * USEC_PER_MSEC)
#define TPMI_RB_TIMEOUT_MAX_US		USEC_PER_SEC

/* TPMI Control status register defines */

#define TPMI_CONTROL_STATUS_RB		BIT_ULL(0)

#define TPMI_CONTROL_STATUS_OWNER	GENMASK_ULL(5, 4)
#define TPMI_OWNER_NONE			0
#define TPMI_OWNER_IN_BAND		1

#define TPMI_CONTROL_STATUS_CPL		BIT_ULL(6)
#define TPMI_CONTROL_STATUS_RESULT	GENMASK_ULL(15, 8)
#define TPMI_CONTROL_STATUS_LEN		GENMASK_ULL(31, 16)

#define TPMI_CMD_PKT_LEN		2
#define TPMI_CMD_STATUS_SUCCESS		0x40

/* TPMI command data registers */
#define TMPI_CONTROL_DATA_CMD		GENMASK_ULL(7, 0)
#define TMPI_CONTROL_DATA_VAL		GENMASK_ULL(63, 32)
#define TPMI_CONTROL_DATA_VAL_FEATURE	GENMASK_ULL(48, 40)

/* Command to send via control interface */
#define TPMI_CONTROL_GET_STATE_CMD	0x10

#define TPMI_CONTROL_CMD_MASK		GENMASK_ULL(48, 40)

#define TPMI_CMD_LEN_MASK		GENMASK_ULL(18, 16)

#define TPMI_STATE_DISABLED		BIT_ULL(0)
#define TPMI_STATE_LOCKED		BIT_ULL(31)

/* Mutex to complete get feature status without interruption */
static DEFINE_MUTEX(tpmi_dev_lock);

static int tpmi_wait_for_owner(struct intel_tpmi_info *tpmi_info, u8 owner)
{
	u64 control;

	return readq_poll_timeout(tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET,
				  control, owner == FIELD_GET(TPMI_CONTROL_STATUS_OWNER, control),
				  TPMI_CONTROL_TIMEOUT_US, TPMI_CONTROL_TIMEOUT_MAX_US);
}

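/*
 * Summary of the mailbox sequence implemented below for
 * TPMI_CONTROL_GET_STATE_CMD:
 *
 * 1. Wait for the OWNER field in the status register to read "none".
 * 2. Write the command ID and feature ID to the command register.
 * 3. Wait for the OWNER field to read "in-band".
 * 4. Set RUN_BUSY with a packet length of 2 dwords in the status register.
 * 5. Poll for RUN_BUSY to clear and check the RESULT field.
 * 6. Read the response data from the command register.
 * 7. Write the CPL (completion) bit to finish the transaction.
 */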
static int tpmi_read_feature_status(struct intel_tpmi_info *tpmi_info, int feature_id,
				    int *locked, int *disabled)
{
	u64 control, data;
	int ret;

	if (!tpmi_info->tpmi_control_mem)
		return -EFAULT;

	mutex_lock(&tpmi_dev_lock);

	/* Wait for the owner field to be set to 0 (none) */
	ret = tpmi_wait_for_owner(tpmi_info, TPMI_OWNER_NONE);
	if (ret)
		goto err_unlock;

	/* Set the command ID to 0x10 for TPMI_GET_STATE */
	data = FIELD_PREP(TMPI_CONTROL_DATA_CMD, TPMI_CONTROL_GET_STATE_CMD);

	/* 32 bits for the DATA offset and +8 for the feature_id field */
	data |= FIELD_PREP(TPMI_CONTROL_DATA_VAL_FEATURE, feature_id);

	/* Write at the command offset for qword access */
	writeq(data, tpmi_info->tpmi_control_mem + TPMI_COMMAND_OFFSET);

	/* Wait for the owner field to be set to in-band */
	ret = tpmi_wait_for_owner(tpmi_info, TPMI_OWNER_IN_BAND);
	if (ret)
		goto err_unlock;

	/* Set Run Busy and a packet length of 2 dwords */
	control = TPMI_CONTROL_STATUS_RB;
	control |= FIELD_PREP(TPMI_CONTROL_STATUS_LEN, TPMI_CMD_PKT_LEN);

	/* Write at the status offset for qword access */
	writeq(control, tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET);

	/* Wait for Run Busy to clear */
	ret = readq_poll_timeout(tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET,
				 control, !(control & TPMI_CONTROL_STATUS_RB),
				 TPMI_RB_TIMEOUT_US, TPMI_RB_TIMEOUT_MAX_US);
	if (ret)
		goto done_proc;

	control = FIELD_GET(TPMI_CONTROL_STATUS_RESULT, control);
	if (control != TPMI_CMD_STATUS_SUCCESS) {
		ret = -EBUSY;
		goto done_proc;
	}

	/* Response is ready */
	data = readq(tpmi_info->tpmi_control_mem + TPMI_COMMAND_OFFSET);
	data = FIELD_GET(TMPI_CONTROL_DATA_VAL, data);

	*disabled = 0;
	*locked = 0;

	if (!(data & TPMI_STATE_DISABLED))
		*disabled = 1;

	if (data & TPMI_STATE_LOCKED)
		*locked = 1;

	ret = 0;

done_proc:
	/* Set the CPL "completion" bit */
	writeq(TPMI_CONTROL_STATUS_CPL, tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET);

err_unlock:
	mutex_unlock(&tpmi_dev_lock);

	return ret;
}

int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id,
			    int *locked, int *disabled)
{
	struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent);
	struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev);

	return tpmi_read_feature_status(tpmi_info, feature_id, locked, disabled);
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_feature_status, INTEL_TPMI);

static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused)
{
	struct intel_tpmi_info *tpmi_info = s->private;
	struct intel_tpmi_pm_feature *pfs;
	int locked, disabled, ret, i;

	seq_printf(s, "tpmi PFS start offset 0x%llx\n", tpmi_info->pfs_start);
	seq_puts(s, "tpmi_id\t\tentries\t\tsize\t\tcap_offset\tattribute\tvsec_offset\tlocked\tdisabled\n");
	for (i = 0; i < tpmi_info->feature_count; ++i) {
		pfs = &tpmi_info->tpmi_features[i];
		ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &locked,
					       &disabled);
		if (ret) {
			locked = 'U';
			disabled = 'U';
		} else {
			disabled = disabled ? 'Y' : 'N';
			locked = locked ? 'Y' : 'N';
		}
		seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%016llx\t%c\t%c\n",
			   pfs->pfs_header.tpmi_id, pfs->pfs_header.num_entries,
			   pfs->pfs_header.entry_size, pfs->pfs_header.cap_offset,
			   pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(tpmi_pfs_dbg);

#define MEM_DUMP_COLUMN_COUNT	8

static int tpmi_mem_dump_show(struct seq_file *s, void *unused)
{
	size_t row_size = MEM_DUMP_COLUMN_COUNT * sizeof(u32);
	struct intel_tpmi_pm_feature *pfs = s->private;
	int count, ret = 0;
	void __iomem *mem;
	u32 size;
	u64 off;
	u8 *buffer;

	size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
	if (!size)
		return -EIO;

	buffer = kmalloc(size, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	off = pfs->vsec_offset;

	mutex_lock(&tpmi_dev_lock);

	for (count = 0; count < pfs->pfs_header.num_entries; ++count) {
		seq_printf(s, "TPMI Instance:%d offset:0x%llx\n", count, off);

		mem = ioremap(off, size);
		if (!mem) {
			ret = -ENOMEM;
			break;
		}

		memcpy_fromio(buffer, mem, size);

		seq_hex_dump(s, " ", DUMP_PREFIX_OFFSET, row_size, sizeof(u32), buffer, size,
			     false);

		iounmap(mem);

		off += size;
	}

	mutex_unlock(&tpmi_dev_lock);

	kfree(buffer);

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(tpmi_mem_dump);

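/*
 * Expected input for the mem_write debugfs file below: three integers
 * "<instance> <offset> <value>", where <instance> selects a feature
 * instance (power domain), <offset> is a byte offset within that
 * instance's MMIO region and <value> is the 32-bit value to write.
 */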
static ssize_t mem_write(struct file *file, const char __user *userbuf, size_t len, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	struct intel_tpmi_pm_feature *pfs = m->private;
	u32 addr, value, punit, size;
	u32 num_elems, *array;
	void __iomem *mem;
	int ret;

	size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
	if (!size)
		return -EIO;

	ret = parse_int_array_user(userbuf, len, (int **)&array);
	if (ret < 0)
		return ret;

	num_elems = *array;
	if (num_elems != 3) {
		ret = -EINVAL;
		goto exit_write;
	}

	punit = array[1];
	addr = array[2];
	value = array[3];

	if (punit >= pfs->pfs_header.num_entries) {
		ret = -EINVAL;
		goto exit_write;
	}

	if (addr >= size) {
		ret = -EINVAL;
		goto exit_write;
	}

	mutex_lock(&tpmi_dev_lock);

	mem = ioremap(pfs->vsec_offset + punit * size, size);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_mem_write;
	}

	writel(value, mem + addr);

	iounmap(mem);

	ret = len;

unlock_mem_write:
	mutex_unlock(&tpmi_dev_lock);

exit_write:
	kfree(array);

	return ret;
}

static int mem_write_show(struct seq_file *s, void *unused)
{
	return 0;
}

static int mem_write_open(struct inode *inode, struct file *file)
{
	return single_open(file, mem_write_show, inode->i_private);
}

static const struct file_operations mem_write_ops = {
	.open = mem_write_open,
	.read = seq_read,
	.write = mem_write,
	.llseek = seq_lseek,
	.release = single_release,
};

#define tpmi_to_dev(info)	(&info->vsec_dev->pcidev->dev)

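/*
 * debugfs layout created below, under the debugfs root (the PCI device
 * name shown is only an example):
 *
 *	tpmi-0000:00:03.1/
 *		pfs_dump
 *		tpmi-id-<id>/
 *			mem_dump
 *			mem_write
 */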
static void tpmi_dbgfs_register(struct intel_tpmi_info *tpmi_info)
{
	char name[64];
	int i;

	snprintf(name, sizeof(name), "tpmi-%s", dev_name(tpmi_to_dev(tpmi_info)));
	tpmi_info->dbgfs_dir = debugfs_create_dir(name, NULL);

	debugfs_create_file("pfs_dump", 0444, tpmi_info->dbgfs_dir, tpmi_info, &tpmi_pfs_dbg_fops);

	for (i = 0; i < tpmi_info->feature_count; ++i) {
		struct intel_tpmi_pm_feature *pfs;
		struct dentry *dir;

		pfs = &tpmi_info->tpmi_features[i];
		snprintf(name, sizeof(name), "tpmi-id-%02x", pfs->pfs_header.tpmi_id);
		dir = debugfs_create_dir(name, tpmi_info->dbgfs_dir);

		debugfs_create_file("mem_dump", 0444, dir, pfs, &tpmi_mem_dump_fops);
		debugfs_create_file("mem_write", 0644, dir, pfs, &mem_write_ops);
	}
}

static void tpmi_set_control_base(struct auxiliary_device *auxdev,
				  struct intel_tpmi_info *tpmi_info,
				  struct intel_tpmi_pm_feature *pfs)
{
	void __iomem *mem;
	u32 size;

	size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
	if (!size)
		return;

	mem = devm_ioremap(&auxdev->dev, pfs->vsec_offset, size);
	if (!mem)
		return;

	/* mem is pointing to the TPMI CONTROL base */
	tpmi_info->tpmi_control_mem = mem;
}

static const char *intel_tpmi_name(enum intel_tpmi_id id)
{
	switch (id) {
	case TPMI_ID_RAPL:
		return "rapl";
	case TPMI_ID_PEM:
		return "pem";
	case TPMI_ID_UNCORE:
		return "uncore";
	case TPMI_ID_SST:
		return "sst";
	default:
		return NULL;
	}
}

/* String length for "tpmi-" + feature name (up to 8 bytes) */
#define TPMI_FEATURE_NAME_LEN	14

static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
			      struct intel_tpmi_pm_feature *pfs,
			      u64 pfs_start)
{
	struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev;
	char feature_id_name[TPMI_FEATURE_NAME_LEN];
	struct intel_vsec_device *feature_vsec_dev;
	struct resource *res, *tmp;
	const char *name;
	int i;

	name = intel_tpmi_name(pfs->pfs_header.tpmi_id);
	if (!name)
		return -EOPNOTSUPP;

	res = kcalloc(pfs->pfs_header.num_entries, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	feature_vsec_dev = kzalloc(sizeof(*feature_vsec_dev), GFP_KERNEL);
	if (!feature_vsec_dev) {
		kfree(res);
		return -ENOMEM;
	}

	snprintf(feature_id_name, sizeof(feature_id_name), "tpmi-%s", name);

	for (i = 0, tmp = res; i < pfs->pfs_header.num_entries; i++, tmp++) {
		u64 entry_size_bytes = pfs->pfs_header.entry_size * sizeof(u32);

		tmp->start = pfs->vsec_offset + entry_size_bytes * i;
		tmp->end = tmp->start + entry_size_bytes - 1;
		tmp->flags = IORESOURCE_MEM;
	}

	feature_vsec_dev->pcidev = vsec_dev->pcidev;
	feature_vsec_dev->resource = res;
	feature_vsec_dev->num_resources = pfs->pfs_header.num_entries;
	feature_vsec_dev->priv_data = &tpmi_info->plat_info;
	feature_vsec_dev->priv_data_size = sizeof(tpmi_info->plat_info);
	feature_vsec_dev->ida = &intel_vsec_tpmi_ida;

	/*
	 * intel_vsec_add_aux() is resource managed, no explicit
	 * delete is required on error or on module unload.
	 * feature_vsec_dev and res memory are also freed as part of
	 * device deletion.
	 */
	return intel_vsec_add_aux(vsec_dev->pcidev, &vsec_dev->auxdev.dev,
				  feature_vsec_dev, feature_id_name);
}

static int tpmi_create_devices(struct intel_tpmi_info *tpmi_info)
{
	struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev;
	int ret, i;

	for (i = 0; i < vsec_dev->num_resources; i++) {
		ret = tpmi_create_device(tpmi_info, &tpmi_info->tpmi_features[i],
					 tpmi_info->pfs_start);
		/*
		 * Fail the driver load if device creation fails for any
		 * supported feature, otherwise continue to the next one.
		 * Unsupported features (-EOPNOTSUPP) are simply skipped.
		 * Since intel_vsec_add_aux() is resource managed, no
		 * cleanup is required for the devices that were created
		 * successfully.
		 */
		if (ret && ret != -EOPNOTSUPP)
			return ret;
	}

	return 0;
}

#define TPMI_INFO_BUS_INFO_OFFSET	0x08

static int tpmi_process_info(struct intel_tpmi_info *tpmi_info,
			     struct intel_tpmi_pm_feature *pfs)
{
	struct tpmi_info_header header;
	void __iomem *info_mem;

	info_mem = ioremap(pfs->vsec_offset + TPMI_INFO_BUS_INFO_OFFSET,
			   pfs->pfs_header.entry_size * sizeof(u32) - TPMI_INFO_BUS_INFO_OFFSET);
	if (!info_mem)
		return -ENOMEM;

	memcpy_fromio(&header, info_mem, sizeof(header));

	tpmi_info->plat_info.package_id = header.pkg;
	tpmi_info->plat_info.bus_number = header.bus;
	tpmi_info->plat_info.device_number = header.dev;
	tpmi_info->plat_info.function_number = header.fn;

	iounmap(info_mem);

	return 0;
}

static int tpmi_fetch_pfs_header(struct intel_tpmi_pm_feature *pfs, u64 start, int size)
{
	void __iomem *pfs_mem;

	pfs_mem = ioremap(start, size);
	if (!pfs_mem)
		return -ENOMEM;

	memcpy_fromio(&pfs->pfs_header, pfs_mem, sizeof(pfs->pfs_header));

	iounmap(pfs_mem);

	return 0;
}

#define TPMI_CAP_OFFSET_UNIT	1024

static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev)
{
	struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);
	struct pci_dev *pci_dev = vsec_dev->pcidev;
	struct intel_tpmi_info *tpmi_info;
	u64 pfs_start = 0;
	int ret, i;

	tpmi_info = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_info), GFP_KERNEL);
	if (!tpmi_info)
		return -ENOMEM;

	tpmi_info->vsec_dev = vsec_dev;
	tpmi_info->feature_count = vsec_dev->num_resources;
	tpmi_info->plat_info.bus_number = pci_dev->bus->number;

	tpmi_info->tpmi_features = devm_kcalloc(&auxdev->dev, vsec_dev->num_resources,
						sizeof(*tpmi_info->tpmi_features),
						GFP_KERNEL);
	if (!tpmi_info->tpmi_features)
		return -ENOMEM;

	for (i = 0; i < vsec_dev->num_resources; i++) {
		struct intel_tpmi_pm_feature *pfs;
		struct resource *res;
		u64 res_start;
		int size, ret;

		pfs = &tpmi_info->tpmi_features[i];
		pfs->vsec_dev = vsec_dev;

		res = &vsec_dev->resource[i];
		if (!res)
			continue;

		res_start = res->start;
		size = resource_size(res);
		if (size < 0)
			continue;

		ret = tpmi_fetch_pfs_header(pfs, res_start, size);
		if (ret)
			continue;

		if (!pfs_start)
			pfs_start = res_start;

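		/*
		 * The PFS cap_offset field is in units of 1 KB
		 * (TPMI_CAP_OFFSET_UNIT), relative to the start of the PFS.
		 */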
		pfs->vsec_offset = pfs_start + pfs->pfs_header.cap_offset * TPMI_CAP_OFFSET_UNIT;

		/*
		 * Process TPMI_INFO to get the PCI device to CPU package ID
		 * mapping. Device nodes for TPMI features are not created in
		 * this for loop, so the mapping information will be available
		 * when the actual device nodes are created outside this
		 * loop via tpmi_create_devices().
		 */
		if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID) {
			ret = tpmi_process_info(tpmi_info, pfs);
			if (ret)
				return ret;
		}

		if (pfs->pfs_header.tpmi_id == TPMI_CONTROL_ID)
			tpmi_set_control_base(auxdev, tpmi_info, pfs);
	}

	tpmi_info->pfs_start = pfs_start;

	auxiliary_set_drvdata(auxdev, tpmi_info);

	ret = tpmi_create_devices(tpmi_info);
	if (ret)
		return ret;

	/*
	 * Allow debugfs only when the security policy allows it. Everything
	 * this debugfs interface provides can also be done via /dev/mem
	 * access. If the /dev/mem interface is locked down, don't allow
	 * debugfs to present any information. Also require CAP_SYS_RAWIO,
	 * as the /dev/mem interface does.
	 */
	if (!security_locked_down(LOCKDOWN_DEV_MEM) && capable(CAP_SYS_RAWIO))
		tpmi_dbgfs_register(tpmi_info);

	return 0;
}

static int tpmi_probe(struct auxiliary_device *auxdev,
		      const struct auxiliary_device_id *id)
{
	return intel_vsec_tpmi_init(auxdev);
}

static void tpmi_remove(struct auxiliary_device *auxdev)
{
	struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(auxdev);

	debugfs_remove_recursive(tpmi_info->dbgfs_dir);
}

static const struct auxiliary_device_id tpmi_id_table[] = {
	{ .name = "intel_vsec.tpmi" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, tpmi_id_table);

static struct auxiliary_driver tpmi_aux_driver = {
	.id_table = tpmi_id_table,
	.probe = tpmi_probe,
	.remove = tpmi_remove,
};

module_auxiliary_driver(tpmi_aux_driver);

MODULE_IMPORT_NS(INTEL_VSEC);
MODULE_DESCRIPTION("Intel TPMI enumeration module");
MODULE_LICENSE("GPL");