// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Platform Monitoring Technology Telemetry driver
 *
 * Copyright (c) 2020, Intel Corporation.
 * All Rights Reserved.
 *
 * Author: "Alexander Duyck" <alexander.h.duyck@linux.intel.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>

#include "class.h"

#define PMT_XA_START	0
#define PMT_XA_MAX	INT_MAX
#define PMT_XA_LIMIT	XA_LIMIT(PMT_XA_START, PMT_XA_MAX)

/*
 * Early implementations of PMT on client platforms have some
 * differences from the server platforms (which use the Out Of Band
 * Management Services Module, OOBMSM). This list tracks those
 * platforms as needed to handle those differences. Newer client
 * platforms are expected to be fully compatible with server platforms.
 */
static const struct pci_device_id pmt_telem_early_client_pci_ids[] = {
	{ PCI_VDEVICE(INTEL, 0x467d) }, /* ADL */
	{ PCI_VDEVICE(INTEL, 0x490e) }, /* DG1 */
	{ PCI_VDEVICE(INTEL, 0x9a0d) }, /* TGL */
	{ }
};

bool intel_pmt_is_early_client_hw(struct device *dev)
{
	struct pci_dev *parent = to_pci_dev(dev->parent);

	return !!pci_match_id(pmt_telem_early_client_pci_ids, parent);
}
EXPORT_SYMBOL_GPL(intel_pmt_is_early_client_hw);

/*
 * sysfs
 */
static ssize_t
intel_pmt_read(struct file *filp, struct kobject *kobj,
	       struct bin_attribute *attr, char *buf, loff_t off,
	       size_t count)
{
	struct intel_pmt_entry *entry = container_of(attr,
						     struct intel_pmt_entry,
						     pmt_bin_attr);

	if (off < 0)
		return -EINVAL;

	if (off >= entry->size)
		return 0;

	if (count > entry->size - off)
		count = entry->size - off;

	memcpy_fromio(buf, entry->base + off, count);

	return count;
}

static int
intel_pmt_mmap(struct file *filp, struct kobject *kobj,
	       struct bin_attribute *attr, struct vm_area_struct *vma)
{
	struct intel_pmt_entry *entry = container_of(attr,
						     struct intel_pmt_entry,
						     pmt_bin_attr);
	unsigned long vsize = vma->vm_end - vma->vm_start;
	struct device *dev = kobj_to_dev(kobj);
	unsigned long phys = entry->base_addr;
	unsigned long pfn = PFN_DOWN(phys);
	unsigned long psize;

	/* The telemetry region is exposed read-only */
	if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
		return -EROFS;

	/* Mappable size is the whole pages spanned by the region */
	psize = (PFN_UP(entry->base_addr + entry->size) - pfn) * PAGE_SIZE;
	if (vsize > psize) {
		dev_err(dev, "Requested mmap size is too large\n");
		return -EINVAL;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (io_remap_pfn_range(vma, vma->vm_start, pfn,
			       vsize, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

static ssize_t
guid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct intel_pmt_entry *entry = dev_get_drvdata(dev);

	return sprintf(buf, "0x%x\n", entry->guid);
}
static DEVICE_ATTR_RO(guid);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct intel_pmt_entry *entry = dev_get_drvdata(dev);

	return sprintf(buf, "%zu\n", entry->size);
}
static DEVICE_ATTR_RO(size);

static ssize_t
offset_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct intel_pmt_entry *entry = dev_get_drvdata(dev);

	return sprintf(buf, "%lu\n", offset_in_page(entry->base_addr));
}
static DEVICE_ATTR_RO(offset);
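
/*
 * Resulting sysfs layout (illustrative; the "telem" namespace name is
 * supplied by the consumer driver, not defined in this file):
 *
 *	/sys/class/intel_pmt/telem0/guid	telemetry GUID
 *	/sys/class/intel_pmt/telem0/size	region size in bytes
 *	/sys/class/intel_pmt/telem0/offset	offset of the region in its page
 *	/sys/class/intel_pmt/telem0/telem	read/mmap-able telemetry data
 */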

static struct attribute *intel_pmt_attrs[] = {
	&dev_attr_guid.attr,
	&dev_attr_size.attr,
	&dev_attr_offset.attr,
	NULL
};
ATTRIBUTE_GROUPS(intel_pmt);

static struct class intel_pmt_class = {
	.name = "intel_pmt",
	.owner = THIS_MODULE,
	.dev_groups = intel_pmt_groups,
};

static int intel_pmt_populate_entry(struct intel_pmt_entry *entry,
				    struct intel_pmt_header *header,
				    struct device *dev,
				    struct resource *disc_res)
{
	struct pci_dev *pci_dev = to_pci_dev(dev->parent);
	u8 bir;

	/*
	 * The base offset should always be 8-byte aligned.
	 *
	 * For non-local access types the lower 3 bits of base offset
	 * contain the index of the base address register where the
	 * telemetry can be found.
	 */
	bir = GET_BIR(header->base_offset);

	/* Local access and BARID only for now */
	switch (header->access_type) {
	case ACCESS_LOCAL:
		if (bir) {
			dev_err(dev,
				"Unsupported BAR index %d for access type %d\n",
				bir, header->access_type);
			return -EINVAL;
		}
		/*
		 * For access_type LOCAL, the base address is as follows:
		 * base address = end of discovery region + base offset
		 */
		entry->base_addr = disc_res->end + 1 + header->base_offset;

		/*
		 * Some hardware uses a different calculation for the base
		 * address when access_type == ACCESS_LOCAL. On these
		 * systems ACCESS_LOCAL refers to an address in the same
		 * BAR as the header but at a fixed offset. As the header
		 * address was supplied to the driver, we don't know which
		 * BAR it was in, so search for the BAR whose range
		 * includes the header address.
		 */
		if (intel_pmt_is_early_client_hw(dev)) {
			int i;

			entry->base_addr = 0;
			/* Search the six standard BARs */
			for (i = 0; i < 6; i++)
				if (disc_res->start >= pci_resource_start(pci_dev, i) &&
				   (disc_res->start <= pci_resource_end(pci_dev, i))) {
					entry->base_addr = pci_resource_start(pci_dev, i) +
							   header->base_offset;
					break;
				}
			if (!entry->base_addr)
				return -EINVAL;
		}

		break;
	case ACCESS_BARID:
		/*
		 * If another BAR was specified then the base offset
		 * represents the offset within that BAR, so retrieve the
		 * address from the parent PCI device and add the offset.
		 */
		entry->base_addr = pci_resource_start(pci_dev, bir) +
				   GET_ADDRESS(header->base_offset);
		break;
	default:
		dev_err(dev, "Unsupported access type %d\n",
			header->access_type);
		return -EINVAL;
	}

	entry->guid = header->guid;
	entry->size = header->size;

	return 0;
}
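
/*
 * Worked example with hypothetical values, assuming GET_BIR() extracts
 * the low 3 bits of base_offset and GET_ADDRESS() masks them off (as
 * the comment in intel_pmt_populate_entry() implies): a header with
 * access_type == ACCESS_BARID and base_offset == 0x8002 decodes to
 * bir == 2 and an address offset of 0x8000, giving
 *
 *	base_addr = pci_resource_start(pci_dev, 2) + 0x8000;
 */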

static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
				  struct intel_pmt_namespace *ns,
				  struct device *parent)
{
	struct resource res = {0};
	struct device *dev;
	int ret;

	ret = xa_alloc(ns->xa, &entry->devid, entry, PMT_XA_LIMIT, GFP_KERNEL);
	if (ret)
		return ret;

	dev = device_create(&intel_pmt_class, parent, MKDEV(0, 0), entry,
			    "%s%d", ns->name, entry->devid);
	if (IS_ERR(dev)) {
		dev_err(parent, "Could not create %s%d device node\n",
			ns->name, entry->devid);
		ret = PTR_ERR(dev);
		goto fail_dev_create;
	}

	entry->kobj = &dev->kobj;

	if (ns->attr_grp) {
		ret = sysfs_create_group(entry->kobj, ns->attr_grp);
		if (ret)
			goto fail_sysfs;
	}

	/* If size is 0, assume no data buffer, so no file needed */
	if (!entry->size)
		return 0;

	res.start = entry->base_addr;
	res.end = res.start + entry->size - 1;
	res.flags = IORESOURCE_MEM;

	entry->base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(entry->base)) {
		ret = PTR_ERR(entry->base);
		goto fail_ioremap;
	}

	sysfs_bin_attr_init(&entry->pmt_bin_attr);
	entry->pmt_bin_attr.attr.name = ns->name;
	entry->pmt_bin_attr.attr.mode = 0440;
	entry->pmt_bin_attr.mmap = intel_pmt_mmap;
	entry->pmt_bin_attr.read = intel_pmt_read;
	entry->pmt_bin_attr.size = entry->size;

	ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr);
	if (!ret)
		return 0;

fail_ioremap:
	if (ns->attr_grp)
		sysfs_remove_group(entry->kobj, ns->attr_grp);
fail_sysfs:
	device_unregister(dev);
fail_dev_create:
	xa_erase(ns->xa, entry->devid);

	return ret;
}

int intel_pmt_dev_create(struct intel_pmt_entry *entry,
			 struct intel_pmt_namespace *ns,
			 struct platform_device *pdev, int idx)
{
	struct intel_pmt_header header;
	struct resource *disc_res;
	int ret = -ENODEV;

	disc_res = platform_get_resource(pdev, IORESOURCE_MEM, idx);
	if (!disc_res)
		return ret;

	entry->disc_table = devm_platform_ioremap_resource(pdev, idx);
	if (IS_ERR(entry->disc_table))
		return PTR_ERR(entry->disc_table);

	ret = ns->pmt_header_decode(entry, &header, &pdev->dev);
	if (ret)
		return ret;

	ret = intel_pmt_populate_entry(entry, &header, &pdev->dev, disc_res);
	if (ret)
		return ret;

	return intel_pmt_dev_register(entry, ns, &pdev->dev);
}
EXPORT_SYMBOL_GPL(intel_pmt_dev_create);

void intel_pmt_dev_destroy(struct intel_pmt_entry *entry,
			   struct intel_pmt_namespace *ns)
{
	struct device *dev = kobj_to_dev(entry->kobj);

	if (entry->size)
		sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr);

	if (ns->attr_grp)
		sysfs_remove_group(entry->kobj, ns->attr_grp);

	device_unregister(dev);
	xa_erase(ns->xa, entry->devid);
}
EXPORT_SYMBOL_GPL(intel_pmt_dev_destroy);
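
/*
 * Minimal usage sketch for the exports above (hypothetical consumer;
 * the names below are illustrative, not part of this file). A
 * namespace driver fills in a struct intel_pmt_namespace with a name,
 * an xarray for device IDs and a header decoder, then registers one
 * entry per discovery table resource of its platform device:
 *
 *	static struct intel_pmt_namespace pmt_example_ns = {
 *		.name = "example",
 *		.xa = &example_xarray,
 *		.pmt_header_decode = example_header_decode,
 *	};
 *
 *	ret = intel_pmt_dev_create(entry, &pmt_example_ns, pdev, i);
 *	...
 *	intel_pmt_dev_destroy(entry, &pmt_example_ns);
 */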

static int __init pmt_class_init(void)
{
	return class_register(&intel_pmt_class);
}

static void __exit pmt_class_exit(void)
{
	class_unregister(&intel_pmt_class);
}

module_init(pmt_class_init);
module_exit(pmt_class_exit);

MODULE_AUTHOR("Alexander Duyck <alexander.h.duyck@linux.intel.com>");
MODULE_DESCRIPTION("Intel PMT Class driver");
MODULE_LICENSE("GPL v2");