// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
#include "hw-me-regs.h"
#include "hw-me.h"

/* mei_me_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
	{MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G35, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, MEI_ME_ICH_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, MEI_ME_ICH_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);

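/*
 * Each table entry above pairs an Intel device ID with a MEI_ME_*_CFG
 * selector stored in driver_data; mei_me_probe() maps it back to a
 * struct mei_cfg via mei_me_get_cfg(ent->driver_data).
 */
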
#ifdef CONFIG_PM
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

/**
 * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @cfg: per generation config
 *
 * Return: true if ME Interface is valid, false otherwise
 */
static bool mei_me_quirk_probe(struct pci_dev *pdev,
			       const struct mei_cfg *cfg)
{
	if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
		dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
		return false;
	}

	return true;
}

/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg;
	struct mei_device *dev;
	struct mei_me_hw *hw;
	unsigned int irqflags;
	int err;

	cfg = mei_me_get_cfg(ent->driver_data);
	if (!cfg)
		return -ENODEV;

	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev */
	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions and mapping IO device memory for mei driver */
	err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto end;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto end;
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto end;
	}
	hw = to_me_hw(dev);
	hw->mem_addr = pcim_iomap_table(pdev)[0];

	pci_enable_msi(pdev);

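	/*
	 * MSI may not be available; in that case the device falls back to
	 * the legacy INTx line.  IRQF_ONESHOT below keeps an MSI vector
	 * masked until the threaded handler completes, while a (possibly
	 * shared) INTx line is requested with IRQF_SHARED instead.
	 */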
	/* request and enable interrupt */
	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto end;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto stop;

	pci_set_drvdata(pdev, dev);

	/*
	 * MEI needs to resume from runtime suspend mode
	 * in order to perform the link reset flow upon system suspend.
	 */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);

	/*
	 * ME maps runtime suspend/resume to D0i states,
	 * hence we need to bypass the native PCI runtime service, which
	 * would eventually bring the device into D3cold/hot state;
	 * the mei device cannot wake up from D3, unlike from D0i3.
	 * To get around the PCI device's native runtime pm,
	 * ME uses runtime pm domain handlers which take precedence
	 * over the driver's pm handlers.
	 */
	mei_me_set_pm_domain(dev);

	if (mei_pg_is_enabled(dev)) {
		pm_runtime_put_noidle(&pdev->dev);
		if (hw->d0i3_supported)
			pm_runtime_allow(&pdev->dev);
	}

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

stop:
	mei_stop(dev);
release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_me_shutdown - Device Shutdown Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_shutdown is called from the reboot notifier;
 * it's a simplified version of remove, so we go down
 * faster.
 */
static void mei_me_shutdown(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	dev_dbg(&pdev->dev, "shutdown\n");
	mei_stop(dev);

	mei_me_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
}

/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	if (mei_pg_is_enabled(dev))
		pm_runtime_get_noresume(&pdev->dev);

	dev_dbg(&pdev->dev, "stop\n");
	mei_stop(dev);

	mei_me_unset_pm_domain(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);

	mei_deregister(dev);
}

#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

static int mei_me_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	unsigned int irqflags;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	/* request and enable interrupt */
	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	err = mei_restart(dev);
	if (err)
		return err;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
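/*
 * Runtime PM idle callback: autosuspend is requested only once the write
 * queues are idle.  The -EBUSY return tells the PM core not to suspend
 * the device directly from the idle notification.
 */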
static int mei_me_pm_runtime_idle(struct device *device)
{
	struct mei_device *dev;

	dev_dbg(device, "rpm: me: runtime_idle\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;
	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}

static int mei_me_pm_runtime_suspend(struct device *device)
{
	struct mei_device *dev;
	int ret;

	dev_dbg(device, "rpm: me: runtime suspend\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_me_pg_enter_sync(dev);
	else
		ret = -EAGAIN;

	mutex_unlock(&dev->device_lock);

	dev_dbg(device, "rpm: me: runtime suspend ret=%d\n", ret);

	if (ret && ret != -EAGAIN)
		schedule_work(&dev->reset_work);

	return ret;
}

static int mei_me_pm_runtime_resume(struct device *device)
{
	struct mei_device *dev;
	int ret;

	dev_dbg(device, "rpm: me: runtime resume\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	ret = mei_me_pg_exit_sync(dev);

	mutex_unlock(&dev->device_lock);

	dev_dbg(device, "rpm: me: runtime resume ret = %d\n", ret);

	if (ret)
		schedule_work(&dev->reset_work);

	return ret;
}

/**
 * mei_me_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;

		dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
	}
}

/**
 * mei_me_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev_pm_domain_set(dev->dev, NULL);
}

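/*
 * System sleep goes through mei_me_pci_suspend()/mei_me_pci_resume(),
 * while runtime PM is normally handled by the power domain callbacks
 * installed in mei_me_set_pm_domain(), which take precedence over the
 * runtime ops listed here.
 */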
static const struct dev_pm_ops mei_me_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
				mei_me_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_me_pm_runtime_suspend,
		mei_me_pm_runtime_resume,
		mei_me_pm_runtime_idle)
};

#define MEI_ME_PM_OPS	(&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS	NULL
#endif /* CONFIG_PM */
/*
 *  PCI driver structure
 */
static struct pci_driver mei_me_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_me_pci_tbl,
	.probe = mei_me_probe,
	.remove = mei_me_remove,
	.shutdown = mei_me_shutdown,
	.driver.pm = MEI_ME_PM_OPS,
	.driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};

module_pci_driver(mei_me_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");