/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
#include "hw-me-regs.h"
#include "hw-me.h"

/* mei_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
	{MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G35, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, MEI_ME_ICH_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, MEI_ME_ICH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, MEI_ME_ICH_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);

#ifdef CONFIG_PM
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

/**
 * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @cfg: per generation config
 *
 * Return: true if ME Interface is valid, false otherwise
 */
static bool mei_me_quirk_probe(struct pci_dev *pdev,
				const struct mei_cfg *cfg)
{
	if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
		dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
		return false;
	}

	return true;
}

/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg;
	struct mei_device *dev;
	struct mei_me_hw *hw;
	unsigned int irqflags;
	int err;

	cfg = mei_me_get_cfg(ent->driver_data);
	if (!cfg)
		return -ENODEV;

	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev */
	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions and mapping IO device memory for mei driver */
	err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto end;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto end;
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto end;
	}
	hw = to_me_hw(dev);
	hw->mem_addr = pcim_iomap_table(pdev)[0];

	pci_enable_msi(pdev);
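
	/*
	 * Note: an MSI vector is exclusive to this device, so IRQF_ONESHOT
	 * keeps the interrupt masked until the threaded handler has run;
	 * a legacy INTx line may be shared with other devices, hence
	 * IRQF_SHARED in that case.
	 */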
	/* request and enable interrupt */
	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto end;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto stop;

	pci_set_drvdata(pdev, dev);

	/*
	 * MEI needs to be resumed from runtime suspend mode
	 * in order to perform link reset flow upon system suspend.
	 */
	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;

	/*
	 * ME maps runtime suspend/resume to D0i states,
	 * hence we need to go around native PCI runtime service which
	 * eventually brings the device into D3cold/hot state,
	 * but the mei device cannot wake up from D3 unlike from D0i3.
	 * To get around the PCI device native runtime pm,
	 * ME uses runtime pm domain handlers which take precedence
	 * over the driver's pm handlers.
	 */
	mei_me_set_pm_domain(dev);

	if (mei_pg_is_enabled(dev))
		pm_runtime_put_noidle(&pdev->dev);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

stop:
	mei_stop(dev);
release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_me_shutdown - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_shutdown is called from the reboot notifier;
 * it's a simplified version of remove so we go down
 * faster.
 */
static void mei_me_shutdown(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	dev_dbg(&pdev->dev, "shutdown\n");
	mei_stop(dev);

	mei_me_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
}

/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	if (mei_pg_is_enabled(dev))
		pm_runtime_get_noresume(&pdev->dev);

	dev_dbg(&pdev->dev, "stop\n");
	mei_stop(dev);

	mei_me_unset_pm_domain(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);

	mei_deregister(dev);
}

#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}
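
/*
 * On system resume the MSI and the IRQ must be re-acquired before the
 * hardware is restarted, since mei_me_pci_suspend() released both.
 */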
static int mei_me_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	unsigned int irqflags;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	/* request and enable interrupt */
	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	err = mei_restart(dev);
	if (err)
		return err;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mei_me_pm_runtime_idle(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;

	dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;
	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}

static int mei_me_pm_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_me_pg_enter_sync(dev);
	else
		ret = -EAGAIN;

	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret);

	if (ret && ret != -EAGAIN)
		schedule_work(&dev->reset_work);

	return ret;
}

static int mei_me_pm_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: me: runtime resume\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	ret = mei_me_pg_exit_sync(dev);

	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret);

	if (ret)
		schedule_work(&dev->reset_work);

	return ret;
}

/**
 * mei_me_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;

		dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
	}
}

/**
 * mei_me_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev_pm_domain_set(dev->dev, NULL);
}
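
/*
 * The runtime PM entries below reference the same handlers that
 * mei_me_set_pm_domain() installs via the device PM domain; the domain
 * callbacks take precedence over these driver callbacks (see the
 * comment in mei_me_probe()).
 */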
static const struct dev_pm_ops mei_me_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
				mei_me_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_me_pm_runtime_suspend,
		mei_me_pm_runtime_resume,
		mei_me_pm_runtime_idle)
};

#define MEI_ME_PM_OPS	(&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS	NULL
#endif /* CONFIG_PM */
/*
 *  PCI driver structure
 */
static struct pci_driver mei_me_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_me_pci_tbl,
	.probe = mei_me_probe,
	.remove = mei_me_remove,
	.shutdown = mei_me_shutdown,
	.driver.pm = MEI_ME_PM_OPS,
	.driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};

module_pci_driver(mei_me_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");