/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hw-txe.h"

static const struct pci_device_id mei_txe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */

	{0, }
};
MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);

#ifdef CONFIG_PM_RUNTIME
static inline void mei_txe_set_pm_domain(struct mei_device *dev);
static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM_RUNTIME */

static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
{
	int i;

	for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
		if (hw->mem_addr[i]) {
			pci_iounmap(pdev, hw->mem_addr[i]);
			hw->mem_addr[i] = NULL;
		}
	}
}

/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;
	int err;
	int i;

	/* enable pci device */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

	/* prefer a 36-bit DMA mask, fall back to 32 bits if not supported */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available.\n");
			goto release_regions;
		}
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_txe_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_txe_hw(dev);

	/* mapping IO device memory */
	for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
		hw->mem_addr[i] = pci_iomap(pdev, i, 0);
		if (!hw->mem_addr[i]) {
			dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
			err = -ENOMEM;
			goto free_device;
		}
	}

	pci_enable_msi(pdev);

	/* clear spurious interrupts */
	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_txe_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_txe_irq_quick_handler,
			mei_txe_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto free_device;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	/*
	 * For HW that is not wake capable the runtime PM framework
	 * can't be used on the PCI device level.
	 * Use PM domain runtime callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_txe_set_pm_domain(dev);

	pm_runtime_put_noidle(&pdev->dev);

	return 0;

release_irq:

	mei_cancel_work(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

free_device:
	mei_txe_pci_iounmap(pdev, hw);

	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_txe_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_txe_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;

	dev = pci_get_drvdata(pdev);
	if (!dev) {
		dev_err(&pdev->dev, "mei: dev == NULL\n");
		return;
	}

	pm_runtime_get_noresume(&pdev->dev);

	hw = to_txe_hw(dev);

	mei_stop(dev);

	if (!pci_dev_run_wake(pdev))
		mei_txe_unset_pm_domain(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	pci_set_drvdata(pdev, NULL);

	mei_txe_pci_iounmap(pdev, hw);

	mei_deregister(dev);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

#ifdef CONFIG_PM_SLEEP
static int mei_txe_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

static int mei_txe_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_txe_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_txe_irq_quick_handler,
			mei_txe_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	err = mei_restart(dev);

	return err;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_RUNTIME
static int mei_txe_pm_runtime_idle(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;

	dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;
	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}

static int mei_txe_pm_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_txe_aliveness_set_sync(dev, 0);
	else
		ret = -EAGAIN;

	/*
	 * If everything is okay we're about to enter the PCI low
	 * power state (D3), therefore we need to disable the
	 * interrupts towards the host.
	 * However, if the device is not wake capable we do not enter
	 * the D-low state and we need to keep the interrupts kicking.
	 */
	if (!ret && pci_dev_run_wake(pdev))
		mei_disable_interrupts(dev);

	dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);

	mutex_unlock(&dev->device_lock);
	return ret;
}

static int mei_txe_pm_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	mei_enable_interrupts(dev);

	ret = mei_txe_aliveness_set_sync(dev, 1);

	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret);

	return ret;
}

/**
 * mei_txe_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		/* inherit the bus PM ops, override only the runtime callbacks */
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle;

		pdev->dev.pm_domain = &dev->pg_domain;
	}
}

/**
 * mei_txe_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev->dev->pm_domain = NULL;
}
#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM
static const struct dev_pm_ops mei_txe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
				mei_txe_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_txe_pm_runtime_suspend,
		mei_txe_pm_runtime_resume,
		mei_txe_pm_runtime_idle)
};

#define MEI_TXE_PM_OPS	(&mei_txe_pm_ops)
#else
#define MEI_TXE_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 *  PCI driver structure
 */
static struct pci_driver mei_txe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_txe_pci_tbl,
	.probe = mei_txe_probe,
	.remove = mei_txe_remove,
	.shutdown = mei_txe_remove,
	.driver.pm = MEI_TXE_PM_OPS,
};

module_pci_driver(mei_txe_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
MODULE_LICENSE("GPL v2");