1 /* 2 * 3 * Intel Management Engine Interface (Intel MEI) Linux driver 4 * Copyright (c) 2013-2014, Intel Corporation. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms and conditions of the GNU General Public License, 8 * version 2, as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 * more details. 14 * 15 */ 16 17 #include <linux/module.h> 18 #include <linux/kernel.h> 19 #include <linux/device.h> 20 #include <linux/fs.h> 21 #include <linux/errno.h> 22 #include <linux/types.h> 23 #include <linux/pci.h> 24 #include <linux/init.h> 25 #include <linux/sched.h> 26 #include <linux/uuid.h> 27 #include <linux/jiffies.h> 28 #include <linux/interrupt.h> 29 #include <linux/workqueue.h> 30 #include <linux/pm_domain.h> 31 #include <linux/pm_runtime.h> 32 33 #include <linux/mei.h> 34 35 36 #include "mei_dev.h" 37 #include "hw-txe.h" 38 39 static const struct pci_device_id mei_txe_pci_tbl[] = { 40 {PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */ 41 {PCI_VDEVICE(INTEL, 0x2298)}, /* Cherrytrail */ 42 43 {0, } 44 }; 45 MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl); 46 47 #ifdef CONFIG_PM 48 static inline void mei_txe_set_pm_domain(struct mei_device *dev); 49 static inline void mei_txe_unset_pm_domain(struct mei_device *dev); 50 #else 51 static inline void mei_txe_set_pm_domain(struct mei_device *dev) {} 52 static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {} 53 #endif /* CONFIG_PM */ 54 55 /** 56 * mei_txe_probe - Device Initialization Routine 57 * 58 * @pdev: PCI device structure 59 * @ent: entry in mei_txe_pci_tbl 60 * 61 * Return: 0 on success, <0 on failure. 62 */ 63 static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 64 { 65 struct mei_device *dev; 66 struct mei_txe_hw *hw; 67 const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR); 68 int err; 69 70 /* enable pci dev */ 71 err = pcim_enable_device(pdev); 72 if (err) { 73 dev_err(&pdev->dev, "failed to enable pci device.\n"); 74 goto end; 75 } 76 /* set PCI host mastering */ 77 pci_set_master(pdev); 78 /* pci request regions and mapping IO device memory for mei driver */ 79 err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME); 80 if (err) { 81 dev_err(&pdev->dev, "failed to get pci regions.\n"); 82 goto end; 83 } 84 85 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 86 if (err) { 87 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 88 if (err) { 89 dev_err(&pdev->dev, "No suitable DMA available.\n"); 90 goto end; 91 } 92 } 93 94 /* allocates and initializes the mei dev structure */ 95 dev = mei_txe_dev_init(pdev); 96 if (!dev) { 97 err = -ENOMEM; 98 goto end; 99 } 100 hw = to_txe_hw(dev); 101 hw->mem_addr = pcim_iomap_table(pdev); 102 103 pci_enable_msi(pdev); 104 105 /* clear spurious interrupts */ 106 mei_clear_interrupts(dev); 107 108 /* request and enable interrupt */ 109 if (pci_dev_msi_enabled(pdev)) 110 err = request_threaded_irq(pdev->irq, 111 NULL, 112 mei_txe_irq_thread_handler, 113 IRQF_ONESHOT, KBUILD_MODNAME, dev); 114 else 115 err = request_threaded_irq(pdev->irq, 116 mei_txe_irq_quick_handler, 117 mei_txe_irq_thread_handler, 118 IRQF_SHARED, KBUILD_MODNAME, dev); 119 if (err) { 120 dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n", 121 pdev->irq); 122 goto end; 123 } 124 125 if (mei_start(dev)) { 126 dev_err(&pdev->dev, "init hw failure.\n"); 127 err = -ENODEV; 128 goto release_irq; 129 } 130 131 pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT); 132 pm_runtime_use_autosuspend(&pdev->dev); 133 134 err = mei_register(dev, &pdev->dev); 135 if (err) 136 goto stop; 137 138 pci_set_drvdata(pdev, dev); 139 140 /* 141 * For not wake-able HW runtime pm framework 142 * can't be used on pci device level. 143 * Use domain runtime pm callbacks instead. 144 */ 145 if (!pci_dev_run_wake(pdev)) 146 mei_txe_set_pm_domain(dev); 147 148 pm_runtime_put_noidle(&pdev->dev); 149 150 return 0; 151 152 stop: 153 mei_stop(dev); 154 release_irq: 155 mei_cancel_work(dev); 156 mei_disable_interrupts(dev); 157 free_irq(pdev->irq, dev); 158 end: 159 dev_err(&pdev->dev, "initialization failed.\n"); 160 return err; 161 } 162 163 /** 164 * mei_txe_remove - Device Removal Routine 165 * 166 * @pdev: PCI device structure 167 * 168 * mei_remove is called by the PCI subsystem to alert the driver 169 * that it should release a PCI device. 170 */ 171 static void mei_txe_remove(struct pci_dev *pdev) 172 { 173 struct mei_device *dev; 174 175 dev = pci_get_drvdata(pdev); 176 if (!dev) { 177 dev_err(&pdev->dev, "mei: dev == NULL\n"); 178 return; 179 } 180 181 pm_runtime_get_noresume(&pdev->dev); 182 183 mei_stop(dev); 184 185 if (!pci_dev_run_wake(pdev)) 186 mei_txe_unset_pm_domain(dev); 187 188 mei_disable_interrupts(dev); 189 free_irq(pdev->irq, dev); 190 191 mei_deregister(dev); 192 } 193 194 195 #ifdef CONFIG_PM_SLEEP 196 static int mei_txe_pci_suspend(struct device *device) 197 { 198 struct pci_dev *pdev = to_pci_dev(device); 199 struct mei_device *dev = pci_get_drvdata(pdev); 200 201 if (!dev) 202 return -ENODEV; 203 204 dev_dbg(&pdev->dev, "suspend\n"); 205 206 mei_stop(dev); 207 208 mei_disable_interrupts(dev); 209 210 free_irq(pdev->irq, dev); 211 pci_disable_msi(pdev); 212 213 return 0; 214 } 215 216 static int mei_txe_pci_resume(struct device *device) 217 { 218 struct pci_dev *pdev = to_pci_dev(device); 219 struct mei_device *dev; 220 int err; 221 222 dev = pci_get_drvdata(pdev); 223 if (!dev) 224 return -ENODEV; 225 226 pci_enable_msi(pdev); 227 228 mei_clear_interrupts(dev); 229 230 /* request and enable interrupt */ 231 if (pci_dev_msi_enabled(pdev)) 232 err = request_threaded_irq(pdev->irq, 233 NULL, 234 mei_txe_irq_thread_handler, 235 IRQF_ONESHOT, KBUILD_MODNAME, dev); 236 else 237 err = request_threaded_irq(pdev->irq, 238 mei_txe_irq_quick_handler, 239 mei_txe_irq_thread_handler, 240 IRQF_SHARED, KBUILD_MODNAME, dev); 241 if (err) { 242 dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n", 243 pdev->irq); 244 return err; 245 } 246 247 err = mei_restart(dev); 248 249 return err; 250 } 251 #endif /* CONFIG_PM_SLEEP */ 252 253 #ifdef CONFIG_PM 254 static int mei_txe_pm_runtime_idle(struct device *device) 255 { 256 struct pci_dev *pdev = to_pci_dev(device); 257 struct mei_device *dev; 258 259 dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n"); 260 261 dev = pci_get_drvdata(pdev); 262 if (!dev) 263 return -ENODEV; 264 if (mei_write_is_idle(dev)) 265 pm_runtime_autosuspend(device); 266 267 return -EBUSY; 268 } 269 static int mei_txe_pm_runtime_suspend(struct device *device) 270 { 271 struct pci_dev *pdev = to_pci_dev(device); 272 struct mei_device *dev; 273 int ret; 274 275 dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n"); 276 277 dev = pci_get_drvdata(pdev); 278 if (!dev) 279 return -ENODEV; 280 281 mutex_lock(&dev->device_lock); 282 283 if (mei_write_is_idle(dev)) 284 ret = mei_txe_aliveness_set_sync(dev, 0); 285 else 286 ret = -EAGAIN; 287 288 /* 289 * If everything is okay we're about to enter PCI low 290 * power state (D3) therefor we need to disable the 291 * interrupts towards host. 292 * However if device is not wakeable we do not enter 293 * D-low state and we need to keep the interrupt kicking 294 */ 295 if (!ret && pci_dev_run_wake(pdev)) 296 mei_disable_interrupts(dev); 297 298 dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); 299 300 mutex_unlock(&dev->device_lock); 301 302 if (ret && ret != -EAGAIN) 303 schedule_work(&dev->reset_work); 304 305 return ret; 306 } 307 308 static int mei_txe_pm_runtime_resume(struct device *device) 309 { 310 struct pci_dev *pdev = to_pci_dev(device); 311 struct mei_device *dev; 312 int ret; 313 314 dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n"); 315 316 dev = pci_get_drvdata(pdev); 317 if (!dev) 318 return -ENODEV; 319 320 mutex_lock(&dev->device_lock); 321 322 mei_enable_interrupts(dev); 323 324 ret = mei_txe_aliveness_set_sync(dev, 1); 325 326 mutex_unlock(&dev->device_lock); 327 328 dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret); 329 330 if (ret) 331 schedule_work(&dev->reset_work); 332 333 return ret; 334 } 335 336 /** 337 * mei_txe_set_pm_domain - fill and set pm domain structure for device 338 * 339 * @dev: mei_device 340 */ 341 static inline void mei_txe_set_pm_domain(struct mei_device *dev) 342 { 343 struct pci_dev *pdev = to_pci_dev(dev->dev); 344 345 if (pdev->dev.bus && pdev->dev.bus->pm) { 346 dev->pg_domain.ops = *pdev->dev.bus->pm; 347 348 dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend; 349 dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume; 350 dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle; 351 352 dev_pm_domain_set(&pdev->dev, &dev->pg_domain); 353 } 354 } 355 356 /** 357 * mei_txe_unset_pm_domain - clean pm domain structure for device 358 * 359 * @dev: mei_device 360 */ 361 static inline void mei_txe_unset_pm_domain(struct mei_device *dev) 362 { 363 /* stop using pm callbacks if any */ 364 dev_pm_domain_set(dev->dev, NULL); 365 } 366 367 static const struct dev_pm_ops mei_txe_pm_ops = { 368 SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend, 369 mei_txe_pci_resume) 370 SET_RUNTIME_PM_OPS( 371 mei_txe_pm_runtime_suspend, 372 mei_txe_pm_runtime_resume, 373 mei_txe_pm_runtime_idle) 374 }; 375 376 #define MEI_TXE_PM_OPS (&mei_txe_pm_ops) 377 #else 378 #define MEI_TXE_PM_OPS NULL 379 #endif /* CONFIG_PM */ 380 381 /* 382 * PCI driver structure 383 */ 384 static struct pci_driver mei_txe_driver = { 385 .name = KBUILD_MODNAME, 386 .id_table = mei_txe_pci_tbl, 387 .probe = mei_txe_probe, 388 .remove = mei_txe_remove, 389 .shutdown = mei_txe_remove, 390 .driver.pm = MEI_TXE_PM_OPS, 391 }; 392 393 module_pci_driver(mei_txe_driver); 394 395 MODULE_AUTHOR("Intel Corporation"); 396 MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface"); 397 MODULE_LICENSE("GPL v2"); 398