xref: /openbmc/linux/drivers/misc/mei/pci-me.c (revision 92a76f6d)
/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
#include "hw-me-regs.h"
#include "hw-me.h"

/* mei_me_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
	{MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G35, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, mei_me_legacy_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, mei_me_ich_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, mei_me_pch_cpt_pbg_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, mei_me_pch_cpt_pbg_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_pch8_sps_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_pch8_sps_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_pch8_sps_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch8_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},

	/* required last entry */
	{0, }
};

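/*
 * Export the ID table so userspace module loaders (e.g. udev/modprobe)
 * can autoload this driver when a matching PCI device is discovered.
 */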
MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);

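/*
 * Runtime PM domain hooks: the real implementations live in the CONFIG_PM
 * section further down; the empty stubs below let probe/remove call these
 * helpers unconditionally when power management is compiled out.
 */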
#ifdef CONFIG_PM
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

/**
 * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @cfg: per generation config
 *
 * Return: true if ME Interface is valid, false otherwise
 */
static bool mei_me_quirk_probe(struct pci_dev *pdev,
				const struct mei_cfg *cfg)
{
	if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
		dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
		return false;
	}

	return true;
}

/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
	struct mei_device *dev;
	struct mei_me_hw *hw;
	unsigned int irqflags;
	int err;

	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

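	/*
	 * Prefer a 64-bit DMA mask and fall back to 32-bit masks when the
	 * platform rejects the wider one; the probe is aborted below only if
	 * the fallback fails as well.
	 */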
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/*
	 * Request and enable the interrupt: an MSI vector is exclusive to
	 * this device, so one-shot threaded handling is used; a legacy
	 * interrupt line may be shared with other devices.
	 */
	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto disable_msi;
	}

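	/*
	 * Bring the device up: mei_start() initializes the hardware and the
	 * host/firmware link; on failure the device is unusable and the probe
	 * is unwound.
	 */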
	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto stop;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	/*
	 * For hardware that cannot generate wake events, the runtime PM
	 * framework can't be used at the PCI device level.
	 * Use domain runtime PM callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_me_set_pm_domain(dev);

	if (mei_pg_is_enabled(dev))
		pm_runtime_put_noidle(&pdev->dev);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

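	/*
	 * Error unwind: each label below releases the resources acquired
	 * before the failing step, in reverse order of acquisition.
	 */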
stop:
	mei_stop(dev);
release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

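	/*
	 * Balance the pm_runtime_put_noidle() from probe when power gating is
	 * supported, so the runtime PM usage count is consistent before the
	 * device is torn down.
	 */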
	if (mei_pg_is_enabled(dev))
		pm_runtime_get_noresume(&pdev->dev);

	hw = to_me_hw(dev);

	dev_dbg(&pdev->dev, "stop\n");
	mei_stop(dev);

	if (!pci_dev_run_wake(pdev))
		mei_me_unset_pm_domain(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	if (hw->mem_addr)
		pci_iounmap(pdev, hw->mem_addr);

	mei_deregister(dev);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

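/*
 * System resume mirrors suspend: re-enable MSI and re-request the interrupt
 * that mei_me_pci_suspend() released, then restart the device and its timer.
 */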
static int mei_me_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	unsigned int irqflags;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	/* request and enable interrupt */
	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
				pdev->irq);
		return err;
	}

	err = mei_restart(dev);
	if (err)
		return err;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mei_me_pm_runtime_idle(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;

	dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;
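
	/*
	 * Request an autosuspend only while no writes are in flight; returning
	 * -EBUSY tells the PM core not to suspend the device on its own from
	 * the idle path.
	 */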
	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}

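/*
 * Runtime suspend enters ME power gating only when the write queues are
 * idle; -EAGAIN lets the PM core try again later.
 */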
static int mei_me_pm_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_me_pg_enter_sync(dev);
	else
		ret = -EAGAIN;

	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret);

	return ret;
}

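/* Runtime resume brings the device back out of ME power gating. */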
static int mei_me_pm_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: me: runtime resume\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	ret = mei_me_pg_exit_sync(dev);

	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret);

	return ret;
}

/**
 * mei_me_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

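	/*
	 * Inherit the PCI bus PM ops and override only the runtime callbacks,
	 * so system sleep still goes through the bus while runtime PM uses
	 * this driver's power-gating aware handlers.
	 */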
	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;

		dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
	}
}

/**
 * mei_me_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev_pm_domain_set(dev->dev, NULL);
}

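/*
 * SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() only fill in their
 * entries when CONFIG_PM_SLEEP and CONFIG_PM are enabled, matching the
 * callbacks defined above.
 */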
static const struct dev_pm_ops mei_me_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
				mei_me_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_me_pm_runtime_suspend,
		mei_me_pm_runtime_resume,
		mei_me_pm_runtime_idle)
};

#define MEI_ME_PM_OPS	(&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 *  PCI driver structure
 */
static struct pci_driver mei_me_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_me_pci_tbl,
	.probe = mei_me_probe,
	.remove = mei_me_remove,
	.shutdown = mei_me_remove,
	.driver.pm = MEI_ME_PM_OPS,
};

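/*
 * module_pci_driver() expands to the module init/exit boilerplate that
 * registers and unregisters mei_me_driver with the PCI core.
 */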
module_pci_driver(mei_me_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");