xref: /openbmc/linux/drivers/misc/mei/pci-me.c (revision 6189f1b0)
/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
#include "hw-me-regs.h"
#include "hw-me.h"

/* mei_me_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
	{MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G35, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, mei_me_legacy_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, mei_me_ich_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, mei_me_pch_cpt_pbg_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, mei_me_pch_cpt_pbg_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_pch8_sps_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_pch8_sps_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_pch8_sps_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch8_cfg)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);
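/*
 * Each table entry above pairs an Intel ME PCI device ID with a
 * per-generation configuration (struct mei_cfg).  The MEI_PCI_DEVICE()
 * macro, defined in one of the included mei headers, is expected to stash
 * a pointer to that config in pci_device_id.driver_data, which
 * mei_me_probe() casts back.  Roughly (an illustrative sketch, not the
 * literal macro):
 *
 *	#define MEI_PCI_DEVICE(dev, cfg)				\
 *		.vendor = PCI_VENDOR_ID_INTEL, .device = (dev),		\
 *		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,	\
 *		.driver_data = (kernel_ulong_t)&(cfg)
 */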

#ifdef CONFIG_PM
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

/**
 * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @cfg: per-generation config
 *
 * Return: true if the ME interface is valid, false otherwise
 */
static bool mei_me_quirk_probe(struct pci_dev *pdev,
				const struct mei_cfg *cfg)
{
	if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
		dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
		return false;
	}

	return true;
}
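/*
 * A cfg->quirk_probe hook, when present, lets a generation reject devices
 * whose firmware variant does not expose a usable host interface.  As a
 * rough illustration (the names below are made up, not this driver's
 * actual callbacks), such a hook reads a firmware status word from PCI
 * config space and returns true to skip binding:
 *
 *	static bool mei_me_fw_type_foo(struct pci_dev *pdev)
 *	{
 *		u32 fw_status;
 *
 *		pci_read_config_dword(pdev, PCI_CFG_HFS_1, &fw_status);
 *		return (fw_status & MEI_FOO_FW_TYPE_MASK) == MEI_FOO_FW_TYPE;
 *	}
 */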

/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int err;

	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

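	/*
	 * Prefer 64-bit DMA and coherent masks; if the platform cannot
	 * honour them, fall back to 32-bit addressing.  A remaining
	 * non-zero err here means no usable DMA configuration was found.
	 */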
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

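	/*
	 * With MSI the interrupt line is exclusive to this device, so no
	 * quick (hard-irq) handler is needed: IRQF_ONESHOT keeps the line
	 * masked until the threaded handler finishes.  On a shared legacy
	 * line the quick handler must check and ack the interrupt first.
	 */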
	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

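	/*
	 * Arm runtime-PM autosuspend with the driver's default timeout
	 * (in milliseconds) so the device can power-gate after a period
	 * of inactivity.
	 */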
	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	/*
	 * For hardware that is not wake capable, the runtime PM framework
	 * cannot be used at the PCI device level.  Use the domain runtime
	 * PM callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_me_set_pm_domain(dev);

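	/*
	 * Drop the initial runtime-PM usage count only when power gating
	 * is supported; mei_me_remove() takes it back with
	 * pm_runtime_get_noresume() under the same condition.
	 */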
	if (mei_pg_is_enabled(dev))
		pm_runtime_put_noidle(&pdev->dev);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

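	/* Error unwind: undo the setup steps in reverse order. */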
release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	if (mei_pg_is_enabled(dev))
		pm_runtime_get_noresume(&pdev->dev);

	hw = to_me_hw(dev);

	dev_dbg(&pdev->dev, "stop\n");
	mei_stop(dev);

	if (!pci_dev_run_wake(pdev))
		mei_me_unset_pm_domain(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	if (hw->mem_addr)
		pci_iounmap(pdev, hw->mem_addr);

	mei_deregister(dev);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

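	/*
	 * Release the interrupt and MSI here; mei_me_pci_resume()
	 * re-enables MSI and re-requests the IRQ on the way back up.
	 */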
	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

static int mei_me_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
				pdev->irq);
		return err;
	}

	err = mei_restart(dev);
	if (err)
		return err;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mei_me_pm_runtime_idle(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;

	dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;
	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

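	/*
	 * Always return non-zero so the PM core does not suspend the
	 * device directly from the idle callback; the autosuspend path
	 * above handles the actual transition when the device is idle.
	 */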
	return -EBUSY;
}

static int mei_me_pm_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

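	/*
	 * Only enter power gating when the write queues are idle;
	 * otherwise report -EAGAIN so the PM core retries later.
	 */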
	if (mei_write_is_idle(dev))
		ret = mei_me_pg_enter_sync(dev);
	else
		ret = -EAGAIN;

	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret);

	return ret;
}

static int mei_me_pm_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: me: runtime resume\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	ret = mei_me_pg_exit_sync(dev);

	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret);

	return ret;
}

/**
 * mei_me_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

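	/*
	 * Copy the PCI bus PM ops and override only the runtime callbacks,
	 * so system sleep still goes through the regular PCI bus path.
	 */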
	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;

		pdev->dev.pm_domain = &dev->pg_domain;
	}
}

/**
 * mei_me_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev->dev->pm_domain = NULL;
}

static const struct dev_pm_ops mei_me_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
				mei_me_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_me_pm_runtime_suspend,
		mei_me_pm_runtime_resume,
		mei_me_pm_runtime_idle)
};
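/*
 * SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() only populate their
 * callbacks when CONFIG_PM_SLEEP / CONFIG_PM are enabled, so the same
 * structure is safe to build in either configuration.
 */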

#define MEI_ME_PM_OPS	(&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 *  PCI driver structure
 */
static struct pci_driver mei_me_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_me_pci_tbl,
	.probe = mei_me_probe,
	.remove = mei_me_remove,
	.shutdown = mei_me_remove,
	.driver.pm = MEI_ME_PM_OPS,
};

module_pci_driver(mei_me_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");