// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe Native PME support
 *
 * Copyright (C) 2007 - 2009 Intel Corp
 * Copyright (C) 2007 - 2009 Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 */

#define dev_fmt(fmt) "PME: " fmt

#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

#include "../pci.h"
#include "portdrv.h"

/*
 * If this switch is set, MSI will not be used for PCIe PME signaling.  This
 * causes the PCIe port driver to use INTx interrupts only, but it turns out
 * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based
 * wake-up from system sleep states.
 */
bool pcie_pme_msi_disabled;

static int __init pcie_pme_setup(char *str)
{
	if (!strncmp(str, "nomsi", 5))
		pcie_pme_msi_disabled = true;

	return 1;
}
__setup("pcie_pme=", pcie_pme_setup);
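/*
 * Example: booting with "pcie_pme=nomsi" on the kernel command line sets
 * pcie_pme_msi_disabled, so PME signaling falls back to INTx interrupts.
 */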

struct pcie_pme_service_data {
	spinlock_t lock; /* Protects noirq and serializes Root Status accesses */
	struct pcie_device *srv; /* PME service device */
	struct work_struct work; /* Bottom half handling PME requests */
	bool noirq; /* If set, keep the PME interrupt disabled. */
};

/**
 * pcie_pme_interrupt_enable - Enable/disable PCIe PME interrupt generation.
 * @dev: PCIe root port or event collector.
 * @enable: Enable or disable the interrupt.
 */
void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
{
	if (enable)
		pcie_capability_set_word(dev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_PMEIE);
	else
		pcie_capability_clear_word(dev, PCI_EXP_RTCTL,
					   PCI_EXP_RTCTL_PMEIE);
}

/**
 * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#.
 * @bus: PCI bus to scan.
 *
 * Scan given PCI bus and all buses under it for devices asserting PME#.
 */
static bool pcie_pme_walk_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	bool ret = false;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip PCIe devices in case we started from a root port. */
		if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
			if (dev->pme_poll)
				dev->pme_poll = false;

			pci_wakeup_event(dev);
			pm_request_resume(&dev->dev);
			ret = true;
		}

		if (dev->subordinate && pcie_pme_walk_bus(dev->subordinate))
			ret = true;
	}

	return ret;
}

/**
 * pcie_pme_from_pci_bridge - Check if PCIe-PCI bridge generated a PME.
 * @bus: Secondary bus of the bridge.
 * @devfn: Device/function number to check.
 *
 * PME from PCI devices under a PCIe-PCI bridge may be converted to an in-band
 * PCIe PME message.  In that case the bridge should use the Requester ID of
 * device/function number 0 on its secondary bus.
 */
static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn)
{
	struct pci_dev *dev;
	bool found = false;

	if (devfn)
		return false;

	dev = pci_dev_get(bus->self);
	if (!dev)
		return false;

	if (pci_is_pcie(dev) && pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) {
		down_read(&pci_bus_sem);
		if (pcie_pme_walk_bus(bus))
			found = true;
		up_read(&pci_bus_sem);
	}

	pci_dev_put(dev);
	return found;
}

/**
 * pcie_pme_handle_request - Find device that generated PME and handle it.
 * @port: Root port or event collector that generated the PME interrupt.
 * @req_id: PCIe Requester ID of the device that generated the PME.
 */
static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
{
	u8 busnr = req_id >> 8, devfn = req_id & 0xff;
	struct pci_bus *bus;
	struct pci_dev *dev;
	bool found = false;

	/* First, check if the PME is from the root port itself. */
	if (port->devfn == devfn && port->bus->number == busnr) {
		if (port->pme_poll)
			port->pme_poll = false;

		if (pci_check_pme_status(port)) {
			pm_request_resume(&port->dev);
			found = true;
		} else {
			/*
			 * Apparently, the root port generated the PME on behalf
			 * of a non-PCIe device downstream.  If this is done by
			 * a root port, the Requester ID field in its status
			 * register may contain either the root port's, or the
			 * source device's information (PCI Express Base
			 * Specification, Rev. 2.0, Section 6.1.9).
			 */
			down_read(&pci_bus_sem);
			found = pcie_pme_walk_bus(port->subordinate);
			up_read(&pci_bus_sem);
		}
		goto out;
	}

	/* Second, find the bus the source device is on. */
	bus = pci_find_bus(pci_domain_nr(port->bus), busnr);
	if (!bus)
		goto out;

	/* Next, check if the PME is from a PCIe-PCI bridge. */
	found = pcie_pme_from_pci_bridge(bus, devfn);
	if (found)
		goto out;

	/* Finally, try to find the PME source on the bus. */
	down_read(&pci_bus_sem);
	list_for_each_entry(dev, &bus->devices, bus_list) {
		pci_dev_get(dev);
		if (dev->devfn == devfn) {
			found = true;
			break;
		}
		pci_dev_put(dev);
	}
	up_read(&pci_bus_sem);

	if (found) {
		/* The device is there, but we have to check its PME status. */
		found = pci_check_pme_status(dev);
		if (found) {
			if (dev->pme_poll)
				dev->pme_poll = false;

			pci_wakeup_event(dev);
			pm_request_resume(&dev->dev);
		}
		pci_dev_put(dev);
	} else if (devfn) {
		/*
		 * The device is not there, but we can still try to recover by
		 * assuming that the PME was reported by a PCIe-PCI bridge that
		 * used devfn different from zero.
		 */
		pci_info(port, "interrupt generated for non-existent device %02x:%02x.%d\n",
			 busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
		found = pcie_pme_from_pci_bridge(bus, 0);
	}

 out:
	if (!found)
		pci_info(port, "Spurious native interrupt!\n");
}

/**
 * pcie_pme_work_fn - Work handler for PCIe PME interrupt.
 * @work: Work structure giving access to service data.
 */
static void pcie_pme_work_fn(struct work_struct *work)
{
	struct pcie_pme_service_data *data =
			container_of(work, struct pcie_pme_service_data, work);
	struct pci_dev *port = data->srv->port;
	u32 rtsta;

	spin_lock_irq(&data->lock);

	for (;;) {
		if (data->noirq)
			break;

		pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
		if (PCI_POSSIBLE_ERROR(rtsta))
			break;

		if (rtsta & PCI_EXP_RTSTA_PME) {
			/*
			 * Clear PME status of the port.  If there are other
			 * pending PMEs, the status will be set again.
			 */
			pcie_clear_root_pme_status(port);

			spin_unlock_irq(&data->lock);
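			/*
			 * The low 16 bits of the Root Status register hold
			 * the PME Requester ID of the source device.
			 */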
			pcie_pme_handle_request(port, rtsta & 0xffff);
			spin_lock_irq(&data->lock);

			continue;
		}

		/* No need to loop if there are no more PMEs pending. */
		if (!(rtsta & PCI_EXP_RTSTA_PENDING))
			break;

		spin_unlock_irq(&data->lock);
		cpu_relax();
		spin_lock_irq(&data->lock);
	}

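	/*
	 * Re-enable PME interrupts unless they have been disabled for system
	 * suspend or port removal (see pcie_pme_disable_interrupt()).
	 */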
	if (!data->noirq)
		pcie_pme_interrupt_enable(port, true);

	spin_unlock_irq(&data->lock);
}

/**
 * pcie_pme_irq - Interrupt handler for PCIe root port PME interrupt.
 * @irq: Interrupt vector.
 * @context: Interrupt context pointer.
 */
static irqreturn_t pcie_pme_irq(int irq, void *context)
{
	struct pci_dev *port;
	struct pcie_pme_service_data *data;
	u32 rtsta;
	unsigned long flags;

	port = ((struct pcie_device *)context)->port;
	data = get_service_data((struct pcie_device *)context);

	spin_lock_irqsave(&data->lock, flags);
	pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);

	if (PCI_POSSIBLE_ERROR(rtsta) || !(rtsta & PCI_EXP_RTSTA_PME)) {
		spin_unlock_irqrestore(&data->lock, flags);
		return IRQ_NONE;
	}

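	/*
	 * Keep PME interrupt generation off until the work item below has
	 * processed the request; pcie_pme_work_fn() re-enables it.
	 */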
	pcie_pme_interrupt_enable(port, false);
	spin_unlock_irqrestore(&data->lock, flags);

	/* We don't use pm_wq, because it's freezable. */
	schedule_work(&data->work);

	return IRQ_HANDLED;
}

/**
 * pcie_pme_can_wakeup - Set the wakeup capability flag.
 * @dev: PCI device to handle.
 * @ign: Ignored.
 */
static int pcie_pme_can_wakeup(struct pci_dev *dev, void *ign)
{
	device_set_wakeup_capable(&dev->dev, true);
	return 0;
}

/**
 * pcie_pme_mark_devices - Set the wakeup flag for devices below a port.
 * @port: PCIe root port or event collector to handle.
 *
 * For each device below the given root port, including the port itself (or for
 * each root complex integrated endpoint if @port is a root complex event
 * collector), set the flag indicating that it can signal run-time wake-up
 * events.
 */
static void pcie_pme_mark_devices(struct pci_dev *port)
{
	pcie_pme_can_wakeup(port, NULL);

	if (pci_pcie_type(port) == PCI_EXP_TYPE_RC_EC)
		pcie_walk_rcec(port, pcie_pme_can_wakeup, NULL);
	else if (port->subordinate)
		pci_walk_bus(port->subordinate, pcie_pme_can_wakeup, NULL);
}

/**
 * pcie_pme_probe - Initialize PCIe PME service for given root port.
 * @srv: PCIe service to initialize.
 */
static int pcie_pme_probe(struct pcie_device *srv)
{
	struct pci_dev *port = srv->port;
	struct pcie_pme_service_data *data;
	int type = pci_pcie_type(port);
	int ret;

	/* Limit to Root Ports or Root Complex Event Collectors */
	if (type != PCI_EXP_TYPE_RC_EC &&
	    type != PCI_EXP_TYPE_ROOT_PORT)
		return -ENODEV;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	spin_lock_init(&data->lock);
	INIT_WORK(&data->work, pcie_pme_work_fn);
	data->srv = srv;
	set_service_data(srv, data);

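	/*
	 * Disable PME interrupt generation and clear any stale PME status
	 * before requesting the (shared) IRQ.
	 */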
	pcie_pme_interrupt_enable(port, false);
	pcie_clear_root_pme_status(port);

	ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
	if (ret) {
		kfree(data);
		return ret;
	}

	pci_info(port, "Signaling with IRQ %d\n", srv->irq);

	pcie_pme_mark_devices(port);
	pcie_pme_interrupt_enable(port, true);
	return 0;
}

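/* Check whether any device on @bus, or on any bus below it, may signal wakeup. */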
static bool pcie_pme_check_wakeup(struct pci_bus *bus)
{
	struct pci_dev *dev;

	if (!bus)
		return false;

	list_for_each_entry(dev, &bus->devices, bus_list)
		if (device_may_wakeup(&dev->dev)
		    || pcie_pme_check_wakeup(dev->subordinate))
			return true;

	return false;
}

static void pcie_pme_disable_interrupt(struct pci_dev *port,
				       struct pcie_pme_service_data *data)
{
	spin_lock_irq(&data->lock);
	pcie_pme_interrupt_enable(port, false);
	pcie_clear_root_pme_status(port);
	data->noirq = true;
	spin_unlock_irq(&data->lock);
}

/**
 * pcie_pme_suspend - Suspend PCIe PME service device.
 * @srv: PCIe service device to suspend.
 */
static int pcie_pme_suspend(struct pcie_device *srv)
{
	struct pcie_pme_service_data *data = get_service_data(srv);
	struct pci_dev *port = srv->port;
	bool wakeup;
	int ret;

	if (device_may_wakeup(&port->dev)) {
		wakeup = true;
	} else {
		down_read(&pci_bus_sem);
		wakeup = pcie_pme_check_wakeup(port->subordinate);
		up_read(&pci_bus_sem);
	}
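	/*
	 * If the port or anything below it may generate wakeup events, arm
	 * the PME IRQ as a system wakeup source; otherwise disable PME
	 * interrupt generation for the duration of system sleep.
	 */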
	if (wakeup) {
		ret = enable_irq_wake(srv->irq);
		if (!ret)
			return 0;
	}

	pcie_pme_disable_interrupt(port, data);

	synchronize_irq(srv->irq);

	return 0;
}

/**
 * pcie_pme_resume - Resume PCIe PME service device.
 * @srv: PCIe service device to resume.
 */
static int pcie_pme_resume(struct pcie_device *srv)
{
	struct pcie_pme_service_data *data = get_service_data(srv);

	spin_lock_irq(&data->lock);
	if (data->noirq) {
		struct pci_dev *port = srv->port;

		pcie_clear_root_pme_status(port);
		pcie_pme_interrupt_enable(port, true);
		data->noirq = false;
	} else {
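		/* Undo the enable_irq_wake() done in pcie_pme_suspend(). */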
		disable_irq_wake(srv->irq);
	}
	spin_unlock_irq(&data->lock);

	return 0;
}

/**
 * pcie_pme_remove - Prepare PCIe PME service device for removal.
 * @srv: PCIe service device to remove.
 */
static void pcie_pme_remove(struct pcie_device *srv)
{
	struct pcie_pme_service_data *data = get_service_data(srv);

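	/*
	 * Stop new PME interrupts, release the IRQ, and flush any queued
	 * work before freeing the service data.
	 */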
	pcie_pme_disable_interrupt(srv->port, data);
	free_irq(srv->irq, srv);
	cancel_work_sync(&data->work);
	kfree(data);
}

static struct pcie_port_service_driver pcie_pme_driver = {
	.name		= "pcie_pme",
	.port_type	= PCIE_ANY_PORT,
	.service	= PCIE_PORT_SERVICE_PME,

	.probe		= pcie_pme_probe,
	.suspend	= pcie_pme_suspend,
	.resume		= pcie_pme_resume,
	.remove		= pcie_pme_remove,
};

/**
 * pcie_pme_init - Register the PCIe PME service driver.
 */
int __init pcie_pme_init(void)
{
	return pcie_port_service_register(&pcie_pme_driver);
}