xref: /openbmc/linux/drivers/pci/pcie/dpc.c (revision 0661cb2a)
// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Express Downstream Port Containment services driver
 * Author: Keith Busch <keith.busch@intel.com>
 *
 * Copyright (C) 2016 Intel Corp.
 */

#define dev_fmt(fmt) "DPC: " fmt

#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/pci.h>

#include "portdrv.h"
#include "../pci.h"

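/*
 * Error strings indexed by bit position in the RP PIO Status/Mask registers;
 * NULL entries correspond to reserved bit positions.
 */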
static const char * const rp_pio_error_string[] = {
	"Configuration Request received UR Completion",	 /* Bit Position 0  */
	"Configuration Request received CA Completion",	 /* Bit Position 1  */
	"Configuration Request Completion Timeout",	 /* Bit Position 2  */
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"I/O Request received UR Completion",		 /* Bit Position 8  */
	"I/O Request received CA Completion",		 /* Bit Position 9  */
	"I/O Request Completion Timeout",		 /* Bit Position 10 */
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"Memory Request received UR Completion",	 /* Bit Position 16 */
	"Memory Request received CA Completion",	 /* Bit Position 17 */
	"Memory Request Completion Timeout",		 /* Bit Position 18 */
};

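/*
 * Save and restore the DPC Control register around device resets and power
 * state transitions so the port's containment configuration is preserved.
 */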
void pci_save_dpc_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_read_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, cap);
}

void pci_restore_dpc_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_write_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, *cap);
}

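/*
 * dpc_reset_link() wakes this queue when containment handling finishes;
 * pci_dpc_recovered() waits on it so the hotplug driver can distinguish
 * DPC-induced Link Down/Up events from real surprise removals.
 */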
static DECLARE_WAIT_QUEUE_HEAD(dpc_completed_waitqueue);

#ifdef CONFIG_HOTPLUG_PCI_PCIE
static bool dpc_completed(struct pci_dev *pdev)
{
	u16 status;

	pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS, &status);
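	/*
	 * An all-ones read means the port's config space is inaccessible,
	 * so a "set" Trigger Status cannot be trusted; only treat DPC as
	 * still in progress when the read is valid.
	 */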
	if ((status != 0xffff) && (status & PCI_EXP_DPC_STATUS_TRIGGER))
		return false;

	if (test_bit(PCI_DPC_RECOVERING, &pdev->priv_flags))
		return false;

	return true;
}

/**
 * pci_dpc_recovered - whether DPC triggered and has recovered successfully
 * @pdev: PCI device
 *
 * Return true if DPC was triggered for @pdev and has recovered successfully.
 * Wait for recovery if it hasn't completed yet.  Called from the PCIe hotplug
 * driver to recognize and ignore Link Down/Up events caused by DPC.
 */
bool pci_dpc_recovered(struct pci_dev *pdev)
{
	struct pci_host_bridge *host;

	if (!pdev->dpc_cap)
		return false;

	/*
	 * Synchronization between hotplug and DPC is not supported
	 * if DPC is owned by firmware and EDR is not enabled.
	 */
	host = pci_find_host_bridge(pdev->bus);
	if (!host->native_dpc && !IS_ENABLED(CONFIG_PCIE_EDR))
		return false;

	/*
	 * Need a timeout in case DPC never completes due to failure of
	 * dpc_wait_rp_inactive().  The spec doesn't mandate a time limit,
	 * but reports indicate that DPC completes within 4 seconds.
	 */
	wait_event_timeout(dpc_completed_waitqueue, dpc_completed(pdev),
			   msecs_to_jiffies(4000));

	return test_and_clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
}
#endif /* CONFIG_HOTPLUG_PCI_PCIE */

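/*
 * Wait up to one second for the Root Port to deassert DPC RP Busy, which it
 * must do before software releases the Port from DPC by clearing Trigger
 * Status.
 */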
static int dpc_wait_rp_inactive(struct pci_dev *pdev)
{
	unsigned long timeout = jiffies + HZ;
	u16 cap = pdev->dpc_cap, status;

	pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
	while (status & PCI_EXP_DPC_RP_BUSY &&
					!time_after(jiffies, timeout)) {
		msleep(10);
		pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
	}
	if (status & PCI_EXP_DPC_RP_BUSY) {
		pci_warn(pdev, "root port still busy\n");
		return -EBUSY;
	}
	return 0;
}

pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
	pci_ers_result_t ret;
	u16 cap;

	set_bit(PCI_DPC_RECOVERING, &pdev->priv_flags);

	/*
	 * DPC disables the Link automatically in hardware, so it has
	 * already been reset by the time we get here.
	 */
	cap = pdev->dpc_cap;

	/*
	 * Wait until the Link is inactive, then clear DPC Trigger Status
	 * to allow the Port to leave DPC.
	 */
	if (!pcie_wait_for_link(pdev, false))
		pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n");

	if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev)) {
		clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
		ret = PCI_ERS_RESULT_DISCONNECT;
		goto out;
	}

	pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
			      PCI_EXP_DPC_STATUS_TRIGGER);

	if (!pcie_wait_for_link(pdev, true)) {
		pci_info(pdev, "Data Link Layer Link Active not set in 1000 msec\n");
		clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
		ret = PCI_ERS_RESULT_DISCONNECT;
	} else {
		set_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
		ret = PCI_ERS_RESULT_RECOVERED;
	}
out:
	clear_bit(PCI_DPC_RECOVERING, &pdev->priv_flags);
	wake_up_all(&dpc_completed_waitqueue);
	return ret;
}

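/*
 * Dump the RP PIO error registers.  The log consists of dpc_rp_log_size
 * DWORDs: four DWORDs of TLP Header Log, one DWORD of ImpSpec Log, and the
 * remainder as TLP Prefix Log entries.
 */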
static void dpc_process_rp_pio_error(struct pci_dev *pdev)
{
	u16 cap = pdev->dpc_cap, dpc_status, first_error;
	u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix;
	int i;

	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status);
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_MASK, &mask);
	pci_err(pdev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n",
		status, mask);

	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SEVERITY, &sev);
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SYSERROR, &syserr);
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_EXCEPTION, &exc);
	pci_err(pdev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n",
		sev, syserr, exc);

	/* Get First Error Pointer */
	pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &dpc_status);
	first_error = (dpc_status & 0x1f00) >> 8;

	for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
		if ((status & ~mask) & (1 << i))
			pci_err(pdev, "[%2d] %s%s\n", i, rp_pio_error_string[i],
				first_error == i ? " (First)" : "");
	}

	if (pdev->dpc_rp_log_size < 4)
		goto clear_status;
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
			      &dw0);
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 4,
			      &dw1);
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 8,
			      &dw2);
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12,
			      &dw3);
	pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
		dw0, dw1, dw2, dw3);

	if (pdev->dpc_rp_log_size < 5)
		goto clear_status;
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
	pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log);

	/* Each TLP Prefix Log entry is a successive DWORD in config space */
	for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
		pci_read_config_dword(pdev,
			cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix);
		pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
	}
 clear_status:
	pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
}

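/*
 * Classify the containment cause from the AER Uncorrectable Error registers:
 * returns 1 and sets info->severity (AER_FATAL if any unmasked error is
 * marked fatal, otherwise AER_NONFATAL), or 0 if no unmasked error is logged.
 */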
static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
					  struct aer_err_info *info)
{
	int pos = dev->aer_cap;
	u32 status, mask, sev;

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
	status &= ~mask;
	if (!status)
		return 0;

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
	status &= sev;
	if (status)
		info->severity = AER_FATAL;
	else
		info->severity = AER_NONFATAL;

	return 1;
}

void dpc_process_error(struct pci_dev *pdev)
{
	u16 cap = pdev->dpc_cap, status, source, reason, ext_reason;
	struct aer_err_info info;

	pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
	pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);

	pci_info(pdev, "containment event, status:%#06x source:%#06x\n",
		 status, source);

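	/*
	 * DPC Trigger Reason: 0 = unmasked uncorrectable error,
	 * 1 = ERR_NONFATAL, 2 = ERR_FATAL, 3 = extended reason (decoded via
	 * the Trigger Reason Extension field below).
	 */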
	reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1;
	ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5;
	pci_warn(pdev, "%s detected\n",
		 (reason == 0) ? "unmasked uncorrectable error" :
		 (reason == 1) ? "ERR_NONFATAL" :
		 (reason == 2) ? "ERR_FATAL" :
		 (ext_reason == 0) ? "RP PIO error" :
		 (ext_reason == 1) ? "software trigger" :
				     "reserved error");

	/* show RP PIO error detail information */
	if (pdev->dpc_rp_extensions && reason == 3 && ext_reason == 0)
		dpc_process_rp_pio_error(pdev);
	else if (reason == 0 &&
		 dpc_get_aer_uncorrect_severity(pdev, &info) &&
		 aer_get_device_error_info(pdev, &info)) {
		aer_print_error(pdev, &info);
		pci_aer_clear_nonfatal_status(pdev);
		pci_aer_clear_fatal_status(pdev);
	}
}

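/*
 * Threaded half of the DPC interrupt: report the contained error, then run
 * the generic PCIe error recovery flow with dpc_reset_link() as the reset
 * callback.
 */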
static irqreturn_t dpc_handler(int irq, void *context)
{
	struct pci_dev *pdev = context;

	dpc_process_error(pdev);

	/* We configure DPC so it only triggers on ERR_FATAL */
	pcie_do_recovery(pdev, pci_channel_io_frozen, dpc_reset_link);

	return IRQ_HANDLED;
}

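/*
 * Hard IRQ half: acknowledge DPC Interrupt Status (RW1C) and wake the thread
 * only if containment was actually triggered.  An all-ones read means the
 * port is gone, so the interrupt cannot be ours.
 */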
static irqreturn_t dpc_irq(int irq, void *context)
{
	struct pci_dev *pdev = context;
	u16 cap = pdev->dpc_cap, status;

	pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);

	if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT) || status == (u16)(~0))
		return IRQ_NONE;

	pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
			      PCI_EXP_DPC_STATUS_INTERRUPT);
	if (status & PCI_EXP_DPC_STATUS_TRIGGER)
		return IRQ_WAKE_THREAD;
	return IRQ_HANDLED;
}

void pci_dpc_init(struct pci_dev *pdev)
{
	u16 cap;

	pdev->dpc_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
	if (!pdev->dpc_cap)
		return;

	pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
	if (!(cap & PCI_EXP_DPC_CAP_RP_EXT))
		return;

	pdev->dpc_rp_extensions = true;
	pdev->dpc_rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
	if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
		pci_err(pdev, "RP PIO log size %u is invalid\n",
			pdev->dpc_rp_log_size);
		pdev->dpc_rp_log_size = 0;
	}
}

#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
static int dpc_probe(struct pcie_device *dev)
{
	struct pci_dev *pdev = dev->port;
	struct device *device = &dev->device;
	int status;
	u16 ctl, cap;

	if (!pcie_aer_is_native(pdev) && !pcie_ports_dpc_native)
		return -ENOTSUPP;

	status = devm_request_threaded_irq(device, dev->irq, dpc_irq,
					   dpc_handler, IRQF_SHARED,
					   "pcie-dpc", pdev);
	if (status) {
		pci_warn(pdev, "request IRQ%d failed: %d\n", dev->irq,
			 status);
		return status;
	}

	pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
	pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);

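	/*
	 * Clear the DPC Trigger Enable field and DPC Interrupt Enable, then
	 * arm containment for ERR_FATAL only and enable the DPC interrupt.
	 */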
	ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
	pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
	pci_info(pdev, "enabled with IRQ %d\n", dev->irq);

	pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
		 cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
		 FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
		 FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), pdev->dpc_rp_log_size,
		 FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));

	pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16));
	return status;
}

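/* Disable DPC triggering and its interrupt when the port service is unbound. */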
static void dpc_remove(struct pcie_device *dev)
{
	struct pci_dev *pdev = dev->port;
	u16 ctl;

	pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
	ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
	pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
}

static struct pcie_port_service_driver dpcdriver = {
	.name		= "dpc",
	.port_type	= PCIE_ANY_PORT,
	.service	= PCIE_PORT_SERVICE_DPC,
	.probe		= dpc_probe,
	.remove		= dpc_remove,
};

int __init pcie_dpc_init(void)
{
	return pcie_port_service_register(&dpcdriver);
}