// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Express Downstream Port Containment services driver
 * Author: Keith Busch <keith.busch@intel.com>
 *
 * Copyright (C) 2016 Intel Corp.
 */

#define dev_fmt(fmt) "DPC: " fmt

#include <linux/aer.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/pci.h>

#include "portdrv.h"
#include "../pci.h"

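/*
 * Human-readable names for the RP PIO error status bits, indexed by bit
 * position; NULL entries correspond to reserved bit positions.
 */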
static const char * const rp_pio_error_string[] = {
	"Configuration Request received UR Completion",	/* Bit Position 0 */
	"Configuration Request received CA Completion",	/* Bit Position 1 */
	"Configuration Request Completion Timeout",	/* Bit Position 2 */
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"I/O Request received UR Completion",		/* Bit Position 8 */
	"I/O Request received CA Completion",		/* Bit Position 9 */
	"I/O Request Completion Timeout",		/* Bit Position 10 */
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"Memory Request received UR Completion",	/* Bit Position 16 */
	"Memory Request received CA Completion",	/* Bit Position 17 */
	"Memory Request Completion Timeout",		/* Bit Position 18 */
};

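/*
 * Save the DPC Control register into the buffer reserved by
 * pci_add_ext_cap_save_buffer() so that pci_restore_dpc_state() can write it
 * back later (e.g. after the device has been reset or resumed).
 */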
void pci_save_dpc_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_read_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, cap);
}

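/*
 * Write the DPC Control value captured by pci_save_dpc_state() back to the
 * device, re-arming DPC with its previous settings.
 */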
void pci_restore_dpc_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_write_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, *cap);
}

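/* Woken by dpc_reset_link() so pci_dpc_recovered() can stop waiting */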
static DECLARE_WAIT_QUEUE_HEAD(dpc_completed_waitqueue);

#ifdef CONFIG_HOTPLUG_PCI_PCIE
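/*
 * DPC recovery is over once the DPC Trigger Status bit is clear (or the
 * Port's config space reads back as all ones) and dpc_reset_link() is no
 * longer running, i.e. PCI_DPC_RECOVERING has been cleared again.
 */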
static bool dpc_completed(struct pci_dev *pdev)
{
	u16 status;

	pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS, &status);
	if ((!PCI_POSSIBLE_ERROR(status)) && (status & PCI_EXP_DPC_STATUS_TRIGGER))
		return false;

	if (test_bit(PCI_DPC_RECOVERING, &pdev->priv_flags))
		return false;

	return true;
}

/**
 * pci_dpc_recovered - whether DPC triggered and has recovered successfully
 * @pdev: PCI device
 *
 * Return true if DPC was triggered for @pdev and has recovered successfully.
 * Wait for recovery if it hasn't completed yet. Called from the PCIe hotplug
 * driver to recognize and ignore Link Down/Up events caused by DPC.
 */
bool pci_dpc_recovered(struct pci_dev *pdev)
{
	struct pci_host_bridge *host;

	if (!pdev->dpc_cap)
		return false;

	/*
	 * Synchronization between hotplug and DPC is not supported
	 * if DPC is owned by firmware and EDR is not enabled.
	 */
	host = pci_find_host_bridge(pdev->bus);
	if (!host->native_dpc && !IS_ENABLED(CONFIG_PCIE_EDR))
		return false;

	/*
	 * Need a timeout in case DPC never completes due to failure of
	 * dpc_wait_rp_inactive(). The spec doesn't mandate a time limit,
	 * but reports indicate that DPC completes within 4 seconds.
	 */
	wait_event_timeout(dpc_completed_waitqueue, dpc_completed(pdev),
			   msecs_to_jiffies(4000));

	return test_and_clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
}
#endif /* CONFIG_HOTPLUG_PCI_PCIE */

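/*
 * Poll the DPC RP Busy bit for up to one second.  A Root Port that stays
 * busy cannot safely leave DPC, so the caller treats this as a failed
 * recovery.
 */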
static int dpc_wait_rp_inactive(struct pci_dev *pdev)
{
	unsigned long timeout = jiffies + HZ;
	u16 cap = pdev->dpc_cap, status;

	pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
	while (status & PCI_EXP_DPC_RP_BUSY &&
	       !time_after(jiffies, timeout)) {
		msleep(10);
		pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
	}
	if (status & PCI_EXP_DPC_RP_BUSY) {
		pci_warn(pdev, "root port still busy\n");
		return -EBUSY;
	}
	return 0;
}

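/*
 * Link-reset callback handed to pcie_do_recovery() by dpc_handler(): wait for
 * the Link to go down, clear DPC Trigger Status so the Port may exit DPC, and
 * wait for the secondary bus to come back before declaring recovery.
 */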
pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
	pci_ers_result_t ret;
	u16 cap;

	set_bit(PCI_DPC_RECOVERING, &pdev->priv_flags);

	/*
	 * DPC disables the Link automatically in hardware, so it has
	 * already been reset by the time we get here.
	 */
	cap = pdev->dpc_cap;

	/*
	 * Wait until the Link is inactive, then clear DPC Trigger Status
	 * to allow the Port to leave DPC.
	 */
	if (!pcie_wait_for_link(pdev, false))
		pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n");

	if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev)) {
		clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
		ret = PCI_ERS_RESULT_DISCONNECT;
		goto out;
	}

	pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
			      PCI_EXP_DPC_STATUS_TRIGGER);

	if (pci_bridge_wait_for_secondary_bus(pdev, "DPC")) {
		clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
		ret = PCI_ERS_RESULT_DISCONNECT;
	} else {
		set_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
		ret = PCI_ERS_RESULT_RECOVERED;
	}
out:
	clear_bit(PCI_DPC_RECOVERING, &pdev->priv_flags);
	wake_up_all(&dpc_completed_waitqueue);
	return ret;
}

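/*
 * Dump the RP PIO error registers: log the status, mask, severity, syserror
 * and exception registers, name each unmasked error bit (flagging the First
 * Error Pointer), and, if the RP PIO log is large enough, the TLP Header and
 * any ImpSpec/TLP Prefix log entries, before clearing RP PIO status.
 */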
static void dpc_process_rp_pio_error(struct pci_dev *pdev)
{
	u16 cap = pdev->dpc_cap, dpc_status, first_error;
	u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix;
	int i;

	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status);
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_MASK, &mask);
	pci_err(pdev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n",
		status, mask);

	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SEVERITY, &sev);
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SYSERROR, &syserr);
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_EXCEPTION, &exc);
	pci_err(pdev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n",
		sev, syserr, exc);

	/* Get First Error Pointer */
	pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &dpc_status);
	first_error = FIELD_GET(PCI_EXP_DPC_RP_PIO_FEP, dpc_status);

	for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
		if ((status & ~mask) & (1 << i))
			pci_err(pdev, "[%2d] %s%s\n", i, rp_pio_error_string[i],
				first_error == i ? " (First)" : "");
	}

	if (pdev->dpc_rp_log_size < 4)
		goto clear_status;
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
			      &dw0);
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 4,
			      &dw1);
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 8,
			      &dw2);
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12,
			      &dw3);
	pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
		dw0, dw1, dw2, dw3);

	if (pdev->dpc_rp_log_size < 5)
		goto clear_status;
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
	pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log);

	for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
		pci_read_config_dword(pdev,
			cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix);
		pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
	}
 clear_status:
	pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
}

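/*
 * Classify the containment event from the AER Uncorrectable Error registers:
 * return 1 and set info->severity to AER_FATAL or AER_NONFATAL (per the
 * Severity register) if an unmasked uncorrectable error is pending,
 * 0 otherwise.
 */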
static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
					  struct aer_err_info *info)
{
	int pos = dev->aer_cap;
	u32 status, mask, sev;

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
	status &= ~mask;
	if (!status)
		return 0;

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
	status &= sev;
	if (status)
		info->severity = AER_FATAL;
	else
		info->severity = AER_NONFATAL;

	return 1;
}

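/*
 * Report why DPC was triggered and dump the corresponding error detail:
 * RP PIO registers for an RP PIO trigger, or the AER registers for an
 * unmasked uncorrectable error.
 */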
void dpc_process_error(struct pci_dev *pdev)
{
	u16 cap = pdev->dpc_cap, status, source, reason, ext_reason;
	struct aer_err_info info;

	pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
	pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);

	pci_info(pdev, "containment event, status:%#06x source:%#06x\n",
		 status, source);

	reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1;
	ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5;
	pci_warn(pdev, "%s detected\n",
		 (reason == 0) ? "unmasked uncorrectable error" :
		 (reason == 1) ? "ERR_NONFATAL" :
		 (reason == 2) ? "ERR_FATAL" :
		 (ext_reason == 0) ? "RP PIO error" :
		 (ext_reason == 1) ? "software trigger" :
				     "reserved error");

	/* show RP PIO error detail information */
	if (pdev->dpc_rp_extensions && reason == 3 && ext_reason == 0)
		dpc_process_rp_pio_error(pdev);
	else if (reason == 0 &&
		 dpc_get_aer_uncorrect_severity(pdev, &info) &&
		 aer_get_device_error_info(pdev, &info)) {
		aer_print_error(pdev, &info);
		pci_aer_clear_nonfatal_status(pdev);
		pci_aer_clear_fatal_status(pdev);
	}
}

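/*
 * Threaded half of the DPC interrupt: decode and report the error, then run
 * the generic recovery machinery with dpc_reset_link() as the reset callback.
 */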
static irqreturn_t dpc_handler(int irq, void *context)
{
	struct pci_dev *pdev = context;

	dpc_process_error(pdev);

	/* We configure DPC so it only triggers on ERR_FATAL */
	pcie_do_recovery(pdev, pci_channel_io_frozen, dpc_reset_link);

	return IRQ_HANDLED;
}

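/*
 * Hard IRQ half: acknowledge the DPC interrupt and, if containment was
 * actually triggered, wake dpc_handler() to do the real work.
 */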
static irqreturn_t dpc_irq(int irq, void *context)
{
	struct pci_dev *pdev = context;
	u16 cap = pdev->dpc_cap, status;

	pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);

	if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT) || PCI_POSSIBLE_ERROR(status))
		return IRQ_NONE;

	pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
			      PCI_EXP_DPC_STATUS_INTERRUPT);
	if (status & PCI_EXP_DPC_STATUS_TRIGGER)
		return IRQ_WAKE_THREAD;
	return IRQ_HANDLED;
}

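/*
 * Called while the device is being set up: cache the DPC capability offset
 * and, if the Port implements the DPC RP Extensions, a validated RP PIO log
 * size (unless a quirk already provided one).
 */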
void pci_dpc_init(struct pci_dev *pdev)
{
	u16 cap;

	pdev->dpc_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
	if (!pdev->dpc_cap)
		return;

	pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
	if (!(cap & PCI_EXP_DPC_CAP_RP_EXT))
		return;

	pdev->dpc_rp_extensions = true;

	/* Quirks may set dpc_rp_log_size if device or firmware is buggy */
	if (!pdev->dpc_rp_log_size) {
		pdev->dpc_rp_log_size =
			FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, cap);
		if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
			pci_err(pdev, "RP PIO log size %u is invalid\n",
				pdev->dpc_rp_log_size);
			pdev->dpc_rp_log_size = 0;
		}
	}
}

#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
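/*
 * Bind the DPC port service: request a threaded interrupt, enable DPC
 * triggering on ERR_FATAL with interrupt generation, log the Port's
 * containment capabilities, and reserve the save/restore buffer for the
 * DPC Control register.
 */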
static int dpc_probe(struct pcie_device *dev)
{
	struct pci_dev *pdev = dev->port;
	struct device *device = &dev->device;
	int status;
	u16 ctl, cap;

	if (!pcie_aer_is_native(pdev) && !pcie_ports_dpc_native)
		return -ENOTSUPP;

	status = devm_request_threaded_irq(device, dev->irq, dpc_irq,
					   dpc_handler, IRQF_SHARED,
					   "pcie-dpc", pdev);
	if (status) {
		pci_warn(pdev, "request IRQ%d failed: %d\n", dev->irq,
			 status);
		return status;
	}

	pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
	pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);

	ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
	pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
	pci_info(pdev, "enabled with IRQ %d\n", dev->irq);

	pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
		 cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
		 FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
		 FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), pdev->dpc_rp_log_size,
		 FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));

	pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16));
	return status;
}

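/* Disable DPC triggering and its interrupt when the port service is unbound */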
static void dpc_remove(struct pcie_device *dev)
{
	struct pci_dev *pdev = dev->port;
	u16 ctl;

	pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
	ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
	pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
}

static struct pcie_port_service_driver dpcdriver = {
	.name		= "dpc",
	.port_type	= PCIE_ANY_PORT,
	.service	= PCIE_PORT_SERVICE_DPC,
	.probe		= dpc_probe,
	.remove		= dpc_remove,
};

int __init pcie_dpc_init(void)
{
	return pcie_port_service_register(&dpcdriver);
}