xref: /openbmc/linux/drivers/dma/ioat/init.c (revision a9a08845)
1 /*
2  * Intel I/OAT DMA Linux driver
3  * Copyright(c) 2004 - 2015 Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * The full GNU General Public License is included in this distribution in
15  * the file called "COPYING".
16  *
17  */
18 
19 #include <linux/init.h>
20 #include <linux/module.h>
21 #include <linux/slab.h>
22 #include <linux/pci.h>
23 #include <linux/interrupt.h>
24 #include <linux/dmaengine.h>
25 #include <linux/delay.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/workqueue.h>
28 #include <linux/prefetch.h>
29 #include <linux/dca.h>
30 #include <linux/aer.h>
31 #include <linux/sizes.h>
32 #include "dma.h"
33 #include "registers.h"
34 #include "hw.h"
35 
36 #include "../dmaengine.h"
37 
38 MODULE_VERSION(IOAT_DMA_VERSION);
39 MODULE_LICENSE("Dual BSD/GPL");
40 MODULE_AUTHOR("Intel Corporation");
41 
42 static const struct pci_device_id ioat_pci_tbl[] = {
43 	/* I/OAT v3 platforms */
44 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
45 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
46 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
47 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
48 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
49 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
50 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
51 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
52 
53 	/* I/OAT v3.2 platforms */
54 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
55 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
56 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
57 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
58 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
59 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
60 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
61 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
62 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
63 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },
64 
65 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
66 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
67 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
68 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
69 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
70 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
71 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
72 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
73 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
74 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
75 
76 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
77 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
78 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
79 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
80 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
81 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
82 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
83 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
84 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
85 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
86 
87 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
88 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
89 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
90 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
91 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
92 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
93 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
94 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
95 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
96 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },
97 
98 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) },
99 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) },
100 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) },
101 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) },
102 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) },
103 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) },
104 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) },
105 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) },
106 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
107 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
108 
109 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },
110 
111 	/* I/OAT v3.3 platforms */
112 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
113 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
114 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
115 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
116 
117 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
118 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
119 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
120 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
121 
122 	{ 0, }
123 };
124 MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
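/*
 * Exporting the PCI ID table lets udev/modprobe autoload this driver as soon
 * as a matching I/OAT DMA function is discovered on the bus.
 */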
125 
126 static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
127 static void ioat_remove(struct pci_dev *pdev);
128 static void
129 ioat_init_channel(struct ioatdma_device *ioat_dma,
130 		  struct ioatdma_chan *ioat_chan, int idx);
131 static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
132 static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
133 static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
134 
135 static int ioat_dca_enabled = 1;
136 module_param(ioat_dca_enabled, int, 0644);
137 MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
138 int ioat_pending_level = 4;
139 module_param(ioat_pending_level, int, 0644);
140 MODULE_PARM_DESC(ioat_pending_level,
141 		 "high-water mark for pushing ioat descriptors (default: 4)");
142 static char ioat_interrupt_style[32] = "msix";
143 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
144 		    sizeof(ioat_interrupt_style), 0644);
145 MODULE_PARM_DESC(ioat_interrupt_style,
146 		 "set ioat interrupt style: msix (default), msi, intx");
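
/*
 * Note: these parameters can be given at load time, e.g.
 * "modprobe ioatdma ioat_interrupt_style=msi ioat_pending_level=8"
 * (illustrative values), and, being registered with mode 0644, can also be
 * changed at runtime via /sys/module/ioatdma/parameters/.
 */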
147 
148 struct kmem_cache *ioat_cache;
149 struct kmem_cache *ioat_sed_cache;
150 
151 static bool is_jf_ioat(struct pci_dev *pdev)
152 {
153 	switch (pdev->device) {
154 	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
155 	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
156 	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
157 	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
158 	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
159 	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
160 	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
161 	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
162 	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
163 	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
164 		return true;
165 	default:
166 		return false;
167 	}
168 }
169 
170 static bool is_snb_ioat(struct pci_dev *pdev)
171 {
172 	switch (pdev->device) {
173 	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
174 	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
175 	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
176 	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
177 	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
178 	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
179 	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
180 	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
181 	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
182 	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
183 		return true;
184 	default:
185 		return false;
186 	}
187 }
188 
189 static bool is_ivb_ioat(struct pci_dev *pdev)
190 {
191 	switch (pdev->device) {
192 	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
193 	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
194 	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
195 	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
196 	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
197 	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
198 	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
199 	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
200 	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
201 	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
202 		return true;
203 	default:
204 		return false;
205 	}
206 
207 }
208 
209 static bool is_hsw_ioat(struct pci_dev *pdev)
210 {
211 	switch (pdev->device) {
212 	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
213 	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
214 	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
215 	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
216 	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
217 	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
218 	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
219 	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
220 	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
221 	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
222 		return true;
223 	default:
224 		return false;
225 	}
226 
227 }
228 
229 static bool is_bdx_ioat(struct pci_dev *pdev)
230 {
231 	switch (pdev->device) {
232 	case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
233 	case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
234 	case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
235 	case PCI_DEVICE_ID_INTEL_IOAT_BDX3:
236 	case PCI_DEVICE_ID_INTEL_IOAT_BDX4:
237 	case PCI_DEVICE_ID_INTEL_IOAT_BDX5:
238 	case PCI_DEVICE_ID_INTEL_IOAT_BDX6:
239 	case PCI_DEVICE_ID_INTEL_IOAT_BDX7:
240 	case PCI_DEVICE_ID_INTEL_IOAT_BDX8:
241 	case PCI_DEVICE_ID_INTEL_IOAT_BDX9:
242 		return true;
243 	default:
244 		return false;
245 	}
246 }
247 
248 static inline bool is_skx_ioat(struct pci_dev *pdev)
249 {
250 	return pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX;
251 }
252 
253 static bool is_xeon_cb32(struct pci_dev *pdev)
254 {
255 	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
256 		is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
257 }
258 
259 bool is_bwd_ioat(struct pci_dev *pdev)
260 {
261 	switch (pdev->device) {
262 	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
263 	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
264 	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
265 	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
266 	/* even though it is not an Atom part, BDX-DE has the same DMA silicon */
267 	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
268 	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
269 	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
270 	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
271 		return true;
272 	default:
273 		return false;
274 	}
275 }
276 
277 static bool is_bwd_noraid(struct pci_dev *pdev)
278 {
279 	switch (pdev->device) {
280 	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
281 	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
282 	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
283 	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
284 	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
285 	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
286 		return true;
287 	default:
288 		return false;
289 	}
290 
291 }
292 
293 /*
294  * Perform an I/OAT transaction to verify that the hardware works.
295  */
296 #define IOAT_TEST_SIZE 2000
297 
298 static void ioat_dma_test_callback(void *dma_async_param)
299 {
300 	struct completion *cmp = dma_async_param;
301 
302 	complete(cmp);
303 }
304 
305 /**
306  * ioat_dma_self_test - Perform an I/OAT transaction to verify that the hardware works.
307  * @ioat_dma: dma device to be tested
308  */
309 static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
310 {
311 	int i;
312 	u8 *src;
313 	u8 *dest;
314 	struct dma_device *dma = &ioat_dma->dma_dev;
315 	struct device *dev = &ioat_dma->pdev->dev;
316 	struct dma_chan *dma_chan;
317 	struct dma_async_tx_descriptor *tx;
318 	dma_addr_t dma_dest, dma_src;
319 	dma_cookie_t cookie;
320 	int err = 0;
321 	struct completion cmp;
322 	unsigned long tmo;
323 	unsigned long flags;
324 
325 	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
326 	if (!src)
327 		return -ENOMEM;
328 	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
329 	if (!dest) {
330 		kfree(src);
331 		return -ENOMEM;
332 	}
333 
334 	/* Fill in src buffer */
335 	for (i = 0; i < IOAT_TEST_SIZE; i++)
336 		src[i] = (u8)i;
337 
338 	/* Start copy, using first DMA channel */
339 	dma_chan = container_of(dma->channels.next, struct dma_chan,
340 				device_node);
341 	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
342 		dev_err(dev, "selftest cannot allocate chan resource\n");
343 		err = -ENODEV;
344 		goto out;
345 	}
346 
347 	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
348 	if (dma_mapping_error(dev, dma_src)) {
349 		dev_err(dev, "mapping src buffer failed\n");
350 		err = -ENOMEM;
351 		goto free_resources;
352 	}
353 	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
354 	if (dma_mapping_error(dev, dma_dest)) {
355 		dev_err(dev, "mapping dest buffer failed\n");
356 		err = -ENOMEM;
357 		goto unmap_src;
358 	}
359 	flags = DMA_PREP_INTERRUPT;
360 	tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
361 						      dma_src, IOAT_TEST_SIZE,
362 						      flags);
363 	if (!tx) {
364 		dev_err(dev, "Self-test prep failed, disabling\n");
365 		err = -ENODEV;
366 		goto unmap_dma;
367 	}
368 
369 	async_tx_ack(tx);
370 	init_completion(&cmp);
371 	tx->callback = ioat_dma_test_callback;
372 	tx->callback_param = &cmp;
373 	cookie = tx->tx_submit(tx);
374 	if (cookie < 0) {
375 		dev_err(dev, "Self-test setup failed, disabling\n");
376 		err = -ENODEV;
377 		goto unmap_dma;
378 	}
379 	dma->device_issue_pending(dma_chan);
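	/*
	 * device_issue_pending() flushes the submitted descriptor to the
	 * hardware; completion is signalled asynchronously through
	 * ioat_dma_test_callback(), which completes &cmp below.
	 */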
380 
381 	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
382 
383 	if (tmo == 0 ||
384 	    dma->device_tx_status(dma_chan, cookie, NULL)
385 					!= DMA_COMPLETE) {
386 		dev_err(dev, "Self-test copy timed out, disabling\n");
387 		err = -ENODEV;
388 		goto unmap_dma;
389 	}
390 	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
391 		dev_err(dev, "Self-test copy failed compare, disabling\n");
392 		err = -ENODEV;
393 		goto unmap_dma;
394 	}
395 
396 unmap_dma:
397 	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
398 unmap_src:
399 	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
400 free_resources:
401 	dma->device_free_chan_resources(dma_chan);
402 out:
403 	kfree(src);
404 	kfree(dest);
405 	return err;
406 }
407 
408 /**
409  * ioat_dma_setup_interrupts - set up the interrupt handler
410  * @ioat_dma: ioat dma device
411  */
412 int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
413 {
414 	struct ioatdma_chan *ioat_chan;
415 	struct pci_dev *pdev = ioat_dma->pdev;
416 	struct device *dev = &pdev->dev;
417 	struct msix_entry *msix;
418 	int i, j, msixcnt;
419 	int err = -EINVAL;
420 	u8 intrctrl = 0;
421 
422 	if (!strcmp(ioat_interrupt_style, "msix"))
423 		goto msix;
424 	if (!strcmp(ioat_interrupt_style, "msi"))
425 		goto msi;
426 	if (!strcmp(ioat_interrupt_style, "intx"))
427 		goto intx;
428 	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
429 	goto err_no_irq;
430 
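/*
 * Fallback chain: MSI-X (one vector per channel) is tried first unless a
 * different style was requested, then MSI (a single shared vector), then
 * legacy INTx; if none can be set up, interrupt generation is disabled.
 */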
431 msix:
432 	/* The number of MSI-X vectors should equal the number of channels */
433 	msixcnt = ioat_dma->dma_dev.chancnt;
434 	for (i = 0; i < msixcnt; i++)
435 		ioat_dma->msix_entries[i].entry = i;
436 
437 	err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
438 	if (err)
439 		goto msi;
440 
441 	for (i = 0; i < msixcnt; i++) {
442 		msix = &ioat_dma->msix_entries[i];
443 		ioat_chan = ioat_chan_by_index(ioat_dma, i);
444 		err = devm_request_irq(dev, msix->vector,
445 				       ioat_dma_do_interrupt_msix, 0,
446 				       "ioat-msix", ioat_chan);
447 		if (err) {
448 			for (j = 0; j < i; j++) {
449 				msix = &ioat_dma->msix_entries[j];
450 				ioat_chan = ioat_chan_by_index(ioat_dma, j);
451 				devm_free_irq(dev, msix->vector, ioat_chan);
452 			}
453 			goto msi;
454 		}
455 	}
456 	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
457 	ioat_dma->irq_mode = IOAT_MSIX;
458 	goto done;
459 
460 msi:
461 	err = pci_enable_msi(pdev);
462 	if (err)
463 		goto intx;
464 
465 	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
466 			       "ioat-msi", ioat_dma);
467 	if (err) {
468 		pci_disable_msi(pdev);
469 		goto intx;
470 	}
471 	ioat_dma->irq_mode = IOAT_MSI;
472 	goto done;
473 
474 intx:
475 	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
476 			       IRQF_SHARED, "ioat-intx", ioat_dma);
477 	if (err)
478 		goto err_no_irq;
479 
480 	ioat_dma->irq_mode = IOAT_INTX;
481 done:
482 	if (is_bwd_ioat(pdev))
483 		ioat_intr_quirk(ioat_dma);
484 	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
485 	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
486 	return 0;
487 
488 err_no_irq:
489 	/* Disable all interrupt generation */
490 	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
491 	ioat_dma->irq_mode = IOAT_NOIRQ;
492 	dev_err(dev, "no usable interrupts\n");
493 	return err;
494 }
495 
496 static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
497 {
498 	/* Disable all interrupt generation */
499 	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
500 }
501 
502 static int ioat_probe(struct ioatdma_device *ioat_dma)
503 {
504 	int err = -ENODEV;
505 	struct dma_device *dma = &ioat_dma->dma_dev;
506 	struct pci_dev *pdev = ioat_dma->pdev;
507 	struct device *dev = &pdev->dev;
508 
509 	ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
510 						    sizeof(u64),
511 						    SMP_CACHE_BYTES,
512 						    SMP_CACHE_BYTES);
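	/*
	 * Each channel draws one cache-aligned u64 slot from this pool; the
	 * hardware writes its completion status there, so cleanup can poll
	 * cacheable memory rather than MMIO registers.
	 */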
513 
514 	if (!ioat_dma->completion_pool) {
515 		err = -ENOMEM;
516 		goto err_out;
517 	}
518 
519 	ioat_enumerate_channels(ioat_dma);
520 
521 	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
522 	dma->dev = &pdev->dev;
523 
524 	if (!dma->chancnt) {
525 		dev_err(dev, "channel enumeration error\n");
526 		goto err_setup_interrupts;
527 	}
528 
529 	err = ioat_dma_setup_interrupts(ioat_dma);
530 	if (err)
531 		goto err_setup_interrupts;
532 
533 	err = ioat3_dma_self_test(ioat_dma);
534 	if (err)
535 		goto err_self_test;
536 
537 	return 0;
538 
539 err_self_test:
540 	ioat_disable_interrupts(ioat_dma);
541 err_setup_interrupts:
542 	dma_pool_destroy(ioat_dma->completion_pool);
543 err_out:
544 	return err;
545 }
546 
547 static int ioat_register(struct ioatdma_device *ioat_dma)
548 {
549 	int err = dma_async_device_register(&ioat_dma->dma_dev);
550 
551 	if (err) {
552 		ioat_disable_interrupts(ioat_dma);
553 		dma_pool_destroy(ioat_dma->completion_pool);
554 	}
555 
556 	return err;
557 }
558 
559 static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
560 {
561 	struct dma_device *dma = &ioat_dma->dma_dev;
562 
563 	ioat_disable_interrupts(ioat_dma);
564 
565 	ioat_kobject_del(ioat_dma);
566 
567 	dma_async_device_unregister(dma);
568 
569 	dma_pool_destroy(ioat_dma->completion_pool);
570 
571 	INIT_LIST_HEAD(&dma->channels);
572 }
573 
574 /**
575  * ioat_enumerate_channels - find and initialize the device's channels
576  * @ioat_dma: the ioat dma device to be enumerated
577  */
578 static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
579 {
580 	struct ioatdma_chan *ioat_chan;
581 	struct device *dev = &ioat_dma->pdev->dev;
582 	struct dma_device *dma = &ioat_dma->dma_dev;
583 	u8 xfercap_log;
584 	int i;
585 
586 	INIT_LIST_HEAD(&dma->channels);
587 	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
588 	dma->chancnt &= 0x1f; /* bits [4:0] valid */
589 	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
590 		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
591 			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
592 		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
593 	}
594 	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
595 	xfercap_log &= 0x1f; /* bits [4:0] valid */
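	/*
	 * xfercap_log is the log2 of the largest transfer a single descriptor
	 * can carry; a value of 0 is treated as an unusable engine and
	 * enumeration bails out below without setting up any channels.
	 */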
596 	if (xfercap_log == 0)
597 		return 0;
598 	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
599 
600 	for (i = 0; i < dma->chancnt; i++) {
601 		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
602 		if (!ioat_chan)
603 			break;
604 
605 		ioat_init_channel(ioat_dma, ioat_chan, i);
606 		ioat_chan->xfercap_log = xfercap_log;
607 		spin_lock_init(&ioat_chan->prep_lock);
608 		if (ioat_reset_hw(ioat_chan)) {
609 			i = 0;
610 			break;
611 		}
612 	}
613 	dma->chancnt = i;
614 	return i;
615 }
616 
617 /**
618  * ioat_free_chan_resources - release all the descriptors
619  * @chan: the channel to be cleaned
620  */
621 static void ioat_free_chan_resources(struct dma_chan *c)
622 {
623 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
624 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
625 	struct ioat_ring_ent *desc;
626 	const int total_descs = 1 << ioat_chan->alloc_order;
627 	int descs;
628 	int i;
629 
630 	/* Before freeing channel resources, first check whether
631 	 * they have been previously allocated for this channel.
632 	 */
633 	if (!ioat_chan->ring)
634 		return;
635 
636 	ioat_stop(ioat_chan);
637 	ioat_reset_hw(ioat_chan);
638 
639 	spin_lock_bh(&ioat_chan->cleanup_lock);
640 	spin_lock_bh(&ioat_chan->prep_lock);
641 	descs = ioat_ring_space(ioat_chan);
642 	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
643 	for (i = 0; i < descs; i++) {
644 		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
645 		ioat_free_ring_ent(desc, c);
646 	}
647 
648 	if (descs < total_descs)
649 		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
650 			total_descs - descs);
651 
652 	for (i = 0; i < total_descs - descs; i++) {
653 		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
654 		dump_desc_dbg(ioat_chan, desc);
655 		ioat_free_ring_ent(desc, c);
656 	}
657 
658 	for (i = 0; i < ioat_chan->desc_chunks; i++) {
659 		dma_free_coherent(to_dev(ioat_chan), SZ_2M,
660 				  ioat_chan->descs[i].virt,
661 				  ioat_chan->descs[i].hw);
662 		ioat_chan->descs[i].virt = NULL;
663 		ioat_chan->descs[i].hw = 0;
664 	}
665 	ioat_chan->desc_chunks = 0;
666 
667 	kfree(ioat_chan->ring);
668 	ioat_chan->ring = NULL;
669 	ioat_chan->alloc_order = 0;
670 	dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
671 		      ioat_chan->completion_dma);
672 	spin_unlock_bh(&ioat_chan->prep_lock);
673 	spin_unlock_bh(&ioat_chan->cleanup_lock);
674 
675 	ioat_chan->last_completion = 0;
676 	ioat_chan->completion_dma = 0;
677 	ioat_chan->dmacount = 0;
678 }
679 
680 /* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
681  * @c: channel to be initialized
682  */
683 static int ioat_alloc_chan_resources(struct dma_chan *c)
684 {
685 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
686 	struct ioat_ring_ent **ring;
687 	u64 status;
688 	int order;
689 	int i = 0;
690 	u32 chanerr;
691 
692 	/* have we already been set up? */
693 	if (ioat_chan->ring)
694 		return 1 << ioat_chan->alloc_order;
695 
696 	/* Set up the channel control register to interrupt and write completion status on error */
697 	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
698 
699 	/* allocate a completion writeback area */
700 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
701 	ioat_chan->completion =
702 		dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
703 				GFP_NOWAIT, &ioat_chan->completion_dma);
704 	if (!ioat_chan->completion)
705 		return -ENOMEM;
706 
707 	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
708 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
709 	writel(((u64)ioat_chan->completion_dma) >> 32,
710 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
711 
712 	order = IOAT_MAX_ORDER;
713 	ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
714 	if (!ring)
715 		return -ENOMEM;
716 
717 	spin_lock_bh(&ioat_chan->cleanup_lock);
718 	spin_lock_bh(&ioat_chan->prep_lock);
719 	ioat_chan->ring = ring;
720 	ioat_chan->head = 0;
721 	ioat_chan->issued = 0;
722 	ioat_chan->tail = 0;
723 	ioat_chan->alloc_order = order;
724 	set_bit(IOAT_RUN, &ioat_chan->state);
725 	spin_unlock_bh(&ioat_chan->prep_lock);
726 	spin_unlock_bh(&ioat_chan->cleanup_lock);
727 
728 	ioat_start_null_desc(ioat_chan);
729 
730 	/* check that we got off the ground */
731 	do {
732 		udelay(1);
733 		status = ioat_chansts(ioat_chan);
734 	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
735 
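	/*
	 * Twenty 1us polls should be ample time for the null descriptor to
	 * move the channel to the active or idle state; anything else is
	 * treated as a failed channel below.
	 */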
736 	if (is_ioat_active(status) || is_ioat_idle(status))
737 		return 1 << ioat_chan->alloc_order;
738 
739 	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
740 
741 	dev_WARN(to_dev(ioat_chan),
742 		 "failed to start channel chanerr: %#x\n", chanerr);
743 	ioat_free_chan_resources(c);
744 	return -EFAULT;
745 }
746 
747 /* common channel initialization */
748 static void
749 ioat_init_channel(struct ioatdma_device *ioat_dma,
750 		  struct ioatdma_chan *ioat_chan, int idx)
751 {
752 	struct dma_device *dma = &ioat_dma->dma_dev;
753 	struct dma_chan *c = &ioat_chan->dma_chan;
754 	unsigned long data = (unsigned long) c;
755 
756 	ioat_chan->ioat_dma = ioat_dma;
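	/*
	 * Per-channel register banks are 0x80 bytes each and start right after
	 * the 0x80-byte device-global register block, hence (idx + 1) * 0x80.
	 */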
757 	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
758 	spin_lock_init(&ioat_chan->cleanup_lock);
759 	ioat_chan->dma_chan.device = dma;
760 	dma_cookie_init(&ioat_chan->dma_chan);
761 	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
762 	ioat_dma->idx[idx] = ioat_chan;
763 	timer_setup(&ioat_chan->timer, ioat_timer_event, 0);
764 	tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
765 }
766 
767 #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
768 static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
769 {
770 	int i, src_idx;
771 	struct page *dest;
772 	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
773 	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
774 	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
775 	dma_addr_t dest_dma;
776 	struct dma_async_tx_descriptor *tx;
777 	struct dma_chan *dma_chan;
778 	dma_cookie_t cookie;
779 	u8 cmp_byte = 0;
780 	u32 cmp_word;
781 	u32 xor_val_result;
782 	int err = 0;
783 	struct completion cmp;
784 	unsigned long tmo;
785 	struct device *dev = &ioat_dma->pdev->dev;
786 	struct dma_device *dma = &ioat_dma->dma_dev;
787 	u8 op = 0;
788 
789 	dev_dbg(dev, "%s\n", __func__);
790 
791 	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
792 		return 0;
793 
794 	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
795 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
796 		if (!xor_srcs[src_idx]) {
797 			while (src_idx--)
798 				__free_page(xor_srcs[src_idx]);
799 			return -ENOMEM;
800 		}
801 	}
802 
803 	dest = alloc_page(GFP_KERNEL);
804 	if (!dest) {
805 		while (src_idx--)
806 			__free_page(xor_srcs[src_idx]);
807 		return -ENOMEM;
808 	}
809 
810 	/* Fill in src buffers */
811 	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
812 		u8 *ptr = page_address(xor_srcs[src_idx]);
813 
814 		for (i = 0; i < PAGE_SIZE; i++)
815 			ptr[i] = (1 << src_idx);
816 	}
817 
818 	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
819 		cmp_byte ^= (u8) (1 << src_idx);
820 
821 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
822 			(cmp_byte << 8) | cmp_byte;
823 
824 	memset(page_address(dest), 0, PAGE_SIZE);
825 
826 	dma_chan = container_of(dma->channels.next, struct dma_chan,
827 				device_node);
828 	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
829 		err = -ENODEV;
830 		goto out;
831 	}
832 
833 	/* test xor */
834 	op = IOAT_OP_XOR;
835 
836 	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
837 	if (dma_mapping_error(dev, dest_dma)) {
838 		err = -ENOMEM;
839 		goto free_resources;
840 	}
841 
842 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
843 		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
844 					   DMA_TO_DEVICE);
845 		if (dma_mapping_error(dev, dma_srcs[i])) {
846 			err = -ENOMEM;
847 			goto dma_unmap;
848 		}
849 	}
850 	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
851 				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
852 				      DMA_PREP_INTERRUPT);
853 
854 	if (!tx) {
855 		dev_err(dev, "Self-test xor prep failed\n");
856 		err = -ENODEV;
857 		goto dma_unmap;
858 	}
859 
860 	async_tx_ack(tx);
861 	init_completion(&cmp);
862 	tx->callback = ioat_dma_test_callback;
863 	tx->callback_param = &cmp;
864 	cookie = tx->tx_submit(tx);
865 	if (cookie < 0) {
866 		dev_err(dev, "Self-test xor setup failed\n");
867 		err = -ENODEV;
868 		goto dma_unmap;
869 	}
870 	dma->device_issue_pending(dma_chan);
871 
872 	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
873 
874 	if (tmo == 0 ||
875 	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
876 		dev_err(dev, "Self-test xor timed out\n");
877 		err = -ENODEV;
878 		goto dma_unmap;
879 	}
880 
881 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
882 		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
883 
884 	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
885 	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
886 		u32 *ptr = page_address(dest);
887 
888 		if (ptr[i] != cmp_word) {
889 			dev_err(dev, "Self-test xor failed compare\n");
890 			err = -ENODEV;
891 			goto free_resources;
892 		}
893 	}
894 	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
895 
896 	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
897 
898 	/* skip validate if the capability is not present */
899 	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
900 		goto free_resources;
901 
902 	op = IOAT_OP_XOR_VAL;
903 
904 	/* validate the sources against the destination page */
905 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
906 		xor_val_srcs[i] = xor_srcs[i];
907 	xor_val_srcs[i] = dest;
908 
909 	xor_val_result = 1;
910 
911 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
912 		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
913 					   DMA_TO_DEVICE);
914 		if (dma_mapping_error(dev, dma_srcs[i])) {
915 			err = -ENOMEM;
916 			goto dma_unmap;
917 		}
918 	}
919 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
920 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
921 					  &xor_val_result, DMA_PREP_INTERRUPT);
922 	if (!tx) {
923 		dev_err(dev, "Self-test zero prep failed\n");
924 		err = -ENODEV;
925 		goto dma_unmap;
926 	}
927 
928 	async_tx_ack(tx);
929 	init_completion(&cmp);
930 	tx->callback = ioat_dma_test_callback;
931 	tx->callback_param = &cmp;
932 	cookie = tx->tx_submit(tx);
933 	if (cookie < 0) {
934 		dev_err(dev, "Self-test zero setup failed\n");
935 		err = -ENODEV;
936 		goto dma_unmap;
937 	}
938 	dma->device_issue_pending(dma_chan);
939 
940 	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
941 
942 	if (tmo == 0 ||
943 	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
944 		dev_err(dev, "Self-test validate timed out\n");
945 		err = -ENODEV;
946 		goto dma_unmap;
947 	}
948 
949 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
950 		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
951 
952 	if (xor_val_result != 0) {
953 		dev_err(dev, "Self-test validate failed compare\n");
954 		err = -ENODEV;
955 		goto free_resources;
956 	}
957 
958 	memset(page_address(dest), 0, PAGE_SIZE);
959 
960 	/* test for non-zero parity sum */
961 	op = IOAT_OP_XOR_VAL;
962 
963 	xor_val_result = 0;
964 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
965 		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
966 					   DMA_TO_DEVICE);
967 		if (dma_mapping_error(dev, dma_srcs[i])) {
968 			err = -ENOMEM;
969 			goto dma_unmap;
970 		}
971 	}
972 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
973 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
974 					  &xor_val_result, DMA_PREP_INTERRUPT);
975 	if (!tx) {
976 		dev_err(dev, "Self-test 2nd zero prep failed\n");
977 		err = -ENODEV;
978 		goto dma_unmap;
979 	}
980 
981 	async_tx_ack(tx);
982 	init_completion(&cmp);
983 	tx->callback = ioat_dma_test_callback;
984 	tx->callback_param = &cmp;
985 	cookie = tx->tx_submit(tx);
986 	if (cookie < 0) {
987 		dev_err(dev, "Self-test  2nd zero setup failed\n");
988 		err = -ENODEV;
989 		goto dma_unmap;
990 	}
991 	dma->device_issue_pending(dma_chan);
992 
993 	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
994 
995 	if (tmo == 0 ||
996 	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
997 		dev_err(dev, "Self-test 2nd validate timed out\n");
998 		err = -ENODEV;
999 		goto dma_unmap;
1000 	}
1001 
1002 	if (xor_val_result != SUM_CHECK_P_RESULT) {
1003 		dev_err(dev, "Self-test validate failed compare\n");
1004 		err = -ENODEV;
1005 		goto dma_unmap;
1006 	}
1007 
1008 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
1009 		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
1010 
1011 	goto free_resources;
1012 dma_unmap:
1013 	if (op == IOAT_OP_XOR) {
1014 		while (--i >= 0)
1015 			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
1016 				       DMA_TO_DEVICE);
1017 		dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
1018 	} else if (op == IOAT_OP_XOR_VAL) {
1019 		while (--i >= 0)
1020 			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
1021 				       DMA_TO_DEVICE);
1022 	}
1023 free_resources:
1024 	dma->device_free_chan_resources(dma_chan);
1025 out:
1026 	src_idx = IOAT_NUM_SRC_TEST;
1027 	while (src_idx--)
1028 		__free_page(xor_srcs[src_idx]);
1029 	__free_page(dest);
1030 	return err;
1031 }
1032 
1033 static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
1034 {
1035 	int rc;
1036 
1037 	rc = ioat_dma_self_test(ioat_dma);
1038 	if (rc)
1039 		return rc;
1040 
1041 	rc = ioat_xor_val_self_test(ioat_dma);
1042 
1043 	return rc;
1044 }
1045 
1046 static void ioat_intr_quirk(struct ioatdma_device *ioat_dma)
1047 {
1048 	struct dma_device *dma;
1049 	struct dma_chan *c;
1050 	struct ioatdma_chan *ioat_chan;
1051 	u32 errmask;
1052 
1053 	dma = &ioat_dma->dma_dev;
1054 
1055 	/*
1056 	 * If the hardware reports errors through descriptor write back status
1057 	 * (DWBES), mask the per-channel XOR/PQ error interrupts instead.
1058 	 */
1059 	if (ioat_dma->cap & IOAT_CAP_DWBES) {
1060 		list_for_each_entry(c, &dma->channels, device_node) {
1061 			ioat_chan = to_ioat_chan(c);
1062 			errmask = readl(ioat_chan->reg_base +
1063 					IOAT_CHANERR_MASK_OFFSET);
1064 			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
1065 				   IOAT_CHANERR_XOR_Q_ERR;
1066 			writel(errmask, ioat_chan->reg_base +
1067 					IOAT_CHANERR_MASK_OFFSET);
1068 		}
1069 	}
1070 }
1071 
1072 static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
1073 {
1074 	struct pci_dev *pdev = ioat_dma->pdev;
1075 	int dca_en = system_has_dca_enabled(pdev);
1076 	struct dma_device *dma;
1077 	struct dma_chan *c;
1078 	struct ioatdma_chan *ioat_chan;
1079 	int err;
1080 	u16 val16;
1081 
1082 	dma = &ioat_dma->dma_dev;
1083 	dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
1084 	dma->device_issue_pending = ioat_issue_pending;
1085 	dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
1086 	dma->device_free_chan_resources = ioat_free_chan_resources;
1087 
1088 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
1089 	dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;
1090 
1091 	ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);
1092 
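	/*
	 * Editorial note: on the CB3.2-class Xeon parts (is_xeon_cb32()) and
	 * the no-RAID BWD/BDX-DE variants the RAID offloads are not usable,
	 * so XOR, PQ and 16-source RAID are masked off even if the capability
	 * register advertises them (rationale inferred from the masking below).
	 */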
1093 	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
1094 		ioat_dma->cap &=
1095 			~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
1096 
1097 	/* dca is incompatible with raid operations */
1098 	if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
1099 		ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
1100 
1101 	if (ioat_dma->cap & IOAT_CAP_XOR) {
1102 		dma->max_xor = 8;
1103 
1104 		dma_cap_set(DMA_XOR, dma->cap_mask);
1105 		dma->device_prep_dma_xor = ioat_prep_xor;
1106 
1107 		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
1108 		dma->device_prep_dma_xor_val = ioat_prep_xor_val;
1109 	}
1110 
1111 	if (ioat_dma->cap & IOAT_CAP_PQ) {
1112 
1113 		dma->device_prep_dma_pq = ioat_prep_pq;
1114 		dma->device_prep_dma_pq_val = ioat_prep_pq_val;
1115 		dma_cap_set(DMA_PQ, dma->cap_mask);
1116 		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
1117 
1118 		if (ioat_dma->cap & IOAT_CAP_RAID16SS)
1119 			dma_set_maxpq(dma, 16, 0);
1120 		else
1121 			dma_set_maxpq(dma, 8, 0);
1122 
1123 		if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
1124 			dma->device_prep_dma_xor = ioat_prep_pqxor;
1125 			dma->device_prep_dma_xor_val = ioat_prep_pqxor_val;
1126 			dma_cap_set(DMA_XOR, dma->cap_mask);
1127 			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
1128 
1129 			if (ioat_dma->cap & IOAT_CAP_RAID16SS)
1130 				dma->max_xor = 16;
1131 			else
1132 				dma->max_xor = 8;
1133 		}
1134 	}
1135 
1136 	dma->device_tx_status = ioat_tx_status;
1137 
1138 	/* starting with CB3.3, super extended descriptors are supported */
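	/*
	 * Each of the MAX_SED_POOLS pools created below serves a different
	 * descriptor size, SED_SIZE * (i + 1); presumably pool i backs
	 * operations that need i + 1 extension blocks.
	 */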
1139 	if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
1140 		char pool_name[14];
1141 		int i;
1142 
1143 		for (i = 0; i < MAX_SED_POOLS; i++) {
1144 			snprintf(pool_name, sizeof(pool_name), "ioat_hw%d_sed", i);
1145 
1146 			/* allocate SED DMA pool */
1147 			ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
1148 					&pdev->dev,
1149 					SED_SIZE * (i + 1), 64, 0);
1150 			if (!ioat_dma->sed_hw_pool[i])
1151 				return -ENOMEM;
1152 
1153 		}
1154 	}
1155 
1156 	if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
1157 		dma_cap_set(DMA_PRIVATE, dma->cap_mask);
1158 
1159 	err = ioat_probe(ioat_dma);
1160 	if (err)
1161 		return err;
1162 
1163 	list_for_each_entry(c, &dma->channels, device_node) {
1164 		ioat_chan = to_ioat_chan(c);
1165 		writel(IOAT_DMA_DCA_ANY_CPU,
1166 		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
1167 	}
1168 
1169 	err = ioat_register(ioat_dma);
1170 	if (err)
1171 		return err;
1172 
1173 	ioat_kobject_add(ioat_dma, &ioat_ktype);
1174 
1175 	if (dca)
1176 		ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);
1177 
1178 	/* disable relaxed ordering */
1179 	err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16);
1180 	if (err)
1181 		return err;
1182 
1183 	/* clear relaxed ordering enable */
1184 	val16 &= ~IOAT_DEVCTRL_ROE;
1185 	err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16);
1186 	if (err)
1187 		return err;
1188 
1189 	return 0;
1190 }
1191 
1192 static void ioat_shutdown(struct pci_dev *pdev)
1193 {
1194 	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
1195 	struct ioatdma_chan *ioat_chan;
1196 	int i;
1197 
1198 	if (!ioat_dma)
1199 		return;
1200 
1201 	for (i = 0; i < IOAT_MAX_CHANS; i++) {
1202 		ioat_chan = ioat_dma->idx[i];
1203 		if (!ioat_chan)
1204 			continue;
1205 
1206 		spin_lock_bh(&ioat_chan->prep_lock);
1207 		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
1208 		del_timer_sync(&ioat_chan->timer);
1209 		spin_unlock_bh(&ioat_chan->prep_lock);
1210 		/* this should quiesce then reset */
1211 		ioat_reset_hw(ioat_chan);
1212 	}
1213 
1214 	ioat_disable_interrupts(ioat_dma);
1215 }
1216 
1217 static void ioat_resume(struct ioatdma_device *ioat_dma)
1218 {
1219 	struct ioatdma_chan *ioat_chan;
1220 	u32 chanerr;
1221 	int i;
1222 
1223 	for (i = 0; i < IOAT_MAX_CHANS; i++) {
1224 		ioat_chan = ioat_dma->idx[i];
1225 		if (!ioat_chan)
1226 			continue;
1227 
1228 		spin_lock_bh(&ioat_chan->prep_lock);
1229 		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
1230 		spin_unlock_bh(&ioat_chan->prep_lock);
1231 
1232 		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
1233 		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
1234 
1235 		/* no need to reset as shutdown already did that */
1236 	}
1237 }
1238 
1239 #define DRV_NAME "ioatdma"
1240 
1241 static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
1242 						 enum pci_channel_state error)
1243 {
1244 	dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error);
1245 
1246 	/* quiesce and block I/O */
1247 	ioat_shutdown(pdev);
1248 
1249 	return PCI_ERS_RESULT_NEED_RESET;
1250 }
1251 
1252 static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
1253 {
1254 	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
1255 	int err;
1256 
1257 	dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);
1258 
1259 	if (pci_enable_device_mem(pdev) < 0) {
1260 		dev_err(&pdev->dev,
1261 			"Failed to enable PCIe device after reset.\n");
1262 		result = PCI_ERS_RESULT_DISCONNECT;
1263 	} else {
1264 		pci_set_master(pdev);
1265 		pci_restore_state(pdev);
1266 		pci_save_state(pdev);
1267 		pci_wake_from_d3(pdev, false);
1268 	}
1269 
1270 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
1271 	if (err) {
1272 		dev_err(&pdev->dev,
1273 			"AER uncorrect error status clear failed: %#x\n", err);
1274 	}
1275 
1276 	return result;
1277 }
1278 
1279 static void ioat_pcie_error_resume(struct pci_dev *pdev)
1280 {
1281 	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
1282 
1283 	dev_dbg(&pdev->dev, "%s: AER handling resuming\n", DRV_NAME);
1284 
1285 	/* initialize and bring everything back */
1286 	ioat_resume(ioat_dma);
1287 }
1288 
1289 static const struct pci_error_handlers ioat_err_handler = {
1290 	.error_detected = ioat_pcie_error_detected,
1291 	.slot_reset = ioat_pcie_error_slot_reset,
1292 	.resume = ioat_pcie_error_resume,
1293 };
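
/*
 * AER recovery flow for this driver: ->error_detected() quiesces every
 * channel via ioat_shutdown(), ->slot_reset() re-enables and restores the
 * PCI device after the link reset, and ->resume() clears per-channel error
 * status and marks the channels usable again.
 */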
1294 
1295 static struct pci_driver ioat_pci_driver = {
1296 	.name		= DRV_NAME,
1297 	.id_table	= ioat_pci_tbl,
1298 	.probe		= ioat_pci_probe,
1299 	.remove		= ioat_remove,
1300 	.shutdown	= ioat_shutdown,
1301 	.err_handler	= &ioat_err_handler,
1302 };
1303 
1304 static struct ioatdma_device *
1305 alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
1306 {
1307 	struct device *dev = &pdev->dev;
1308 	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
1309 
1310 	if (!d)
1311 		return NULL;
1312 	d->pdev = pdev;
1313 	d->reg_base = iobase;
1314 	return d;
1315 }
1316 
1317 static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1318 {
1319 	void __iomem * const *iomap;
1320 	struct device *dev = &pdev->dev;
1321 	struct ioatdma_device *device;
1322 	int err;
1323 
1324 	err = pcim_enable_device(pdev);
1325 	if (err)
1326 		return err;
1327 
1328 	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
1329 	if (err)
1330 		return err;
1331 	iomap = pcim_iomap_table(pdev);
1332 	if (!iomap)
1333 		return -ENOMEM;
1334 
1335 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1336 	if (err)
1337 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1338 	if (err)
1339 		return err;
1340 
1341 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1342 	if (err)
1343 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1344 	if (err)
1345 		return err;
1346 
1347 	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
1348 	if (!device)
1349 		return -ENOMEM;
1350 	pci_set_master(pdev);
1351 	pci_set_drvdata(pdev, device);
1352 
1353 	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
1354 	if (device->version >= IOAT_VER_3_0) {
1355 		if (is_skx_ioat(pdev))
1356 			device->version = IOAT_VER_3_2;
1357 		err = ioat3_dma_probe(device, ioat_dca_enabled);
1358 
1359 		if (device->version >= IOAT_VER_3_3)
1360 			pci_enable_pcie_error_reporting(pdev);
1361 	} else
1362 		return -ENODEV;
1363 
1364 	if (err) {
1365 		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
1366 		pci_disable_pcie_error_reporting(pdev);
1367 		return -ENODEV;
1368 	}
1369 
1370 	return 0;
1371 }
1372 
1373 static void ioat_remove(struct pci_dev *pdev)
1374 {
1375 	struct ioatdma_device *device = pci_get_drvdata(pdev);
1376 
1377 	if (!device)
1378 		return;
1379 
1380 	dev_err(&pdev->dev, "Removing dma and dca services\n");
1381 	if (device->dca) {
1382 		unregister_dca_provider(device->dca, &pdev->dev);
1383 		free_dca_provider(device->dca);
1384 		device->dca = NULL;
1385 	}
1386 
1387 	pci_disable_pcie_error_reporting(pdev);
1388 	ioat_dma_remove(device);
1389 }
1390 
1391 static int __init ioat_init_module(void)
1392 {
1393 	int err = -ENOMEM;
1394 
1395 	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
1396 		DRV_NAME, IOAT_DMA_VERSION);
1397 
1398 	ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
1399 					0, SLAB_HWCACHE_ALIGN, NULL);
1400 	if (!ioat_cache)
1401 		return -ENOMEM;
1402 
1403 	ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
1404 	if (!ioat_sed_cache)
1405 		goto err_ioat_cache;
1406 
1407 	err = pci_register_driver(&ioat_pci_driver);
1408 	if (err)
1409 		goto err_ioat3_cache;
1410 
1411 	return 0;
1412 
1413  err_ioat3_cache:
1414 	kmem_cache_destroy(ioat_sed_cache);
1415 
1416  err_ioat_cache:
1417 	kmem_cache_destroy(ioat_cache);
1418 
1419 	return err;
1420 }
1421 module_init(ioat_init_module);
1422 
1423 static void __exit ioat_exit_module(void)
1424 {
1425 	pci_unregister_driver(&ioat_pci_driver);
1426 	kmem_cache_destroy(ioat_cache);
1427 }
1428 module_exit(ioat_exit_module);
1429