// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

static int completion_timeout = 200;
module_param(completion_timeout, int, 0644);
MODULE_PARM_DESC(completion_timeout,
		"set ioat completion timeout [msec] (default 200 [msec])");
static int idle_timeout = 2000;
module_param(idle_timeout, int, 0644);
MODULE_PARM_DESC(idle_timeout,
		"set ioat idle timeout [msec] (default 2000 [msec])");

#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)

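/*
 * Note that both macros re-read the module parameters on every use, and
 * the parameters are registered with 0644 permissions, so the two
 * timeouts can be tuned at runtime through the module's sysfs parameter
 * files without reloading the driver.
 */
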
static char *chanerr_str[] = {
	"DMA Transfer Source Address Error",
	"DMA Transfer Destination Address Error",
	"Next Descriptor Address Error",
	"Descriptor Error",
	"Chan Address Value Error",
	"CHANCMD Error",
	"Chipset Uncorrectable Data Integrity Error",
	"DMA Uncorrectable Data Integrity Error",
	"Read Data Error",
	"Write Data Error",
	"Descriptor Control Error",
	"Descriptor Transfer Size Error",
	"Completion Address Error",
	"Interrupt Configuration Error",
	"Super extended descriptor Address Error",
	"Unaffiliated Error",
	"CRC or XOR P Error",
	"XOR Q Error",
	"Descriptor Count Error",
	"DIF All F detect Error",
	"Guard Tag verification Error",
	"Application Tag verification Error",
	"Reference Tag verification Error",
	"Bundle Bit Error",
	"Result DIF All F detect Error",
	"Result Guard Tag verification Error",
	"Result Application Tag verification Error",
	"Result Reference Tag verification Error",
};

static void ioat_eh(struct ioatdma_chan *ioat_chan);

static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
		if ((chanerr >> i) & 1) {
			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
				i, chanerr_str[i]);
		}
	}
}

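/*
 * Each string above corresponds to one bit of the CHANERR register, so a
 * chanerr value of 0x3, for example, logs "Err(0): DMA Transfer Source
 * Address Error" and "Err(1): DMA Transfer Destination Address Error",
 * one line per set bit.
 */
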
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioatdma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

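/*
 * In single-vector (INTx/MSI) mode all channels share one interrupt: the
 * handler above scans ATTNSTATUS to find the channels that asserted it,
 * schedules each running channel's cleanup tasklet, and acks by writing
 * INTRCTRL back.
 */
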
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioatdma_chan *ioat_chan = data;

	if (test_bit(IOAT_RUN, &ioat_chan->state))
		tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

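/*
 * In MSI-X mode each channel owns its own vector, so @data is the channel
 * itself and no ATTNSTATUS scan is needed.
 */
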
void ioat_stop(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	int chan_id = chan_num(ioat_chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &ioat_chan->state);

	/* flush inflight interrupts */
	switch (ioat_dma->irq_mode) {
	case IOAT_MSIX:
		msix = &ioat_dma->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&ioat_chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&ioat_chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	ioat_cleanup_event(&ioat_chan->cleanup_task);
}

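/*
 * The teardown order above matters: clearing IOAT_RUN first stops new
 * tasklet scheduling, synchronize_irq() waits out handlers already in
 * flight, del_timer_sync() and tasklet_kill() drain the deferred work,
 * and only then is it safe to run the final cleanup pass directly.
 */
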
static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
	ioat_chan->issued = ioat_chan->head;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);
}

void ioat_issue_pending(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	if (ioat_ring_pending(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}
}

/**
 * ioat_update_pending - log pending descriptors
 * @ioat_chan: ioat channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with prep_lock held.
 */
static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
		__ioat_issue_pending(ioat_chan);
}

static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat_ring_space(ioat_chan) < 1) {
		dev_err(to_dev(ioat_chan),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
	dump_desc_dbg(ioat_chan, desc);
	/* make sure descriptors are written before we submit */
	wmb();
	ioat_chan->head += 1;
	__ioat_issue_pending(ioat_chan);
}

void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		__ioat_start_null_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

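/*
 * The NULL descriptor moves no data: it gives the engine a valid chain
 * address and, with int_en and compl_write set, forces a completion
 * writeback and interrupt.  It is used to (re)start an otherwise empty
 * channel, e.g. from __ioat_restart_chan() below.
 */
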
static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
	/* set the tail to be re-issued */
	ioat_chan->issued = ioat_chan->tail;
	ioat_chan->dmacount = 0;
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);

	if (ioat_ring_pending(ioat_chan)) {
		struct ioat_ring_ent *desc;

		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
		__ioat_issue_pending(ioat_chan);
	} else
		__ioat_start_null_desc(ioat_chan);
}

static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u32 status;

	status = ioat_chansts(ioat_chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat_chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(ioat_chan);
		cpu_relax();
	}

	return err;
}

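/*
 * ioat_quiesce() issues a suspend and then busy-polls CHANSTS until the
 * channel leaves the active/idle states.  A @tmo of 0 means poll without
 * a deadline; otherwise -ETIMEDOUT is returned once @tmo jiffies elapse.
 */
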
static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(ioat_chan);
	while (ioat_reset_pending(ioat_chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}

static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
	__releases(&ioat_chan->prep_lock)
{
	struct dma_chan *c = tx->chan;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat_chan->head += ioat_chan->produce;

	ioat_update_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);

	return cookie;
}

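/*
 * head, issued and tail are free-running counters; the ring index helpers
 * in dma.h are assumed to reduce them modulo the power-of-two ring size
 * allocated in ioat_alloc_ring() below, so simply adding 'produce' to
 * head is enough to publish the descriptors filled in since
 * ioat_check_space_lock().
 */
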
static struct ioat_ring_ent *
ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
	int chunk;
	dma_addr_t phys;
	u8 *pos;
	off_t offs;

	chunk = idx / IOAT_DESCS_PER_CHUNK;
	idx &= (IOAT_DESCS_PER_CHUNK - 1);
	offs = idx * IOAT_DESC_SZ;
	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
	phys = ioat_chan->descs[chunk].hw + offs;
	hw = (struct ioat_dma_descriptor *)pos;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat_cache, flags);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	kmem_cache_free(ioat_cache, desc);
}

struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent **ring;
	int total_descs = 1 << order;
	int i, chunks;

	/* allocate the array to hold the software ring */
	ring = kcalloc(total_descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;

	chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
	ioat_chan->desc_chunks = chunks;

	for (i = 0; i < chunks; i++) {
		struct ioat_descs *descs = &ioat_chan->descs[i];

		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
					IOAT_CHUNK_SIZE, &descs->hw, flags);
		if (!descs->virt) {
			int idx;

			for (idx = 0; idx < i; idx++) {
				descs = &ioat_chan->descs[idx];
				dma_free_coherent(to_dev(ioat_chan),
						IOAT_CHUNK_SIZE,
						descs->virt, descs->hw);
				descs->virt = NULL;
				descs->hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
	}

	for (i = 0; i < total_descs; i++) {
		ring[i] = ioat_alloc_ring_ent(c, i, flags);
		if (!ring[i]) {
			int idx;

			while (i--)
				ioat_free_ring_ent(ring[i], c);

			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
				dma_free_coherent(to_dev(ioat_chan),
						  IOAT_CHUNK_SIZE,
						  ioat_chan->descs[idx].virt,
						  ioat_chan->descs[idx].hw);
				ioat_chan->descs[idx].virt = NULL;
				ioat_chan->descs[idx].hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < total_descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	/* setup descriptor pre-fetching for v3.4 */
	if (ioat_dma->cap & IOAT_CAP_DPS) {
		u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;

		if (chunks == 1)
			drsctl |= IOAT_CHAN_DRS_AUTOWRAP;

		writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
	}

	return ring;
}

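/*
 * The hardware descriptors live in IOAT_CHUNK_SIZE coherent-DMA chunks,
 * while the ioat_ring_ent software wrappers come from a slab cache.  The
 * final linking pass points each descriptor's 'next' at its successor
 * and the last one back at ring[0], so the hardware sees one circular
 * chain of 1 << order descriptors.
 */
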
/**
 * ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat_chan: ioat channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
	__acquires(&ioat_chan->prep_lock)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat_chan->head,
			ioat_chan->tail, ioat_chan->issued);
		ioat_chan->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	spin_unlock_bh(&ioat_chan->prep_lock);

	dev_dbg_ratelimited(to_dev(ioat_chan),
			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			    __func__, num_descs, ioat_chan->head,
			    ioat_chan->tail, ioat_chan->issued);

	/* make reclaim progress in the allocation failure case; we may be
	 * called with bottom halves disabled, so trigger the timer event
	 * directly
	 */
	if (time_is_before_jiffies(ioat_chan->timer.expires)
	    && timer_pending(&ioat_chan->timer)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		ioat_timer_event(&ioat_chan->timer);
	}

	return -ENOMEM;
}

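/*
 * Prep/submit protocol: a successful ioat_check_space_lock() returns with
 * prep_lock held and 'produce' recorded; the caller then fills in its
 * descriptors and submits via ioat_tx_submit_unlock(), which advances
 * head by 'produce' and drops the lock.
 */
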
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

static void
ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(ioat_sed_cache, sed);
}

static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *ioat_chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}

static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
				   u64 *phys_complete)
{
	*phys_complete = ioat_get_current_completion(ioat_chan);
	if (*phys_complete == ioat_chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

static void
desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if there's error written */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}

/**
 * __ioat_cleanup - reclaim used descriptors
 * @ioat_chan: channel (ring) to clean
 * @phys_complete: zeroed (or not) completion address (from status)
 */
static void __ioat_cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat_chan->tail, i;
	u16 active;

	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's a new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat_ring_active(ioat_chan);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		dump_desc_dbg(ioat_chan, desc);

		/* set err stat if we are using dwbes */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat_chan, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			dmaengine_desc_get_callback_invoke(tx, NULL);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	/* finish all descriptor reads before incrementing tail */
	smp_mb();
	ioat_chan->tail = idx + i;
	/* no active descs have written a completion? */
	BUG_ON(active && !seen_current);
	ioat_chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
			__func__);
		mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
	}

	/* microsecond delay per pending descriptor, set via the
	 * intr_coalesce sysfs variable
	 */
	if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
		writew(min((ioat_chan->intr_coalesce * (active - i)),
		       IOAT_INTRDELAY_MASK),
		       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
		ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
	}
}

static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	spin_lock_bh(&ioat_chan->cleanup_lock);

	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__ioat_cleanup(ioat_chan, phys_complete);

	if (is_ioat_halted(*ioat_chan->completion)) {
		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr &
		    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
			mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
			ioat_eh(ioat_chan);
		}
	}

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

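/*
 * The cleanup walk starts at tail and completes descriptors until it
 * reaches the one whose physical address the hardware last wrote to the
 * completion area.  Extended-source XOR/PQ operations occupy two ring
 * slots, which is why desc_has_ext() makes the loop skip an extra entry.
 */
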
void ioat_cleanup_event(struct tasklet_struct *t)
{
	struct ioatdma_chan *ioat_chan = from_tasklet(ioat_chan, t, cleanup_task);

	ioat_cleanup(ioat_chan);
	if (!test_bit(IOAT_RUN, &ioat_chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	/* set the completion address register again */
	writel(lower_32_bits(ioat_chan->completion_dma),
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(upper_32_bits(ioat_chan->completion_dma),
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat_quiesce(ioat_chan, 0);
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__ioat_cleanup(ioat_chan, phys_complete);

	__ioat_restart_chan(ioat_chan);
}

static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	u16 active;
	int idx = ioat_chan->tail, i;

	/*
	 * We assume that the failed descriptor has already been processed.
	 * Here we just complete all the remaining submitted descriptors
	 * as aborted.
	 */
	active = ioat_ring_active(ioat_chan);

	/* we skip the failed descriptor that tail points to */
	for (i = 1; i < active; i++) {
		struct dma_async_tx_descriptor *tx;

		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);

		tx = &desc->txd;
		if (tx->cookie) {
			struct dmaengine_result res;

			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			res.result = DMA_TRANS_ABORTED;
			dmaengine_desc_get_callback_invoke(tx, &res);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			WARN_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat_chan->tail = idx + active;

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
}

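/*
 * Unlike __ioat_cleanup(), which stops at the last hardware-completed
 * descriptor, the abort path above unconditionally retires everything
 * still on the ring with DMA_TRANS_ABORTED and rewrites the completion
 * address, so the ring is consistent before the channel is reset.
 */
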
static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
	struct pci_dev *pdev = to_pdev(ioat_chan);
	struct ioat_dma_descriptor *hw;
	struct dma_async_tx_descriptor *tx;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;
	bool abort = false;
	struct dmaengine_result res;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__ioat_cleanup(ioat_chan, phys_complete);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat_chan, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
		if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
			res.result = DMA_TRANS_READ_FAILED;
			err_handled |= IOAT_CHANERR_READ_DATA_ERR;
		} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
			res.result = DMA_TRANS_WRITE_FAILED;
			err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
		}

		abort = true;
	} else
		res.result = DMA_TRANS_NOERROR;

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		dev_err(to_dev(ioat_chan), "Errors handled:\n");
		ioat_print_chanerrs(ioat_chan, err_handled);
		dev_err(to_dev(ioat_chan), "Errors not handled:\n");
		ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));

		BUG();
	}

	/* cleanup the faulty descriptor since we are continuing */
	tx = &desc->txd;
	if (tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}

	/* mark faulting descriptor as complete */
	*ioat_chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat_chan->prep_lock);
	/* we need to abort all descriptors */
	if (abort) {
		ioat_abort_descs(ioat_chan);
		/* clean up the channel, we could be in a weird state */
		ioat_reset_hw(ioat_chan);
	}

	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	ioat_restart_channel(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

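/*
 * ioat_eh() thus distinguishes three cases: expected errors (P/Q
 * validation results, reported through desc->result and resumed past),
 * recoverable data errors (which abort the remaining ring and reset the
 * channel), and anything else, which is treated as fatal via BUG().
 */
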
static void check_active(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_active(ioat_chan)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}

static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);

	ioat_abort_descs(ioat_chan);
	dev_warn(to_dev(ioat_chan), "Reset channel...\n");
	ioat_reset_hw(ioat_chan);
	dev_warn(to_dev(ioat_chan), "Restart channel...\n");
	ioat_restart_channel(ioat_chan);

	spin_lock_bh(&ioat_chan->prep_lock);
	clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

void ioat_timer_event(struct timer_list *t)
{
	struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(ioat_chan);

	/* when halted due to errors, check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
			spin_lock_bh(&ioat_chan->cleanup_lock);
			ioat_reboot_chan(ioat_chan);
			spin_unlock_bh(&ioat_chan->cleanup_lock);
		}

		return;
	}

	spin_lock_bh(&ioat_chan->cleanup_lock);

	/* handle the no-actives case */
	if (!ioat_ring_active(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		check_active(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
		goto unlock_out;
	}

	/* handle the missed cleanup case */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) {
		/* timer restarted in ioat_cleanup_preamble
		 * and IOAT_COMPLETION_ACK cleared
		 */
		__ioat_cleanup(ioat_chan, phys_complete);
		goto unlock_out;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
			status, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
			ioat_ring_active(ioat_chan));

		ioat_reboot_chan(ioat_chan);

		goto unlock_out;
	}

	/* handle missed issue pending case */
	if (ioat_ring_pending(ioat_chan)) {
		dev_warn(to_dev(ioat_chan),
			"Completion timeout with pending descriptors\n");
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}

	set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
unlock_out:
	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

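/*
 * Summary of the watchdog above: a halted channel is rebooted; an idle
 * ring demotes the timer to IDLE_TIMEOUT; missed completions are reaped;
 * a stalled ring that was already acknowledged once (IOAT_COMPLETION_ACK)
 * is rebooted; otherwise pending work is re-issued and the ack bit is set
 * for the next COMPLETION_TIMEOUT period.
 */
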
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	ioat_cleanup(ioat_chan);

	return dma_cookie_status(c, cookie, txstate);
}

int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	if (ioat_dma->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
					       0x10);
		}
	}

	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
	}

	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
	if (!err) {
		if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
			writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
			writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
			writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
		}
	}

	if (err)
		dev_err(&pdev->dev, "Failed to reset: %d\n", err);

	return err;
}