xref: /openbmc/linux/drivers/dma/ioat/dma.c (revision f8e17c17)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Intel I/OAT DMA Linux driver
4  * Copyright(c) 2004 - 2015 Intel Corporation.
5  */
6 
7 /*
8  * This driver supports an Intel I/OAT DMA engine, which does asynchronous
9  * copy operations.
10  */
11 
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <linux/pci.h>
16 #include <linux/interrupt.h>
17 #include <linux/dmaengine.h>
18 #include <linux/delay.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/workqueue.h>
21 #include <linux/prefetch.h>
22 #include <linux/sizes.h>
23 #include "dma.h"
24 #include "registers.h"
25 #include "hw.h"
26 
27 #include "../dmaengine.h"
28 
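/*
 * Human-readable names for the CHANERR register bits, indexed by bit
 * position.  Used by ioat_print_chanerrs() to decode a raw error value.
 */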
29 static const char *chanerr_str[] = {
30 	"DMA Transfer Source Address Error",
31 	"DMA Transfer Destination Address Error",
32 	"Next Descriptor Address Error",
33 	"Descriptor Error",
34 	"Chan Address Value Error",
35 	"CHANCMD Error",
36 	"Chipset Uncorrectable Data Integrity Error",
37 	"DMA Uncorrectable Data Integrity Error",
38 	"Read Data Error",
39 	"Write Data Error",
40 	"Descriptor Control Error",
41 	"Descriptor Transfer Size Error",
42 	"Completion Address Error",
43 	"Interrupt Configuration Error",
44 	"Super extended descriptor Address Error",
45 	"Unaffiliated Error",
46 	"CRC or XOR P Error",
47 	"XOR Q Error",
48 	"Descriptor Count Error",
49 	"DIF All F detect Error",
50 	"Guard Tag verification Error",
51 	"Application Tag verification Error",
52 	"Reference Tag verification Error",
53 	"Bundle Bit Error",
54 	"Result DIF All F detect Error",
55 	"Result Guard Tag verification Error",
56 	"Result Application Tag verification Error",
57 	"Result Reference Tag verification Error",
58 };
59 
60 static void ioat_eh(struct ioatdma_chan *ioat_chan);
61 
62 static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
63 {
64 	int i;
65 
66 	for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
67 		if ((chanerr >> i) & 1) {
68 			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
69 				i, chanerr_str[i]);
70 		}
71 	}
72 }
73 
74 /**
75  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
76  * @irq: interrupt id
77  * @data: interrupt data
78  */
79 irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
80 {
81 	struct ioatdma_device *instance = data;
82 	struct ioatdma_chan *ioat_chan;
83 	unsigned long attnstatus;
84 	int bit;
85 	u8 intrctrl;
86 
87 	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
88 
89 	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
90 		return IRQ_NONE;
91 
92 	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
93 		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
94 		return IRQ_NONE;
95 	}
96 
97 	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
98 	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
99 		ioat_chan = ioat_chan_by_index(instance, bit);
100 		if (test_bit(IOAT_RUN, &ioat_chan->state))
101 			tasklet_schedule(&ioat_chan->cleanup_task);
102 	}
103 
104 	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
105 	return IRQ_HANDLED;
106 }
107 
108 /**
109  * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
110  * @irq: interrupt id
111  * @data: interrupt data
112  */
113 irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
114 {
115 	struct ioatdma_chan *ioat_chan = data;
116 
117 	if (test_bit(IOAT_RUN, &ioat_chan->state))
118 		tasklet_schedule(&ioat_chan->cleanup_task);
119 
120 	return IRQ_HANDLED;
121 }
122 
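/*
 * Quiesce a channel: once IOAT_RUN is cleared neither the interrupt
 * handlers nor the timer will schedule new cleanup work, so after the
 * irq, timer and tasklet have been flushed one final cleanup pass can
 * run safely.
 */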
123 void ioat_stop(struct ioatdma_chan *ioat_chan)
124 {
125 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
126 	struct pci_dev *pdev = ioat_dma->pdev;
127 	int chan_id = chan_num(ioat_chan);
128 	struct msix_entry *msix;
129 
130 	/* 1/ stop irq from firing tasklets
131 	 * 2/ stop the tasklet from re-arming irqs
132 	 */
133 	clear_bit(IOAT_RUN, &ioat_chan->state);
134 
135 	/* flush inflight interrupts */
136 	switch (ioat_dma->irq_mode) {
137 	case IOAT_MSIX:
138 		msix = &ioat_dma->msix_entries[chan_id];
139 		synchronize_irq(msix->vector);
140 		break;
141 	case IOAT_MSI:
142 	case IOAT_INTX:
143 		synchronize_irq(pdev->irq);
144 		break;
145 	default:
146 		break;
147 	}
148 
149 	/* flush inflight timers */
150 	del_timer_sync(&ioat_chan->timer);
151 
152 	/* flush inflight tasklet runs */
153 	tasklet_kill(&ioat_chan->cleanup_task);
154 
155 	/* final cleanup now that everything is quiesced and can't re-arm */
156 	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
157 }
158 
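/*
 * Hand newly appended descriptors to the hardware by updating the
 * DMACOUNT register; ->issued then catches up with ->head.
 */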
159 static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
160 {
161 	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
162 	ioat_chan->issued = ioat_chan->head;
163 	writew(ioat_chan->dmacount,
164 	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
165 	dev_dbg(to_dev(ioat_chan),
166 		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
167 		__func__, ioat_chan->head, ioat_chan->tail,
168 		ioat_chan->issued, ioat_chan->dmacount);
169 }
170 
171 void ioat_issue_pending(struct dma_chan *c)
172 {
173 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
174 
175 	if (ioat_ring_pending(ioat_chan)) {
176 		spin_lock_bh(&ioat_chan->prep_lock);
177 		__ioat_issue_pending(ioat_chan);
178 		spin_unlock_bh(&ioat_chan->prep_lock);
179 	}
180 }
181 
182 /**
183  * ioat_update_pending - log pending descriptors
184  * @ioat_chan: ioat channel
185  *
186  * Check if the number of unsubmitted descriptors has exceeded the
187  * watermark.  Called with prep_lock held
188  */
189 static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
190 {
191 	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
192 		__ioat_issue_pending(ioat_chan);
193 }
194 
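/*
 * Queue a NULL descriptor: it moves no data, but it gives the channel a
 * valid chain address and a completion write-back, which is how the ring
 * is (re)started.
 */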
195 static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
196 {
197 	struct ioat_ring_ent *desc;
198 	struct ioat_dma_descriptor *hw;
199 
200 	if (ioat_ring_space(ioat_chan) < 1) {
201 		dev_err(to_dev(ioat_chan),
202 			"Unable to start null desc - ring full\n");
203 		return;
204 	}
205 
206 	dev_dbg(to_dev(ioat_chan),
207 		"%s: head: %#x tail: %#x issued: %#x\n",
208 		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
209 	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
210 
211 	hw = desc->hw;
212 	hw->ctl = 0;
213 	hw->ctl_f.null = 1;
214 	hw->ctl_f.int_en = 1;
215 	hw->ctl_f.compl_write = 1;
216 	/* set size to non-zero value (channel returns error when size is 0) */
217 	hw->size = NULL_DESC_BUFFER_SIZE;
218 	hw->src_addr = 0;
219 	hw->dst_addr = 0;
220 	async_tx_ack(&desc->txd);
221 	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
222 	dump_desc_dbg(ioat_chan, desc);
223 	/* make sure descriptors are written before we submit */
224 	wmb();
225 	ioat_chan->head += 1;
226 	__ioat_issue_pending(ioat_chan);
227 }
228 
229 void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
230 {
231 	spin_lock_bh(&ioat_chan->prep_lock);
232 	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
233 		__ioat_start_null_desc(ioat_chan);
234 	spin_unlock_bh(&ioat_chan->prep_lock);
235 }
236 
237 static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
238 {
239 	/* set the tail to be re-issued */
240 	ioat_chan->issued = ioat_chan->tail;
241 	ioat_chan->dmacount = 0;
242 	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
243 
244 	dev_dbg(to_dev(ioat_chan),
245 		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
246 		__func__, ioat_chan->head, ioat_chan->tail,
247 		ioat_chan->issued, ioat_chan->dmacount);
248 
249 	if (ioat_ring_pending(ioat_chan)) {
250 		struct ioat_ring_ent *desc;
251 
252 		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
253 		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
254 		__ioat_issue_pending(ioat_chan);
255 	} else
256 		__ioat_start_null_desc(ioat_chan);
257 }
258 
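/*
 * Suspend the channel and spin until its status leaves the ACTIVE/IDLE
 * states.  A timeout of zero means wait indefinitely; otherwise
 * -ETIMEDOUT is returned if the channel has not stopped in time.
 */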
259 static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
260 {
261 	unsigned long end = jiffies + tmo;
262 	int err = 0;
263 	u32 status;
264 
265 	status = ioat_chansts(ioat_chan);
266 	if (is_ioat_active(status) || is_ioat_idle(status))
267 		ioat_suspend(ioat_chan);
268 	while (is_ioat_active(status) || is_ioat_idle(status)) {
269 		if (tmo && time_after(jiffies, end)) {
270 			err = -ETIMEDOUT;
271 			break;
272 		}
273 		status = ioat_chansts(ioat_chan);
274 		cpu_relax();
275 	}
276 
277 	return err;
278 }
279 
280 static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
281 {
282 	unsigned long end = jiffies + tmo;
283 	int err = 0;
284 
285 	ioat_reset(ioat_chan);
286 	while (ioat_reset_pending(ioat_chan)) {
287 		if (end && time_after(jiffies, end)) {
288 			err = -ETIMEDOUT;
289 			break;
290 		}
291 		cpu_relax();
292 	}
293 
294 	return err;
295 }
296 
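/*
 * ->tx_submit() hook.  prep_lock was taken in ioat_check_space_lock()
 * when the descriptors were reserved; assign a cookie, publish the
 * descriptor writes with wmb(), advance ->head by ->produce, and drop
 * the lock.
 */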
297 static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
298 	__releases(&ioat_chan->prep_lock)
299 {
300 	struct dma_chan *c = tx->chan;
301 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
302 	dma_cookie_t cookie;
303 
304 	cookie = dma_cookie_assign(tx);
305 	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);
306 
307 	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
308 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
309 
310 	/* make descriptor updates visible before advancing ioat->head,
311 	 * this is purposefully not smp_wmb() since we are also
312 	 * publishing the descriptor updates to a dma device
313 	 */
314 	wmb();
315 
316 	ioat_chan->head += ioat_chan->produce;
317 
318 	ioat_update_pending(ioat_chan);
319 	spin_unlock_bh(&ioat_chan->prep_lock);
320 
321 	return cookie;
322 }
323 
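/*
 * Hardware descriptors are carved out of the 2MB DMA-coherent chunks
 * attached to the channel; idx selects the chunk and the offset within
 * it.  Only the software state (struct ioat_ring_ent) comes from
 * ioat_cache.
 */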
324 static struct ioat_ring_ent *
325 ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
326 {
327 	struct ioat_dma_descriptor *hw;
328 	struct ioat_ring_ent *desc;
329 	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
330 	int chunk;
331 	dma_addr_t phys;
332 	u8 *pos;
333 	off_t offs;
334 
335 	chunk = idx / IOAT_DESCS_PER_2M;
336 	idx &= (IOAT_DESCS_PER_2M - 1);
337 	offs = idx * IOAT_DESC_SZ;
338 	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
339 	phys = ioat_chan->descs[chunk].hw + offs;
340 	hw = (struct ioat_dma_descriptor *)pos;
341 	memset(hw, 0, sizeof(*hw));
342 
343 	desc = kmem_cache_zalloc(ioat_cache, flags);
344 	if (!desc)
345 		return NULL;
346 
347 	dma_async_tx_descriptor_init(&desc->txd, chan);
348 	desc->txd.tx_submit = ioat_tx_submit_unlock;
349 	desc->hw = hw;
350 	desc->txd.phys = phys;
351 	return desc;
352 }
353 
354 void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
355 {
356 	kmem_cache_free(ioat_cache, desc);
357 }
358 
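/*
 * Allocate the software ring array and the 2MB coherent chunks that back
 * the hardware descriptors, link the descriptors into a circular chain
 * via ->next, and enable descriptor prefetching on v3.4 (DPS-capable)
 * devices.
 */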
359 struct ioat_ring_ent **
360 ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
361 {
362 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
363 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
364 	struct ioat_ring_ent **ring;
365 	int total_descs = 1 << order;
366 	int i, chunks;
367 
368 	/* allocate the array to hold the software ring */
369 	ring = kcalloc(total_descs, sizeof(*ring), flags);
370 	if (!ring)
371 		return NULL;
372 
373 	ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
374 
375 	for (i = 0; i < chunks; i++) {
376 		struct ioat_descs *descs = &ioat_chan->descs[i];
377 
378 		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
379 						 SZ_2M, &descs->hw, flags);
380 		if (!descs->virt) {
381 			int idx;
382 
383 			for (idx = 0; idx < i; idx++) {
384 				descs = &ioat_chan->descs[idx];
385 				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
386 						  descs->virt, descs->hw);
387 				descs->virt = NULL;
388 				descs->hw = 0;
389 			}
390 
391 			ioat_chan->desc_chunks = 0;
392 			kfree(ring);
393 			return NULL;
394 		}
395 	}
396 
397 	for (i = 0; i < total_descs; i++) {
398 		ring[i] = ioat_alloc_ring_ent(c, i, flags);
399 		if (!ring[i]) {
400 			int idx;
401 
402 			while (i--)
403 				ioat_free_ring_ent(ring[i], c);
404 
405 			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
406 				dma_free_coherent(to_dev(ioat_chan),
407 						  SZ_2M,
408 						  ioat_chan->descs[idx].virt,
409 						  ioat_chan->descs[idx].hw);
410 				ioat_chan->descs[idx].virt = NULL;
411 				ioat_chan->descs[idx].hw = 0;
412 			}
413 
414 			ioat_chan->desc_chunks = 0;
415 			kfree(ring);
416 			return NULL;
417 		}
418 		set_desc_id(ring[i], i);
419 	}
420 
421 	/* link descs */
422 	for (i = 0; i < total_descs-1; i++) {
423 		struct ioat_ring_ent *next = ring[i+1];
424 		struct ioat_dma_descriptor *hw = ring[i]->hw;
425 
426 		hw->next = next->txd.phys;
427 	}
428 	ring[i]->hw->next = ring[0]->txd.phys;
429 
430 	/* setup descriptor pre-fetching for v3.4 */
431 	if (ioat_dma->cap & IOAT_CAP_DPS) {
432 		u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;
433 
434 		if (chunks == 1)
435 			drsctl |= IOAT_CHAN_DRS_AUTOWRAP;
436 
437 		writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
438 
439 	}
440 
441 	return ring;
442 }
443 
444 /**
445  * ioat_check_space_lock - verify space and grab ring producer lock
446  * @ioat_chan: ioat channel (ring) to operate on
447  * @num_descs: allocation length
448  */
449 int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
450 	__acquires(&ioat_chan->prep_lock)
451 {
452 	spin_lock_bh(&ioat_chan->prep_lock);
453 	/* never allow the last descriptor to be consumed, we need at
454 	 * least one free at all times to allow for on-the-fly ring
455 	 * resizing.
456 	 */
457 	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
458 		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
459 			__func__, num_descs, ioat_chan->head,
460 			ioat_chan->tail, ioat_chan->issued);
461 		ioat_chan->produce = num_descs;
462 		return 0;  /* with ioat->prep_lock held */
463 	}
464 	spin_unlock_bh(&ioat_chan->prep_lock);
465 
466 	dev_dbg_ratelimited(to_dev(ioat_chan),
467 			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
468 			    __func__, num_descs, ioat_chan->head,
469 			    ioat_chan->tail, ioat_chan->issued);
470 
471 	/* Progress reclaim in the allocation failure case.  We may be
472 	 * called with bottom halves disabled, so we need to trigger the
473 	 * timer event directly.
474 	 */
475 	if (time_is_before_jiffies(ioat_chan->timer.expires)
476 	    && timer_pending(&ioat_chan->timer)) {
477 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
478 		ioat_timer_event(&ioat_chan->timer);
479 	}
480 
481 	return -ENOMEM;
482 }
483 
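/*
 * XOR operations with more than 5 sources and PQ operations with more
 * than 3 sources spill their extra source addresses into a second ring
 * slot, which the cleanup and abort paths must step over.
 */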
484 static bool desc_has_ext(struct ioat_ring_ent *desc)
485 {
486 	struct ioat_dma_descriptor *hw = desc->hw;
487 
488 	if (hw->ctl_f.op == IOAT_OP_XOR ||
489 	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
490 		struct ioat_xor_descriptor *xor = desc->xor;
491 
492 		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
493 			return true;
494 	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
495 		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
496 		struct ioat_pq_descriptor *pq = desc->pq;
497 
498 		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
499 			return true;
500 	}
501 
502 	return false;
503 }
504 
505 static void
506 ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
507 {
508 	if (!sed)
509 		return;
510 
511 	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
512 	kmem_cache_free(ioat_sed_cache, sed);
513 }
514 
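/*
 * The channel writes its status, including the address of the last
 * completed descriptor, to the completion area in host memory; extract
 * that descriptor address.
 */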
515 static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
516 {
517 	u64 phys_complete;
518 	u64 completion;
519 
520 	completion = *ioat_chan->completion;
521 	phys_complete = ioat_chansts_to_addr(completion);
522 
523 	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
524 		(unsigned long long) phys_complete);
525 
526 	return phys_complete;
527 }
528 
529 static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
530 				   u64 *phys_complete)
531 {
532 	*phys_complete = ioat_get_current_completion(ioat_chan);
533 	if (*phys_complete == ioat_chan->last_completion)
534 		return false;
535 
536 	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
537 	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
538 
539 	return true;
540 }
541 
542 static void
543 desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
544 {
545 	struct ioat_dma_descriptor *hw = desc->hw;
546 
547 	switch (hw->ctl_f.op) {
548 	case IOAT_OP_PQ_VAL:
549 	case IOAT_OP_PQ_VAL_16S:
550 	{
551 		struct ioat_pq_descriptor *pq = desc->pq;
552 
553 		/* check if there's an error written */
554 		if (!pq->dwbes_f.wbes)
555 			return;
556 
557 		/* need to set a chanerr var for checking to clear later */
558 
559 		if (pq->dwbes_f.p_val_err)
560 			*desc->result |= SUM_CHECK_P_RESULT;
561 
562 		if (pq->dwbes_f.q_val_err)
563 			*desc->result |= SUM_CHECK_Q_RESULT;
564 
565 		return;
566 	}
567 	default:
568 		return;
569 	}
570 }
571 
572 /**
573  * __cleanup - reclaim used descriptors
574  * @ioat_chan: channel (ring) to clean
 * @phys_complete: completion address from the last status writeback
575  */
576 static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
577 {
578 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
579 	struct ioat_ring_ent *desc;
580 	bool seen_current = false;
581 	int idx = ioat_chan->tail, i;
582 	u16 active;
583 
584 	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
585 		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
586 
587 	/*
588 	 * At restart of the channel, the completion address and the
589 	 * channel status will be 0 due to starting a new chain. Since
590 	 * it's a new chain and the first descriptor "fails", there is
591 	 * nothing to clean up. We do not want to reap the entire submitted
592 	 * chain due to this 0 address value and then BUG.
593 	 */
594 	if (!phys_complete)
595 		return;
596 
597 	active = ioat_ring_active(ioat_chan);
598 	for (i = 0; i < active && !seen_current; i++) {
599 		struct dma_async_tx_descriptor *tx;
600 
601 		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
602 		desc = ioat_get_ring_ent(ioat_chan, idx + i);
603 		dump_desc_dbg(ioat_chan, desc);
604 
605 		/* set err stat if we are using dwbes */
606 		if (ioat_dma->cap & IOAT_CAP_DWBES)
607 			desc_get_errstat(ioat_chan, desc);
608 
609 		tx = &desc->txd;
610 		if (tx->cookie) {
611 			dma_cookie_complete(tx);
612 			dma_descriptor_unmap(tx);
613 			dmaengine_desc_get_callback_invoke(tx, NULL);
614 			tx->callback = NULL;
615 			tx->callback_result = NULL;
616 		}
617 
618 		if (tx->phys == phys_complete)
619 			seen_current = true;
620 
621 		/* skip extended descriptors */
622 		if (desc_has_ext(desc)) {
623 			BUG_ON(i + 1 >= active);
624 			i++;
625 		}
626 
627 		/* cleanup super extended descriptors */
628 		if (desc->sed) {
629 			ioat_free_sed(ioat_dma, desc->sed);
630 			desc->sed = NULL;
631 		}
632 	}
633 
634 	/* finish all descriptor reads before incrementing tail */
635 	smp_mb();
636 	ioat_chan->tail = idx + i;
637 	/* no active descs have written a completion? */
638 	BUG_ON(active && !seen_current);
639 	ioat_chan->last_completion = phys_complete;
640 
641 	if (active - i == 0) {
642 		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
643 			__func__);
644 		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
645 	}
646 
647 	/* microsecond delay, set via sysfs, per pending descriptor */
648 	if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
649 		writew(min((ioat_chan->intr_coalesce * (active - i)),
650 		       IOAT_INTRDELAY_MASK),
651 		       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
652 		ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
653 	}
654 }
655 
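/*
 * Normal reclaim path, run from the cleanup tasklet (ioat_cleanup_event())
 * and from ioat_tx_status().  If the channel has halted on an error we
 * know how to handle, kick ioat_eh().
 */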
656 static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
657 {
658 	u64 phys_complete;
659 
660 	spin_lock_bh(&ioat_chan->cleanup_lock);
661 
662 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
663 		__cleanup(ioat_chan, phys_complete);
664 
665 	if (is_ioat_halted(*ioat_chan->completion)) {
666 		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
667 
668 		if (chanerr &
669 		    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
670 			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
671 			ioat_eh(ioat_chan);
672 		}
673 	}
674 
675 	spin_unlock_bh(&ioat_chan->cleanup_lock);
676 }
677 
678 void ioat_cleanup_event(unsigned long data)
679 {
680 	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
681 
682 	ioat_cleanup(ioat_chan);
683 	if (!test_bit(IOAT_RUN, &ioat_chan->state))
684 		return;
685 	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
686 }
687 
688 static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
689 {
690 	u64 phys_complete;
691 
692 	/* set the completion address register again */
693 	writel(lower_32_bits(ioat_chan->completion_dma),
694 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
695 	writel(upper_32_bits(ioat_chan->completion_dma),
696 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
697 
698 	ioat_quiesce(ioat_chan, 0);
699 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
700 		__cleanup(ioat_chan, phys_complete);
701 
702 	__ioat_restart_chan(ioat_chan);
703 }
704 
705 
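/*
 * Complete every descriptor still on the ring beyond the failed one at
 * ->tail with a DMA_TRANS_ABORTED result, then advance ->tail and the
 * completion address past them.
 */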
706 static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
707 {
708 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
709 	struct ioat_ring_ent *desc;
710 	u16 active;
711 	int idx = ioat_chan->tail, i;
712 
713 	/*
714 	 * We assume that the failed descriptor has been processed.
715 	 * Now we simply return all of the remaining submitted
716 	 * descriptors as aborted.
717 	 */
718 	active = ioat_ring_active(ioat_chan);
719 
720 	/* we skip the failed descriptor that tail points to */
721 	for (i = 1; i < active; i++) {
722 		struct dma_async_tx_descriptor *tx;
723 
724 		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
725 		desc = ioat_get_ring_ent(ioat_chan, idx + i);
726 
727 		tx = &desc->txd;
728 		if (tx->cookie) {
729 			struct dmaengine_result res;
730 
731 			dma_cookie_complete(tx);
732 			dma_descriptor_unmap(tx);
733 			res.result = DMA_TRANS_ABORTED;
734 			dmaengine_desc_get_callback_invoke(tx, &res);
735 			tx->callback = NULL;
736 			tx->callback_result = NULL;
737 		}
738 
739 		/* skip extended descriptors */
740 		if (desc_has_ext(desc)) {
741 			WARN_ON(i + 1 >= active);
742 			i++;
743 		}
744 
745 		/* cleanup super extended descriptors */
746 		if (desc->sed) {
747 			ioat_free_sed(ioat_dma, desc->sed);
748 			desc->sed = NULL;
749 		}
750 	}
751 
752 	smp_mb(); /* finish all descriptor reads before incrementing tail */
753 	ioat_chan->tail = idx + active;
754 
755 	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
756 	ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
757 }
758 
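/*
 * Error handler, called when the channel halts.  Validation failures for
 * XOR/PQ operations are folded into the descriptor's result, recoverable
 * read/write errors cause the remaining descriptors to be aborted and the
 * channel to be reset, and any unhandled error or spurious halt is fatal
 * (BUG).  The channel is restarted afterwards.
 */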
759 static void ioat_eh(struct ioatdma_chan *ioat_chan)
760 {
761 	struct pci_dev *pdev = to_pdev(ioat_chan);
762 	struct ioat_dma_descriptor *hw;
763 	struct dma_async_tx_descriptor *tx;
764 	u64 phys_complete;
765 	struct ioat_ring_ent *desc;
766 	u32 err_handled = 0;
767 	u32 chanerr_int;
768 	u32 chanerr;
769 	bool abort = false;
770 	struct dmaengine_result res;
771 
772 	/* clean up so tail points to the descriptor that caused the error */
773 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
774 		__cleanup(ioat_chan, phys_complete);
775 
776 	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
777 	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
778 
779 	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
780 		__func__, chanerr, chanerr_int);
781 
782 	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
783 	hw = desc->hw;
784 	dump_desc_dbg(ioat_chan, desc);
785 
786 	switch (hw->ctl_f.op) {
787 	case IOAT_OP_XOR_VAL:
788 		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
789 			*desc->result |= SUM_CHECK_P_RESULT;
790 			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
791 		}
792 		break;
793 	case IOAT_OP_PQ_VAL:
794 	case IOAT_OP_PQ_VAL_16S:
795 		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
796 			*desc->result |= SUM_CHECK_P_RESULT;
797 			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
798 		}
799 		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
800 			*desc->result |= SUM_CHECK_Q_RESULT;
801 			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
802 		}
803 		break;
804 	}
805 
806 	if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
807 		if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
808 			res.result = DMA_TRANS_READ_FAILED;
809 			err_handled |= IOAT_CHANERR_READ_DATA_ERR;
810 		} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
811 			res.result = DMA_TRANS_WRITE_FAILED;
812 			err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
813 		}
814 
815 		abort = true;
816 	} else
817 		res.result = DMA_TRANS_NOERROR;
818 
819 	/* fault on unhandled error or spurious halt */
820 	if (chanerr ^ err_handled || chanerr == 0) {
821 		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
822 			__func__, chanerr, err_handled);
823 		dev_err(to_dev(ioat_chan), "Errors handled:\n");
824 		ioat_print_chanerrs(ioat_chan, err_handled);
825 		dev_err(to_dev(ioat_chan), "Errors not handled:\n");
826 		ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));
827 
828 		BUG();
829 	}
830 
831 	/* cleanup the faulty descriptor since we are continuing */
832 	tx = &desc->txd;
833 	if (tx->cookie) {
834 		dma_cookie_complete(tx);
835 		dma_descriptor_unmap(tx);
836 		dmaengine_desc_get_callback_invoke(tx, &res);
837 		tx->callback = NULL;
838 		tx->callback_result = NULL;
839 	}
840 
841 	/* mark faulting descriptor as complete */
842 	*ioat_chan->completion = desc->txd.phys;
843 
844 	spin_lock_bh(&ioat_chan->prep_lock);
845 	/* we need to abort all descriptors */
846 	if (abort) {
847 		ioat_abort_descs(ioat_chan);
848 		/* clean up the channel, we could be in a weird state */
849 		ioat_reset_hw(ioat_chan);
850 	}
851 
852 	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
853 	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
854 
855 	ioat_restart_channel(ioat_chan);
856 	spin_unlock_bh(&ioat_chan->prep_lock);
857 }
858 
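/*
 * Re-arm the completion timeout while descriptors are outstanding;
 * otherwise drop back to the longer idle timeout.
 */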
859 static void check_active(struct ioatdma_chan *ioat_chan)
860 {
861 	if (ioat_ring_active(ioat_chan)) {
862 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
863 		return;
864 	}
865 
866 	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
867 		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
868 }
869 
870 void ioat_timer_event(struct timer_list *t)
871 {
872 	struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
873 	dma_addr_t phys_complete;
874 	u64 status;
875 
876 	status = ioat_chansts(ioat_chan);
877 
878 	/* when halted due to errors, check for channel
879 	 * programming errors before advancing the completion state
880 	 */
881 	if (is_ioat_halted(status)) {
882 		u32 chanerr;
883 
884 		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
885 		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
886 			__func__, chanerr);
887 		dev_err(to_dev(ioat_chan), "Errors:\n");
888 		ioat_print_chanerrs(ioat_chan, chanerr);
889 
890 		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
891 			spin_lock_bh(&ioat_chan->cleanup_lock);
892 			spin_lock_bh(&ioat_chan->prep_lock);
893 			set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
894 			spin_unlock_bh(&ioat_chan->prep_lock);
895 
896 			ioat_abort_descs(ioat_chan);
897 			dev_warn(to_dev(ioat_chan), "Reset channel...\n");
898 			ioat_reset_hw(ioat_chan);
899 			dev_warn(to_dev(ioat_chan), "Restart channel...\n");
900 			ioat_restart_channel(ioat_chan);
901 
902 			spin_lock_bh(&ioat_chan->prep_lock);
903 			clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
904 			spin_unlock_bh(&ioat_chan->prep_lock);
905 			spin_unlock_bh(&ioat_chan->cleanup_lock);
906 		}
907 
908 		return;
909 	}
910 
911 	spin_lock_bh(&ioat_chan->cleanup_lock);
912 
913 	/* handle the no-actives case */
914 	if (!ioat_ring_active(ioat_chan)) {
915 		spin_lock_bh(&ioat_chan->prep_lock);
916 		check_active(ioat_chan);
917 		spin_unlock_bh(&ioat_chan->prep_lock);
918 		spin_unlock_bh(&ioat_chan->cleanup_lock);
919 		return;
920 	}
921 
922 	/* if we haven't made progress and we have already
923 	 * acknowledged a pending completion once, then be more
924 	 * forceful with a restart
925 	 */
926 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
927 		__cleanup(ioat_chan, phys_complete);
928 	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
929 		u32 chanerr;
930 
931 		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
932 		dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
933 			status, chanerr);
934 		dev_err(to_dev(ioat_chan), "Errors:\n");
935 		ioat_print_chanerrs(ioat_chan, chanerr);
936 
937 		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
938 			ioat_ring_active(ioat_chan));
939 
940 		spin_lock_bh(&ioat_chan->prep_lock);
941 		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
942 		spin_unlock_bh(&ioat_chan->prep_lock);
943 
944 		ioat_abort_descs(ioat_chan);
945 		dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
946 		ioat_reset_hw(ioat_chan);
947 		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
948 		ioat_restart_channel(ioat_chan);
949 
950 		spin_lock_bh(&ioat_chan->prep_lock);
951 		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
952 		spin_unlock_bh(&ioat_chan->prep_lock);
953 		spin_unlock_bh(&ioat_chan->cleanup_lock);
954 		return;
955 	} else
956 		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
957 
958 	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
959 	spin_unlock_bh(&ioat_chan->cleanup_lock);
960 }
961 
962 enum dma_status
963 ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
964 		struct dma_tx_state *txstate)
965 {
966 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
967 	enum dma_status ret;
968 
969 	ret = dma_cookie_status(c, cookie, txstate);
970 	if (ret == DMA_COMPLETE)
971 		return ret;
972 
973 	ioat_cleanup(ioat_chan);
974 
975 	return dma_cookie_status(c, cookie, txstate);
976 }
977 
978 int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
979 {
980 	/* throw away whatever the channel was doing and get it
981 	 * initialized, with ioat3 specific workarounds
982 	 * initialized, with ioat3-specific workarounds
983 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
984 	struct pci_dev *pdev = ioat_dma->pdev;
985 	u32 chanerr;
986 	u16 dev_id;
987 	int err;
988 
989 	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));
990 
991 	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
992 	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
993 
994 	if (ioat_dma->version < IOAT_VER_3_3) {
995 		/* clear any pending errors */
996 		err = pci_read_config_dword(pdev,
997 				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
998 		if (err) {
999 			dev_err(&pdev->dev,
1000 				"channel error register unreachable\n");
1001 			return err;
1002 		}
1003 		pci_write_config_dword(pdev,
1004 				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
1005 
1006 		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
1007 		 * (workaround for spurious config parity error after restart)
1008 		 */
1009 		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
1010 		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
1011 			pci_write_config_dword(pdev,
1012 					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
1013 					       0x10);
1014 		}
1015 	}
1016 
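	/*
	 * When running MSI-X on BWD devices, save the MSI-X table and
	 * pending-bit registers so they can be restored after the reset
	 * below completes.
	 */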
1017 	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
1018 		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
1019 		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
1020 		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
1021 	}
1022 
1024 	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
1025 	if (!err) {
1026 		if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
1027 			writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
1028 			writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
1029 			writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
1030 		}
1031 	}
1032 
1033 	if (err)
1034 		dev_err(&pdev->dev, "Failed to reset: %d\n", err);
1035 
1036 	return err;
1037 }
1038