xref: /openbmc/linux/drivers/dma/ioat/dma.c (revision b85d4594)
1 /*
2  * Intel I/OAT DMA Linux driver
3  * Copyright(c) 2004 - 2015 Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * The full GNU General Public License is included in this distribution in
15  * the file called "COPYING".
16  *
17  */
18 
19 /*
20  * This driver supports an Intel I/OAT DMA engine, which does asynchronous
21  * copy operations.
22  */
23 
24 #include <linux/init.h>
25 #include <linux/module.h>
26 #include <linux/slab.h>
27 #include <linux/pci.h>
28 #include <linux/interrupt.h>
29 #include <linux/dmaengine.h>
30 #include <linux/delay.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/workqueue.h>
33 #include <linux/prefetch.h>
34 #include "dma.h"
35 #include "registers.h"
36 #include "hw.h"
37 
38 #include "../dmaengine.h"
39 
40 static void ioat_eh(struct ioatdma_chan *ioat_chan);
41 
42 /**
43  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
44  * @irq: interrupt id
45  * @data: interrupt data
46  */
47 irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
48 {
49 	struct ioatdma_device *instance = data;
50 	struct ioatdma_chan *ioat_chan;
51 	unsigned long attnstatus;
52 	int bit;
53 	u8 intrctrl;
54 
55 	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
56 
57 	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
58 		return IRQ_NONE;
59 
60 	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
61 		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
62 		return IRQ_NONE;
63 	}
64 
65 	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
66 	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
67 		ioat_chan = ioat_chan_by_index(instance, bit);
68 		if (test_bit(IOAT_RUN, &ioat_chan->state))
69 			tasklet_schedule(&ioat_chan->cleanup_task);
70 	}
71 
72 	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
73 	return IRQ_HANDLED;
74 }
75 
76 /**
77  * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
78  * @irq: interrupt id
79  * @data: interrupt data
80  */
81 irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
82 {
83 	struct ioatdma_chan *ioat_chan = data;
84 
85 	if (test_bit(IOAT_RUN, &ioat_chan->state))
86 		tasklet_schedule(&ioat_chan->cleanup_task);
87 
88 	return IRQ_HANDLED;
89 }
90 
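/*
 * Quiesce a channel before teardown: stop the interrupt from scheduling
 * new tasklet runs, drain in-flight interrupts, timers and tasklets,
 * then make a final descriptor cleanup pass.
 */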
91 void ioat_stop(struct ioatdma_chan *ioat_chan)
92 {
93 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
94 	struct pci_dev *pdev = ioat_dma->pdev;
95 	int chan_id = chan_num(ioat_chan);
96 	struct msix_entry *msix;
97 
98 	/* 1/ stop irq from firing tasklets
99 	 * 2/ stop the tasklet from re-arming irqs
100 	 */
101 	clear_bit(IOAT_RUN, &ioat_chan->state);
102 
103 	/* flush inflight interrupts */
104 	switch (ioat_dma->irq_mode) {
105 	case IOAT_MSIX:
106 		msix = &ioat_dma->msix_entries[chan_id];
107 		synchronize_irq(msix->vector);
108 		break;
109 	case IOAT_MSI:
110 	case IOAT_INTX:
111 		synchronize_irq(pdev->irq);
112 		break;
113 	default:
114 		break;
115 	}
116 
117 	/* flush inflight timers */
118 	del_timer_sync(&ioat_chan->timer);
119 
120 	/* flush inflight tasklet runs */
121 	tasklet_kill(&ioat_chan->cleanup_task);
122 
123 	/* final cleanup now that everything is quiesced and can't re-arm */
124 	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
125 }
126 
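/*
 * Notify hardware of newly appended descriptors by writing the updated
 * descriptor count to the DMACOUNT register.  Called with prep_lock held.
 */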
127 static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
128 {
129 	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
130 	ioat_chan->issued = ioat_chan->head;
131 	writew(ioat_chan->dmacount,
132 	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
133 	dev_dbg(to_dev(ioat_chan),
134 		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
135 		__func__, ioat_chan->head, ioat_chan->tail,
136 		ioat_chan->issued, ioat_chan->dmacount);
137 }
138 
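/*
 * Flush any prepared-but-unissued descriptors to hardware, taking
 * prep_lock to serialize against descriptor producers.
 */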
139 void ioat_issue_pending(struct dma_chan *c)
140 {
141 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
142 
143 	if (ioat_ring_pending(ioat_chan)) {
144 		spin_lock_bh(&ioat_chan->prep_lock);
145 		__ioat_issue_pending(ioat_chan);
146 		spin_unlock_bh(&ioat_chan->prep_lock);
147 	}
148 }
149 
150 /**
151  * ioat_update_pending - issue pending descriptors once past the watermark
152  * @ioat_chan: ioat channel
153  *
154  * Check if the number of unsubmitted descriptors has exceeded the
155  * watermark and, if so, push them to hardware.  Called with prep_lock held.
156  */
157 static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
158 {
159 	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
160 		__ioat_issue_pending(ioat_chan);
161 }
162 
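/*
 * Write a NULL descriptor at the current head, point the channel's chain
 * address at it and issue it; used to (re)start DMA when there is nothing
 * pending to re-issue.  Called with prep_lock held.
 */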
163 static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
164 {
165 	struct ioat_ring_ent *desc;
166 	struct ioat_dma_descriptor *hw;
167 
168 	if (ioat_ring_space(ioat_chan) < 1) {
169 		dev_err(to_dev(ioat_chan),
170 			"Unable to start null desc - ring full\n");
171 		return;
172 	}
173 
174 	dev_dbg(to_dev(ioat_chan),
175 		"%s: head: %#x tail: %#x issued: %#x\n",
176 		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
177 	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
178 
179 	hw = desc->hw;
180 	hw->ctl = 0;
181 	hw->ctl_f.null = 1;
182 	hw->ctl_f.int_en = 1;
183 	hw->ctl_f.compl_write = 1;
184 	/* set size to non-zero value (channel returns error when size is 0) */
185 	hw->size = NULL_DESC_BUFFER_SIZE;
186 	hw->src_addr = 0;
187 	hw->dst_addr = 0;
188 	async_tx_ack(&desc->txd);
189 	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
190 	dump_desc_dbg(ioat_chan, desc);
191 	/* make sure descriptors are written before we submit */
192 	wmb();
193 	ioat_chan->head += 1;
194 	__ioat_issue_pending(ioat_chan);
195 }
196 
197 void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
198 {
199 	spin_lock_bh(&ioat_chan->prep_lock);
200 	__ioat_start_null_desc(ioat_chan);
201 	spin_unlock_bh(&ioat_chan->prep_lock);
202 }
203 
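/*
 * Resume a quiesced channel: rewind the issue pointer to tail, then either
 * re-issue the first un-reaped descriptor or, if nothing is pending, start
 * a NULL descriptor.
 */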
204 static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
205 {
206 	/* set the tail to be re-issued */
207 	ioat_chan->issued = ioat_chan->tail;
208 	ioat_chan->dmacount = 0;
209 	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
210 
211 	dev_dbg(to_dev(ioat_chan),
212 		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
213 		__func__, ioat_chan->head, ioat_chan->tail,
214 		ioat_chan->issued, ioat_chan->dmacount);
215 
216 	if (ioat_ring_pending(ioat_chan)) {
217 		struct ioat_ring_ent *desc;
218 
219 		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
220 		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
221 		__ioat_issue_pending(ioat_chan);
222 	} else
223 		__ioat_start_null_desc(ioat_chan);
224 }
225 
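/*
 * Suspend the channel and spin until it stops executing descriptors.  A
 * @tmo of 0 means wait indefinitely; otherwise -ETIMEDOUT is returned if
 * the channel is still active when the timeout expires.
 */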
226 static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
227 {
228 	unsigned long end = jiffies + tmo;
229 	int err = 0;
230 	u32 status;
231 
232 	status = ioat_chansts(ioat_chan);
233 	if (is_ioat_active(status) || is_ioat_idle(status))
234 		ioat_suspend(ioat_chan);
235 	while (is_ioat_active(status) || is_ioat_idle(status)) {
236 		if (tmo && time_after(jiffies, end)) {
237 			err = -ETIMEDOUT;
238 			break;
239 		}
240 		status = ioat_chansts(ioat_chan);
241 		cpu_relax();
242 	}
243 
244 	return err;
245 }
246 
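/*
 * Trigger a channel reset and poll until the reset completes or the
 * timeout expires.
 */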
247 static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
248 {
249 	unsigned long end = jiffies + tmo;
250 	int err = 0;
251 
252 	ioat_reset(ioat_chan);
253 	while (ioat_reset_pending(ioat_chan)) {
254 		if (end && time_after(jiffies, end)) {
255 			err = -ETIMEDOUT;
256 			break;
257 		}
258 		cpu_relax();
259 	}
260 
261 	return err;
262 }
263 
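/*
 * ->tx_submit callback: assign a cookie, arm the completion timer on a
 * newly active channel, publish the descriptor writes by advancing head
 * past the descriptors reserved in ioat_check_space_lock(), then drop
 * the prep_lock taken there.
 */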
264 static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
265 	__releases(&ioat_chan->prep_lock)
266 {
267 	struct dma_chan *c = tx->chan;
268 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
269 	dma_cookie_t cookie;
270 
271 	cookie = dma_cookie_assign(tx);
272 	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);
273 
274 	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
275 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
276 
277 	/* make descriptor updates visible before advancing ioat->head;
278 	 * this is purposefully not smp_wmb() since we are also
279 	 * publishing the descriptor updates to a dma device
280 	 */
281 	wmb();
282 
283 	ioat_chan->head += ioat_chan->produce;
284 
285 	ioat_update_pending(ioat_chan);
286 	spin_unlock_bh(&ioat_chan->prep_lock);
287 
288 	return cookie;
289 }
290 
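/*
 * Allocate one ring entry: a hardware descriptor from the device's DMA
 * pool plus the software bookkeeping struct from ioat_cache.
 */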
291 static struct ioat_ring_ent *
292 ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
293 {
294 	struct ioat_dma_descriptor *hw;
295 	struct ioat_ring_ent *desc;
296 	struct ioatdma_device *ioat_dma;
297 	dma_addr_t phys;
298 
299 	ioat_dma = to_ioatdma_device(chan->device);
300 	hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
301 	if (!hw)
302 		return NULL;
303 	memset(hw, 0, sizeof(*hw));
304 
305 	desc = kmem_cache_zalloc(ioat_cache, flags);
306 	if (!desc) {
307 		pci_pool_free(ioat_dma->dma_pool, hw, phys);
308 		return NULL;
309 	}
310 
311 	dma_async_tx_descriptor_init(&desc->txd, chan);
312 	desc->txd.tx_submit = ioat_tx_submit_unlock;
313 	desc->hw = hw;
314 	desc->txd.phys = phys;
315 	return desc;
316 }
317 
318 void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
319 {
320 	struct ioatdma_device *ioat_dma;
321 
322 	ioat_dma = to_ioatdma_device(chan->device);
323 	pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
324 	kmem_cache_free(ioat_cache, desc);
325 }
326 
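/*
 * Allocate a 2^order software ring and chain the hardware descriptors
 * into a circular list, the last one linking back to the first.
 */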
327 struct ioat_ring_ent **
328 ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
329 {
330 	struct ioat_ring_ent **ring;
331 	int descs = 1 << order;
332 	int i;
333 
334 	if (order > ioat_get_max_alloc_order())
335 		return NULL;
336 
337 	/* allocate the array to hold the software ring */
338 	ring = kcalloc(descs, sizeof(*ring), flags);
339 	if (!ring)
340 		return NULL;
341 	for (i = 0; i < descs; i++) {
342 		ring[i] = ioat_alloc_ring_ent(c, flags);
343 		if (!ring[i]) {
344 			while (i--)
345 				ioat_free_ring_ent(ring[i], c);
346 			kfree(ring);
347 			return NULL;
348 		}
349 		set_desc_id(ring[i], i);
350 	}
351 
352 	/* link descs */
353 	for (i = 0; i < descs-1; i++) {
354 		struct ioat_ring_ent *next = ring[i+1];
355 		struct ioat_dma_descriptor *hw = ring[i]->hw;
356 
357 		hw->next = next->txd.phys;
358 	}
359 	ring[i]->hw->next = ring[0]->txd.phys;
360 
361 	return ring;
362 }
363 
364 static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
365 {
366 	/* reshape differs from normal ring allocation in that we want
367 	 * to allocate a new software ring while only
368 	 * extending/truncating the hardware ring
369 	 */
370 	struct dma_chan *c = &ioat_chan->dma_chan;
371 	const u32 curr_size = ioat_ring_size(ioat_chan);
372 	const u16 active = ioat_ring_active(ioat_chan);
373 	const u32 new_size = 1 << order;
374 	struct ioat_ring_ent **ring;
375 	u32 i;
376 
377 	if (order > ioat_get_max_alloc_order())
378 		return false;
379 
380 	/* double check that we have at least 1 free descriptor */
381 	if (active == curr_size)
382 		return false;
383 
384 	/* when shrinking, verify that we can hold the current active
385 	 * set in the new ring
386 	 */
387 	if (active >= new_size)
388 		return false;
389 
390 	/* allocate the array to hold the software ring */
391 	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
392 	if (!ring)
393 		return false;
394 
395 	/* allocate/trim descriptors as needed */
396 	if (new_size > curr_size) {
397 		/* copy current descriptors to the new ring */
398 		for (i = 0; i < curr_size; i++) {
399 			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
400 			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
401 
402 			ring[new_idx] = ioat_chan->ring[curr_idx];
403 			set_desc_id(ring[new_idx], new_idx);
404 		}
405 
406 		/* add new descriptors to the ring */
407 		for (i = curr_size; i < new_size; i++) {
408 			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
409 
410 			ring[new_idx] = ioat_alloc_ring_ent(c, GFP_NOWAIT);
411 			if (!ring[new_idx]) {
412 				while (i--) {
413 					u16 new_idx = (ioat_chan->tail+i) &
414 						       (new_size-1);
415 
416 					ioat_free_ring_ent(ring[new_idx], c);
417 				}
418 				kfree(ring);
419 				return false;
420 			}
421 			set_desc_id(ring[new_idx], new_idx);
422 		}
423 
424 		/* hw link new descriptors */
425 		for (i = curr_size-1; i < new_size; i++) {
426 			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
427 			struct ioat_ring_ent *next =
428 				ring[(new_idx+1) & (new_size-1)];
429 			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
430 
431 			hw->next = next->txd.phys;
432 		}
433 	} else {
434 		struct ioat_dma_descriptor *hw;
435 		struct ioat_ring_ent *next;
436 
437 		/* copy current descriptors to the new ring, dropping the
438 		 * removed descriptors
439 		 */
440 		for (i = 0; i < new_size; i++) {
441 			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
442 			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
443 
444 			ring[new_idx] = ioat_chan->ring[curr_idx];
445 			set_desc_id(ring[new_idx], new_idx);
446 		}
447 
448 		/* free deleted descriptors */
449 		for (i = new_size; i < curr_size; i++) {
450 			struct ioat_ring_ent *ent;
451 
452 			ent = ioat_get_ring_ent(ioat_chan, ioat_chan->tail+i);
453 			ioat_free_ring_ent(ent, c);
454 		}
455 
456 		/* fix up hardware ring */
457 		hw = ring[(ioat_chan->tail+new_size-1) & (new_size-1)]->hw;
458 		next = ring[(ioat_chan->tail+new_size) & (new_size-1)];
459 		hw->next = next->txd.phys;
460 	}
461 
462 	dev_dbg(to_dev(ioat_chan), "%s: allocated %d descriptors\n",
463 		__func__, new_size);
464 
465 	kfree(ioat_chan->ring);
466 	ioat_chan->ring = ring;
467 	ioat_chan->alloc_order = order;
468 
469 	return true;
470 }
471 
472 /**
473  * ioat_check_space_lock - verify space and grab ring producer lock
474  * @ioat_chan: ioat channel (ring) to operate on
475  * @num_descs: allocation length
476  */
477 int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
478 	__acquires(&ioat_chan->prep_lock)
479 {
480 	bool retry;
481 
482  retry:
483 	spin_lock_bh(&ioat_chan->prep_lock);
484 	/* never allow the last descriptor to be consumed; we need at
485 	 * least one free at all times to allow for on-the-fly ring
486 	 * resizing.
487 	 */
488 	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
489 		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
490 			__func__, num_descs, ioat_chan->head,
491 			ioat_chan->tail, ioat_chan->issued);
492 		ioat_chan->produce = num_descs;
493 		return 0;  /* with ioat->prep_lock held */
494 	}
495 	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
496 	spin_unlock_bh(&ioat_chan->prep_lock);
497 
498 	/* is another cpu already trying to expand the ring? */
499 	if (retry)
500 		goto retry;
501 
502 	spin_lock_bh(&ioat_chan->cleanup_lock);
503 	spin_lock_bh(&ioat_chan->prep_lock);
504 	retry = reshape_ring(ioat_chan, ioat_chan->alloc_order + 1);
505 	clear_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
506 	spin_unlock_bh(&ioat_chan->prep_lock);
507 	spin_unlock_bh(&ioat_chan->cleanup_lock);
508 
509 	/* if we were able to expand the ring retry the allocation */
510 	if (retry)
511 		goto retry;
512 
513 	dev_dbg_ratelimited(to_dev(ioat_chan),
514 			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
515 			    __func__, num_descs, ioat_chan->head,
516 			    ioat_chan->tail, ioat_chan->issued);
517 
518 	/* make forward progress on reclaim; in the allocation failure
519 	 * case we may be called with bottom halves disabled, so trigger
520 	 * the timer event directly
521 	 */
522 	if (time_is_before_jiffies(ioat_chan->timer.expires)
523 	    && timer_pending(&ioat_chan->timer)) {
524 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
525 		ioat_timer_event((unsigned long)ioat_chan);
526 	}
527 
528 	return -ENOMEM;
529 }
530 
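/*
 * True when the operation spilled into a second (extended) descriptor:
 * XOR with more than 5 sources or PQ with more than 3 sources.  Cleanup
 * must skip over that extra slot.
 */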
531 static bool desc_has_ext(struct ioat_ring_ent *desc)
532 {
533 	struct ioat_dma_descriptor *hw = desc->hw;
534 
535 	if (hw->ctl_f.op == IOAT_OP_XOR ||
536 	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
537 		struct ioat_xor_descriptor *xor = desc->xor;
538 
539 		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
540 			return true;
541 	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
542 		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
543 		struct ioat_pq_descriptor *pq = desc->pq;
544 
545 		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
546 			return true;
547 	}
548 
549 	return false;
550 }
551 
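/* return a super-extended descriptor to its pool */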
552 static void
553 ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
554 {
555 	if (!sed)
556 		return;
557 
558 	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
559 	kmem_cache_free(ioat_sed_cache, sed);
560 }
561 
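/*
 * Read the channel's completion writeback area and extract the bus
 * address of the last descriptor the hardware completed.
 */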
562 static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
563 {
564 	u64 phys_complete;
565 	u64 completion;
566 
567 	completion = *ioat_chan->completion;
568 	phys_complete = ioat_chansts_to_addr(completion);
569 
570 	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
571 		(unsigned long long) phys_complete);
572 
573 	return phys_complete;
574 }
575 
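/*
 * Check whether the completion address has advanced since the last
 * cleanup pass; if so, clear the completion-ack state and rearm the
 * completion timer.  Returns true when there are descriptors to reap.
 */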
576 static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
577 				   u64 *phys_complete)
578 {
579 	*phys_complete = ioat_get_current_completion(ioat_chan);
580 	if (*phys_complete == ioat_chan->last_completion)
581 		return false;
582 
583 	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
584 	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
585 
586 	return true;
587 }
588 
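/*
 * For PQ validate operations, fold the descriptor write-back error
 * status into the caller's result so P/Q check failures are reported.
 */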
589 static void
590 desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
591 {
592 	struct ioat_dma_descriptor *hw = desc->hw;
593 
594 	switch (hw->ctl_f.op) {
595 	case IOAT_OP_PQ_VAL:
596 	case IOAT_OP_PQ_VAL_16S:
597 	{
598 		struct ioat_pq_descriptor *pq = desc->pq;
599 
600 		/* check if there's error written */
601 		if (!pq->dwbes_f.wbes)
602 			return;
603 
604 		/* need to set a chanerr var for checking to clear later */
605 
606 		if (pq->dwbes_f.p_val_err)
607 			*desc->result |= SUM_CHECK_P_RESULT;
608 
609 		if (pq->dwbes_f.q_val_err)
610 			*desc->result |= SUM_CHECK_Q_RESULT;
611 
612 		return;
613 	}
614 	default:
615 		return;
616 	}
617 }
618 
619 /**
620  * __cleanup - reclaim used descriptors
621  * @ioat_chan: channel (ring) to clean
622  */
623 static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
624 {
625 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
626 	struct ioat_ring_ent *desc;
627 	bool seen_current = false;
628 	int idx = ioat_chan->tail, i;
629 	u16 active;
630 
631 	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
632 		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
633 
634 	/*
635 	 * At restart of the channel, the completion address and the
636 	 * channel status will be 0 due to starting a new chain. Since
637 	 * it's new chain and the first descriptor "fails", there is
638 	 * nothing to clean up. We do not want to reap the entire submitted
639 	 * chain due to this 0 address value and then BUG.
640 	 */
641 	if (!phys_complete)
642 		return;
643 
644 	active = ioat_ring_active(ioat_chan);
645 	for (i = 0; i < active && !seen_current; i++) {
646 		struct dma_async_tx_descriptor *tx;
647 
648 		smp_read_barrier_depends();
649 		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
650 		desc = ioat_get_ring_ent(ioat_chan, idx + i);
651 		dump_desc_dbg(ioat_chan, desc);
652 
653 		/* set err stat if we are using dwbes */
654 		if (ioat_dma->cap & IOAT_CAP_DWBES)
655 			desc_get_errstat(ioat_chan, desc);
656 
657 		tx = &desc->txd;
658 		if (tx->cookie) {
659 			dma_cookie_complete(tx);
660 			dma_descriptor_unmap(tx);
661 			if (tx->callback) {
662 				tx->callback(tx->callback_param);
663 				tx->callback = NULL;
664 			}
665 		}
666 
667 		if (tx->phys == phys_complete)
668 			seen_current = true;
669 
670 		/* skip extended descriptors */
671 		if (desc_has_ext(desc)) {
672 			BUG_ON(i + 1 >= active);
673 			i++;
674 		}
675 
676 		/* cleanup super extended descriptors */
677 		if (desc->sed) {
678 			ioat_free_sed(ioat_dma, desc->sed);
679 			desc->sed = NULL;
680 		}
681 	}
682 
683 	/* finish all descriptor reads before incrementing tail */
684 	smp_mb();
685 	ioat_chan->tail = idx + i;
686 	/* no active descs have written a completion? */
687 	BUG_ON(active && !seen_current);
688 	ioat_chan->last_completion = phys_complete;
689 
690 	if (active - i == 0) {
691 		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
692 			__func__);
693 		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
694 	}
695 
696 	/* 5 microsecond delay per pending descriptor */
697 	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
698 	       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
699 }
700 
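/*
 * Reclaim completed descriptors and, if the channel has halted on an
 * error class the driver knows how to handle, kick off error handling.
 */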
701 static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
702 {
703 	u64 phys_complete;
704 
705 	spin_lock_bh(&ioat_chan->cleanup_lock);
706 
707 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
708 		__cleanup(ioat_chan, phys_complete);
709 
710 	if (is_ioat_halted(*ioat_chan->completion)) {
711 		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
712 
713 		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
714 			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
715 			ioat_eh(ioat_chan);
716 		}
717 	}
718 
719 	spin_unlock_bh(&ioat_chan->cleanup_lock);
720 }
721 
722 void ioat_cleanup_event(unsigned long data)
723 {
724 	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
725 
726 	ioat_cleanup(ioat_chan);
727 	if (!test_bit(IOAT_RUN, &ioat_chan->state))
728 		return;
729 	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
730 }
731 
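/*
 * Quiesce the channel, reap whatever completed before the halt, then
 * restart the chain from the current tail.  Called with prep_lock held.
 */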
732 static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
733 {
734 	u64 phys_complete;
735 
736 	ioat_quiesce(ioat_chan, 0);
737 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
738 		__cleanup(ioat_chan, phys_complete);
739 
740 	__ioat_restart_chan(ioat_chan);
741 }
742 
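/*
 * Channel error handler: decode CHANERR, treat P/Q/CRC validation
 * failures on the faulting descriptor as handled results, BUG on any
 * unhandled or spurious error, then clear the error registers, mark the
 * faulting descriptor complete and restart the channel.
 */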
743 static void ioat_eh(struct ioatdma_chan *ioat_chan)
744 {
745 	struct pci_dev *pdev = to_pdev(ioat_chan);
746 	struct ioat_dma_descriptor *hw;
747 	struct dma_async_tx_descriptor *tx;
748 	u64 phys_complete;
749 	struct ioat_ring_ent *desc;
750 	u32 err_handled = 0;
751 	u32 chanerr_int;
752 	u32 chanerr;
753 
754 	/* cleanup so tail points to descriptor that caused the error */
755 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
756 		__cleanup(ioat_chan, phys_complete);
757 
758 	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
759 	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
760 
761 	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
762 		__func__, chanerr, chanerr_int);
763 
764 	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
765 	hw = desc->hw;
766 	dump_desc_dbg(ioat_chan, desc);
767 
768 	switch (hw->ctl_f.op) {
769 	case IOAT_OP_XOR_VAL:
770 		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
771 			*desc->result |= SUM_CHECK_P_RESULT;
772 			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
773 		}
774 		break;
775 	case IOAT_OP_PQ_VAL:
776 	case IOAT_OP_PQ_VAL_16S:
777 		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
778 			*desc->result |= SUM_CHECK_P_RESULT;
779 			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
780 		}
781 		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
782 			*desc->result |= SUM_CHECK_Q_RESULT;
783 			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
784 		}
785 		break;
786 	}
787 
788 	/* fault on unhandled error or spurious halt */
789 	if (chanerr ^ err_handled || chanerr == 0) {
790 		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
791 			__func__, chanerr, err_handled);
792 		BUG();
793 	} else { /* cleanup the faulty descriptor */
794 		tx = &desc->txd;
795 		if (tx->cookie) {
796 			dma_cookie_complete(tx);
797 			dma_descriptor_unmap(tx);
798 			if (tx->callback) {
799 				tx->callback(tx->callback_param);
800 				tx->callback = NULL;
801 			}
802 		}
803 	}
804 
805 	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
806 	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
807 
808 	/* mark faulting descriptor as complete */
809 	*ioat_chan->completion = desc->txd.phys;
810 
811 	spin_lock_bh(&ioat_chan->prep_lock);
812 	ioat_restart_channel(ioat_chan);
813 	spin_unlock_bh(&ioat_chan->prep_lock);
814 }
815 
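/*
 * If the ring still has active descriptors keep the completion timer
 * running; otherwise rearm the idle timer or step an oversized ring back
 * down toward the default allocation order.  Called under cleanup_lock
 * and prep_lock from the timer event.
 */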
816 static void check_active(struct ioatdma_chan *ioat_chan)
817 {
818 	if (ioat_ring_active(ioat_chan)) {
819 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
820 		return;
821 	}
822 
823 	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
824 		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
825 	else if (ioat_chan->alloc_order > ioat_get_alloc_order()) {
826 		/* if the ring is idle, empty, and oversized try to step
827 		 * down the size
828 		 */
829 		reshape_ring(ioat_chan, ioat_chan->alloc_order - 1);
830 
831 		/* keep shrinking until we get back to our minimum
832 		 * default size
833 		 */
834 		if (ioat_chan->alloc_order > ioat_get_alloc_order())
835 			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
836 	}
837 
838 }
839 
840 void ioat_timer_event(unsigned long data)
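/*
 * Completion/watchdog timer: reap finished descriptors, escalate to a
 * channel restart when no progress has been made since a previously
 * acknowledged completion, and shrink rings that have gone idle.
 */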
841 {
842 	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
843 	dma_addr_t phys_complete;
844 	u64 status;
845 
846 	status = ioat_chansts(ioat_chan);
847 
848 	/* when halted due to errors, check for channel
849 	 * programming errors before advancing the completion state
850 	 */
851 	if (is_ioat_halted(status)) {
852 		u32 chanerr;
853 
854 		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
855 		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
856 			__func__, chanerr);
857 		if (test_bit(IOAT_RUN, &ioat_chan->state))
858 			BUG_ON(is_ioat_bug(chanerr));
859 		else /* we never got off the ground */
860 			return;
861 	}
862 
863 	/* if we haven't made progress and we have already
864 	 * acknowledged a pending completion once, then be more
865 	 * forceful with a restart
866 	 */
867 	spin_lock_bh(&ioat_chan->cleanup_lock);
868 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
869 		__cleanup(ioat_chan, phys_complete);
870 	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
871 		spin_lock_bh(&ioat_chan->prep_lock);
872 		ioat_restart_channel(ioat_chan);
873 		spin_unlock_bh(&ioat_chan->prep_lock);
874 		spin_unlock_bh(&ioat_chan->cleanup_lock);
875 		return;
876 	} else {
877 		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
878 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
879 	}
880 
881 
882 	if (ioat_ring_active(ioat_chan))
883 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
884 	else {
885 		spin_lock_bh(&ioat_chan->prep_lock);
886 		check_active(ioat_chan);
887 		spin_unlock_bh(&ioat_chan->prep_lock);
888 	}
889 	spin_unlock_bh(&ioat_chan->cleanup_lock);
890 }
891 
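/*
 * Report the completion state of @cookie, running a cleanup pass first
 * when it has not completed yet.
 */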
892 enum dma_status
893 ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
894 		struct dma_tx_state *txstate)
895 {
896 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
897 	enum dma_status ret;
898 
899 	ret = dma_cookie_status(c, cookie, txstate);
900 	if (ret == DMA_COMPLETE)
901 		return ret;
902 
903 	ioat_cleanup(ioat_chan);
904 
905 	return dma_cookie_status(c, cookie, txstate);
906 }
907 
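/*
 * On BWD devices, re-establish interrupt resources after a channel
 * reset: free the current vectors and run interrupt setup again.  A
 * no-op for other devices.
 */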
908 static int ioat_irq_reinit(struct ioatdma_device *ioat_dma)
909 {
910 	struct pci_dev *pdev = ioat_dma->pdev;
911 	int irq = pdev->irq, i;
912 
913 	if (!is_bwd_ioat(pdev))
914 		return 0;
915 
916 	switch (ioat_dma->irq_mode) {
917 	case IOAT_MSIX:
918 		for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) {
919 			struct msix_entry *msix = &ioat_dma->msix_entries[i];
920 			struct ioatdma_chan *ioat_chan;
921 
922 			ioat_chan = ioat_chan_by_index(ioat_dma, i);
923 			devm_free_irq(&pdev->dev, msix->vector, ioat_chan);
924 		}
925 
926 		pci_disable_msix(pdev);
927 		break;
928 	case IOAT_MSI:
929 		pci_disable_msi(pdev);
930 		/* fall through */
931 	case IOAT_INTX:
932 		devm_free_irq(&pdev->dev, irq, ioat_dma);
933 		break;
934 	default:
935 		return 0;
936 	}
937 	ioat_dma->irq_mode = IOAT_NOIRQ;
938 
939 	return ioat_dma_setup_interrupts(ioat_dma);
940 }
941 
942 int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
943 {
944 	/* throw away whatever the channel was doing and get it
945 	 * initialized, with ioat3 specific workarounds
946 	 */
947 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
948 	struct pci_dev *pdev = ioat_dma->pdev;
949 	u32 chanerr;
950 	u16 dev_id;
951 	int err;
952 
953 	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));
954 
955 	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
956 	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
957 
958 	if (ioat_dma->version < IOAT_VER_3_3) {
959 		/* clear any pending errors */
960 		err = pci_read_config_dword(pdev,
961 				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
962 		if (err) {
963 			dev_err(&pdev->dev,
964 				"channel error register unreachable\n");
965 			return err;
966 		}
967 		pci_write_config_dword(pdev,
968 				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
969 
970 		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
971 		 * (workaround for spurious config parity error after restart)
972 		 */
973 		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
974 		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
975 			pci_write_config_dword(pdev,
976 					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
977 					       0x10);
978 		}
979 	}
980 
981 	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
982 	if (!err)
983 		err = ioat_irq_reinit(ioat_dma);
984 
985 	if (err)
986 		dev_err(&pdev->dev, "Failed to reset: %d\n", err);
987 
988 	return err;
989 }
990