xref: /openbmc/linux/drivers/dma/ioat/dma.c (revision c4ee0af3)
1 /*
2  * Intel I/OAT DMA Linux driver
3  * Copyright(c) 2004 - 2009 Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  */
22 
23 /*
24  * This driver supports an Intel I/OAT DMA engine, which does asynchronous
25  * copy operations.
26  */
27 
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/slab.h>
31 #include <linux/pci.h>
32 #include <linux/interrupt.h>
33 #include <linux/dmaengine.h>
34 #include <linux/delay.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/workqueue.h>
37 #include <linux/prefetch.h>
38 #include <linux/i7300_idle.h>
39 #include "dma.h"
40 #include "registers.h"
41 #include "hw.h"
42 
43 #include "../dmaengine.h"
44 
45 int ioat_pending_level = 4;
46 module_param(ioat_pending_level, int, 0644);
47 MODULE_PARM_DESC(ioat_pending_level,
48 		 "high-water mark for pushing ioat descriptors (default: 4)");
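
/*
 * Descriptors are chained in software by ioat1_tx_submit(); the hardware is
 * only told to fetch them (IOAT_CHANCMD_APPEND) once the pending count
 * reaches ioat_pending_level or the client calls issue_pending() itself.
 */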
49 
50 /* internal functions */
51 static void ioat1_cleanup(struct ioat_dma_chan *ioat);
52 static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
53 
54 /**
55  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
56  * @irq: interrupt id
57  * @data: interrupt data
58  */
59 static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
60 {
61 	struct ioatdma_device *instance = data;
62 	struct ioat_chan_common *chan;
63 	unsigned long attnstatus;
64 	int bit;
65 	u8 intrctrl;
66 
67 	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
68 
69 	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
70 		return IRQ_NONE;
71 
72 	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
73 		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
74 		return IRQ_NONE;
75 	}
76 
77 	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
78 	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
79 		chan = ioat_chan_by_index(instance, bit);
80 		tasklet_schedule(&chan->cleanup_task);
81 	}
82 
83 	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
84 	return IRQ_HANDLED;
85 }
86 
87 /**
88  * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
89  * @irq: interrupt id
90  * @data: interrupt data
91  */
92 static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
93 {
94 	struct ioat_chan_common *chan = data;
95 
96 	tasklet_schedule(&chan->cleanup_task);
97 
98 	return IRQ_HANDLED;
99 }
100 
101 /* common channel initialization */
102 void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
103 {
104 	struct dma_device *dma = &device->common;
105 	struct dma_chan *c = &chan->common;
106 	unsigned long data = (unsigned long) c;
107 
108 	chan->device = device;
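	/* each channel owns an 0x80-byte register window after the common regs */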
109 	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
110 	spin_lock_init(&chan->cleanup_lock);
111 	chan->common.device = dma;
112 	dma_cookie_init(&chan->common);
113 	list_add_tail(&chan->common.device_node, &dma->channels);
114 	device->idx[idx] = chan;
115 	init_timer(&chan->timer);
116 	chan->timer.function = device->timer_fn;
117 	chan->timer.data = data;
118 	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
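	/* keep the cleanup tasklet off until *_alloc_chan_resources() enables it */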
119 	tasklet_disable(&chan->cleanup_task);
120 }
121 
122 /**
123  * ioat1_enumerate_channels - find and initialize the device's channels
124  * @device: the device to be enumerated
125  */
126 static int ioat1_enumerate_channels(struct ioatdma_device *device)
127 {
128 	u8 xfercap_scale;
129 	u32 xfercap;
130 	int i;
131 	struct ioat_dma_chan *ioat;
132 	struct device *dev = &device->pdev->dev;
133 	struct dma_device *dma = &device->common;
134 
135 	INIT_LIST_HEAD(&dma->channels);
136 	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
137 	dma->chancnt &= 0x1f; /* bits [4:0] valid */
138 	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
139 		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
140 			 dma->chancnt, ARRAY_SIZE(device->idx));
141 		dma->chancnt = ARRAY_SIZE(device->idx);
142 	}
143 	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
144 	xfercap_scale &= 0x1f; /* bits [4:0] valid */
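	/* a scale of 0 encodes "no limit": xfercap becomes all ones (~4GB) */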
145 	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
146 	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);
147 
148 #ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
149 	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
150 		dma->chancnt--;
151 #endif
152 	for (i = 0; i < dma->chancnt; i++) {
153 		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
154 		if (!ioat)
155 			break;
156 
157 		ioat_init_channel(device, &ioat->base, i);
158 		ioat->xfercap = xfercap;
159 		spin_lock_init(&ioat->desc_lock);
160 		INIT_LIST_HEAD(&ioat->free_desc);
161 		INIT_LIST_HEAD(&ioat->used_desc);
162 	}
163 	dma->chancnt = i;
164 	return i;
165 }
166 
167 /**
168  * __ioat1_dma_memcpy_issue_pending - tell the hw to fetch descriptors that
169  *                                    have been appended but not yet issued
170  * @ioat: IOAT DMA channel handle
171  */
172 static inline void
173 __ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
174 {
175 	void __iomem *reg_base = ioat->base.reg_base;
176 
177 	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
178 		__func__, ioat->pending);
179 	ioat->pending = 0;
180 	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
181 }
182 
183 static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
184 {
185 	struct ioat_dma_chan *ioat = to_ioat_chan(chan);
186 
187 	if (ioat->pending > 0) {
188 		spin_lock_bh(&ioat->desc_lock);
189 		__ioat1_dma_memcpy_issue_pending(ioat);
190 		spin_unlock_bh(&ioat->desc_lock);
191 	}
192 }
193 
194 /**
195  * ioat1_reset_channel - restart a channel
196  * @ioat: IOAT DMA channel handle
197  */
198 static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
199 {
200 	struct ioat_chan_common *chan = &ioat->base;
201 	void __iomem *reg_base = chan->reg_base;
202 	u32 chansts, chanerr;
203 
204 	dev_warn(to_dev(chan), "reset\n");
205 	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
206 	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
207 	if (chanerr) {
208 		dev_err(to_dev(chan),
209 			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
210 			chan_num(chan), chansts, chanerr);
211 		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
212 	}
213 
214 	/*
215 	 * whack it upside the head with a reset
216 	 * and wait for things to settle out.
217 	 * force the pending count to a really big negative
218 	 * to make sure no one forces an issue_pending
219 	 * while we're waiting.
220 	 */
221 
222 	ioat->pending = INT_MIN;
223 	writeb(IOAT_CHANCMD_RESET,
224 	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
225 	set_bit(IOAT_RESET_PENDING, &chan->state);
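	/* ioat1_timer_event() restarts the chain once RESET_DELAY has elapsed */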
226 	mod_timer(&chan->timer, jiffies + RESET_DELAY);
227 }
228 
229 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
230 {
231 	struct dma_chan *c = tx->chan;
232 	struct ioat_dma_chan *ioat = to_ioat_chan(c);
233 	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
234 	struct ioat_chan_common *chan = &ioat->base;
235 	struct ioat_desc_sw *first;
236 	struct ioat_desc_sw *chain_tail;
237 	dma_cookie_t cookie;
238 
239 	spin_lock_bh(&ioat->desc_lock);
240 	/* cookie incr and addition to used_list must be atomic */
241 	cookie = dma_cookie_assign(tx);
242 	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
243 
244 	/* write address into NextDescriptor field of last desc in chain */
245 	first = to_ioat_desc(desc->tx_list.next);
246 	chain_tail = to_ioat_desc(ioat->used_desc.prev);
247 	/* make descriptor updates globally visible before chaining */
248 	wmb();
249 	chain_tail->hw->next = first->txd.phys;
250 	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
251 	dump_desc_dbg(ioat, chain_tail);
252 	dump_desc_dbg(ioat, first);
253 
254 	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
255 		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
256 
257 	ioat->active += desc->hw->tx_cnt;
258 	ioat->pending += desc->hw->tx_cnt;
259 	if (ioat->pending >= ioat_pending_level)
260 		__ioat1_dma_memcpy_issue_pending(ioat);
261 	spin_unlock_bh(&ioat->desc_lock);
262 
263 	return cookie;
264 }
265 
266 /**
267  * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
268  * @ioat: the channel supplying the memory pool for the descriptors
269  * @flags: allocation flags
270  */
271 static struct ioat_desc_sw *
272 ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
273 {
274 	struct ioat_dma_descriptor *desc;
275 	struct ioat_desc_sw *desc_sw;
276 	struct ioatdma_device *ioatdma_device;
277 	dma_addr_t phys;
278 
279 	ioatdma_device = ioat->base.device;
280 	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
281 	if (unlikely(!desc))
282 		return NULL;
283 
284 	desc_sw = kzalloc(sizeof(*desc_sw), flags);
285 	if (unlikely(!desc_sw)) {
286 		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
287 		return NULL;
288 	}
289 
290 	memset(desc, 0, sizeof(*desc));
291 
292 	INIT_LIST_HEAD(&desc_sw->tx_list);
293 	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
294 	desc_sw->txd.tx_submit = ioat1_tx_submit;
295 	desc_sw->hw = desc;
296 	desc_sw->txd.phys = phys;
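	/*
	 * An id of -1 marks a descriptor allocated outside the initial pool;
	 * in this file the id is only used by the dev_dbg output.
	 */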
297 	set_desc_id(desc_sw, -1);
298 
299 	return desc_sw;
300 }
301 
302 static int ioat_initial_desc_count = 256;
303 module_param(ioat_initial_desc_count, int, 0644);
304 MODULE_PARM_DESC(ioat_initial_desc_count,
305 		 "ioat1: initial descriptors per channel (default: 256)");
306 /**
307  * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
308  * @c: the channel to be filled out
309  */
310 static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
311 {
312 	struct ioat_dma_chan *ioat = to_ioat_chan(c);
313 	struct ioat_chan_common *chan = &ioat->base;
314 	struct ioat_desc_sw *desc;
315 	u32 chanerr;
316 	int i;
317 	LIST_HEAD(tmp_list);
318 
319 	/* have we already been set up? */
320 	if (!list_empty(&ioat->free_desc))
321 		return ioat->desccount;
322 
323 	/* Setup register to interrupt and write completion status on error */
324 	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
325 
326 	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
327 	if (chanerr) {
328 		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
329 		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
330 	}
331 
332 	/* Allocate descriptors */
333 	for (i = 0; i < ioat_initial_desc_count; i++) {
334 		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
335 		if (!desc) {
336 			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
337 			break;
338 		}
339 		set_desc_id(desc, i);
340 		list_add_tail(&desc->node, &tmp_list);
341 	}
342 	spin_lock_bh(&ioat->desc_lock);
343 	ioat->desccount = i;
344 	list_splice(&tmp_list, &ioat->free_desc);
345 	spin_unlock_bh(&ioat->desc_lock);
346 
347 	/* allocate a completion writeback area */
348 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
349 	chan->completion = pci_pool_alloc(chan->device->completion_pool,
350 					  GFP_KERNEL, &chan->completion_dma);
351 	memset(chan->completion, 0, sizeof(*chan->completion));
352 	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
353 	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
354 	writel(((u64) chan->completion_dma) >> 32,
355 	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
356 
357 	tasklet_enable(&chan->cleanup_task);
358 	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
359 	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
360 		__func__, ioat->desccount);
361 	return ioat->desccount;
362 }
363 
364 /**
365  * ioat1_dma_free_chan_resources - release all the descriptors
366  * @c: the channel to be cleaned
367  */
368 static void ioat1_dma_free_chan_resources(struct dma_chan *c)
369 {
370 	struct ioat_dma_chan *ioat = to_ioat_chan(c);
371 	struct ioat_chan_common *chan = &ioat->base;
372 	struct ioatdma_device *ioatdma_device = chan->device;
373 	struct ioat_desc_sw *desc, *_desc;
374 	int in_use_descs = 0;
375 
376 	/* Before freeing channel resources first check
377 	 * if they have been previously allocated for this channel.
378 	 */
379 	if (ioat->desccount == 0)
380 		return;
381 
382 	tasklet_disable(&chan->cleanup_task);
383 	del_timer_sync(&chan->timer);
384 	ioat1_cleanup(ioat);
385 
386 	/* Delay 100ms after reset to allow internal DMA logic to quiesce
387 	 * before removing DMA descriptor resources.
388 	 */
389 	writeb(IOAT_CHANCMD_RESET,
390 	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
391 	mdelay(100);
392 
393 	spin_lock_bh(&ioat->desc_lock);
394 	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
395 		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
396 			__func__, desc_id(desc));
397 		dump_desc_dbg(ioat, desc);
398 		in_use_descs++;
399 		list_del(&desc->node);
400 		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
401 			      desc->txd.phys);
402 		kfree(desc);
403 	}
404 	list_for_each_entry_safe(desc, _desc,
405 				 &ioat->free_desc, node) {
406 		list_del(&desc->node);
407 		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
408 			      desc->txd.phys);
409 		kfree(desc);
410 	}
411 	spin_unlock_bh(&ioat->desc_lock);
412 
413 	pci_pool_free(ioatdma_device->completion_pool,
414 		      chan->completion,
415 		      chan->completion_dma);
416 
417 	/* one is ok since we left it on there on purpose */
418 	if (in_use_descs > 1)
419 		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
420 			in_use_descs - 1);
421 
422 	chan->last_completion = 0;
423 	chan->completion_dma = 0;
424 	ioat->pending = 0;
425 	ioat->desccount = 0;
426 }
427 
428 /**
429  * ioat1_dma_get_next_descriptor - return the next available descriptor
430  * @ioat: IOAT DMA channel handle
431  *
432  * Gets the next descriptor from the chain, and must be called with the
433  * channel's desc_lock held.  Allocates more descriptors if the channel
434  * has run out.
435  */
436 static struct ioat_desc_sw *
437 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
438 {
439 	struct ioat_desc_sw *new;
440 
441 	if (!list_empty(&ioat->free_desc)) {
442 		new = to_ioat_desc(ioat->free_desc.next);
443 		list_del(&new->node);
444 	} else {
445 		/* try to get another desc */
446 		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
447 		if (!new) {
448 			dev_err(to_dev(&ioat->base), "alloc failed\n");
449 			return NULL;
450 		}
451 	}
452 	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
453 		__func__, desc_id(new));
454 	prefetch(new->hw);
455 	return new;
456 }
457 
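/*
 * A copy larger than the channel's xfercap is split across several hw
 * descriptors chained by their 'next' pointers; only the final descriptor
 * asks for a completion write (and an interrupt when DMA_PREP_INTERRUPT
 * is set).
 */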
458 static struct dma_async_tx_descriptor *
459 ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
460 		      dma_addr_t dma_src, size_t len, unsigned long flags)
461 {
462 	struct ioat_dma_chan *ioat = to_ioat_chan(c);
463 	struct ioat_desc_sw *desc;
464 	size_t copy;
465 	LIST_HEAD(chain);
466 	dma_addr_t src = dma_src;
467 	dma_addr_t dest = dma_dest;
468 	size_t total_len = len;
469 	struct ioat_dma_descriptor *hw = NULL;
470 	int tx_cnt = 0;
471 
472 	spin_lock_bh(&ioat->desc_lock);
473 	desc = ioat1_dma_get_next_descriptor(ioat);
474 	do {
475 		if (!desc)
476 			break;
477 
478 		tx_cnt++;
479 		copy = min_t(size_t, len, ioat->xfercap);
480 
481 		hw = desc->hw;
482 		hw->size = copy;
483 		hw->ctl = 0;
484 		hw->src_addr = src;
485 		hw->dst_addr = dest;
486 
487 		list_add_tail(&desc->node, &chain);
488 
489 		len -= copy;
490 		dest += copy;
491 		src += copy;
492 		if (len) {
493 			struct ioat_desc_sw *next;
494 
495 			async_tx_ack(&desc->txd);
496 			next = ioat1_dma_get_next_descriptor(ioat);
497 			hw->next = next ? next->txd.phys : 0;
498 			dump_desc_dbg(ioat, desc);
499 			desc = next;
500 		} else
501 			hw->next = 0;
502 	} while (len);
503 
504 	if (!desc) {
505 		struct ioat_chan_common *chan = &ioat->base;
506 
507 		dev_err(to_dev(chan),
508 			"chan%d - get_next_desc failed\n", chan_num(chan));
509 		list_splice(&chain, &ioat->free_desc);
510 		spin_unlock_bh(&ioat->desc_lock);
511 		return NULL;
512 	}
513 	spin_unlock_bh(&ioat->desc_lock);
514 
515 	desc->txd.flags = flags;
516 	desc->len = total_len;
517 	list_splice(&chain, &desc->tx_list);
518 	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
519 	hw->ctl_f.compl_write = 1;
520 	hw->tx_cnt = tx_cnt;
521 	dump_desc_dbg(ioat, desc);
522 
523 	return &desc->txd;
524 }
525 
526 static void ioat1_cleanup_event(unsigned long data)
527 {
528 	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
529 
530 	ioat1_cleanup(ioat);
531 	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
532 }
533 
534 dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
535 {
536 	dma_addr_t phys_complete;
537 	u64 completion;
538 
539 	completion = *chan->completion;
540 	phys_complete = ioat_chansts_to_addr(completion);
541 
542 	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
543 		(unsigned long long) phys_complete);
544 
545 	if (is_ioat_halted(completion)) {
546 		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
547 		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
548 			chanerr);
549 
550 		/* TODO do something to salvage the situation */
551 	}
552 
553 	return phys_complete;
554 }
555 
556 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
557 			   dma_addr_t *phys_complete)
558 {
559 	*phys_complete = ioat_get_current_completion(chan);
560 	if (*phys_complete == chan->last_completion)
561 		return false;
562 	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
563 	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
564 
565 	return true;
566 }
567 
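/*
 * Walk used_desc from the oldest entry up to the descriptor whose physical
 * address matches the completion writeback: complete cookies, run callbacks,
 * and return acked descriptors to free_desc.  The last completed descriptor
 * stays on used_desc so that new work can still be appended behind it.
 */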
568 static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
569 {
570 	struct ioat_chan_common *chan = &ioat->base;
571 	struct list_head *_desc, *n;
572 	struct dma_async_tx_descriptor *tx;
573 
574 	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
575 		 __func__, (unsigned long long) phys_complete);
576 	list_for_each_safe(_desc, n, &ioat->used_desc) {
577 		struct ioat_desc_sw *desc;
578 
579 		prefetch(n);
580 		desc = list_entry(_desc, typeof(*desc), node);
581 		tx = &desc->txd;
582 		/*
583 		 * Incoming DMA requests may use multiple descriptors,
584 		 * due to exceeding xfercap, perhaps. If so, only the
585 		 * last one will have a cookie, and require unmapping.
586 		 */
587 		dump_desc_dbg(ioat, desc);
588 		if (tx->cookie) {
589 			dma_cookie_complete(tx);
590 			dma_descriptor_unmap(tx);
591 			ioat->active -= desc->hw->tx_cnt;
592 			if (tx->callback) {
593 				tx->callback(tx->callback_param);
594 				tx->callback = NULL;
595 			}
596 		}
597 
598 		if (tx->phys != phys_complete) {
599 			/*
600 			 * a completed entry, but not the last, so clean
601 			 * up if the client is done with the descriptor
602 			 */
603 			if (async_tx_test_ack(tx))
604 				list_move_tail(&desc->node, &ioat->free_desc);
605 		} else {
606 			/*
607 			 * last used desc. Do not remove, so we can
608 			 * append from it.
609 			 */
610 
611 			/* if nothing else is pending, cancel the
612 			 * completion timeout
613 			 */
614 			if (n == &ioat->used_desc) {
615 				dev_dbg(to_dev(chan),
616 					"%s cancel completion timeout\n",
617 					__func__);
618 				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
619 			}
620 
621 			/* TODO check status bits? */
622 			break;
623 		}
624 	}
625 
626 	chan->last_completion = phys_complete;
627 }
628 
629 /**
630  * ioat1_cleanup - clean up finished descriptors
631  * @ioat: ioat channel to be cleaned up
632  *
633  * To prevent lock contention, cleanup is deferred whenever the locks are
634  * contended; the channel timer provides a terminal timeout that forces
635  * cleanup and catches completion notification errors.
636  */
637 static void ioat1_cleanup(struct ioat_dma_chan *ioat)
638 {
639 	struct ioat_chan_common *chan = &ioat->base;
640 	dma_addr_t phys_complete;
641 
642 	prefetch(chan->completion);
643 
644 	if (!spin_trylock_bh(&chan->cleanup_lock))
645 		return;
646 
647 	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
648 		spin_unlock_bh(&chan->cleanup_lock);
649 		return;
650 	}
651 
652 	if (!spin_trylock_bh(&ioat->desc_lock)) {
653 		spin_unlock_bh(&chan->cleanup_lock);
654 		return;
655 	}
656 
657 	__cleanup(ioat, phys_complete);
658 
659 	spin_unlock_bh(&ioat->desc_lock);
660 	spin_unlock_bh(&chan->cleanup_lock);
661 }
662 
663 static void ioat1_timer_event(unsigned long data)
664 {
665 	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
666 	struct ioat_chan_common *chan = &ioat->base;
667 
668 	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
669 
670 	spin_lock_bh(&chan->cleanup_lock);
671 	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
672 		struct ioat_desc_sw *desc;
673 
674 		spin_lock_bh(&ioat->desc_lock);
675 
676 		/* restart active descriptors */
677 		desc = to_ioat_desc(ioat->used_desc.prev);
678 		ioat_set_chainaddr(ioat, desc->txd.phys);
679 		ioat_start(chan);
680 
681 		ioat->pending = 0;
682 		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
683 		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
684 		spin_unlock_bh(&ioat->desc_lock);
685 	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
686 		dma_addr_t phys_complete;
687 
688 		spin_lock_bh(&ioat->desc_lock);
689 		/* if we haven't made progress and we have already
690 		 * acknowledged a pending completion once, then be more
691 		 * forceful with a restart
692 		 */
693 		if (ioat_cleanup_preamble(chan, &phys_complete))
694 			__cleanup(ioat, phys_complete);
695 		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
696 			ioat1_reset_channel(ioat);
697 		else {
698 			u64 status = ioat_chansts(chan);
699 
700 			/* manually update the last completion address */
701 			if (ioat_chansts_to_addr(status) != 0)
702 				*chan->completion = status;
703 
704 			set_bit(IOAT_COMPLETION_ACK, &chan->state);
705 			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
706 		}
707 		spin_unlock_bh(&ioat->desc_lock);
708 	}
709 	spin_unlock_bh(&chan->cleanup_lock);
710 }
711 
712 enum dma_status
713 ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
714 		   struct dma_tx_state *txstate)
715 {
716 	struct ioat_chan_common *chan = to_chan_common(c);
717 	struct ioatdma_device *device = chan->device;
718 	enum dma_status ret;
719 
720 	ret = dma_cookie_status(c, cookie, txstate);
721 	if (ret == DMA_COMPLETE)
722 		return ret;
723 
724 	device->cleanup_fn((unsigned long) c);
725 
726 	return dma_cookie_status(c, cookie, txstate);
727 }
728 
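/*
 * Post a NULL descriptor so the channel has a valid CHAINADDR to start
 * from; real memcpy chains submitted later are simply appended to it.
 */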
729 static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
730 {
731 	struct ioat_chan_common *chan = &ioat->base;
732 	struct ioat_desc_sw *desc;
733 	struct ioat_dma_descriptor *hw;
734 
735 	spin_lock_bh(&ioat->desc_lock);
736 
737 	desc = ioat1_dma_get_next_descriptor(ioat);
738 
739 	if (!desc) {
740 		dev_err(to_dev(chan),
741 			"Unable to start null desc - get next desc failed\n");
742 		spin_unlock_bh(&ioat->desc_lock);
743 		return;
744 	}
745 
746 	hw = desc->hw;
747 	hw->ctl = 0;
748 	hw->ctl_f.null = 1;
749 	hw->ctl_f.int_en = 1;
750 	hw->ctl_f.compl_write = 1;
751 	/* set size to non-zero value (channel returns error when size is 0) */
752 	hw->size = NULL_DESC_BUFFER_SIZE;
753 	hw->src_addr = 0;
754 	hw->dst_addr = 0;
755 	async_tx_ack(&desc->txd);
756 	hw->next = 0;
757 	list_add_tail(&desc->node, &ioat->used_desc);
758 	dump_desc_dbg(ioat, desc);
759 
760 	ioat_set_chainaddr(ioat, desc->txd.phys);
761 	ioat_start(chan);
762 	spin_unlock_bh(&ioat->desc_lock);
763 }
764 
765 /*
766  * Perform an IOAT transaction to verify the HW works.
767  */
768 #define IOAT_TEST_SIZE 2000
769 
770 static void ioat_dma_test_callback(void *dma_async_param)
771 {
772 	struct completion *cmp = dma_async_param;
773 
774 	complete(cmp);
775 }
776 
777 /**
778  * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
779  * @device: device to be tested
780  */
781 int ioat_dma_self_test(struct ioatdma_device *device)
782 {
783 	int i;
784 	u8 *src;
785 	u8 *dest;
786 	struct dma_device *dma = &device->common;
787 	struct device *dev = &device->pdev->dev;
788 	struct dma_chan *dma_chan;
789 	struct dma_async_tx_descriptor *tx;
790 	dma_addr_t dma_dest, dma_src;
791 	dma_cookie_t cookie;
792 	int err = 0;
793 	struct completion cmp;
794 	unsigned long tmo;
795 	unsigned long flags;
796 
797 	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
798 	if (!src)
799 		return -ENOMEM;
800 	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
801 	if (!dest) {
802 		kfree(src);
803 		return -ENOMEM;
804 	}
805 
806 	/* Fill in src buffer */
807 	for (i = 0; i < IOAT_TEST_SIZE; i++)
808 		src[i] = (u8)i;
809 
810 	/* Start copy, using first DMA channel */
811 	dma_chan = container_of(dma->channels.next, struct dma_chan,
812 				device_node);
813 	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
814 		dev_err(dev, "selftest cannot allocate chan resource\n");
815 		err = -ENODEV;
816 		goto out;
817 	}
818 
819 	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
820 	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
821 	flags = DMA_PREP_INTERRUPT;
822 	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
823 						   IOAT_TEST_SIZE, flags);
824 	if (!tx) {
825 		dev_err(dev, "Self-test prep failed, disabling\n");
826 		err = -ENODEV;
827 		goto unmap_dma;
828 	}
829 
830 	async_tx_ack(tx);
831 	init_completion(&cmp);
832 	tx->callback = ioat_dma_test_callback;
833 	tx->callback_param = &cmp;
834 	cookie = tx->tx_submit(tx);
835 	if (cookie < 0) {
836 		dev_err(dev, "Self-test setup failed, disabling\n");
837 		err = -ENODEV;
838 		goto unmap_dma;
839 	}
840 	dma->device_issue_pending(dma_chan);
841 
842 	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
843 
844 	if (tmo == 0 ||
845 	    dma->device_tx_status(dma_chan, cookie, NULL)
846 					!= DMA_COMPLETE) {
847 		dev_err(dev, "Self-test copy timed out, disabling\n");
848 		err = -ENODEV;
849 		goto unmap_dma;
850 	}
851 	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
852 		dev_err(dev, "Self-test copy failed compare, disabling\n");
853 		err = -ENODEV;
854 		goto free_resources;
855 	}
856 
857 unmap_dma:
858 	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
859 	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
860 free_resources:
861 	dma->device_free_chan_resources(dma_chan);
862 out:
863 	kfree(src);
864 	kfree(dest);
865 	return err;
866 }
867 
868 static char ioat_interrupt_style[32] = "msix";
869 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
870 		    sizeof(ioat_interrupt_style), 0644);
871 MODULE_PARM_DESC(ioat_interrupt_style,
872 		 "set ioat interrupt style: msix (default), msi, intx");
873 
874 /**
875  * ioat_dma_setup_interrupts - set up the interrupt handler
876  * @device: ioat device
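 *
 * Unless overridden by the ioat_interrupt_style module parameter, MSI-X is
 * tried first, falling back to MSI and then to legacy INTx.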
877  */
878 int ioat_dma_setup_interrupts(struct ioatdma_device *device)
879 {
880 	struct ioat_chan_common *chan;
881 	struct pci_dev *pdev = device->pdev;
882 	struct device *dev = &pdev->dev;
883 	struct msix_entry *msix;
884 	int i, j, msixcnt;
885 	int err = -EINVAL;
886 	u8 intrctrl = 0;
887 
888 	if (!strcmp(ioat_interrupt_style, "msix"))
889 		goto msix;
890 	if (!strcmp(ioat_interrupt_style, "msi"))
891 		goto msi;
892 	if (!strcmp(ioat_interrupt_style, "intx"))
893 		goto intx;
894 	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
895 	goto err_no_irq;
896 
897 msix:
898 	/* The number of MSI-X vectors should equal the number of channels */
899 	msixcnt = device->common.chancnt;
900 	for (i = 0; i < msixcnt; i++)
901 		device->msix_entries[i].entry = i;
902 
903 	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
904 	if (err)
905 		goto msi;
906 
907 	for (i = 0; i < msixcnt; i++) {
908 		msix = &device->msix_entries[i];
909 		chan = ioat_chan_by_index(device, i);
910 		err = devm_request_irq(dev, msix->vector,
911 				       ioat_dma_do_interrupt_msix, 0,
912 				       "ioat-msix", chan);
913 		if (err) {
914 			for (j = 0; j < i; j++) {
915 				msix = &device->msix_entries[j];
916 				chan = ioat_chan_by_index(device, j);
917 				devm_free_irq(dev, msix->vector, chan);
918 			}
919 			goto msi;
920 		}
921 	}
922 	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
923 	device->irq_mode = IOAT_MSIX;
924 	goto done;
925 
926 msi:
927 	err = pci_enable_msi(pdev);
928 	if (err)
929 		goto intx;
930 
931 	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
932 			       "ioat-msi", device);
933 	if (err) {
934 		pci_disable_msi(pdev);
935 		goto intx;
936 	}
937 	device->irq_mode = IOAT_MSI;
938 	goto done;
939 
940 intx:
941 	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
942 			       IRQF_SHARED, "ioat-intx", device);
943 	if (err)
944 		goto err_no_irq;
945 
946 	device->irq_mode = IOAT_INTX;
947 done:
948 	if (device->intr_quirk)
949 		device->intr_quirk(device);
950 	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
951 	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
952 	return 0;
953 
954 err_no_irq:
955 	/* Disable all interrupt generation */
956 	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
957 	device->irq_mode = IOAT_NOIRQ;
958 	dev_err(dev, "no usable interrupts\n");
959 	return err;
960 }
961 EXPORT_SYMBOL(ioat_dma_setup_interrupts);
962 
963 static void ioat_disable_interrupts(struct ioatdma_device *device)
964 {
965 	/* Disable all interrupt generation */
966 	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
967 }
968 
969 int ioat_probe(struct ioatdma_device *device)
970 {
971 	int err = -ENODEV;
972 	struct dma_device *dma = &device->common;
973 	struct pci_dev *pdev = device->pdev;
974 	struct device *dev = &pdev->dev;
975 
976 	/* DMA coherent memory pool for DMA descriptor allocations */
977 	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
978 					   sizeof(struct ioat_dma_descriptor),
979 					   64, 0);
980 	if (!device->dma_pool) {
981 		err = -ENOMEM;
982 		goto err_dma_pool;
983 	}
984 
985 	device->completion_pool = pci_pool_create("completion_pool", pdev,
986 						  sizeof(u64), SMP_CACHE_BYTES,
987 						  SMP_CACHE_BYTES);
988 
989 	if (!device->completion_pool) {
990 		err = -ENOMEM;
991 		goto err_completion_pool;
992 	}
993 
994 	device->enumerate_channels(device);
995 
996 	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
997 	dma->dev = &pdev->dev;
998 
999 	if (!dma->chancnt) {
1000 		dev_err(dev, "channel enumeration error\n");
1001 		goto err_setup_interrupts;
1002 	}
1003 
1004 	err = ioat_dma_setup_interrupts(device);
1005 	if (err)
1006 		goto err_setup_interrupts;
1007 
1008 	err = device->self_test(device);
1009 	if (err)
1010 		goto err_self_test;
1011 
1012 	return 0;
1013 
1014 err_self_test:
1015 	ioat_disable_interrupts(device);
1016 err_setup_interrupts:
1017 	pci_pool_destroy(device->completion_pool);
1018 err_completion_pool:
1019 	pci_pool_destroy(device->dma_pool);
1020 err_dma_pool:
1021 	return err;
1022 }
1023 
1024 int ioat_register(struct ioatdma_device *device)
1025 {
1026 	int err = dma_async_device_register(&device->common);
1027 
1028 	if (err) {
1029 		ioat_disable_interrupts(device);
1030 		pci_pool_destroy(device->completion_pool);
1031 		pci_pool_destroy(device->dma_pool);
1032 	}
1033 
1034 	return err;
1035 }
1036 
1037 /* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
1038 static void ioat1_intr_quirk(struct ioatdma_device *device)
1039 {
1040 	struct pci_dev *pdev = device->pdev;
1041 	u32 dmactrl;
1042 
1043 	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
1044 	if (pdev->msi_enabled)
1045 		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
1046 	else
1047 		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
1048 	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
1049 }
1050 
1051 static ssize_t ring_size_show(struct dma_chan *c, char *page)
1052 {
1053 	struct ioat_dma_chan *ioat = to_ioat_chan(c);
1054 
1055 	return sprintf(page, "%d\n", ioat->desccount);
1056 }
1057 static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
1058 
1059 static ssize_t ring_active_show(struct dma_chan *c, char *page)
1060 {
1061 	struct ioat_dma_chan *ioat = to_ioat_chan(c);
1062 
1063 	return sprintf(page, "%d\n", ioat->active);
1064 }
1065 static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
1066 
1067 static ssize_t cap_show(struct dma_chan *c, char *page)
1068 {
1069 	struct dma_device *dma = c->device;
1070 
1071 	return sprintf(page, "copy%s%s%s%s%s\n",
1072 		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
1073 		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
1074 		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
1075 		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
1076 		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
1077 
1078 }
1079 struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
1080 
1081 static ssize_t version_show(struct dma_chan *c, char *page)
1082 {
1083 	struct dma_device *dma = c->device;
1084 	struct ioatdma_device *device = to_ioatdma_device(dma);
1085 
1086 	return sprintf(page, "%d.%d\n",
1087 		       device->version >> 4, device->version & 0xf);
1088 }
1089 struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
1090 
1091 static struct attribute *ioat1_attrs[] = {
1092 	&ring_size_attr.attr,
1093 	&ring_active_attr.attr,
1094 	&ioat_cap_attr.attr,
1095 	&ioat_version_attr.attr,
1096 	NULL,
1097 };
1098 
1099 static ssize_t
1100 ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1101 {
1102 	struct ioat_sysfs_entry *entry;
1103 	struct ioat_chan_common *chan;
1104 
1105 	entry = container_of(attr, struct ioat_sysfs_entry, attr);
1106 	chan = container_of(kobj, struct ioat_chan_common, kobj);
1107 
1108 	if (!entry->show)
1109 		return -EIO;
1110 	return entry->show(&chan->common, page);
1111 }
1112 
1113 const struct sysfs_ops ioat_sysfs_ops = {
1114 	.show	= ioat_attr_show,
1115 };
1116 
1117 static struct kobj_type ioat1_ktype = {
1118 	.sysfs_ops = &ioat_sysfs_ops,
1119 	.default_attrs = ioat1_attrs,
1120 };
1121 
1122 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
1123 {
1124 	struct dma_device *dma = &device->common;
1125 	struct dma_chan *c;
1126 
1127 	list_for_each_entry(c, &dma->channels, device_node) {
1128 		struct ioat_chan_common *chan = to_chan_common(c);
1129 		struct kobject *parent = &c->dev->device.kobj;
1130 		int err;
1131 
1132 		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
1133 		if (err) {
1134 			dev_warn(to_dev(chan),
1135 				 "sysfs init error (%d), continuing...\n", err);
1136 			kobject_put(&chan->kobj);
1137 			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
1138 		}
1139 	}
1140 }
1141 
1142 void ioat_kobject_del(struct ioatdma_device *device)
1143 {
1144 	struct dma_device *dma = &device->common;
1145 	struct dma_chan *c;
1146 
1147 	list_for_each_entry(c, &dma->channels, device_node) {
1148 		struct ioat_chan_common *chan = to_chan_common(c);
1149 
1150 		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
1151 			kobject_del(&chan->kobj);
1152 			kobject_put(&chan->kobj);
1153 		}
1154 	}
1155 }
1156 
1157 int ioat1_dma_probe(struct ioatdma_device *device, int dca)
1158 {
1159 	struct pci_dev *pdev = device->pdev;
1160 	struct dma_device *dma;
1161 	int err;
1162 
1163 	device->intr_quirk = ioat1_intr_quirk;
1164 	device->enumerate_channels = ioat1_enumerate_channels;
1165 	device->self_test = ioat_dma_self_test;
1166 	device->timer_fn = ioat1_timer_event;
1167 	device->cleanup_fn = ioat1_cleanup_event;
1168 	dma = &device->common;
1169 	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
1170 	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
1171 	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
1172 	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
1173 	dma->device_tx_status = ioat_dma_tx_status;
1174 
1175 	err = ioat_probe(device);
1176 	if (err)
1177 		return err;
1178 	ioat_set_tcp_copy_break(4096);
1179 	err = ioat_register(device);
1180 	if (err)
1181 		return err;
1182 	ioat_kobject_add(device, &ioat1_ktype);
1183 
1184 	if (dca)
1185 		device->dca = ioat_dca_init(pdev, device->reg_base);
1186 
1187 	return err;
1188 }
1189 
1190 void ioat_dma_remove(struct ioatdma_device *device)
1191 {
1192 	struct dma_device *dma = &device->common;
1193 
1194 	ioat_disable_interrupts(device);
1195 
1196 	ioat_kobject_del(device);
1197 
1198 	dma_async_device_unregister(dma);
1199 
1200 	pci_pool_destroy(device->dma_pool);
1201 	pci_pool_destroy(device->completion_pool);
1202 
1203 	INIT_LIST_HEAD(&dma->channels);
1204 }
1205