/*
 * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "qib.h"
#include "qib_user_sdma.h"

/* minimum size of header */
#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
/* expected size of headers (for dma_pool) */
#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
/* attempt to drain the queue for 5secs */
#define QIB_USER_SDMA_DRAIN_TIMEOUT 250
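/*
 * The drain loop in qib_user_sdma_queue_drain() sleeps about 20 ms per
 * iteration, so 250 iterations bounds the drain at roughly the 5 seconds
 * noted above.
 */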

/*
 * track how many times a process opens this driver.
 */
static struct rb_root qib_user_sdma_rb_root = RB_ROOT;

struct qib_user_sdma_rb_node {
	struct rb_node node;
	int refcount;
	pid_t pid;
};
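
/*
 * One rb node per pid; refcount counts how many user SDMA queues the
 * process currently has open.  qib_user_sdma_push_pkts() treats a
 * refcount greater than one as "this process drives more than one
 * queue" and takes the shared, non-blocking submission path.
 */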

struct qib_user_sdma_pkt {
	struct list_head list;  /* list element */

	u8  tiddma;		/* if this is NEW tid-sdma */
	u8  largepkt;		/* this is large pkt from kmalloc */
	u16 frag_size;		/* frag size used by PSM */
	u16 index;              /* last header index or push index */
	u16 naddr;              /* dimension of addr (1..3) ... */
	u16 addrlimit;		/* addr array size */
	u16 tidsmidx;		/* current tidsm index */
	u16 tidsmcount;		/* tidsm array item count */
	u16 payload_size;	/* payload size so far for header */
	u32 bytes_togo;		/* bytes for processing */
	u32 counter;            /* sdma pkts queued counter for this entry */
	struct qib_tid_session_member *tidsm;	/* tid session member array */
	struct qib_user_sdma_queue *pq;	/* which pq this pkt belongs to */
	u64 added;              /* global descq number of entries */

	struct {
		u16 offset;                     /* offset for kvaddr, addr */
		u16 length;                     /* length in page */
		u16 first_desc;			/* first desc */
		u16 last_desc;			/* last desc */
		u16 put_page;                   /* should we put_page? */
		u16 dma_mapped;                 /* is page dma_mapped? */
		u16 dma_length;			/* for dma_unmap_page() */
		u16 padding;
		struct page *page;              /* may be NULL (coherent mem) */
		void *kvaddr;                   /* FIXME: only for pio hack */
		dma_addr_t addr;
	} addr[4];   /* max pages, any more and we coalesce */
};
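
/*
 * Note: when a request needs more than the four addr[] slots above but
 * still fits in a single fragment, qib_user_sdma_init_payload() falls
 * back to qib_user_sdma_coalesce(), which copies the payload into one
 * freshly allocated page.
 */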

struct qib_user_sdma_queue {
	/*
	 * pkts sent to dma engine are queued on this
	 * list head.  the type of the elements of this
	 * list are struct qib_user_sdma_pkt...
	 */
	struct list_head sent;

	/*
	 * Because above list will be accessed by both process and
	 * signal handler, we need a spinlock for it.
	 */
	spinlock_t sent_lock ____cacheline_aligned_in_smp;

	/* headers with expected length are allocated from here... */
	char header_cache_name[64];
	struct dma_pool *header_cache;

	/* packets are allocated from the slab cache... */
	char pkt_slab_name[64];
	struct kmem_cache *pkt_slab;

	/* as packets go on the queued queue, they are counted... */
	u32 counter;
	u32 sent_counter;
	/* pending packets, not sending yet */
	u32 num_pending;
	/* sending packets, not complete yet */
	u32 num_sending;
	/* global descq number of entry of last sending packet */
	u64 added;

	/* dma page table */
	struct rb_root dma_pages_root;

	struct qib_user_sdma_rb_node *sdma_rb_node;

	/* protect everything above... */
	struct mutex lock;
};

static struct qib_user_sdma_rb_node *
qib_user_sdma_rb_search(struct rb_root *root, pid_t pid)
{
	struct qib_user_sdma_rb_node *sdma_rb_node;
	struct rb_node *node = root->rb_node;

	while (node) {
		sdma_rb_node = rb_entry(node, struct qib_user_sdma_rb_node,
					node);
		if (pid < sdma_rb_node->pid)
			node = node->rb_left;
		else if (pid > sdma_rb_node->pid)
			node = node->rb_right;
		else
			return sdma_rb_node;
	}
	return NULL;
}

static int
qib_user_sdma_rb_insert(struct rb_root *root, struct qib_user_sdma_rb_node *new)
{
	struct rb_node **node = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct qib_user_sdma_rb_node *got;

	while (*node) {
		got = rb_entry(*node, struct qib_user_sdma_rb_node, node);
		parent = *node;
		if (new->pid < got->pid)
			node = &((*node)->rb_left);
		else if (new->pid > got->pid)
			node = &((*node)->rb_right);
		else
			return 0;
	}

	rb_link_node(&new->node, parent, node);
	rb_insert_color(&new->node, root);
	return 1;
}

struct qib_user_sdma_queue *
qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
{
	struct qib_user_sdma_queue *pq =
		kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
	struct qib_user_sdma_rb_node *sdma_rb_node;

	if (!pq)
		goto done;

	pq->counter = 0;
	pq->sent_counter = 0;
	pq->num_pending = 0;
	pq->num_sending = 0;
	pq->added = 0;
	pq->sdma_rb_node = NULL;

	INIT_LIST_HEAD(&pq->sent);
	spin_lock_init(&pq->sent_lock);
	mutex_init(&pq->lock);

	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
		 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
					 sizeof(struct qib_user_sdma_pkt),
					 0, 0, NULL);

	if (!pq->pkt_slab)
		goto err_kfree;

	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
		 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
	pq->header_cache = dma_pool_create(pq->header_cache_name,
					   dev,
					   QIB_USER_SDMA_EXP_HEADER_LENGTH,
					   4, 0);
	if (!pq->header_cache)
		goto err_slab;

	pq->dma_pages_root = RB_ROOT;

	sdma_rb_node = qib_user_sdma_rb_search(&qib_user_sdma_rb_root,
					current->pid);
	if (sdma_rb_node) {
		sdma_rb_node->refcount++;
	} else {
		int ret;

		sdma_rb_node = kmalloc(sizeof(
			struct qib_user_sdma_rb_node), GFP_KERNEL);
		if (!sdma_rb_node)
			goto err_rb;

		sdma_rb_node->refcount = 1;
		sdma_rb_node->pid = current->pid;

		ret = qib_user_sdma_rb_insert(&qib_user_sdma_rb_root,
					sdma_rb_node);
	}
	pq->sdma_rb_node = sdma_rb_node;

	goto done;

err_rb:
	dma_pool_destroy(pq->header_cache);
err_slab:
	kmem_cache_destroy(pq->pkt_slab);
err_kfree:
	kfree(pq);
	pq = NULL;

done:
	return pq;
}
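
/*
 * Rough sketch of the expected call sequence, as seen from the caller
 * (the variable names here are illustrative only):
 *
 *	pq = qib_user_sdma_queue_create(&dd->pcidev->dev, unit, ctxt, sctxt);
 *	...per user write request:
 *	ret = qib_user_sdma_writev(rcd, pq, iov, dim);
 *	...on close:
 *	qib_user_sdma_queue_drain(ppd, pq);
 *	qib_user_sdma_queue_destroy(pq);
 */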

static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
				    int i, u16 offset, u16 len,
				    u16 first_desc, u16 last_desc,
				    u16 put_page, u16 dma_mapped,
				    struct page *page, void *kvaddr,
				    dma_addr_t dma_addr, u16 dma_length)
{
	pkt->addr[i].offset = offset;
	pkt->addr[i].length = len;
	pkt->addr[i].first_desc = first_desc;
	pkt->addr[i].last_desc = last_desc;
	pkt->addr[i].put_page = put_page;
	pkt->addr[i].dma_mapped = dma_mapped;
	pkt->addr[i].page = page;
	pkt->addr[i].kvaddr = kvaddr;
	pkt->addr[i].addr = dma_addr;
	pkt->addr[i].dma_length = dma_length;
}

static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq,
				size_t len, dma_addr_t *dma_addr)
{
	void *hdr;

	if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
		hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
					     dma_addr);
	else
		hdr = NULL;

	if (!hdr) {
		hdr = kmalloc(len, GFP_KERNEL);
		if (!hdr)
			return NULL;

		*dma_addr = 0;
	}

	return hdr;
}
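
/*
 * A *dma_addr of zero coming back from qib_user_sdma_alloc_header() is
 * used as a sentinel meaning "kmalloc'ed, not dma mapped yet"; callers
 * test for it and map the header with dma_map_single() once the packet
 * is complete.
 */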

static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
				       struct qib_user_sdma_queue *pq,
				       struct qib_user_sdma_pkt *pkt,
				       struct page *page, u16 put,
				       u16 offset, u16 len, void *kvaddr)
{
	__le16 *pbc16;
	void *pbcvaddr;
	struct qib_message_header *hdr;
	u16 newlen, pbclen, lastdesc, dma_mapped;
	u32 vcto;
	union qib_seqnum seqnum;
	dma_addr_t pbcdaddr;
	dma_addr_t dma_addr =
		dma_map_page(&dd->pcidev->dev,
			page, offset, len, DMA_TO_DEVICE);
	int ret = 0;

	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
		/*
		 * dma mapping error, pkt has not managed
		 * this page yet, return the page here so
		 * the caller can ignore this page.
		 */
		if (put) {
			put_page(page);
		} else {
			/* coalesce case */
			kunmap(page);
			__free_page(page);
		}
		ret = -ENOMEM;
		goto done;
	}
	offset = 0;
	dma_mapped = 1;


next_fragment:

	/*
	 * In tid-sdma, the transfer length is restricted by
	 * receiver side current tid page length.
	 */
	if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length)
		newlen = pkt->tidsm[pkt->tidsmidx].length;
	else
		newlen = len;

	/*
	 * Then the transfer length is restricted by MTU.
	 * the last descriptor flag is determined by:
	 * 1. the current packet is at frag size length.
	 * 2. the current tid page is done if tid-sdma.
	 * 3. there is no more byte togo if sdma.
	 */
	lastdesc = 0;
	if ((pkt->payload_size + newlen) >= pkt->frag_size) {
		newlen = pkt->frag_size - pkt->payload_size;
		lastdesc = 1;
	} else if (pkt->tiddma) {
		if (newlen == pkt->tidsm[pkt->tidsmidx].length)
			lastdesc = 1;
	} else {
		if (newlen == pkt->bytes_togo)
			lastdesc = 1;
	}

	/* fill the next fragment in this page */
	qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
		offset, newlen,		/* offset, len */
		0, lastdesc,		/* first last desc */
		put, dma_mapped,	/* put page, dma mapped */
		page, kvaddr,		/* struct page, virt addr */
		dma_addr, len);		/* dma addr, dma length */
	pkt->bytes_togo -= newlen;
	pkt->payload_size += newlen;
	pkt->naddr++;
	if (pkt->naddr == pkt->addrlimit) {
		ret = -EFAULT;
		goto done;
	}

	/* If there is no more byte togo. (lastdesc==1) */
	if (pkt->bytes_togo == 0) {
		/* The packet is done, header is not dma mapped yet.
		 * it should be from kmalloc */
		if (!pkt->addr[pkt->index].addr) {
			pkt->addr[pkt->index].addr =
				dma_map_single(&dd->pcidev->dev,
					pkt->addr[pkt->index].kvaddr,
					pkt->addr[pkt->index].dma_length,
					DMA_TO_DEVICE);
			if (dma_mapping_error(&dd->pcidev->dev,
					pkt->addr[pkt->index].addr)) {
				ret = -ENOMEM;
				goto done;
			}
			pkt->addr[pkt->index].dma_mapped = 1;
		}

		goto done;
	}

	/* If tid-sdma, advance tid info. */
	if (pkt->tiddma) {
		pkt->tidsm[pkt->tidsmidx].length -= newlen;
		if (pkt->tidsm[pkt->tidsmidx].length) {
			pkt->tidsm[pkt->tidsmidx].offset += newlen;
		} else {
			pkt->tidsmidx++;
			if (pkt->tidsmidx == pkt->tidsmcount) {
				ret = -EFAULT;
				goto done;
			}
		}
	}

	/*
	 * If this is NOT the last descriptor. (newlen==len)
	 * the current packet is not done yet, but the current
	 * send side page is done.
	 */
	if (lastdesc == 0)
		goto done;

	/*
	 * When running this driver under PSM with a message size that
	 * fits into one transfer unit, it is not possible to reach this
	 * point; otherwise, it is a bug.
	 */

	/*
	 * Since the current packet is done, and there are more
	 * bytes togo, we need to create a new sdma header, copying
	 * from previous sdma header and modify both.
	 */
	pbclen = pkt->addr[pkt->index].length;
	pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr);
	if (!pbcvaddr) {
		ret = -ENOMEM;
		goto done;
	}
	/* Copy the previous sdma header to new sdma header */
	pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr;
	memcpy(pbcvaddr, pbc16, pbclen);

	/* Modify the previous sdma header */
	hdr = (struct qib_message_header *)&pbc16[4];

	/* New pbc length */
	pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2));

	/* New packet length */
	hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));

	if (pkt->tiddma) {
		/* turn on the header suppression */
		hdr->iph.pkt_flags =
			cpu_to_le16(le16_to_cpu(hdr->iph.pkt_flags)|0x2);
		/* turn off ACK_REQ: 0x04 and EXPECTED_DONE: 0x20 */
		hdr->flags &= ~(0x04|0x20);
	} else {
		/* turn off extra bytes: 20-21 bits */
		hdr->bth[0] = cpu_to_be32(be32_to_cpu(hdr->bth[0])&0xFFCFFFFF);
		/* turn off ACK_REQ: 0x04 */
		hdr->flags &= ~(0x04);
	}

	/* New kdeth checksum */
	vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
	hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
		be16_to_cpu(hdr->lrh[2]) -
		((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
		le16_to_cpu(hdr->iph.pkt_flags));

	/* The packet is done, header is not dma mapped yet.
	 * it should be from kmalloc */
	if (!pkt->addr[pkt->index].addr) {
		pkt->addr[pkt->index].addr =
			dma_map_single(&dd->pcidev->dev,
				pkt->addr[pkt->index].kvaddr,
				pkt->addr[pkt->index].dma_length,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&dd->pcidev->dev,
				pkt->addr[pkt->index].addr)) {
			ret = -ENOMEM;
			goto done;
		}
		pkt->addr[pkt->index].dma_mapped = 1;
	}

	/* Modify the new sdma header */
	pbc16 = (__le16 *)pbcvaddr;
	hdr = (struct qib_message_header *)&pbc16[4];

	/* New pbc length */
	pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2));

	/* New packet length */
	hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));

	if (pkt->tiddma) {
		/* Set new tid and offset for new sdma header */
		hdr->iph.ver_ctxt_tid_offset = cpu_to_le32(
			(le32_to_cpu(hdr->iph.ver_ctxt_tid_offset)&0xFF000000) +
			(pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) +
			(pkt->tidsm[pkt->tidsmidx].offset>>2));
	} else {
		/* Middle protocol new packet offset */
		hdr->uwords[2] += pkt->payload_size;
	}

	/* New kdeth checksum */
	vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
	hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
		be16_to_cpu(hdr->lrh[2]) -
		((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
		le16_to_cpu(hdr->iph.pkt_flags));

	/* Next sequence number in new sdma header */
	seqnum.val = be32_to_cpu(hdr->bth[2]);
	if (pkt->tiddma)
		seqnum.seq++;
	else
		seqnum.pkt++;
	hdr->bth[2] = cpu_to_be32(seqnum.val);

	/* Init new sdma header. */
	qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
		0, pbclen,		/* offset, len */
		1, 0,			/* first last desc */
		0, 0,			/* put page, dma mapped */
		NULL, pbcvaddr,		/* struct page, virt addr */
		pbcdaddr, pbclen);	/* dma addr, dma length */
	pkt->index = pkt->naddr;
	pkt->payload_size = 0;
	pkt->naddr++;
	if (pkt->naddr == pkt->addrlimit) {
		ret = -EFAULT;
		goto done;
	}

	/* Prepare for next fragment in this page */
	if (newlen != len) {
		if (dma_mapped) {
			put = 0;
			dma_mapped = 0;
			page = NULL;
			kvaddr = NULL;
		}
		len -= newlen;
		offset += newlen;

		goto next_fragment;
	}

done:
	return ret;
}

/* we have too many pages in the iovec, coalesce to a single page */
static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
				  struct qib_user_sdma_queue *pq,
				  struct qib_user_sdma_pkt *pkt,
				  const struct iovec *iov,
				  unsigned long niov)
{
	int ret = 0;
	struct page *page = alloc_page(GFP_KERNEL);
	void *mpage_save;
	char *mpage;
	int i;
	int len = 0;

	if (!page) {
		ret = -ENOMEM;
		goto done;
	}

	mpage = kmap(page);
	mpage_save = mpage;
	for (i = 0; i < niov; i++) {
		int cfur;

		cfur = copy_from_user(mpage,
				      iov[i].iov_base, iov[i].iov_len);
		if (cfur) {
			ret = -EFAULT;
			goto free_unmap;
		}

		mpage += iov[i].iov_len;
		len += iov[i].iov_len;
	}

	ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
			page, 0, 0, len, mpage_save);
	goto done;

free_unmap:
	kunmap(page);
	__free_page(page);
done:
	return ret;
}

/*
 * How many pages in this iovec element?
 */
static int qib_user_sdma_num_pages(const struct iovec *iov)
{
	const unsigned long addr  = (unsigned long) iov->iov_base;
	const unsigned long  len  = iov->iov_len;
	const unsigned long spage = addr & PAGE_MASK;
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}
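
/*
 * Example, assuming 4 KiB pages: iov_base == 0x1ffc and iov_len == 8
 * straddles a page boundary, so spage == 0x1000, epage == 0x2000 and
 * the function returns 2.
 */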

static void qib_user_sdma_free_pkt_frag(struct device *dev,
					struct qib_user_sdma_queue *pq,
					struct qib_user_sdma_pkt *pkt,
					int frag)
{
	const int i = frag;

	if (pkt->addr[i].page) {
		/* only user data has page */
		if (pkt->addr[i].dma_mapped)
			dma_unmap_page(dev,
				       pkt->addr[i].addr,
				       pkt->addr[i].dma_length,
				       DMA_TO_DEVICE);

		if (pkt->addr[i].kvaddr)
			kunmap(pkt->addr[i].page);

		if (pkt->addr[i].put_page)
			put_page(pkt->addr[i].page);
		else
			__free_page(pkt->addr[i].page);
	} else if (pkt->addr[i].kvaddr) {
		/* for headers */
		if (pkt->addr[i].dma_mapped) {
			/* from kmalloc & dma mapped */
			dma_unmap_single(dev,
				       pkt->addr[i].addr,
				       pkt->addr[i].dma_length,
				       DMA_TO_DEVICE);
			kfree(pkt->addr[i].kvaddr);
		} else if (pkt->addr[i].addr) {
			/* free coherent mem from cache... */
			dma_pool_free(pq->header_cache,
			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
		} else {
			/* from kmalloc but not dma mapped */
			kfree(pkt->addr[i].kvaddr);
		}
	}
}

/* return number of pages pinned... */
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
				   struct qib_user_sdma_queue *pq,
				   struct qib_user_sdma_pkt *pkt,
				   unsigned long addr, int tlen, int npages)
{
	struct page *pages[8];
	int i, j;
	int ret = 0;

	while (npages) {
		if (npages > 8)
			j = 8;
		else
			j = npages;

		ret = get_user_pages_fast(addr, j, 0, pages);
		if (ret != j) {
			i = 0;
			j = ret;
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < j; i++) {
			/* map the pages... */
			unsigned long fofs = addr & ~PAGE_MASK;
			int flen = ((fofs + tlen) > PAGE_SIZE) ?
				(PAGE_SIZE - fofs) : tlen;

			ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
				pages[i], 1, fofs, flen, NULL);
			if (ret < 0) {
				/* the current page has been taken
				 * care of inside the call above.
				 */
				i++;
				goto free_pages;
			}

			addr += flen;
			tlen -= flen;
		}

		npages -= j;
	}

	goto done;

	/* if error, return all pages not managed by pkt */
free_pages:
	while (i < j)
		put_page(pages[i++]);

done:
	return ret;
}

static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
				 struct qib_user_sdma_queue *pq,
				 struct qib_user_sdma_pkt *pkt,
				 const struct iovec *iov,
				 unsigned long niov)
{
	int ret = 0;
	unsigned long idx;

	for (idx = 0; idx < niov; idx++) {
		const int npages = qib_user_sdma_num_pages(iov + idx);
		const unsigned long addr = (unsigned long) iov[idx].iov_base;

		ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
					      iov[idx].iov_len, npages);
		if (ret < 0)
			goto free_pkt;
	}

	goto done;

free_pkt:
	/* we need to ignore the first entry here */
	for (idx = 1; idx < pkt->naddr; idx++)
		qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

	/* We need to dma unmap the first entry to restore it to its
	 * original state, so that the caller can free the memory on an
	 * error condition; the caller does not know whether it was
	 * dma mapped or not. */
	if (pkt->addr[0].dma_mapped) {
		dma_unmap_single(&dd->pcidev->dev,
		       pkt->addr[0].addr,
		       pkt->addr[0].dma_length,
		       DMA_TO_DEVICE);
		pkt->addr[0].addr = 0;
		pkt->addr[0].dma_mapped = 0;
	}

done:
	return ret;
}

static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
				      struct qib_user_sdma_queue *pq,
				      struct qib_user_sdma_pkt *pkt,
				      const struct iovec *iov,
				      unsigned long niov, int npages)
{
	int ret = 0;

	if (pkt->frag_size == pkt->bytes_togo &&
			npages >= ARRAY_SIZE(pkt->addr))
		ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
	else
		ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

	return ret;
}

/* free a packet list */
static void qib_user_sdma_free_pkt_list(struct device *dev,
					struct qib_user_sdma_queue *pq,
					struct list_head *list)
{
	struct qib_user_sdma_pkt *pkt, *pkt_next;

	list_for_each_entry_safe(pkt, pkt_next, list, list) {
		int i;

		for (i = 0; i < pkt->naddr; i++)
			qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);

		if (pkt->largepkt)
			kfree(pkt);
		else
			kmem_cache_free(pq->pkt_slab, pkt);
	}
	INIT_LIST_HEAD(list);
}

/*
 * copy headers, coalesce etc -- pq->lock must be held
 *
 * we queue all the packets on list, returning the number of iovec
 * elements consumed.  list must be empty initially, since we clean
 * it if there is an error...
 */
static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
				    struct qib_pportdata *ppd,
				    struct qib_user_sdma_queue *pq,
				    const struct iovec *iov,
				    unsigned long niov,
				    struct list_head *list,
				    int *maxpkts, int *ndesc)
{
	unsigned long idx = 0;
	int ret = 0;
	int npkts = 0;
	__le32 *pbc;
	dma_addr_t dma_addr;
	struct qib_user_sdma_pkt *pkt = NULL;
	size_t len;
	size_t nw;
	u32 counter = pq->counter;
	u16 frag_size;

	while (idx < niov && npkts < *maxpkts) {
		const unsigned long addr = (unsigned long) iov[idx].iov_base;
		const unsigned long idx_save = idx;
		unsigned pktnw;
		unsigned pktnwc;
		int nfrags = 0;
		int npages = 0;
		int bytes_togo = 0;
		int tiddma = 0;
		int cfur;

		len = iov[idx].iov_len;
		nw = len >> 2;

		if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
		    len > PAGE_SIZE || len & 3 || addr & 3) {
			ret = -EINVAL;
			goto free_list;
		}

		pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr);
		if (!pbc) {
			ret = -ENOMEM;
			goto free_list;
		}

		cfur = copy_from_user(pbc, iov[idx].iov_base, len);
		if (cfur) {
			ret = -EFAULT;
			goto free_pbc;
		}

		/*
		 * This assignment is a bit strange: the pbc counts the
		 * number of 32 bit words in the full packet _except_ the
		 * first word of the pbc itself...
		 */
		pktnwc = nw - 1;

		/*
		 * pktnw computation yields the number of 32 bit words
		 * that the caller has indicated in the PBC.  note that
		 * this is one less than the total number of words that
		 * goes to the send DMA engine as the first 32 bit word
		 * of the PBC itself is not counted.  Armed with this count,
		 * we can verify that the packet is consistent with the
		 * iovec lengths.
		 */
		pktnw = le32_to_cpu(*pbc) & 0xFFFF;
		if (pktnw < pktnwc) {
			ret = -EINVAL;
			goto free_pbc;
		}

		idx++;
		while (pktnwc < pktnw && idx < niov) {
			const size_t slen = iov[idx].iov_len;
			const unsigned long faddr =
				(unsigned long) iov[idx].iov_base;

			if (slen & 3 || faddr & 3 || !slen) {
				ret = -EINVAL;
				goto free_pbc;
			}

			npages += qib_user_sdma_num_pages(&iov[idx]);

			bytes_togo += slen;
			pktnwc += slen >> 2;
			idx++;
			nfrags++;
		}

		if (pktnwc != pktnw) {
			ret = -EINVAL;
			goto free_pbc;
		}
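		/*
		 * Worked example: a 64 byte header iovec gives nw == 16 and
		 * pktnwc == 15; each payload iovec then adds iov_len / 4,
		 * and the sum must match the word count the caller put in
		 * the low 16 bits of the PBC.
		 */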

		frag_size = ((le32_to_cpu(*pbc))>>16) & 0xFFFF;
		if (((frag_size ? frag_size : bytes_togo) + len) >
						ppd->ibmaxlen) {
			ret = -EINVAL;
			goto free_pbc;
		}

		if (frag_size) {
			int pktsize, tidsmsize, n;

			n = npages*((2*PAGE_SIZE/frag_size)+1);
			pktsize = sizeof(*pkt) + sizeof(pkt->addr[0])*n;

			/*
			 * Determine if this is tid-sdma or just sdma.
			 */
			tiddma = (((le32_to_cpu(pbc[7])>>
				QLOGIC_IB_I_TID_SHIFT)&
				QLOGIC_IB_I_TID_MASK) !=
				QLOGIC_IB_I_TID_MASK);

			if (tiddma)
				tidsmsize = iov[idx].iov_len;
			else
				tidsmsize = 0;

			pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
			if (!pkt) {
				ret = -ENOMEM;
				goto free_pbc;
			}
			pkt->largepkt = 1;
			pkt->frag_size = frag_size;
			pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);

			if (tiddma) {
				char *tidsm = (char *)pkt + pktsize;

				cfur = copy_from_user(tidsm,
					iov[idx].iov_base, tidsmsize);
				if (cfur) {
					ret = -EFAULT;
					goto free_pkt;
				}
				pkt->tidsm =
					(struct qib_tid_session_member *)tidsm;
				pkt->tidsmcount = tidsmsize/
					sizeof(struct qib_tid_session_member);
				pkt->tidsmidx = 0;
				idx++;
			}

			/*
			 * pbc 'fill1' field is borrowed to pass frag size,
			 * we need to clear it after picking frag size, the
			 * hardware requires this field to be zero.
			 */
			*pbc = cpu_to_le32(le32_to_cpu(*pbc) & 0x0000FFFF);
		} else {
			pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
			if (!pkt) {
				ret = -ENOMEM;
				goto free_pbc;
			}
			pkt->largepkt = 0;
			pkt->frag_size = bytes_togo;
			pkt->addrlimit = ARRAY_SIZE(pkt->addr);
		}
		pkt->bytes_togo = bytes_togo;
		pkt->payload_size = 0;
		pkt->counter = counter;
		pkt->tiddma = tiddma;

		/* setup the first header */
		qib_user_sdma_init_frag(pkt, 0, /* index */
			0, len,		/* offset, len */
			1, 0,		/* first last desc */
			0, 0,		/* put page, dma mapped */
			NULL, pbc,	/* struct page, virt addr */
			dma_addr, len);	/* dma addr, dma length */
		pkt->index = 0;
		pkt->naddr = 1;

		if (nfrags) {
			ret = qib_user_sdma_init_payload(dd, pq, pkt,
							 iov + idx_save + 1,
							 nfrags, npages);
			if (ret < 0)
				goto free_pkt;
		} else {
			/* since there is no payload, mark the
			 * header as the last desc. */
			pkt->addr[0].last_desc = 1;

			if (dma_addr == 0) {
				/*
				 * the header is not dma mapped yet.
				 * it should be from kmalloc.
				 */
				dma_addr = dma_map_single(&dd->pcidev->dev,
					pbc, len, DMA_TO_DEVICE);
				if (dma_mapping_error(&dd->pcidev->dev,
								dma_addr)) {
					ret = -ENOMEM;
					goto free_pkt;
				}
				pkt->addr[0].addr = dma_addr;
				pkt->addr[0].dma_mapped = 1;
			}
		}

		counter++;
		npkts++;
		pkt->pq = pq;
		pkt->index = 0; /* reset index for push on hw */
		*ndesc += pkt->naddr;

		list_add_tail(&pkt->list, list);
	}

	*maxpkts = npkts;
	ret = idx;
	goto done;

free_pkt:
	if (pkt->largepkt)
		kfree(pkt);
	else
		kmem_cache_free(pq->pkt_slab, pkt);
free_pbc:
	if (dma_addr)
		dma_pool_free(pq->header_cache, pbc, dma_addr);
	else
		kfree(pbc);
free_list:
	qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
	return ret;
}

static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
					       u32 c)
{
	pq->sent_counter = c;
}

/* try to clean out queue -- needs pq->lock */
static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
				     struct qib_user_sdma_queue *pq)
{
	struct qib_devdata *dd = ppd->dd;
	struct list_head free_list;
	struct qib_user_sdma_pkt *pkt;
	struct qib_user_sdma_pkt *pkt_prev;
	unsigned long flags;
	int ret = 0;

	if (!pq->num_sending)
		return 0;

	INIT_LIST_HEAD(&free_list);

	/*
	 * We need this spin lock here because the interrupt handler
	 * might modify this list in qib_user_sdma_send_desc(); we also
	 * must not be interrupted while holding it, otherwise we
	 * deadlock.
	 */
	spin_lock_irqsave(&pq->sent_lock, flags);
	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
		s64 descd = ppd->sdma_descq_removed - pkt->added;

		if (descd < 0)
			break;

		list_move_tail(&pkt->list, &free_list);

		/* one more packet cleaned */
		ret++;
		pq->num_sending--;
	}
	spin_unlock_irqrestore(&pq->sent_lock, flags);

	if (!list_empty(&free_list)) {
		u32 counter;

		pkt = list_entry(free_list.prev,
				 struct qib_user_sdma_pkt, list);
		counter = pkt->counter;

		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		qib_user_sdma_set_complete_counter(pq, counter);
	}

	return ret;
}
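
/*
 * In the loop above, a packet on pq->sent is considered complete once the
 * port's global sdma_descq_removed count has caught up with the descriptor
 * count recorded in pkt->added when the packet was submitted.
 */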

void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
{
	if (!pq)
		return;

	pq->sdma_rb_node->refcount--;
	if (pq->sdma_rb_node->refcount == 0) {
		rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
		kfree(pq->sdma_rb_node);
	}
	dma_pool_destroy(pq->header_cache);
	kmem_cache_destroy(pq->pkt_slab);
	kfree(pq);
}

/* clean descriptor queue, returns > 0 if some elements cleaned */
static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = qib_sdma_make_progress(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}

/* we're in close, drain packets so that we can cleanup successfully... */
void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
			       struct qib_user_sdma_queue *pq)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;
	int i;

	if (!pq)
		return;

	for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
		mutex_lock(&pq->lock);
		if (!pq->num_pending && !pq->num_sending) {
			mutex_unlock(&pq->lock);
			break;
		}
		qib_user_sdma_hwqueue_clean(ppd);
		qib_user_sdma_queue_clean(ppd, pq);
		mutex_unlock(&pq->lock);
		msleep(20);
	}

	if (pq->num_pending || pq->num_sending) {
		struct qib_user_sdma_pkt *pkt;
		struct qib_user_sdma_pkt *pkt_prev;
		struct list_head free_list;

		mutex_lock(&pq->lock);
		spin_lock_irqsave(&ppd->sdma_lock, flags);
		/*
		 * Since we hold sdma_lock, it is safe without sent_lock.
		 */
		if (pq->num_pending) {
			list_for_each_entry_safe(pkt, pkt_prev,
					&ppd->sdma_userpending, list) {
				if (pkt->pq == pq) {
					list_move_tail(&pkt->list, &pq->sent);
					pq->num_pending--;
					pq->num_sending++;
				}
			}
		}
		spin_unlock_irqrestore(&ppd->sdma_lock, flags);

		qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
		INIT_LIST_HEAD(&free_list);
		list_splice_init(&pq->sent, &free_list);
		pq->num_sending = 0;
		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		mutex_unlock(&pq->lock);
	}
}

static inline __le64 qib_sdma_make_desc0(u8 gen,
					 u64 addr, u64 dwlen, u64 dwoffset)
{
	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
			   ((addr & 0xfffffffcULL) << 32) |
			   /* SDmaGeneration[1:0] */
			   ((gen & 3ULL) << 30) |
			   /* SDmaDwordCount[10:0] */
			   ((dwlen & 0x7ffULL) << 16) |
			   /* SDmaBufOffset[12:2] */
			   (dwoffset & 0x7ffULL));
}

static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
{
	return descq | cpu_to_le64(1ULL << 12);
}

static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
{
					      /* last */  /* dma head */
	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 qib_sdma_make_desc1(u64 addr)
{
	/* SDmaPhyAddr[47:32] */
	return cpu_to_le64(addr >> 32);
}
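
/*
 * Putting the helpers above together, qword 0 of a descriptor carries
 * SDmaPhyAddr[31:0] in bits 63:32, the generation in bits 31:30, the
 * dword count in bits 26:16, interrupt request in bit 15, "large buffer"
 * in bit 14 (set later in qib_user_sdma_send_desc()), dma head in bit 13,
 * first in bit 12, last in bit 11 and the dword buffer offset in bits
 * 10:0.  Qword 1 carries SDmaPhyAddr[47:32].
 */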

static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
				    struct qib_user_sdma_pkt *pkt, int idx,
				    unsigned ofs, u16 tail, u8 gen)
{
	const u64 addr = (u64) pkt->addr[idx].addr +
		(u64) pkt->addr[idx].offset;
	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
	__le64 *descqp;
	__le64 descq0;

	descqp = &ppd->sdma_descq[tail].qw[0];

	descq0 = qib_sdma_make_desc0(gen, addr, dwlen, ofs);
	if (pkt->addr[idx].first_desc)
		descq0 = qib_sdma_make_first_desc0(descq0);
	if (pkt->addr[idx].last_desc) {
		descq0 = qib_sdma_make_last_desc0(descq0);
		if (ppd->sdma_intrequest) {
			descq0 |= cpu_to_le64(1ULL << 15);
			ppd->sdma_intrequest = 0;
		}
	}

	descqp[0] = descq0;
	descqp[1] = qib_sdma_make_desc1(addr);
}

void qib_user_sdma_send_desc(struct qib_pportdata *ppd,
				struct list_head *pktlist)
{
	struct qib_devdata *dd = ppd->dd;
	u16 nfree, nsent;
	u16 tail, tail_c;
	u8 gen, gen_c;

	nfree = qib_sdma_descq_freecnt(ppd);
	if (!nfree)
		return;

retry:
	nsent = 0;
	tail_c = tail = ppd->sdma_descq_tail;
	gen_c = gen = ppd->sdma_generation;
	while (!list_empty(pktlist)) {
		struct qib_user_sdma_pkt *pkt =
			list_entry(pktlist->next, struct qib_user_sdma_pkt,
				   list);
		int i, j, c = 0;
		unsigned ofs = 0;
		u16 dtail = tail;

		for (i = pkt->index; i < pkt->naddr && nfree; i++) {
			qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen);
			ofs += pkt->addr[i].length >> 2;

			if (++tail == ppd->sdma_descq_cnt) {
				tail = 0;
				++gen;
				ppd->sdma_intrequest = 1;
			} else if (tail == (ppd->sdma_descq_cnt>>1)) {
				ppd->sdma_intrequest = 1;
			}
			nfree--;
			if (pkt->addr[i].last_desc == 0)
				continue;

			/*
			 * If the packet is >= 2KB mtu equivalent, we
			 * have to use the large buffers, and have to
			 * mark each descriptor as part of a large
			 * buffer packet.
			 */
			if (ofs > dd->piosize2kmax_dwords) {
				for (j = pkt->index; j <= i; j++) {
					ppd->sdma_descq[dtail].qw[0] |=
						cpu_to_le64(1ULL << 14);
					if (++dtail == ppd->sdma_descq_cnt)
						dtail = 0;
				}
			}
			c += i + 1 - pkt->index;
			pkt->index = i + 1; /* index for next first */
			tail_c = dtail = tail;
			gen_c = gen;
			ofs = 0;  /* reset for next packet */
		}

		ppd->sdma_descq_added += c;
		nsent += c;
		if (pkt->index == pkt->naddr) {
			pkt->added = ppd->sdma_descq_added;
			pkt->pq->added = pkt->added;
			pkt->pq->num_pending--;
			spin_lock(&pkt->pq->sent_lock);
			pkt->pq->num_sending++;
			list_move_tail(&pkt->list, &pkt->pq->sent);
			spin_unlock(&pkt->pq->sent_lock);
		}
		if (!nfree || (nsent<<2) > ppd->sdma_descq_cnt)
			break;
	}

	/* advance the tail on the chip if necessary */
	if (ppd->sdma_descq_tail != tail_c) {
		ppd->sdma_generation = gen_c;
		dd->f_sdma_update_tail(ppd, tail_c);
	}

	if (nfree && !list_empty(pktlist))
		goto retry;
}

/* pq->lock must be held, get packets on the wire... */
static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
				 struct qib_user_sdma_queue *pq,
				 struct list_head *pktlist, int count)
{
	unsigned long flags;

	if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
		return -ECOMM;

	/* non-blocking mode */
	if (pq->sdma_rb_node->refcount > 1) {
		spin_lock_irqsave(&ppd->sdma_lock, flags);
		if (unlikely(!__qib_sdma_running(ppd))) {
			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
			return -ECOMM;
		}
		pq->num_pending += count;
		list_splice_tail_init(pktlist, &ppd->sdma_userpending);
		qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
		spin_unlock_irqrestore(&ppd->sdma_lock, flags);
		return 0;
	}

	/* In this case, descriptors from this process are not
	 * linked to ppd pending queue, interrupt handler
	 * won't update this process, it is OK to directly
	 * modify without sdma lock.
	 */


	pq->num_pending += count;
	/*
	 * Blocking mode for single rail process, we must
	 * release/regain sdma_lock to give other process
	 * chance to make progress. This is important for
	 * performance.
	 */
	do {
		spin_lock_irqsave(&ppd->sdma_lock, flags);
		if (unlikely(!__qib_sdma_running(ppd))) {
			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
			return -ECOMM;
		}
		qib_user_sdma_send_desc(ppd, pktlist);
		if (!list_empty(pktlist))
			qib_sdma_make_progress(ppd);
		spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	} while (!list_empty(pktlist));

	return 0;
}
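
/*
 * Note the asymmetry above: in the shared (refcount > 1) case, packets
 * that do not fit in the descriptor queue stay on ppd->sdma_userpending
 * and are pushed later from the interrupt path, while in the single
 * process case the loop keeps calling qib_user_sdma_send_desc() itself
 * until the local pktlist is empty.
 */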

int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
			 struct qib_user_sdma_queue *pq,
			 const struct iovec *iov,
			 unsigned long dim)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;
	int ret = 0;
	struct list_head list;
	int npkts = 0;

	INIT_LIST_HEAD(&list);

	mutex_lock(&pq->lock);

	/* why not -ECOMM like qib_user_sdma_push_pkts() below? */
	if (!qib_sdma_running(ppd))
		goto done_unlock;

	/* if I have packets not complete yet */
	if (pq->added > ppd->sdma_descq_removed)
		qib_user_sdma_hwqueue_clean(ppd);
	/* if I have complete packets to be freed */
	if (pq->num_sending)
		qib_user_sdma_queue_clean(ppd, pq);

	while (dim) {
		int mxp = 1;
		int ndesc = 0;

		ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
				iov, dim, &list, &mxp, &ndesc);
		if (ret < 0)
			goto done_unlock;
		else {
			dim -= ret;
			iov += ret;
		}

		/* force packets onto the sdma hw queue... */
		if (!list_empty(&list)) {
			/*
			 * Lazily clean hw queue.
			 */
			if (qib_sdma_descq_freecnt(ppd) < ndesc) {
				qib_user_sdma_hwqueue_clean(ppd);
				if (pq->num_sending)
					qib_user_sdma_queue_clean(ppd, pq);
			}

			ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp);
			if (ret < 0)
				goto done_unlock;
			else {
				npkts += mxp;
				pq->counter += mxp;
			}
		}
	}

done_unlock:
	if (!list_empty(&list))
		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
	mutex_unlock(&pq->lock);

	return (ret < 0) ? ret : npkts;
}

int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
				struct qib_user_sdma_queue *pq)
{
	int ret = 0;

	mutex_lock(&pq->lock);
	qib_user_sdma_hwqueue_clean(ppd);
	ret = qib_user_sdma_queue_clean(ppd, pq);
	mutex_unlock(&pq->lock);

	return ret;
}

u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
{
	return pq ? pq->sent_counter : 0;
}

u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
{
	return pq ? pq->counter : 0;
}