1 /*
2  * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/mm.h>
33 #include <linux/types.h>
34 #include <linux/device.h>
35 #include <linux/dmapool.h>
36 #include <linux/slab.h>
37 #include <linux/list.h>
38 #include <linux/highmem.h>
39 #include <linux/io.h>
40 #include <linux/uio.h>
41 #include <linux/rbtree.h>
42 #include <linux/spinlock.h>
43 #include <linux/delay.h>
44 
45 #include "qib.h"
46 #include "qib_user_sdma.h"
47 
48 /* minimum size of header */
49 #define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
50 /* expected size of headers (for dma_pool) */
51 #define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
52 /* attempt to drain the queue for 5 secs (250 waits of 20 msecs each) */
53 #define QIB_USER_SDMA_DRAIN_TIMEOUT 250
54 
55 /*
56  * track how many times a process has opened this driver.
57  */
58 static struct rb_root qib_user_sdma_rb_root = RB_ROOT;
59 
60 struct qib_user_sdma_rb_node {
61 	struct rb_node node;
62 	int refcount;
63 	pid_t pid;
64 };
65 
66 struct qib_user_sdma_pkt {
67 	struct list_head list;  /* list element */
68 
69 	u8  tiddma;		/* if this is NEW tid-sdma */
70 	u8  largepkt;		/* this is large pkt from kmalloc */
71 	u16 frag_size;		/* frag size used by PSM */
72 	u16 index;              /* last header index or push index */
73 	u16 naddr;              /* dimension of addr (1..3) ... */
74 	u16 addrlimit;		/* addr array size */
75 	u16 tidsmidx;		/* current tidsm index */
76 	u16 tidsmcount;		/* tidsm array item count */
77 	u16 payload_size;	/* payload size so far for header */
78 	u32 bytes_togo;		/* bytes for processing */
79 	u32 counter;            /* sdma pkts queued counter for this entry */
80 	struct qib_tid_session_member *tidsm;	/* tid session member array */
81 	struct qib_user_sdma_queue *pq;	/* which pq this pkt belongs to */
82 	u64 added;              /* global descq number of entries */
83 
84 	struct {
85 		u16 offset;                     /* offset for kvaddr, addr */
86 		u16 length;                     /* length in page */
87 		u16 first_desc;			/* first desc */
88 		u16 last_desc;			/* last desc */
89 		u16 put_page;                   /* should we put_page? */
90 		u16 dma_mapped;                 /* is page dma_mapped? */
91 		u16 dma_length;			/* for dma_unmap_page() */
92 		u16 padding;
93 		struct page *page;              /* may be NULL (coherent mem) */
94 		void *kvaddr;                   /* FIXME: only for pio hack */
95 		dma_addr_t addr;
96 	} addr[4];   /* max pages, any more and we coalesce */
97 };
98 
99 struct qib_user_sdma_queue {
100 	/*
101 	 * pkts sent to the dma engine are queued on this
102 	 * list head.  the elements of this list are of
103 	 * type struct qib_user_sdma_pkt...
104 	 */
105 	struct list_head sent;
106 
107 	/*
108 	 * Because the above list is accessed from both process context
109 	 * and the interrupt handler, it needs a spinlock.
110 	 */
111 	spinlock_t sent_lock ____cacheline_aligned_in_smp;
112 
113 	/* headers with expected length are allocated from here... */
114 	char header_cache_name[64];
115 	struct dma_pool *header_cache;
116 
117 	/* packets are allocated from the slab cache... */
118 	char pkt_slab_name[64];
119 	struct kmem_cache *pkt_slab;
120 
121 	/* as packets go on the queued queue, they are counted... */
122 	u32 counter;
123 	u32 sent_counter;
124 	/* pending packets, not sending yet */
125 	u32 num_pending;
126 	/* sending packets, not complete yet */
127 	u32 num_sending;
128 	/* global descq entry number of the last packet submitted for sending */
129 	u64 added;
130 
131 	/* dma page table */
132 	struct rb_root dma_pages_root;
133 
134 	struct qib_user_sdma_rb_node *sdma_rb_node;
135 
136 	/* protect everything above... */
137 	struct mutex lock;
138 };
139 
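/*
 * Look up the rb-tree node for a given pid; returns NULL if the
 * process has no node yet.
 */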
140 static struct qib_user_sdma_rb_node *
141 qib_user_sdma_rb_search(struct rb_root *root, pid_t pid)
142 {
143 	struct qib_user_sdma_rb_node *sdma_rb_node;
144 	struct rb_node *node = root->rb_node;
145 
146 	while (node) {
147 		sdma_rb_node = container_of(node,
148 			struct qib_user_sdma_rb_node, node);
149 		if (pid < sdma_rb_node->pid)
150 			node = node->rb_left;
151 		else if (pid > sdma_rb_node->pid)
152 			node = node->rb_right;
153 		else
154 			return sdma_rb_node;
155 	}
156 	return NULL;
157 }
158 
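/*
 * Insert a new node keyed by pid.  Returns 1 on success, or 0 if a
 * node for that pid is already present in the tree.
 */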
159 static int
160 qib_user_sdma_rb_insert(struct rb_root *root, struct qib_user_sdma_rb_node *new)
161 {
162 	struct rb_node **node = &(root->rb_node);
163 	struct rb_node *parent = NULL;
164 	struct qib_user_sdma_rb_node *got;
165 
166 	while (*node) {
167 		got = container_of(*node, struct qib_user_sdma_rb_node, node);
168 		parent = *node;
169 		if (new->pid < got->pid)
170 			node = &((*node)->rb_left);
171 		else if (new->pid > got->pid)
172 			node = &((*node)->rb_right);
173 		else
174 			return 0;
175 	}
176 
177 	rb_link_node(&new->node, parent, node);
178 	rb_insert_color(&new->node, root);
179 	return 1;
180 }
181 
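/*
 * Allocate and initialize a per-context user SDMA queue: a slab cache
 * for packet structures, a dma_pool for fixed-size headers, and a
 * reference on this process's rb-tree node (created if necessary).
 */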
182 struct qib_user_sdma_queue *
183 qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
184 {
185 	struct qib_user_sdma_queue *pq =
186 		kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
187 	struct qib_user_sdma_rb_node *sdma_rb_node;
188 
189 	if (!pq)
190 		goto done;
191 
192 	pq->counter = 0;
193 	pq->sent_counter = 0;
194 	pq->num_pending = 0;
195 	pq->num_sending = 0;
196 	pq->added = 0;
197 	pq->sdma_rb_node = NULL;
198 
199 	INIT_LIST_HEAD(&pq->sent);
200 	spin_lock_init(&pq->sent_lock);
201 	mutex_init(&pq->lock);
202 
203 	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
204 		 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
205 	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
206 					 sizeof(struct qib_user_sdma_pkt),
207 					 0, 0, NULL);
208 
209 	if (!pq->pkt_slab)
210 		goto err_kfree;
211 
212 	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
213 		 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
214 	pq->header_cache = dma_pool_create(pq->header_cache_name,
215 					   dev,
216 					   QIB_USER_SDMA_EXP_HEADER_LENGTH,
217 					   4, 0);
218 	if (!pq->header_cache)
219 		goto err_slab;
220 
221 	pq->dma_pages_root = RB_ROOT;
222 
223 	sdma_rb_node = qib_user_sdma_rb_search(&qib_user_sdma_rb_root,
224 					current->pid);
225 	if (sdma_rb_node) {
226 		sdma_rb_node->refcount++;
227 	} else {
228 		int ret;
229 
230 		sdma_rb_node = kmalloc(sizeof(
231 			struct qib_user_sdma_rb_node), GFP_KERNEL);
232 		if (!sdma_rb_node)
233 			goto err_rb;
234 
235 		sdma_rb_node->refcount = 1;
236 		sdma_rb_node->pid = current->pid;
237 
238 		ret = qib_user_sdma_rb_insert(&qib_user_sdma_rb_root,
239 					sdma_rb_node);
240 		BUG_ON(ret == 0);
241 	}
242 	pq->sdma_rb_node = sdma_rb_node;
243 
244 	goto done;
245 
246 err_rb:
247 	dma_pool_destroy(pq->header_cache);
248 err_slab:
249 	kmem_cache_destroy(pq->pkt_slab);
250 err_kfree:
251 	kfree(pq);
252 	pq = NULL;
253 
254 done:
255 	return pq;
256 }
257 
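/* Fill in one pkt->addr[] entry describing a single fragment. */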
258 static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
259 				    int i, u16 offset, u16 len,
260 				    u16 first_desc, u16 last_desc,
261 				    u16 put_page, u16 dma_mapped,
262 				    struct page *page, void *kvaddr,
263 				    dma_addr_t dma_addr, u16 dma_length)
264 {
265 	pkt->addr[i].offset = offset;
266 	pkt->addr[i].length = len;
267 	pkt->addr[i].first_desc = first_desc;
268 	pkt->addr[i].last_desc = last_desc;
269 	pkt->addr[i].put_page = put_page;
270 	pkt->addr[i].dma_mapped = dma_mapped;
271 	pkt->addr[i].page = page;
272 	pkt->addr[i].kvaddr = kvaddr;
273 	pkt->addr[i].addr = dma_addr;
274 	pkt->addr[i].dma_length = dma_length;
275 }
276 
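/*
 * Allocate a header buffer: preferably from the dma_pool when the
 * length matches the expected header size; otherwise fall back to
 * kmalloc, with *dma_addr set to 0 so the caller knows the buffer
 * still has to be mapped with dma_map_single().
 */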
277 static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq,
278 				size_t len, dma_addr_t *dma_addr)
279 {
280 	void *hdr;
281 
282 	if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
283 		hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
284 					     dma_addr);
285 	else
286 		hdr = NULL;
287 
288 	if (!hdr) {
289 		hdr = kmalloc(len, GFP_KERNEL);
290 		if (!hdr)
291 			return NULL;
292 
293 		*dma_addr = 0;
294 	}
295 
296 	return hdr;
297 }
298 
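/*
 * Map a page of user payload for DMA and carve it into fragments.
 * Each fragment is bounded by the packet's frag_size and, for
 * tid-sdma, by the length left in the current tid entry.  When a
 * fragment completes a packet but bytes remain, a new sdma header is
 * allocated and both the old and new headers are fixed up (lengths,
 * flags, kdeth checksum, sequence number) before continuing with the
 * rest of the page.
 */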
299 static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
300 				       struct qib_user_sdma_queue *pq,
301 				       struct qib_user_sdma_pkt *pkt,
302 				       struct page *page, u16 put,
303 				       u16 offset, u16 len, void *kvaddr)
304 {
305 	__le16 *pbc16;
306 	void *pbcvaddr;
307 	struct qib_message_header *hdr;
308 	u16 newlen, pbclen, lastdesc, dma_mapped;
309 	u32 vcto;
310 	union qib_seqnum seqnum;
311 	dma_addr_t pbcdaddr;
312 	dma_addr_t dma_addr =
313 		dma_map_page(&dd->pcidev->dev,
314 			page, offset, len, DMA_TO_DEVICE);
315 	int ret = 0;
316 
317 	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
318 		/*
319 		 * dma mapping error: the pkt does not own this
320 		 * page yet, so release the page here and let the
321 		 * caller ignore it.
322 		 */
323 		if (put) {
324 			put_page(page);
325 		} else {
326 			/* coalesce case */
327 			kunmap(page);
328 			__free_page(page);
329 		}
330 		ret = -ENOMEM;
331 		goto done;
332 	}
333 	offset = 0;
334 	dma_mapped = 1;
335 
336 
337 next_fragment:
338 
339 	/*
340 	 * In tid-sdma, the transfer length is restricted by the
341 	 * receiver side's current tid page length.
342 	 */
343 	if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length)
344 		newlen = pkt->tidsm[pkt->tidsmidx].length;
345 	else
346 		newlen = len;
347 
348 	/*
349 	 * Then the transfer length is restricted by the MTU.
350 	 * The last descriptor flag is set when:
351 	 * 1. the current packet reaches the frag size length, or
352 	 * 2. the current tid page is done (tid-sdma), or
353 	 * 3. there are no more bytes to go (plain sdma).
354 	 */
355 	lastdesc = 0;
356 	if ((pkt->payload_size + newlen) >= pkt->frag_size) {
357 		newlen = pkt->frag_size - pkt->payload_size;
358 		lastdesc = 1;
359 	} else if (pkt->tiddma) {
360 		if (newlen == pkt->tidsm[pkt->tidsmidx].length)
361 			lastdesc = 1;
362 	} else {
363 		if (newlen == pkt->bytes_togo)
364 			lastdesc = 1;
365 	}
366 
367 	/* fill the next fragment in this page */
368 	qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
369 		offset, newlen,		/* offset, len */
370 		0, lastdesc,		/* first last desc */
371 		put, dma_mapped,	/* put page, dma mapped */
372 		page, kvaddr,		/* struct page, virt addr */
373 		dma_addr, len);		/* dma addr, dma length */
374 	pkt->bytes_togo -= newlen;
375 	pkt->payload_size += newlen;
376 	pkt->naddr++;
377 	if (pkt->naddr == pkt->addrlimit) {
378 		ret = -EFAULT;
379 		goto done;
380 	}
381 
382 	/* If there are no more bytes to go (lastdesc == 1). */
383 	if (pkt->bytes_togo == 0) {
384 		/* The packet is done but the header is not dma mapped yet;
385 		 * it must have come from kmalloc. */
386 		if (!pkt->addr[pkt->index].addr) {
387 			pkt->addr[pkt->index].addr =
388 				dma_map_single(&dd->pcidev->dev,
389 					pkt->addr[pkt->index].kvaddr,
390 					pkt->addr[pkt->index].dma_length,
391 					DMA_TO_DEVICE);
392 			if (dma_mapping_error(&dd->pcidev->dev,
393 					pkt->addr[pkt->index].addr)) {
394 				ret = -ENOMEM;
395 				goto done;
396 			}
397 			pkt->addr[pkt->index].dma_mapped = 1;
398 		}
399 
400 		goto done;
401 	}
402 
403 	/* If tid-sdma, advance tid info. */
404 	if (pkt->tiddma) {
405 		pkt->tidsm[pkt->tidsmidx].length -= newlen;
406 		if (pkt->tidsm[pkt->tidsmidx].length) {
407 			pkt->tidsm[pkt->tidsmidx].offset += newlen;
408 		} else {
409 			pkt->tidsmidx++;
410 			if (pkt->tidsmidx == pkt->tidsmcount) {
411 				ret = -EFAULT;
412 				goto done;
413 			}
414 		}
415 	}
416 
417 	/*
418 	 * If this is NOT the last descriptor (newlen == len), the
419 	 * current packet is not done yet, but the current send-side
420 	 * page is, so there is nothing more to do here.
421 	 */
422 	if (lastdesc == 0)
423 		goto done;
424 
425 	/*
426 	 * When running this driver under PSM with a message size that
427 	 * fits into one transfer unit, it is not possible to reach this
428 	 * point; getting here in that case would indicate a bug.
429 	 */
430 
431 	/*
432 	 * Since the current packet is done and there are more bytes
433 	 * to go, we need to create a new sdma header, copying it
434 	 * from the previous sdma header, and then modify both.
435 	 */
436 	pbclen = pkt->addr[pkt->index].length;
437 	pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr);
438 	if (!pbcvaddr) {
439 		ret = -ENOMEM;
440 		goto done;
441 	}
442 	/* Copy the previous sdma header to new sdma header */
443 	pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr;
444 	memcpy(pbcvaddr, pbc16, pbclen);
445 
446 	/* Modify the previous sdma header */
447 	hdr = (struct qib_message_header *)&pbc16[4];
448 
449 	/* New pbc length */
450 	pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2));
451 
452 	/* New packet length */
453 	hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
454 
455 	if (pkt->tiddma) {
456 		/* turn on the header suppression */
457 		hdr->iph.pkt_flags =
458 			cpu_to_le16(le16_to_cpu(hdr->iph.pkt_flags)|0x2);
459 		/* turn off ACK_REQ: 0x04 and EXPECTED_DONE: 0x20 */
460 		hdr->flags &= ~(0x04|0x20);
461 	} else {
462 		/* turn off extra bytes: bits 20-21 */
463 		hdr->bth[0] = cpu_to_be32(be32_to_cpu(hdr->bth[0])&0xFFCFFFFF);
464 		/* turn off ACK_REQ: 0x04 */
465 		hdr->flags &= ~(0x04);
466 	}
467 
468 	/* New kdeth checksum */
469 	vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
470 	hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
471 		be16_to_cpu(hdr->lrh[2]) -
472 		((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
473 		le16_to_cpu(hdr->iph.pkt_flags));
474 
475 	/* The packet is done but the header is not dma mapped yet;
476 	 * it must have come from kmalloc. */
477 	if (!pkt->addr[pkt->index].addr) {
478 		pkt->addr[pkt->index].addr =
479 			dma_map_single(&dd->pcidev->dev,
480 				pkt->addr[pkt->index].kvaddr,
481 				pkt->addr[pkt->index].dma_length,
482 				DMA_TO_DEVICE);
483 		if (dma_mapping_error(&dd->pcidev->dev,
484 				pkt->addr[pkt->index].addr)) {
485 			ret = -ENOMEM;
486 			goto done;
487 		}
488 		pkt->addr[pkt->index].dma_mapped = 1;
489 	}
490 
491 	/* Modify the new sdma header */
492 	pbc16 = (__le16 *)pbcvaddr;
493 	hdr = (struct qib_message_header *)&pbc16[4];
494 
495 	/* New pbc length */
496 	pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2));
497 
498 	/* New packet length */
499 	hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
500 
501 	if (pkt->tiddma) {
502 		/* Set new tid and offset for new sdma header */
503 		hdr->iph.ver_ctxt_tid_offset = cpu_to_le32(
504 			(le32_to_cpu(hdr->iph.ver_ctxt_tid_offset)&0xFF000000) +
505 			(pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) +
506 			(pkt->tidsm[pkt->tidsmidx].offset>>2));
507 	} else {
508 		/* Middle protocol new packet offset */
509 		hdr->uwords[2] += pkt->payload_size;
510 	}
511 
512 	/* New kdeth checksum */
513 	vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
514 	hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
515 		be16_to_cpu(hdr->lrh[2]) -
516 		((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
517 		le16_to_cpu(hdr->iph.pkt_flags));
518 
519 	/* Next sequence number in new sdma header */
520 	seqnum.val = be32_to_cpu(hdr->bth[2]);
521 	if (pkt->tiddma)
522 		seqnum.seq++;
523 	else
524 		seqnum.pkt++;
525 	hdr->bth[2] = cpu_to_be32(seqnum.val);
526 
527 	/* Init new sdma header. */
528 	qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
529 		0, pbclen,		/* offset, len */
530 		1, 0,			/* first last desc */
531 		0, 0,			/* put page, dma mapped */
532 		NULL, pbcvaddr,		/* struct page, virt addr */
533 		pbcdaddr, pbclen);	/* dma addr, dma length */
534 	pkt->index = pkt->naddr;
535 	pkt->payload_size = 0;
536 	pkt->naddr++;
537 	if (pkt->naddr == pkt->addrlimit) {
538 		ret = -EFAULT;
539 		goto done;
540 	}
541 
542 	/* Prepare for next fragment in this page */
543 	if (newlen != len) {
544 		if (dma_mapped) {
545 			put = 0;
546 			dma_mapped = 0;
547 			page = NULL;
548 			kvaddr = NULL;
549 		}
550 		len -= newlen;
551 		offset += newlen;
552 
553 		goto next_fragment;
554 	}
555 
556 done:
557 	return ret;
558 }
559 
560 /* we have too many pages in the iovec, coalesce everything into a single page */
561 static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
562 				  struct qib_user_sdma_queue *pq,
563 				  struct qib_user_sdma_pkt *pkt,
564 				  const struct iovec *iov,
565 				  unsigned long niov)
566 {
567 	int ret = 0;
568 	struct page *page = alloc_page(GFP_KERNEL);
569 	void *mpage_save;
570 	char *mpage;
571 	int i;
572 	int len = 0;
573 
574 	if (!page) {
575 		ret = -ENOMEM;
576 		goto done;
577 	}
578 
579 	mpage = kmap(page);
580 	mpage_save = mpage;
581 	for (i = 0; i < niov; i++) {
582 		int cfur;
583 
584 		cfur = copy_from_user(mpage,
585 				      iov[i].iov_base, iov[i].iov_len);
586 		if (cfur) {
587 			ret = -EFAULT;
588 			goto free_unmap;
589 		}
590 
591 		mpage += iov[i].iov_len;
592 		len += iov[i].iov_len;
593 	}
594 
595 	ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
596 			page, 0, 0, len, mpage_save);
597 	goto done;
598 
599 free_unmap:
600 	kunmap(page);
601 	__free_page(page);
602 done:
603 	return ret;
604 }
605 
606 /*
607  * How many pages in this iovec element?
608  */
609 static int qib_user_sdma_num_pages(const struct iovec *iov)
610 {
611 	const unsigned long addr  = (unsigned long) iov->iov_base;
612 	const unsigned long  len  = iov->iov_len;
613 	const unsigned long spage = addr & PAGE_MASK;
614 	const unsigned long epage = (addr + len - 1) & PAGE_MASK;
615 
616 	return 1 + ((epage - spage) >> PAGE_SHIFT);
617 }
618 
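/*
 * Release everything referenced by one pkt->addr[] entry: unmap the
 * DMA mapping, then kunmap/put or free the user page, or free the
 * header buffer back to the dma_pool or to kmalloc.
 */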
619 static void qib_user_sdma_free_pkt_frag(struct device *dev,
620 					struct qib_user_sdma_queue *pq,
621 					struct qib_user_sdma_pkt *pkt,
622 					int frag)
623 {
624 	const int i = frag;
625 
626 	if (pkt->addr[i].page) {
627 		/* only user data has page */
628 		if (pkt->addr[i].dma_mapped)
629 			dma_unmap_page(dev,
630 				       pkt->addr[i].addr,
631 				       pkt->addr[i].dma_length,
632 				       DMA_TO_DEVICE);
633 
634 		if (pkt->addr[i].kvaddr)
635 			kunmap(pkt->addr[i].page);
636 
637 		if (pkt->addr[i].put_page)
638 			put_page(pkt->addr[i].page);
639 		else
640 			__free_page(pkt->addr[i].page);
641 	} else if (pkt->addr[i].kvaddr) {
642 		/* for headers */
643 		if (pkt->addr[i].dma_mapped) {
644 			/* from kmalloc & dma mapped */
645 			dma_unmap_single(dev,
646 				       pkt->addr[i].addr,
647 				       pkt->addr[i].dma_length,
648 				       DMA_TO_DEVICE);
649 			kfree(pkt->addr[i].kvaddr);
650 		} else if (pkt->addr[i].addr) {
651 			/* free coherent mem from cache... */
652 			dma_pool_free(pq->header_cache,
653 			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
654 		} else {
655 			/* from kmalloc but not dma mapped */
656 			kfree(pkt->addr[i].kvaddr);
657 		}
658 	}
659 }
660 
661 /* pin the user pages and add them to the packet; 0 on success or -errno... */
662 static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
663 				   struct qib_user_sdma_queue *pq,
664 				   struct qib_user_sdma_pkt *pkt,
665 				   unsigned long addr, int tlen, int npages)
666 {
667 	struct page *pages[8];
668 	int i, j;
669 	int ret = 0;
670 
671 	while (npages) {
672 		if (npages > 8)
673 			j = 8;
674 		else
675 			j = npages;
676 
677 		ret = get_user_pages_fast(addr, j, 0, pages);
678 		if (ret != j) {
679 			i = 0;
680 			j = ret;
681 			ret = -ENOMEM;
682 			goto free_pages;
683 		}
684 
685 		for (i = 0; i < j; i++) {
686 			/* map the pages... */
687 			unsigned long fofs = addr & ~PAGE_MASK;
688 			int flen = ((fofs + tlen) > PAGE_SIZE) ?
689 				(PAGE_SIZE - fofs) : tlen;
690 
691 			ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
692 				pages[i], 1, fofs, flen, NULL);
693 			if (ret < 0) {
694 			/* the current page has already been taken
695 			 * care of inside the above call.
696 			 */
697 				i++;
698 				goto free_pages;
699 			}
700 
701 			addr += flen;
702 			tlen -= flen;
703 		}
704 
705 		npages -= j;
706 	}
707 
708 	goto done;
709 
710 	/* if error, return all pages not managed by pkt */
711 free_pages:
712 	while (i < j)
713 		put_page(pages[i++]);
714 
715 done:
716 	return ret;
717 }
718 
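/*
 * Pin and map the user pages behind every payload iovec of a packet.
 * On failure, all fragments except the header (entry 0) are torn down
 * so the caller can free the packet cleanly.
 */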
719 static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
720 				 struct qib_user_sdma_queue *pq,
721 				 struct qib_user_sdma_pkt *pkt,
722 				 const struct iovec *iov,
723 				 unsigned long niov)
724 {
725 	int ret = 0;
726 	unsigned long idx;
727 
728 	for (idx = 0; idx < niov; idx++) {
729 		const int npages = qib_user_sdma_num_pages(iov + idx);
730 		const unsigned long addr = (unsigned long) iov[idx].iov_base;
731 
732 		ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
733 					      iov[idx].iov_len, npages);
734 		if (ret < 0)
735 			goto free_pkt;
736 	}
737 
738 	goto done;
739 
740 free_pkt:
741 	/* we need to ignore the first entry here */
742 	for (idx = 1; idx < pkt->naddr; idx++)
743 		qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
744 
745 	/* dma unmap the first entry to restore it to its original state,
746 	 * so that the caller can free the memory on the error path; the
747 	 * caller does not know whether it was dma mapped or not. */
748 	if (pkt->addr[0].dma_mapped) {
749 		dma_unmap_single(&dd->pcidev->dev,
750 		       pkt->addr[0].addr,
751 		       pkt->addr[0].dma_length,
752 		       DMA_TO_DEVICE);
753 		pkt->addr[0].addr = 0;
754 		pkt->addr[0].dma_mapped = 0;
755 	}
756 
757 done:
758 	return ret;
759 }
760 
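/*
 * Attach the payload to a packet: if the whole payload fits in a
 * single fragment but spans more pages than pkt->addr[] can describe,
 * coalesce it into one freshly allocated page; otherwise pin the user
 * pages directly.
 */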
761 static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
762 				      struct qib_user_sdma_queue *pq,
763 				      struct qib_user_sdma_pkt *pkt,
764 				      const struct iovec *iov,
765 				      unsigned long niov, int npages)
766 {
767 	int ret = 0;
768 
769 	if (pkt->frag_size == pkt->bytes_togo &&
770 			npages >= ARRAY_SIZE(pkt->addr))
771 		ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
772 	else
773 		ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
774 
775 	return ret;
776 }
777 
778 /* free a packet list, releasing everything referenced by each packet's frags */
779 static void qib_user_sdma_free_pkt_list(struct device *dev,
780 					struct qib_user_sdma_queue *pq,
781 					struct list_head *list)
782 {
783 	struct qib_user_sdma_pkt *pkt, *pkt_next;
784 
785 	list_for_each_entry_safe(pkt, pkt_next, list, list) {
786 		int i;
787 
788 		for (i = 0; i < pkt->naddr; i++)
789 			qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
790 
791 		if (pkt->largepkt)
792 			kfree(pkt);
793 		else
794 			kmem_cache_free(pq->pkt_slab, pkt);
795 	}
796 	INIT_LIST_HEAD(list);
797 }
798 
799 /*
800  * copy headers, coalesce etc -- pq->lock must be held
801  *
802  * we queue all the packets onto list, returning the number of
803  * iovec entries consumed.  list must be empty initially; if
804  * there is an error we clean it out...
805  */
806 static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
807 				    struct qib_pportdata *ppd,
808 				    struct qib_user_sdma_queue *pq,
809 				    const struct iovec *iov,
810 				    unsigned long niov,
811 				    struct list_head *list,
812 				    int *maxpkts, int *ndesc)
813 {
814 	unsigned long idx = 0;
815 	int ret = 0;
816 	int npkts = 0;
817 	__le32 *pbc;
818 	dma_addr_t dma_addr;
819 	struct qib_user_sdma_pkt *pkt = NULL;
820 	size_t len;
821 	size_t nw;
822 	u32 counter = pq->counter;
823 	u16 frag_size;
824 
825 	while (idx < niov && npkts < *maxpkts) {
826 		const unsigned long addr = (unsigned long) iov[idx].iov_base;
827 		const unsigned long idx_save = idx;
828 		unsigned pktnw;
829 		unsigned pktnwc;
830 		int nfrags = 0;
831 		int npages = 0;
832 		int bytes_togo = 0;
833 		int tiddma = 0;
834 		int cfur;
835 
836 		len = iov[idx].iov_len;
837 		nw = len >> 2;
838 
839 		if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
840 		    len > PAGE_SIZE || len & 3 || addr & 3) {
841 			ret = -EINVAL;
842 			goto free_list;
843 		}
844 
845 		pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr);
846 		if (!pbc) {
847 			ret = -ENOMEM;
848 			goto free_list;
849 		}
850 
851 		cfur = copy_from_user(pbc, iov[idx].iov_base, len);
852 		if (cfur) {
853 			ret = -EFAULT;
854 			goto free_pbc;
855 		}
856 
857 		/*
858 		 * This assignment is a bit strange: the pbc counts the
859 		 * number of 32 bit words in the full packet _except_
860 		 * the first word of the pbc itself...
861 		 */
862 		pktnwc = nw - 1;
863 
864 		/*
865 		 * pktnw computation yields the number of 32 bit words
866 		 * that the caller has indicated in the PBC.  note that
867 		 * this is one less than the total number of words that
868 		 * goes to the send DMA engine as the first 32 bit word
869 		 * of the PBC itself is not counted.  Armed with this count,
870 		 * we can verify that the packet is consistent with the
871 		 * iovec lengths.
872 		 */
873 		pktnw = le32_to_cpu(*pbc) & 0xFFFF;
874 		if (pktnw < pktnwc) {
875 			ret = -EINVAL;
876 			goto free_pbc;
877 		}
878 
879 		idx++;
880 		while (pktnwc < pktnw && idx < niov) {
881 			const size_t slen = iov[idx].iov_len;
882 			const unsigned long faddr =
883 				(unsigned long) iov[idx].iov_base;
884 
885 			if (slen & 3 || faddr & 3 || !slen) {
886 				ret = -EINVAL;
887 				goto free_pbc;
888 			}
889 
890 			npages += qib_user_sdma_num_pages(&iov[idx]);
891 
892 			bytes_togo += slen;
893 			pktnwc += slen >> 2;
894 			idx++;
895 			nfrags++;
896 		}
897 
898 		if (pktnwc != pktnw) {
899 			ret = -EINVAL;
900 			goto free_pbc;
901 		}
902 
903 		frag_size = ((le32_to_cpu(*pbc))>>16) & 0xFFFF;
904 		if (((frag_size ? frag_size : bytes_togo) + len) >
905 						ppd->ibmaxlen) {
906 			ret = -EINVAL;
907 			goto free_pbc;
908 		}
909 
910 		if (frag_size) {
911 			int pktsize, tidsmsize, n;
912 
913 			n = npages*((2*PAGE_SIZE/frag_size)+1);
914 			pktsize = sizeof(*pkt) + sizeof(pkt->addr[0])*n;
915 
916 			/*
917 			 * Determine if this is tid-sdma or just sdma.
918 			 */
919 			tiddma = (((le32_to_cpu(pbc[7])>>
920 				QLOGIC_IB_I_TID_SHIFT)&
921 				QLOGIC_IB_I_TID_MASK) !=
922 				QLOGIC_IB_I_TID_MASK);
923 
924 			if (tiddma)
925 				tidsmsize = iov[idx].iov_len;
926 			else
927 				tidsmsize = 0;
928 
929 			pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
930 			if (!pkt) {
931 				ret = -ENOMEM;
932 				goto free_pbc;
933 			}
934 			pkt->largepkt = 1;
935 			pkt->frag_size = frag_size;
936 			pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
937 
938 			if (tiddma) {
939 				char *tidsm = (char *)pkt + pktsize;
940 
941 				cfur = copy_from_user(tidsm,
942 					iov[idx].iov_base, tidsmsize);
943 				if (cfur) {
944 					ret = -EFAULT;
945 					goto free_pkt;
946 				}
947 				pkt->tidsm =
948 					(struct qib_tid_session_member *)tidsm;
949 				pkt->tidsmcount = tidsmsize/
950 					sizeof(struct qib_tid_session_member);
951 				pkt->tidsmidx = 0;
952 				idx++;
953 			}
954 
955 			/*
956 			 * The pbc 'fill1' field is borrowed to pass the frag
957 			 * size; clear it once the frag size has been read,
958 			 * since the hardware requires this field to be zero.
959 			 */
960 			*pbc = cpu_to_le32(le32_to_cpu(*pbc) & 0x0000FFFF);
961 		} else {
962 			pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
963 			if (!pkt) {
964 				ret = -ENOMEM;
965 				goto free_pbc;
966 			}
967 			pkt->largepkt = 0;
968 			pkt->frag_size = bytes_togo;
969 			pkt->addrlimit = ARRAY_SIZE(pkt->addr);
970 		}
971 		pkt->bytes_togo = bytes_togo;
972 		pkt->payload_size = 0;
973 		pkt->counter = counter;
974 		pkt->tiddma = tiddma;
975 
976 		/* setup the first header */
977 		qib_user_sdma_init_frag(pkt, 0, /* index */
978 			0, len,		/* offset, len */
979 			1, 0,		/* first last desc */
980 			0, 0,		/* put page, dma mapped */
981 			NULL, pbc,	/* struct page, virt addr */
982 			dma_addr, len);	/* dma addr, dma length */
983 		pkt->index = 0;
984 		pkt->naddr = 1;
985 
986 		if (nfrags) {
987 			ret = qib_user_sdma_init_payload(dd, pq, pkt,
988 							 iov + idx_save + 1,
989 							 nfrags, npages);
990 			if (ret < 0)
991 				goto free_pkt;
992 		} else {
993 			/* since there is no payload, mark the
994 			 * header as the last desc. */
995 			pkt->addr[0].last_desc = 1;
996 
997 			if (dma_addr == 0) {
998 				/*
999 				 * the header is not dma mapped yet;
1000 				 * it must have come from kmalloc.
1001 				 */
1002 				dma_addr = dma_map_single(&dd->pcidev->dev,
1003 					pbc, len, DMA_TO_DEVICE);
1004 				if (dma_mapping_error(&dd->pcidev->dev,
1005 								dma_addr)) {
1006 					ret = -ENOMEM;
1007 					goto free_pkt;
1008 				}
1009 				pkt->addr[0].addr = dma_addr;
1010 				pkt->addr[0].dma_mapped = 1;
1011 			}
1012 		}
1013 
1014 		counter++;
1015 		npkts++;
1016 		pkt->pq = pq;
1017 		pkt->index = 0; /* reset index for push on hw */
1018 		*ndesc += pkt->naddr;
1019 
1020 		list_add_tail(&pkt->list, list);
1021 	}
1022 
1023 	*maxpkts = npkts;
1024 	ret = idx;
1025 	goto done;
1026 
1027 free_pkt:
1028 	if (pkt->largepkt)
1029 		kfree(pkt);
1030 	else
1031 		kmem_cache_free(pq->pkt_slab, pkt);
1032 free_pbc:
1033 	if (dma_addr)
1034 		dma_pool_free(pq->header_cache, pbc, dma_addr);
1035 	else
1036 		kfree(pbc);
1037 free_list:
1038 	qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
1039 done:
1040 	return ret;
1041 }
1042 
1043 static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
1044 					       u32 c)
1045 {
1046 	pq->sent_counter = c;
1047 }
1048 
1049 /* try to clean out queue -- needs pq->lock */
1050 static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
1051 				     struct qib_user_sdma_queue *pq)
1052 {
1053 	struct qib_devdata *dd = ppd->dd;
1054 	struct list_head free_list;
1055 	struct qib_user_sdma_pkt *pkt;
1056 	struct qib_user_sdma_pkt *pkt_prev;
1057 	unsigned long flags;
1058 	int ret = 0;
1059 
1060 	if (!pq->num_sending)
1061 		return 0;
1062 
1063 	INIT_LIST_HEAD(&free_list);
1064 
1065 	/*
1066 	 * We need this spin lock here because the interrupt handler
1067 	 * may modify this list in qib_user_sdma_send_desc(); we also
1068 	 * must not be interrupted while holding it, or we deadlock.
1069 	 */
1070 	spin_lock_irqsave(&pq->sent_lock, flags);
1071 	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
1072 		s64 descd = ppd->sdma_descq_removed - pkt->added;
1073 
1074 		if (descd < 0)
1075 			break;
1076 
1077 		list_move_tail(&pkt->list, &free_list);
1078 
1079 		/* one more packet cleaned */
1080 		ret++;
1081 		pq->num_sending--;
1082 	}
1083 	spin_unlock_irqrestore(&pq->sent_lock, flags);
1084 
1085 	if (!list_empty(&free_list)) {
1086 		u32 counter;
1087 
1088 		pkt = list_entry(free_list.prev,
1089 				 struct qib_user_sdma_pkt, list);
1090 		counter = pkt->counter;
1091 
1092 		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
1093 		qib_user_sdma_set_complete_counter(pq, counter);
1094 	}
1095 
1096 	return ret;
1097 }
1098 
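/*
 * Tear down a user SDMA queue: drop the reference on the per-pid
 * rb-tree node (freeing it when the count reaches zero) and destroy
 * the header dma_pool and packet slab cache.
 */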
1099 void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
1100 {
1101 	if (!pq)
1102 		return;
1103 
1104 	pq->sdma_rb_node->refcount--;
1105 	if (pq->sdma_rb_node->refcount == 0) {
1106 		rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
1107 		kfree(pq->sdma_rb_node);
1108 	}
1109 	dma_pool_destroy(pq->header_cache);
1110 	kmem_cache_destroy(pq->pkt_slab);
1111 	kfree(pq);
1112 }
1113 
1114 /* clean descriptor queue, returns > 0 if some elements cleaned */
1115 static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
1116 {
1117 	int ret;
1118 	unsigned long flags;
1119 
1120 	spin_lock_irqsave(&ppd->sdma_lock, flags);
1121 	ret = qib_sdma_make_progress(ppd);
1122 	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1123 
1124 	return ret;
1125 }
1126 
1127 /* we're in close; drain packets so that we can clean up successfully... */
1128 void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
1129 			       struct qib_user_sdma_queue *pq)
1130 {
1131 	struct qib_devdata *dd = ppd->dd;
1132 	unsigned long flags;
1133 	int i;
1134 
1135 	if (!pq)
1136 		return;
1137 
1138 	for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
1139 		mutex_lock(&pq->lock);
1140 		if (!pq->num_pending && !pq->num_sending) {
1141 			mutex_unlock(&pq->lock);
1142 			break;
1143 		}
1144 		qib_user_sdma_hwqueue_clean(ppd);
1145 		qib_user_sdma_queue_clean(ppd, pq);
1146 		mutex_unlock(&pq->lock);
1147 		msleep(20);
1148 	}
1149 
1150 	if (pq->num_pending || pq->num_sending) {
1151 		struct qib_user_sdma_pkt *pkt;
1152 		struct qib_user_sdma_pkt *pkt_prev;
1153 		struct list_head free_list;
1154 
1155 		mutex_lock(&pq->lock);
1156 		spin_lock_irqsave(&ppd->sdma_lock, flags);
1157 		/*
1158 		 * Since we hold sdma_lock, it is safe without sent_lock.
1159 		 */
1160 		if (pq->num_pending) {
1161 			list_for_each_entry_safe(pkt, pkt_prev,
1162 					&ppd->sdma_userpending, list) {
1163 				if (pkt->pq == pq) {
1164 					list_move_tail(&pkt->list, &pq->sent);
1165 					pq->num_pending--;
1166 					pq->num_sending++;
1167 				}
1168 			}
1169 		}
1170 		spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1171 
1172 		qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
1173 		INIT_LIST_HEAD(&free_list);
1174 		list_splice_init(&pq->sent, &free_list);
1175 		pq->num_sending = 0;
1176 		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
1177 		mutex_unlock(&pq->lock);
1178 	}
1179 }
1180 
1181 static inline __le64 qib_sdma_make_desc0(u8 gen,
1182 					 u64 addr, u64 dwlen, u64 dwoffset)
1183 {
1184 	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
1185 			   ((addr & 0xfffffffcULL) << 32) |
1186 			   /* SDmaGeneration[1:0] */
1187 			   ((gen & 3ULL) << 30) |
1188 			   /* SDmaDwordCount[10:0] */
1189 			   ((dwlen & 0x7ffULL) << 16) |
1190 			   /* SDmaBufOffset[12:2] */
1191 			   (dwoffset & 0x7ffULL));
1192 }
1193 
1194 static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
1195 {
1196 	return descq | cpu_to_le64(1ULL << 12);
1197 }
1198 
1199 static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
1200 {
1201 					      /* last */  /* dma head */
1202 	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
1203 }
1204 
1205 static inline __le64 qib_sdma_make_desc1(u64 addr)
1206 {
1207 	/* SDmaPhyAddr[47:32] */
1208 	return cpu_to_le64(addr >> 32);
1209 }
1210 
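/*
 * Build the two 64-bit descriptor words for fragment @idx of @pkt and
 * write them into the descriptor ring at position @tail.
 */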
1211 static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
1212 				    struct qib_user_sdma_pkt *pkt, int idx,
1213 				    unsigned ofs, u16 tail, u8 gen)
1214 {
1215 	const u64 addr = (u64) pkt->addr[idx].addr +
1216 		(u64) pkt->addr[idx].offset;
1217 	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
1218 	__le64 *descqp;
1219 	__le64 descq0;
1220 
1221 	descqp = &ppd->sdma_descq[tail].qw[0];
1222 
1223 	descq0 = qib_sdma_make_desc0(gen, addr, dwlen, ofs);
1224 	if (pkt->addr[idx].first_desc)
1225 		descq0 = qib_sdma_make_first_desc0(descq0);
1226 	if (pkt->addr[idx].last_desc) {
1227 		descq0 = qib_sdma_make_last_desc0(descq0);
1228 		if (ppd->sdma_intrequest) {
1229 			descq0 |= cpu_to_le64(1ULL << 15);
1230 			ppd->sdma_intrequest = 0;
1231 		}
1232 	}
1233 
1234 	descqp[0] = descq0;
1235 	descqp[1] = qib_sdma_make_desc1(addr);
1236 }
1237 
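/*
 * Move packets from pktlist onto the hardware descriptor ring until
 * the ring fills up or the list is drained; packets whose descriptors
 * have all been posted are moved to their queue's sent list.  The
 * caller is expected to hold ppd->sdma_lock.
 */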
1238 void qib_user_sdma_send_desc(struct qib_pportdata *ppd,
1239 				struct list_head *pktlist)
1240 {
1241 	struct qib_devdata *dd = ppd->dd;
1242 	u16 nfree, nsent;
1243 	u16 tail, tail_c;
1244 	u8 gen, gen_c;
1245 
1246 	nfree = qib_sdma_descq_freecnt(ppd);
1247 	if (!nfree)
1248 		return;
1249 
1250 retry:
1251 	nsent = 0;
1252 	tail_c = tail = ppd->sdma_descq_tail;
1253 	gen_c = gen = ppd->sdma_generation;
1254 	while (!list_empty(pktlist)) {
1255 		struct qib_user_sdma_pkt *pkt =
1256 			list_entry(pktlist->next, struct qib_user_sdma_pkt,
1257 				   list);
1258 		int i, j, c = 0;
1259 		unsigned ofs = 0;
1260 		u16 dtail = tail;
1261 
1262 		for (i = pkt->index; i < pkt->naddr && nfree; i++) {
1263 			qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen);
1264 			ofs += pkt->addr[i].length >> 2;
1265 
1266 			if (++tail == ppd->sdma_descq_cnt) {
1267 				tail = 0;
1268 				++gen;
1269 				ppd->sdma_intrequest = 1;
1270 			} else if (tail == (ppd->sdma_descq_cnt>>1)) {
1271 				ppd->sdma_intrequest = 1;
1272 			}
1273 			nfree--;
1274 			if (pkt->addr[i].last_desc == 0)
1275 				continue;
1276 
1277 			/*
1278 			 * If the packet is >= 2KB mtu equivalent, we
1279 			 * have to use the large buffers, and have to
1280 			 * mark each descriptor as part of a large
1281 			 * buffer packet.
1282 			 */
1283 			if (ofs > dd->piosize2kmax_dwords) {
1284 				for (j = pkt->index; j <= i; j++) {
1285 					ppd->sdma_descq[dtail].qw[0] |=
1286 						cpu_to_le64(1ULL << 14);
1287 					if (++dtail == ppd->sdma_descq_cnt)
1288 						dtail = 0;
1289 				}
1290 			}
1291 			c += i + 1 - pkt->index;
1292 			pkt->index = i + 1; /* index for next first */
1293 			tail_c = dtail = tail;
1294 			gen_c = gen;
1295 			ofs = 0;  /* reset for next packet */
1296 		}
1297 
1298 		ppd->sdma_descq_added += c;
1299 		nsent += c;
1300 		if (pkt->index == pkt->naddr) {
1301 			pkt->added = ppd->sdma_descq_added;
1302 			pkt->pq->added = pkt->added;
1303 			pkt->pq->num_pending--;
1304 			spin_lock(&pkt->pq->sent_lock);
1305 			pkt->pq->num_sending++;
1306 			list_move_tail(&pkt->list, &pkt->pq->sent);
1307 			spin_unlock(&pkt->pq->sent_lock);
1308 		}
1309 		if (!nfree || (nsent<<2) > ppd->sdma_descq_cnt)
1310 			break;
1311 	}
1312 
1313 	/* advance the tail on the chip if necessary */
1314 	if (ppd->sdma_descq_tail != tail_c) {
1315 		ppd->sdma_generation = gen_c;
1316 		dd->f_sdma_update_tail(ppd, tail_c);
1317 	}
1318 
1319 	if (nfree && !list_empty(pktlist))
1320 		goto retry;
1321 }
1322 
1323 /* pq->lock must be held, get packets on the wire... */
1324 static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
1325 				 struct qib_user_sdma_queue *pq,
1326 				 struct list_head *pktlist, int count)
1327 {
1328 	unsigned long flags;
1329 
1330 	if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
1331 		return -ECOMM;
1332 
1333 	/* non-blocking mode */
1334 	if (pq->sdma_rb_node->refcount > 1) {
1335 		spin_lock_irqsave(&ppd->sdma_lock, flags);
1336 		if (unlikely(!__qib_sdma_running(ppd))) {
1337 			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1338 			return -ECOMM;
1339 		}
1340 		pq->num_pending += count;
1341 		list_splice_tail_init(pktlist, &ppd->sdma_userpending);
1342 		qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
1343 		spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1344 		return 0;
1345 	}
1346 
1347 	/* In this case, descriptors from this process are not
1348 	 * linked onto the ppd pending queue and the interrupt handler
1349 	 * will not touch this queue, so it is OK to update the queue
1350 	 * counters directly without the sdma lock.
1351 	 */
1352 
1353 
1354 	pq->num_pending += count;
1355 	/*
1356 	 * Blocking mode for a single-rail process: we must
1357 	 * release/regain sdma_lock to give other processes a
1358 	 * chance to make progress.  This is important for
1359 	 * performance.
1360 	 */
1361 	do {
1362 		spin_lock_irqsave(&ppd->sdma_lock, flags);
1363 		if (unlikely(!__qib_sdma_running(ppd))) {
1364 			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1365 			return -ECOMM;
1366 		}
1367 		qib_user_sdma_send_desc(ppd, pktlist);
1368 		if (!list_empty(pktlist))
1369 			qib_sdma_make_progress(ppd);
1370 		spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1371 	} while (!list_empty(pktlist));
1372 
1373 	return 0;
1374 }
1375 
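/*
 * Entry point from the userspace write path: parse the iovec array
 * into sdma packets and push them onto the hardware queue.  Returns
 * the number of packets queued, or a negative errno.
 */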
1376 int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
1377 			 struct qib_user_sdma_queue *pq,
1378 			 const struct iovec *iov,
1379 			 unsigned long dim)
1380 {
1381 	struct qib_devdata *dd = rcd->dd;
1382 	struct qib_pportdata *ppd = rcd->ppd;
1383 	int ret = 0;
1384 	struct list_head list;
1385 	int npkts = 0;
1386 
1387 	INIT_LIST_HEAD(&list);
1388 
1389 	mutex_lock(&pq->lock);
1390 
1391 	/* why not -ECOMM like qib_user_sdma_push_pkts() below? */
1392 	if (!qib_sdma_running(ppd))
1393 		goto done_unlock;
1394 
1395 	/* if I have packets not complete yet */
1396 	if (pq->added > ppd->sdma_descq_removed)
1397 		qib_user_sdma_hwqueue_clean(ppd);
1398 	/* if I have complete packets to be freed */
1399 	if (pq->num_sending)
1400 		qib_user_sdma_queue_clean(ppd, pq);
1401 
1402 	while (dim) {
1403 		int mxp = 1;
1404 		int ndesc = 0;
1405 
1406 		ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
1407 				iov, dim, &list, &mxp, &ndesc);
1408 		if (ret < 0)
1409 			goto done_unlock;
1410 		else {
1411 			dim -= ret;
1412 			iov += ret;
1413 		}
1414 
1415 		/* force packets onto the sdma hw queue... */
1416 		if (!list_empty(&list)) {
1417 			/*
1418 			 * Lazily clean hw queue.
1419 			 */
1420 			if (qib_sdma_descq_freecnt(ppd) < ndesc) {
1421 				qib_user_sdma_hwqueue_clean(ppd);
1422 				if (pq->num_sending)
1423 					qib_user_sdma_queue_clean(ppd, pq);
1424 			}
1425 
1426 			ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp);
1427 			if (ret < 0)
1428 				goto done_unlock;
1429 			else {
1430 				npkts += mxp;
1431 				pq->counter += mxp;
1432 			}
1433 		}
1434 	}
1435 
1436 done_unlock:
1437 	if (!list_empty(&list))
1438 		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
1439 	mutex_unlock(&pq->lock);
1440 
1441 	return (ret < 0) ? ret : npkts;
1442 }
1443 
1444 int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
1445 				struct qib_user_sdma_queue *pq)
1446 {
1447 	int ret = 0;
1448 
1449 	mutex_lock(&pq->lock);
1450 	qib_user_sdma_hwqueue_clean(ppd);
1451 	ret = qib_user_sdma_queue_clean(ppd, pq);
1452 	mutex_unlock(&pq->lock);
1453 
1454 	return ret;
1455 }
1456 
1457 u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
1458 {
1459 	return pq ? pq->sent_counter : 0;
1460 }
1461 
1462 u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
1463 {
1464 	return pq ? pq->counter : 0;
1465 }
1466