1 /*
2  * Copyright (c) 2012 Intel Corporation. All rights reserved.
3  * Copyright (c) 2007 - 2012 QLogic Corporation. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/spinlock.h>
35 #include <linux/netdevice.h>
36 #include <linux/moduleparam.h>
37 
38 #include "qib.h"
39 #include "qib_common.h"
40 
41 /* default pio off, sdma on */
42 static ushort sdma_descq_cnt = 256;
43 module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
44 MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
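/*
 * Example usage (a sketch, assuming the driver is loaded as the ib_qib
 * module; the parameter is read-only after load):
 *   modprobe ib_qib sdma_descq_cnt=1024
 */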
45 
46 /*
47  * Bits defined in the send DMA descriptor.
48  */
49 #define SDMA_DESC_LAST          (1ULL << 11)
50 #define SDMA_DESC_FIRST         (1ULL << 12)
51 #define SDMA_DESC_DMA_HEAD      (1ULL << 13)
52 #define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
53 #define SDMA_DESC_INTR          (1ULL << 15)
54 #define SDMA_DESC_COUNT_LSB     16
55 #define SDMA_DESC_GEN_LSB       30
56 
57 char *qib_sdma_state_names[] = {
58 	[qib_sdma_state_s00_hw_down]          = "s00_HwDown",
59 	[qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
60 	[qib_sdma_state_s20_idle]             = "s20_Idle",
61 	[qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
62 	[qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
63 	[qib_sdma_state_s50_hw_halt_wait]     = "s50_HwHaltWait",
64 	[qib_sdma_state_s99_running]          = "s99_Running",
65 };
66 
67 char *qib_sdma_event_names[] = {
68 	[qib_sdma_event_e00_go_hw_down]   = "e00_GoHwDown",
69 	[qib_sdma_event_e10_go_hw_start]  = "e10_GoHwStart",
70 	[qib_sdma_event_e20_hw_started]   = "e20_HwStarted",
71 	[qib_sdma_event_e30_go_running]   = "e30_GoRunning",
72 	[qib_sdma_event_e40_sw_cleaned]   = "e40_SwCleaned",
73 	[qib_sdma_event_e50_hw_cleaned]   = "e50_HwCleaned",
74 	[qib_sdma_event_e60_hw_halted]    = "e60_HwHalted",
75 	[qib_sdma_event_e70_go_idle]      = "e70_GoIdle",
76 	[qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
77 	[qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
78 	[qib_sdma_event_e90_timer_tick]   = "e90_TimerTick",
79 };
80 
81 /* declare all statics here rather than keep sorting */
82 static int alloc_sdma(struct qib_pportdata *);
83 static void sdma_complete(struct kref *);
84 static void sdma_finalput(struct qib_sdma_state *);
85 static void sdma_get(struct qib_sdma_state *);
86 static void sdma_put(struct qib_sdma_state *);
87 static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
88 static void sdma_start_sw_clean_up(struct qib_pportdata *);
89 static void sdma_sw_clean_up_task(unsigned long);
90 static void unmap_desc(struct qib_pportdata *, unsigned);
91 
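/*
 * Reference counting on the sdma state: the state machine takes a
 * reference via sdma_get() when e10_go_hw_start starts it and drops it
 * in sdma_sw_tear_down().  sdma_finalput() releases the initial
 * kref_init() reference and waits for the completion, so teardown does
 * not return while the state machine is still active.
 */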
92 static void sdma_get(struct qib_sdma_state *ss)
93 {
94 	kref_get(&ss->kref);
95 }
96 
97 static void sdma_complete(struct kref *kref)
98 {
99 	struct qib_sdma_state *ss =
100 		container_of(kref, struct qib_sdma_state, kref);
101 
102 	complete(&ss->comp);
103 }
104 
105 static void sdma_put(struct qib_sdma_state *ss)
106 {
107 	kref_put(&ss->kref, sdma_complete);
108 }
109 
110 static void sdma_finalput(struct qib_sdma_state *ss)
111 {
112 	sdma_put(ss);
113 	wait_for_completion(&ss->comp);
114 }
115 
/*
 * Complete all the sdma requests on the active list, in the correct
 * order, and with appropriate processing.  Called when cleaning up
 * after sdma shutdown, and when new sdma requests are submitted for
 * a link that is down.  This matches what is done for requests that
 * complete normally; it is just the full list.
 *
 * Must be called with sdma_lock held.
 */
125 static void clear_sdma_activelist(struct qib_pportdata *ppd)
126 {
127 	struct qib_sdma_txreq *txp, *txp_next;
128 
129 	list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
130 		list_del_init(&txp->list);
131 		if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
132 			unsigned idx;
133 
134 			idx = txp->start_idx;
135 			while (idx != txp->next_descq_idx) {
136 				unmap_desc(ppd, idx);
137 				if (++idx == ppd->sdma_descq_cnt)
138 					idx = 0;
139 			}
140 		}
141 		if (txp->callback)
142 			(*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
143 	}
144 }
145 
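/*
 * Tasklet scheduled by sdma_start_sw_clean_up() once the engine is
 * halted: retire any descriptors the hardware already processed, abort
 * everything left on the active list, reset the software head/tail and
 * generation, then feed e40_sw_cleaned back into the state machine.
 */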
146 static void sdma_sw_clean_up_task(unsigned long opaque)
147 {
148 	struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
149 	unsigned long flags;
150 
151 	spin_lock_irqsave(&ppd->sdma_lock, flags);
152 
153 	/*
154 	 * At this point, the following should always be true:
155 	 * - We are halted, so no more descriptors are getting retired.
156 	 * - We are not running, so no one is submitting new work.
157 	 * - Only we can send the e40_sw_cleaned, so we can't start
158 	 *   running again until we say so.  So, the active list and
159 	 *   descq are ours to play with.
160 	 */
161 
162 	/* Process all retired requests. */
163 	qib_sdma_make_progress(ppd);
164 
165 	clear_sdma_activelist(ppd);
166 
167 	/*
168 	 * Resync count of added and removed.  It is VERY important that
169 	 * sdma_descq_removed NEVER decrement - user_sdma depends on it.
170 	 */
171 	ppd->sdma_descq_removed = ppd->sdma_descq_added;
172 
173 	/*
174 	 * Reset our notion of head and tail.
175 	 * Note that the HW registers will be reset when switching states
176 	 * due to calling __qib_sdma_process_event() below.
177 	 */
178 	ppd->sdma_descq_tail = 0;
179 	ppd->sdma_descq_head = 0;
180 	ppd->sdma_head_dma[0] = 0;
181 	ppd->sdma_generation = 0;
182 
183 	__qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);
184 
185 	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
186 }
187 
188 /*
189  * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
190  * as a result of send buffer errors or send DMA descriptor errors.
191  * We want to disarm the buffers in these cases.
192  */
193 static void sdma_hw_start_up(struct qib_pportdata *ppd)
194 {
195 	struct qib_sdma_state *ss = &ppd->sdma_state;
196 	unsigned bufno;
197 
198 	for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
199 		ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));
200 
201 	ppd->dd->f_sdma_hw_start_up(ppd);
202 }
203 
204 static void sdma_sw_tear_down(struct qib_pportdata *ppd)
205 {
206 	struct qib_sdma_state *ss = &ppd->sdma_state;
207 
208 	/* Releasing this reference means the state machine has stopped. */
209 	sdma_put(ss);
210 }
211 
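/* Defer the software clean up to the sdma_sw_clean_up_task tasklet. */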
212 static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
213 {
214 	tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
215 }
216 
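/*
 * Record the new state and apply its entry in the set_state_action
 * table: build the QIB_SDMA_SENDCTRL_OP_* mask, adjust go_s99_running,
 * and push the result to the chip-specific f_sdma_sendctrl() hook.
 */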
217 static void sdma_set_state(struct qib_pportdata *ppd,
218 	enum qib_sdma_states next_state)
219 {
220 	struct qib_sdma_state *ss = &ppd->sdma_state;
221 	struct sdma_set_state_action *action = ss->set_state_action;
222 	unsigned op = 0;
223 
224 	/* debugging bookkeeping */
225 	ss->previous_state = ss->current_state;
226 	ss->previous_op = ss->current_op;
227 
228 	ss->current_state = next_state;
229 
230 	if (action[next_state].op_enable)
231 		op |= QIB_SDMA_SENDCTRL_OP_ENABLE;
232 
233 	if (action[next_state].op_intenable)
234 		op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;
235 
236 	if (action[next_state].op_halt)
237 		op |= QIB_SDMA_SENDCTRL_OP_HALT;
238 
239 	if (action[next_state].op_drain)
240 		op |= QIB_SDMA_SENDCTRL_OP_DRAIN;
241 
242 	if (action[next_state].go_s99_running_tofalse)
243 		ss->go_s99_running = 0;
244 
245 	if (action[next_state].go_s99_running_totrue)
246 		ss->go_s99_running = 1;
247 
248 	ss->current_op = op;
249 
250 	ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
251 }
252 
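/*
 * Recover the DMA address and byte length packed into a descriptor by
 * make_sdma_desc() and unmap the corresponding buffer.
 */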
253 static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
254 {
255 	__le64 *descqp = &ppd->sdma_descq[head].qw[0];
256 	u64 desc[2];
257 	dma_addr_t addr;
258 	size_t len;
259 
260 	desc[0] = le64_to_cpu(descqp[0]);
261 	desc[1] = le64_to_cpu(descqp[1]);
262 
	addr = (desc[1] << 32) | ((desc[0] >> 32) & 0xfffffffcULL);
264 	len = (desc[0] >> 14) & (0x7ffULL << 2);
265 	dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
266 }
267 
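/*
 * Allocate the descriptor ring (two u64s per entry) and the page the
 * hardware uses to DMA its head index back to host memory.
 */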
268 static int alloc_sdma(struct qib_pportdata *ppd)
269 {
270 	ppd->sdma_descq_cnt = sdma_descq_cnt;
271 	if (!ppd->sdma_descq_cnt)
272 		ppd->sdma_descq_cnt = 256;
273 
274 	/* Allocate memory for SendDMA descriptor FIFO */
275 	ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
276 		ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
277 		GFP_KERNEL);
278 
279 	if (!ppd->sdma_descq) {
280 		qib_dev_err(ppd->dd,
281 			"failed to allocate SendDMA descriptor FIFO memory\n");
282 		goto bail;
283 	}
284 
285 	/* Allocate memory for DMA of head register to memory */
286 	ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
287 		PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
288 	if (!ppd->sdma_head_dma) {
289 		qib_dev_err(ppd->dd,
290 			"failed to allocate SendDMA head memory\n");
291 		goto cleanup_descq;
292 	}
293 	ppd->sdma_head_dma[0] = 0;
294 	return 0;
295 
296 cleanup_descq:
297 	dma_free_coherent(&ppd->dd->pcidev->dev,
298 		ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
299 		ppd->sdma_descq_phys);
300 	ppd->sdma_descq = NULL;
301 	ppd->sdma_descq_phys = 0;
302 bail:
303 	ppd->sdma_descq_cnt = 0;
304 	return -ENOMEM;
305 }
306 
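/* Free the coherent memory allocated by alloc_sdma(). */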
307 static void free_sdma(struct qib_pportdata *ppd)
308 {
309 	struct qib_devdata *dd = ppd->dd;
310 
311 	if (ppd->sdma_head_dma) {
312 		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
313 				  (void *)ppd->sdma_head_dma,
314 				  ppd->sdma_head_phys);
315 		ppd->sdma_head_dma = NULL;
316 		ppd->sdma_head_phys = 0;
317 	}
318 
319 	if (ppd->sdma_descq) {
320 		dma_free_coherent(&dd->pcidev->dev,
321 				  ppd->sdma_descq_cnt * sizeof(u64[2]),
322 				  ppd->sdma_descq, ppd->sdma_descq_phys);
323 		ppd->sdma_descq = NULL;
324 		ppd->sdma_descq_phys = 0;
325 	}
326 }
327 
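/*
 * Pack a send DMA descriptor: physical address, generation, dword count
 * and dword buffer offset, split across the two 64-bit words as the
 * hardware expects.  The FIRST/LAST/INTR etc. flag bits are OR-ed in by
 * the caller.
 */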
328 static inline void make_sdma_desc(struct qib_pportdata *ppd,
329 				  u64 *sdmadesc, u64 addr, u64 dwlen,
330 				  u64 dwoffset)
331 {
332 
333 	WARN_ON(addr & 3);
334 	/* SDmaPhyAddr[47:32] */
335 	sdmadesc[1] = addr >> 32;
336 	/* SDmaPhyAddr[31:0] */
337 	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
338 	/* SDmaGeneration[1:0] */
339 	sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
340 		SDMA_DESC_GEN_LSB;
341 	/* SDmaDwordCount[10:0] */
342 	sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
343 	/* SDmaBufOffset[12:2] */
344 	sdmadesc[0] |= dwoffset & 0x7ffULL;
345 }
346 
347 /* sdma_lock must be held */
348 int qib_sdma_make_progress(struct qib_pportdata *ppd)
349 {
350 	struct list_head *lp = NULL;
351 	struct qib_sdma_txreq *txp = NULL;
352 	struct qib_devdata *dd = ppd->dd;
353 	int progress = 0;
354 	u16 hwhead;
355 	u16 idx = 0;
356 
357 	hwhead = dd->f_sdma_gethead(ppd);
358 
	/*
	 * The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */
364 
365 	if (!list_empty(&ppd->sdma_activelist)) {
366 		lp = ppd->sdma_activelist.next;
367 		txp = list_entry(lp, struct qib_sdma_txreq, list);
368 		idx = txp->start_idx;
369 	}
370 
371 	while (ppd->sdma_descq_head != hwhead) {
372 		/* if desc is part of this txp, unmap if needed */
373 		if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
374 		    (idx == ppd->sdma_descq_head)) {
375 			unmap_desc(ppd, ppd->sdma_descq_head);
376 			if (++idx == ppd->sdma_descq_cnt)
377 				idx = 0;
378 		}
379 
		/* increment dequeued desc count */
381 		ppd->sdma_descq_removed++;
382 
383 		/* advance head, wrap if needed */
384 		if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
385 			ppd->sdma_descq_head = 0;
386 
387 		/* if now past this txp's descs, do the callback */
388 		if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
389 			/* remove from active list */
390 			list_del_init(&txp->list);
391 			if (txp->callback)
392 				(*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
393 			/* see if there is another txp */
394 			if (list_empty(&ppd->sdma_activelist))
395 				txp = NULL;
396 			else {
397 				lp = ppd->sdma_activelist.next;
398 				txp = list_entry(lp, struct qib_sdma_txreq,
399 					list);
400 				idx = txp->start_idx;
401 			}
402 		}
403 		progress = 1;
404 	}
405 	if (progress)
406 		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
407 	return progress;
408 }
409 
410 /*
411  * This is called from interrupt context.
412  */
413 void qib_sdma_intr(struct qib_pportdata *ppd)
414 {
415 	unsigned long flags;
416 
417 	spin_lock_irqsave(&ppd->sdma_lock, flags);
418 
419 	__qib_sdma_intr(ppd);
420 
421 	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
422 }
423 
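/* Interrupt handling work; caller must already hold sdma_lock. */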
424 void __qib_sdma_intr(struct qib_pportdata *ppd)
425 {
426 	if (__qib_sdma_running(ppd)) {
427 		qib_sdma_make_progress(ppd);
428 		if (!list_empty(&ppd->sdma_userpending))
429 			qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
430 	}
431 }
432 
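/*
 * Per-port send DMA initialization: allocate the descriptor queue, put
 * the state machine in s00_hw_down, set up the reference count and
 * lists, program the chip registers, and then issue e10_go_hw_start to
 * bring the engine up.
 */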
433 int qib_setup_sdma(struct qib_pportdata *ppd)
434 {
435 	struct qib_devdata *dd = ppd->dd;
436 	unsigned long flags;
437 	int ret = 0;
438 
439 	ret = alloc_sdma(ppd);
440 	if (ret)
441 		goto bail;
442 
443 	/* set consistent sdma state */
444 	ppd->dd->f_sdma_init_early(ppd);
445 	spin_lock_irqsave(&ppd->sdma_lock, flags);
446 	sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
447 	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
448 
449 	/* set up reference counting */
450 	kref_init(&ppd->sdma_state.kref);
451 	init_completion(&ppd->sdma_state.comp);
452 
453 	ppd->sdma_generation = 0;
454 	ppd->sdma_descq_head = 0;
455 	ppd->sdma_descq_removed = 0;
456 	ppd->sdma_descq_added = 0;
457 
458 	ppd->sdma_intrequest = 0;
459 	INIT_LIST_HEAD(&ppd->sdma_userpending);
460 
461 	INIT_LIST_HEAD(&ppd->sdma_activelist);
462 
463 	tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
464 		(unsigned long)ppd);
465 
466 	ret = dd->f_init_sdma_regs(ppd);
467 	if (ret)
468 		goto bail_alloc;
469 
470 	qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);
471 
472 	return 0;
473 
474 bail_alloc:
475 	qib_teardown_sdma(ppd);
476 bail:
477 	return ret;
478 }
479 
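/*
 * Shut the state machine down (e00_go_hw_down), wait for it to quiesce
 * via sdma_finalput(), then release the DMA memory.
 */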
480 void qib_teardown_sdma(struct qib_pportdata *ppd)
481 {
482 	qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
483 
484 	/*
485 	 * This waits for the state machine to exit so it is not
486 	 * necessary to kill the sdma_sw_clean_up_task to make sure
487 	 * it is not running.
488 	 */
489 	sdma_finalput(&ppd->sdma_state);
490 
491 	free_sdma(ppd);
492 }
493 
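/* Locked wrapper around __qib_sdma_running() for callers not holding sdma_lock. */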
494 int qib_sdma_running(struct qib_pportdata *ppd)
495 {
496 	unsigned long flags;
497 	int ret;
498 
499 	spin_lock_irqsave(&ppd->sdma_lock, flags);
500 	ret = __qib_sdma_running(ppd);
501 	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
502 
503 	return ret;
504 }
505 
/*
 * Complete a request when sdma is not running; there is likely only one
 * request, but to simplify the code, always queue it, then process the
 * full activelist.  We process the entire list to ensure that this
 * particular request does get its callback, but in the correct order.
 *
 * Must be called with sdma_lock held.
 */
513 static void complete_sdma_err_req(struct qib_pportdata *ppd,
514 				  struct qib_verbs_txreq *tx)
515 {
516 	struct qib_qp_priv *priv = tx->qp->priv;
517 
518 	atomic_inc(&priv->s_dma_busy);
519 	/* no sdma descriptors, so no unmap_desc */
520 	tx->txreq.start_idx = 0;
521 	tx->txreq.next_descq_idx = 0;
522 	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
523 	clear_sdma_activelist(ppd);
524 }
525 
526 /*
527  * This function queues one IB packet onto the send DMA queue per call.
528  * The caller is responsible for checking:
529  * 1) The number of send DMA descriptor entries is less than the size of
530  *    the descriptor queue.
531  * 2) The IB SGE addresses and lengths are 32-bit aligned
532  *    (except possibly the last SGE's length)
533  * 3) The SGE addresses are suitable for passing to dma_map_single().
534  */
535 int qib_sdma_verbs_send(struct qib_pportdata *ppd,
536 			struct rvt_sge_state *ss, u32 dwords,
537 			struct qib_verbs_txreq *tx)
538 {
539 	unsigned long flags;
540 	struct rvt_sge *sge;
541 	struct rvt_qp *qp;
542 	int ret = 0;
543 	u16 tail;
544 	__le64 *descqp;
545 	u64 sdmadesc[2];
546 	u32 dwoffset;
547 	dma_addr_t addr;
548 	struct qib_qp_priv *priv;
549 
550 	spin_lock_irqsave(&ppd->sdma_lock, flags);
551 
552 retry:
553 	if (unlikely(!__qib_sdma_running(ppd))) {
554 		complete_sdma_err_req(ppd, tx);
555 		goto unlock;
556 	}
557 
558 	if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
559 		if (qib_sdma_make_progress(ppd))
560 			goto retry;
561 		if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
562 			ppd->dd->f_sdma_set_desc_cnt(ppd,
563 					ppd->sdma_descq_cnt / 2);
564 		goto busy;
565 	}
566 
567 	dwoffset = tx->hdr_dwords;
568 	make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);
569 
570 	sdmadesc[0] |= SDMA_DESC_FIRST;
571 	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
572 		sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
573 
574 	/* write to the descq */
575 	tail = ppd->sdma_descq_tail;
576 	descqp = &ppd->sdma_descq[tail].qw[0];
577 	*descqp++ = cpu_to_le64(sdmadesc[0]);
578 	*descqp++ = cpu_to_le64(sdmadesc[1]);
579 
580 	/* increment the tail */
581 	if (++tail == ppd->sdma_descq_cnt) {
582 		tail = 0;
583 		descqp = &ppd->sdma_descq[0].qw[0];
584 		++ppd->sdma_generation;
585 	}
586 
587 	tx->txreq.start_idx = tail;
588 
589 	sge = &ss->sge;
590 	while (dwords) {
591 		u32 dw;
592 		u32 len;
593 
594 		len = dwords << 2;
595 		if (len > sge->length)
596 			len = sge->length;
597 		if (len > sge->sge_length)
598 			len = sge->sge_length;
599 		BUG_ON(len == 0);
600 		dw = (len + 3) >> 2;
601 		addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
602 				      dw << 2, DMA_TO_DEVICE);
603 		if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
604 			goto unmap;
605 		sdmadesc[0] = 0;
606 		make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
607 		/* SDmaUseLargeBuf has to be set in every descriptor */
608 		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
609 			sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
610 		/* write to the descq */
611 		*descqp++ = cpu_to_le64(sdmadesc[0]);
612 		*descqp++ = cpu_to_le64(sdmadesc[1]);
613 
614 		/* increment the tail */
615 		if (++tail == ppd->sdma_descq_cnt) {
616 			tail = 0;
617 			descqp = &ppd->sdma_descq[0].qw[0];
618 			++ppd->sdma_generation;
619 		}
620 		sge->vaddr += len;
621 		sge->length -= len;
622 		sge->sge_length -= len;
623 		if (sge->sge_length == 0) {
624 			if (--ss->num_sge)
625 				*sge = *ss->sg_list++;
626 		} else if (sge->length == 0 && sge->mr->lkey) {
627 			if (++sge->n >= RVT_SEGSZ) {
628 				if (++sge->m >= sge->mr->mapsz)
629 					break;
630 				sge->n = 0;
631 			}
632 			sge->vaddr =
633 				sge->mr->map[sge->m]->segs[sge->n].vaddr;
634 			sge->length =
635 				sge->mr->map[sge->m]->segs[sge->n].length;
636 		}
637 
638 		dwoffset += dw;
639 		dwords -= dw;
640 	}
641 
642 	if (!tail)
643 		descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
644 	descqp -= 2;
645 	descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
646 	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
647 		descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
648 	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
649 		descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
650 	priv = tx->qp->priv;
651 	atomic_inc(&priv->s_dma_busy);
652 	tx->txreq.next_descq_idx = tail;
653 	ppd->dd->f_sdma_update_tail(ppd, tail);
654 	ppd->sdma_descq_added += tx->txreq.sg_count;
655 	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
656 	goto unlock;
657 
658 unmap:
659 	for (;;) {
660 		if (!tail)
661 			tail = ppd->sdma_descq_cnt - 1;
662 		else
663 			tail--;
664 		if (tail == ppd->sdma_descq_tail)
665 			break;
666 		unmap_desc(ppd, tail);
667 	}
668 	qp = tx->qp;
669 	priv = qp->priv;
670 	qib_put_txreq(tx);
671 	spin_lock(&qp->r_lock);
672 	spin_lock(&qp->s_lock);
673 	if (qp->ibqp.qp_type == IB_QPT_RC) {
674 		/* XXX what about error sending RDMA read responses? */
675 		if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)
676 			rvt_error_qp(qp, IB_WC_GENERAL_ERR);
677 	} else if (qp->s_wqe)
678 		qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
679 	spin_unlock(&qp->s_lock);
680 	spin_unlock(&qp->r_lock);
681 	/* return zero to process the next send work request */
682 	goto unlock;
683 
684 busy:
685 	qp = tx->qp;
686 	priv = qp->priv;
687 	spin_lock(&qp->s_lock);
688 	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
689 		struct qib_ibdev *dev;
690 
691 		/*
692 		 * If we couldn't queue the DMA request, save the info
693 		 * and try again later rather than destroying the
694 		 * buffer and undoing the side effects of the copy.
695 		 */
696 		tx->ss = ss;
697 		tx->dwords = dwords;
698 		priv->s_tx = tx;
699 		dev = &ppd->dd->verbs_dev;
700 		spin_lock(&dev->rdi.pending_lock);
701 		if (list_empty(&priv->iowait)) {
702 			struct qib_ibport *ibp;
703 
704 			ibp = &ppd->ibport_data;
705 			ibp->rvp.n_dmawait++;
706 			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
707 			list_add_tail(&priv->iowait, &dev->dmawait);
708 		}
709 		spin_unlock(&dev->rdi.pending_lock);
710 		qp->s_flags &= ~RVT_S_BUSY;
711 		spin_unlock(&qp->s_lock);
712 		ret = -EBUSY;
713 	} else {
714 		spin_unlock(&qp->s_lock);
715 		qib_put_txreq(tx);
716 	}
717 unlock:
718 	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
719 	return ret;
720 }
721 
722 /*
723  * sdma_lock should be acquired before calling this routine
724  */
725 void dump_sdma_state(struct qib_pportdata *ppd)
726 {
727 	struct qib_sdma_desc *descq;
728 	struct qib_sdma_txreq *txp, *txpnext;
729 	__le64 *descqp;
730 	u64 desc[2];
731 	u64 addr;
732 	u16 gen, dwlen, dwoffset;
733 	u16 head, tail, cnt;
734 
735 	head = ppd->sdma_descq_head;
736 	tail = ppd->sdma_descq_tail;
737 	cnt = qib_sdma_descq_freecnt(ppd);
738 	descq = ppd->sdma_descq;
739 
740 	qib_dev_porterr(ppd->dd, ppd->port,
741 		"SDMA ppd->sdma_descq_head: %u\n", head);
742 	qib_dev_porterr(ppd->dd, ppd->port,
743 		"SDMA ppd->sdma_descq_tail: %u\n", tail);
744 	qib_dev_porterr(ppd->dd, ppd->port,
745 		"SDMA sdma_descq_freecnt: %u\n", cnt);
746 
747 	/* print info for each entry in the descriptor queue */
748 	while (head != tail) {
749 		char flags[6] = { 'x', 'x', 'x', 'x', 'x', 0 };
750 
751 		descqp = &descq[head].qw[0];
752 		desc[0] = le64_to_cpu(descqp[0]);
753 		desc[1] = le64_to_cpu(descqp[1]);
754 		flags[0] = (desc[0] & 1<<15) ? 'I' : '-';
755 		flags[1] = (desc[0] & 1<<14) ? 'L' : 'S';
756 		flags[2] = (desc[0] & 1<<13) ? 'H' : '-';
757 		flags[3] = (desc[0] & 1<<12) ? 'F' : '-';
758 		flags[4] = (desc[0] & 1<<11) ? 'L' : '-';
759 		addr = (desc[1] << 32) | ((desc[0] >> 32) & 0xfffffffcULL);
760 		gen = (desc[0] >> 30) & 3ULL;
761 		dwlen = (desc[0] >> 14) & (0x7ffULL << 2);
762 		dwoffset = (desc[0] & 0x7ffULL) << 2;
763 		qib_dev_porterr(ppd->dd, ppd->port,
764 			"SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes offset:%u bytes\n",
765 			 head, flags, addr, gen, dwlen, dwoffset);
766 		if (++head == ppd->sdma_descq_cnt)
767 			head = 0;
768 	}
769 
770 	/* print dma descriptor indices from the TX requests */
771 	list_for_each_entry_safe(txp, txpnext, &ppd->sdma_activelist,
772 				 list)
773 		qib_dev_porterr(ppd->dd, ppd->port,
774 			"SDMA txp->start_idx: %u txp->next_descq_idx: %u\n",
775 			txp->start_idx, txp->next_descq_idx);
776 }
777 
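/*
 * Locked wrapper for __qib_sdma_process_event(); if the resulting state
 * is s99_running, also tell the verbs layer how many descriptors are
 * free.
 */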
778 void qib_sdma_process_event(struct qib_pportdata *ppd,
779 	enum qib_sdma_events event)
780 {
781 	unsigned long flags;
782 
783 	spin_lock_irqsave(&ppd->sdma_lock, flags);
784 
785 	__qib_sdma_process_event(ppd, event);
786 
787 	if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
788 		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
789 
790 	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
791 }
792 
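/*
 * The core of the state machine; caller must hold sdma_lock.  Events
 * that a given state does not act on are accepted and ignored.
 */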
793 void __qib_sdma_process_event(struct qib_pportdata *ppd,
794 	enum qib_sdma_events event)
795 {
796 	struct qib_sdma_state *ss = &ppd->sdma_state;
797 
798 	switch (ss->current_state) {
799 	case qib_sdma_state_s00_hw_down:
800 		switch (event) {
801 		case qib_sdma_event_e00_go_hw_down:
802 			break;
803 		case qib_sdma_event_e30_go_running:
			/*
			 * If down, but running requested (usually a result
			 * of link up), then we need to start up.
			 * This can happen when hw down is requested while
			 * bringing the link up with traffic active on
			 * the 7220, e.g.
			 */
810 			ss->go_s99_running = 1;
811 			/* fall through and start dma engine */
812 		case qib_sdma_event_e10_go_hw_start:
813 			/* This reference means the state machine is started */
814 			sdma_get(&ppd->sdma_state);
815 			sdma_set_state(ppd,
816 				       qib_sdma_state_s10_hw_start_up_wait);
817 			break;
818 		case qib_sdma_event_e20_hw_started:
819 			break;
820 		case qib_sdma_event_e40_sw_cleaned:
821 			sdma_sw_tear_down(ppd);
822 			break;
823 		case qib_sdma_event_e50_hw_cleaned:
824 			break;
825 		case qib_sdma_event_e60_hw_halted:
826 			break;
827 		case qib_sdma_event_e70_go_idle:
828 			break;
829 		case qib_sdma_event_e7220_err_halted:
830 			break;
831 		case qib_sdma_event_e7322_err_halted:
832 			break;
833 		case qib_sdma_event_e90_timer_tick:
834 			break;
835 		}
836 		break;
837 
838 	case qib_sdma_state_s10_hw_start_up_wait:
839 		switch (event) {
840 		case qib_sdma_event_e00_go_hw_down:
841 			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
842 			sdma_sw_tear_down(ppd);
843 			break;
844 		case qib_sdma_event_e10_go_hw_start:
845 			break;
846 		case qib_sdma_event_e20_hw_started:
847 			sdma_set_state(ppd, ss->go_s99_running ?
848 				       qib_sdma_state_s99_running :
849 				       qib_sdma_state_s20_idle);
850 			break;
851 		case qib_sdma_event_e30_go_running:
852 			ss->go_s99_running = 1;
853 			break;
854 		case qib_sdma_event_e40_sw_cleaned:
855 			break;
856 		case qib_sdma_event_e50_hw_cleaned:
857 			break;
858 		case qib_sdma_event_e60_hw_halted:
859 			break;
860 		case qib_sdma_event_e70_go_idle:
861 			ss->go_s99_running = 0;
862 			break;
863 		case qib_sdma_event_e7220_err_halted:
864 			break;
865 		case qib_sdma_event_e7322_err_halted:
866 			break;
867 		case qib_sdma_event_e90_timer_tick:
868 			break;
869 		}
870 		break;
871 
872 	case qib_sdma_state_s20_idle:
873 		switch (event) {
874 		case qib_sdma_event_e00_go_hw_down:
875 			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
876 			sdma_sw_tear_down(ppd);
877 			break;
878 		case qib_sdma_event_e10_go_hw_start:
879 			break;
880 		case qib_sdma_event_e20_hw_started:
881 			break;
882 		case qib_sdma_event_e30_go_running:
883 			sdma_set_state(ppd, qib_sdma_state_s99_running);
884 			ss->go_s99_running = 1;
885 			break;
886 		case qib_sdma_event_e40_sw_cleaned:
887 			break;
888 		case qib_sdma_event_e50_hw_cleaned:
889 			break;
890 		case qib_sdma_event_e60_hw_halted:
891 			break;
892 		case qib_sdma_event_e70_go_idle:
893 			break;
894 		case qib_sdma_event_e7220_err_halted:
895 			break;
896 		case qib_sdma_event_e7322_err_halted:
897 			break;
898 		case qib_sdma_event_e90_timer_tick:
899 			break;
900 		}
901 		break;
902 
903 	case qib_sdma_state_s30_sw_clean_up_wait:
904 		switch (event) {
905 		case qib_sdma_event_e00_go_hw_down:
906 			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
907 			break;
908 		case qib_sdma_event_e10_go_hw_start:
909 			break;
910 		case qib_sdma_event_e20_hw_started:
911 			break;
912 		case qib_sdma_event_e30_go_running:
913 			ss->go_s99_running = 1;
914 			break;
915 		case qib_sdma_event_e40_sw_cleaned:
916 			sdma_set_state(ppd,
917 				       qib_sdma_state_s10_hw_start_up_wait);
918 			sdma_hw_start_up(ppd);
919 			break;
920 		case qib_sdma_event_e50_hw_cleaned:
921 			break;
922 		case qib_sdma_event_e60_hw_halted:
923 			break;
924 		case qib_sdma_event_e70_go_idle:
925 			ss->go_s99_running = 0;
926 			break;
927 		case qib_sdma_event_e7220_err_halted:
928 			break;
929 		case qib_sdma_event_e7322_err_halted:
930 			break;
931 		case qib_sdma_event_e90_timer_tick:
932 			break;
933 		}
934 		break;
935 
936 	case qib_sdma_state_s40_hw_clean_up_wait:
937 		switch (event) {
938 		case qib_sdma_event_e00_go_hw_down:
939 			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
940 			sdma_start_sw_clean_up(ppd);
941 			break;
942 		case qib_sdma_event_e10_go_hw_start:
943 			break;
944 		case qib_sdma_event_e20_hw_started:
945 			break;
946 		case qib_sdma_event_e30_go_running:
947 			ss->go_s99_running = 1;
948 			break;
949 		case qib_sdma_event_e40_sw_cleaned:
950 			break;
951 		case qib_sdma_event_e50_hw_cleaned:
952 			sdma_set_state(ppd,
953 				       qib_sdma_state_s30_sw_clean_up_wait);
954 			sdma_start_sw_clean_up(ppd);
955 			break;
956 		case qib_sdma_event_e60_hw_halted:
957 			break;
958 		case qib_sdma_event_e70_go_idle:
959 			ss->go_s99_running = 0;
960 			break;
961 		case qib_sdma_event_e7220_err_halted:
962 			break;
963 		case qib_sdma_event_e7322_err_halted:
964 			break;
965 		case qib_sdma_event_e90_timer_tick:
966 			break;
967 		}
968 		break;
969 
970 	case qib_sdma_state_s50_hw_halt_wait:
971 		switch (event) {
972 		case qib_sdma_event_e00_go_hw_down:
973 			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
974 			sdma_start_sw_clean_up(ppd);
975 			break;
976 		case qib_sdma_event_e10_go_hw_start:
977 			break;
978 		case qib_sdma_event_e20_hw_started:
979 			break;
980 		case qib_sdma_event_e30_go_running:
981 			ss->go_s99_running = 1;
982 			break;
983 		case qib_sdma_event_e40_sw_cleaned:
984 			break;
985 		case qib_sdma_event_e50_hw_cleaned:
986 			break;
987 		case qib_sdma_event_e60_hw_halted:
988 			sdma_set_state(ppd,
989 				       qib_sdma_state_s40_hw_clean_up_wait);
990 			ppd->dd->f_sdma_hw_clean_up(ppd);
991 			break;
992 		case qib_sdma_event_e70_go_idle:
993 			ss->go_s99_running = 0;
994 			break;
995 		case qib_sdma_event_e7220_err_halted:
996 			break;
997 		case qib_sdma_event_e7322_err_halted:
998 			break;
999 		case qib_sdma_event_e90_timer_tick:
1000 			break;
1001 		}
1002 		break;
1003 
1004 	case qib_sdma_state_s99_running:
1005 		switch (event) {
1006 		case qib_sdma_event_e00_go_hw_down:
1007 			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
1008 			sdma_start_sw_clean_up(ppd);
1009 			break;
1010 		case qib_sdma_event_e10_go_hw_start:
1011 			break;
1012 		case qib_sdma_event_e20_hw_started:
1013 			break;
1014 		case qib_sdma_event_e30_go_running:
1015 			break;
1016 		case qib_sdma_event_e40_sw_cleaned:
1017 			break;
1018 		case qib_sdma_event_e50_hw_cleaned:
1019 			break;
1020 		case qib_sdma_event_e60_hw_halted:
1021 			sdma_set_state(ppd,
1022 				       qib_sdma_state_s30_sw_clean_up_wait);
1023 			sdma_start_sw_clean_up(ppd);
1024 			break;
1025 		case qib_sdma_event_e70_go_idle:
1026 			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
1027 			ss->go_s99_running = 0;
1028 			break;
1029 		case qib_sdma_event_e7220_err_halted:
1030 			sdma_set_state(ppd,
1031 				       qib_sdma_state_s30_sw_clean_up_wait);
1032 			sdma_start_sw_clean_up(ppd);
1033 			break;
1034 		case qib_sdma_event_e7322_err_halted:
1035 			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
1036 			break;
1037 		case qib_sdma_event_e90_timer_tick:
1038 			break;
1039 		}
1040 		break;
1041 	}
1042 
1043 	ss->last_event = event;
1044 }
1045