xref: /openbmc/linux/drivers/infiniband/hw/hfi1/sdma.c (revision b830f94f)
1 /*
2  * Copyright(c) 2015 - 2018 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 
48 #include <linux/spinlock.h>
49 #include <linux/seqlock.h>
50 #include <linux/netdevice.h>
51 #include <linux/moduleparam.h>
52 #include <linux/bitops.h>
53 #include <linux/timer.h>
54 #include <linux/vmalloc.h>
55 #include <linux/highmem.h>
56 
57 #include "hfi.h"
58 #include "common.h"
59 #include "qp.h"
60 #include "sdma.h"
61 #include "iowait.h"
62 #include "trace.h"
63 
64 /* must be a power of 2, >= 64 and <= 32768 */
65 #define SDMA_DESCQ_CNT 2048
66 #define SDMA_DESC_INTR 64
67 #define INVALID_TAIL 0xffff
68 
69 static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
70 module_param(sdma_descq_cnt, uint, S_IRUGO);
71 MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
72 
73 static uint sdma_idle_cnt = 250;
74 module_param(sdma_idle_cnt, uint, S_IRUGO);
75 MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns, default 250)");
76 
77 uint mod_num_sdma;
78 module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
79 MODULE_PARM_DESC(num_sdma, "Set max number of SDMA engines to use");
80 
81 static uint sdma_desct_intr = SDMA_DESC_INTR;
82 module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
83 MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before interrupt");
84 
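/*
 * Illustrative usage of the tunables above (hypothetical values, not a
 * recommendation): 'modprobe hfi1 sdma_descq_cnt=4096 num_sdma=8'.
 * sdma_descq_cnt, sdma_idle_cnt and num_sdma are read-only after load
 * (S_IRUGO); desct_intr is additionally root-writable (S_IWUSR) via
 * /sys/module/hfi1/parameters/desct_intr.
 */
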
85 #define SDMA_WAIT_BATCH_SIZE 20
86 /* max wait time for an SDMA engine to indicate it has halted */
87 #define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
88 /* all SDMA engine errors that cause a halt */
89 
90 #define SD(name) SEND_DMA_##name
91 #define ALL_SDMA_ENG_HALT_ERRS \
92 	(SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
93 	| SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
94 	| SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
95 	| SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
96 	| SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
97 	| SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
98 	| SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
99 	| SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
100 	| SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
101 	| SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
102 	| SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
103 	| SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
104 	| SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
105 	| SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
106 	| SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
107 	| SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
108 	| SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
109 	| SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
110 
111 /* sdma_sendctrl operations */
112 #define SDMA_SENDCTRL_OP_ENABLE    BIT(0)
113 #define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
114 #define SDMA_SENDCTRL_OP_HALT      BIT(2)
115 #define SDMA_SENDCTRL_OP_CLEANUP   BIT(3)
116 
117 /* handle long defines */
118 #define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
119 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
120 #define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
121 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
122 
123 static const char * const sdma_state_names[] = {
124 	[sdma_state_s00_hw_down]                = "s00_HwDown",
125 	[sdma_state_s10_hw_start_up_halt_wait]  = "s10_HwStartUpHaltWait",
126 	[sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
127 	[sdma_state_s20_idle]                   = "s20_Idle",
128 	[sdma_state_s30_sw_clean_up_wait]       = "s30_SwCleanUpWait",
129 	[sdma_state_s40_hw_clean_up_wait]       = "s40_HwCleanUpWait",
130 	[sdma_state_s50_hw_halt_wait]           = "s50_HwHaltWait",
131 	[sdma_state_s60_idle_halt_wait]         = "s60_IdleHaltWait",
132 	[sdma_state_s80_hw_freeze]		= "s80_HwFreeze",
133 	[sdma_state_s82_freeze_sw_clean]	= "s82_FreezeSwClean",
134 	[sdma_state_s99_running]                = "s99_Running",
135 };
136 
137 #ifdef CONFIG_SDMA_VERBOSITY
138 static const char * const sdma_event_names[] = {
139 	[sdma_event_e00_go_hw_down]   = "e00_GoHwDown",
140 	[sdma_event_e10_go_hw_start]  = "e10_GoHwStart",
141 	[sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
142 	[sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
143 	[sdma_event_e30_go_running]   = "e30_GoRunning",
144 	[sdma_event_e40_sw_cleaned]   = "e40_SwCleaned",
145 	[sdma_event_e50_hw_cleaned]   = "e50_HwCleaned",
146 	[sdma_event_e60_hw_halted]    = "e60_HwHalted",
147 	[sdma_event_e70_go_idle]      = "e70_GoIdle",
148 	[sdma_event_e80_hw_freeze]    = "e80_HwFreeze",
149 	[sdma_event_e81_hw_frozen]    = "e81_HwFrozen",
150 	[sdma_event_e82_hw_unfreeze]  = "e82_HwUnfreeze",
151 	[sdma_event_e85_link_down]    = "e85_LinkDown",
152 	[sdma_event_e90_sw_halted]    = "e90_SwHalted",
153 };
154 #endif
155 
156 static const struct sdma_set_state_action sdma_action_table[] = {
157 	[sdma_state_s00_hw_down] = {
158 		.go_s99_running_tofalse = 1,
159 		.op_enable = 0,
160 		.op_intenable = 0,
161 		.op_halt = 0,
162 		.op_cleanup = 0,
163 	},
164 	[sdma_state_s10_hw_start_up_halt_wait] = {
165 		.op_enable = 0,
166 		.op_intenable = 0,
167 		.op_halt = 1,
168 		.op_cleanup = 0,
169 	},
170 	[sdma_state_s15_hw_start_up_clean_wait] = {
171 		.op_enable = 0,
172 		.op_intenable = 1,
173 		.op_halt = 0,
174 		.op_cleanup = 1,
175 	},
176 	[sdma_state_s20_idle] = {
177 		.op_enable = 0,
178 		.op_intenable = 1,
179 		.op_halt = 0,
180 		.op_cleanup = 0,
181 	},
182 	[sdma_state_s30_sw_clean_up_wait] = {
183 		.op_enable = 0,
184 		.op_intenable = 0,
185 		.op_halt = 0,
186 		.op_cleanup = 0,
187 	},
188 	[sdma_state_s40_hw_clean_up_wait] = {
189 		.op_enable = 0,
190 		.op_intenable = 0,
191 		.op_halt = 0,
192 		.op_cleanup = 1,
193 	},
194 	[sdma_state_s50_hw_halt_wait] = {
195 		.op_enable = 0,
196 		.op_intenable = 0,
197 		.op_halt = 0,
198 		.op_cleanup = 0,
199 	},
200 	[sdma_state_s60_idle_halt_wait] = {
201 		.go_s99_running_tofalse = 1,
202 		.op_enable = 0,
203 		.op_intenable = 0,
204 		.op_halt = 1,
205 		.op_cleanup = 0,
206 	},
207 	[sdma_state_s80_hw_freeze] = {
208 		.op_enable = 0,
209 		.op_intenable = 0,
210 		.op_halt = 0,
211 		.op_cleanup = 0,
212 	},
213 	[sdma_state_s82_freeze_sw_clean] = {
214 		.op_enable = 0,
215 		.op_intenable = 0,
216 		.op_halt = 0,
217 		.op_cleanup = 0,
218 	},
219 	[sdma_state_s99_running] = {
220 		.op_enable = 1,
221 		.op_intenable = 1,
222 		.op_halt = 0,
223 		.op_cleanup = 0,
224 		.go_s99_running_totrue = 1,
225 	},
226 };
227 
228 #define SDMA_TAIL_UPDATE_THRESH 0x1F
229 
230 /* declare all statics here rather than keep sorting */
231 static void sdma_complete(struct kref *);
232 static void sdma_finalput(struct sdma_state *);
233 static void sdma_get(struct sdma_state *);
234 static void sdma_hw_clean_up_task(unsigned long);
235 static void sdma_put(struct sdma_state *);
236 static void sdma_set_state(struct sdma_engine *, enum sdma_states);
237 static void sdma_start_hw_clean_up(struct sdma_engine *);
238 static void sdma_sw_clean_up_task(unsigned long);
239 static void sdma_sendctrl(struct sdma_engine *, unsigned);
240 static void init_sdma_regs(struct sdma_engine *, u32, uint);
241 static void sdma_process_event(
242 	struct sdma_engine *sde,
243 	enum sdma_events event);
244 static void __sdma_process_event(
245 	struct sdma_engine *sde,
246 	enum sdma_events event);
247 static void dump_sdma_state(struct sdma_engine *sde);
248 static void sdma_make_progress(struct sdma_engine *sde, u64 status);
249 static void sdma_desc_avail(struct sdma_engine *sde, uint avail);
250 static void sdma_flush_descq(struct sdma_engine *sde);
251 
252 /**
253  * sdma_state_name() - return state string from enum
254  * @state: state
255  */
256 static const char *sdma_state_name(enum sdma_states state)
257 {
258 	return sdma_state_names[state];
259 }
260 
261 static void sdma_get(struct sdma_state *ss)
262 {
263 	kref_get(&ss->kref);
264 }
265 
266 static void sdma_complete(struct kref *kref)
267 {
268 	struct sdma_state *ss =
269 		container_of(kref, struct sdma_state, kref);
270 
271 	complete(&ss->comp);
272 }
273 
274 static void sdma_put(struct sdma_state *ss)
275 {
276 	kref_put(&ss->kref, sdma_complete);
277 }
278 
279 static void sdma_finalput(struct sdma_state *ss)
280 {
281 	sdma_put(ss);
282 	wait_for_completion(&ss->comp);
283 }
284 
285 static inline void write_sde_csr(
286 	struct sdma_engine *sde,
287 	u32 offset0,
288 	u64 value)
289 {
290 	write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
291 }
292 
293 static inline u64 read_sde_csr(
294 	struct sdma_engine *sde,
295 	u32 offset0)
296 {
297 	return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
298 }
299 
300 /*
301  * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
302  * sdma engine 'sde' to drop to 0.
303  */
304 static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
305 					int pause)
306 {
307 	u64 off = 8 * sde->this_idx;
308 	struct hfi1_devdata *dd = sde->dd;
309 	int lcnt = 0;
310 	u64 reg_prev;
311 	u64 reg = 0;
312 
313 	while (1) {
314 		reg_prev = reg;
315 		reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);
316 
317 		reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
318 		reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
319 		if (reg == 0)
320 			break;
321 		/* counter is reset if occupancy count changes */
322 		if (reg != reg_prev)
323 			lcnt = 0;
324 		if (lcnt++ > 500) {
325 			/* timed out - bounce the link */
326 			dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
327 				   __func__, sde->this_idx, (u32)reg);
328 			queue_work(dd->pport->link_wq,
329 				   &dd->pport->link_bounce_work);
330 			break;
331 		}
332 		udelay(1);
333 	}
334 }
335 
336 /*
337  * sdma_wait() - wait for packet egress to complete for all SDMA engines,
338  * and pause for credit return.
339  */
340 void sdma_wait(struct hfi1_devdata *dd)
341 {
342 	int i;
343 
344 	for (i = 0; i < dd->num_sdma; i++) {
345 		struct sdma_engine *sde = &dd->per_sdma[i];
346 
347 		sdma_wait_for_packet_egress(sde, 0);
348 	}
349 }
350 
351 static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
352 {
353 	u64 reg;
354 
355 	if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
356 		return;
357 	reg = cnt;
358 	reg &= SD(DESC_CNT_CNT_MASK);
359 	reg <<= SD(DESC_CNT_CNT_SHIFT);
360 	write_sde_csr(sde, SD(DESC_CNT), reg);
361 }
362 
363 static inline void complete_tx(struct sdma_engine *sde,
364 			       struct sdma_txreq *tx,
365 			       int res)
366 {
367 	/* protect against complete modifying */
368 	struct iowait *wait = tx->wait;
369 	callback_t complete = tx->complete;
370 
371 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
372 	trace_hfi1_sdma_out_sn(sde, tx->sn);
373 	if (WARN_ON_ONCE(sde->head_sn != tx->sn))
374 		dd_dev_err(sde->dd, "expected %llu got %llu\n",
375 			   sde->head_sn, tx->sn);
376 	sde->head_sn++;
377 #endif
378 	__sdma_txclean(sde->dd, tx);
379 	if (complete)
380 		(*complete)(tx, res);
381 	if (iowait_sdma_dec(wait))
382 		iowait_drain_wakeup(wait);
383 }
384 
385 /*
386  * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
387  *
388  * Depending on timing there can be txreqs in two places:
389  * - in the descq ring
390  * - in the flush list
391  *
392  * To avoid ordering issues the descq ring needs to be flushed
393  * first followed by the flush list.
394  *
395  * This routine is called from two places
396  * - From a work queue item
397  * - Directly from the state machine just before setting the
398  *   state to running
399  *
400  * Must be called with head_lock held
401  *
402  */
403 static void sdma_flush(struct sdma_engine *sde)
404 {
405 	struct sdma_txreq *txp, *txp_next;
406 	LIST_HEAD(flushlist);
407 	unsigned long flags;
408 	uint seq;
409 
410 	/* flush from head to tail */
411 	sdma_flush_descq(sde);
412 	spin_lock_irqsave(&sde->flushlist_lock, flags);
413 	/* copy flush list */
414 	list_splice_init(&sde->flushlist, &flushlist);
415 	spin_unlock_irqrestore(&sde->flushlist_lock, flags);
416 	/* flush from flush list */
417 	list_for_each_entry_safe(txp, txp_next, &flushlist, list)
418 		complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
419 	/* wakeup QPs orphaned on the dmawait list */
420 	do {
421 		struct iowait *w, *nw;
422 
423 		seq = read_seqbegin(&sde->waitlock);
424 		if (!list_empty(&sde->dmawait)) {
425 			write_seqlock(&sde->waitlock);
426 			list_for_each_entry_safe(w, nw, &sde->dmawait, list) {
427 				if (w->wakeup) {
428 					w->wakeup(w, SDMA_AVAIL_REASON);
429 					list_del_init(&w->list);
430 				}
431 			}
432 			write_sequnlock(&sde->waitlock);
433 		}
434 	} while (read_seqretry(&sde->waitlock, seq));
435 }
436 
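/*
 * A minimal sketch of the check-then-lock pattern sdma_flush() uses on
 * sde->waitlock (hypothetical my_lock/my_list names, compiled out):
 * readers take a lockless sequence snapshot and only upgrade to the
 * write side when there is actually work, retrying if a writer raced
 * with the snapshot.
 */
#if 0
	unsigned int seq;

	do {
		seq = read_seqbegin(&my_lock);		/* lockless snapshot */
		if (!list_empty(&my_list)) {
			write_seqlock(&my_lock);	/* upgrade: real work */
			/* ... pop and service entries ... */
			write_sequnlock(&my_lock);
		}
	} while (read_seqretry(&my_lock, seq));		/* raced? try again */
#endif
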
437 /*
438  * Fields a work request for flushing the descq ring
439  * and the flush list
440  *
441  * If the engine has been brought to running during
442  * the scheduling delay, the flush is ignored, assuming
443  * that the process of bringing the engine to running
444  * would have done this flush prior to going to running.
445  *
446  */
447 static void sdma_field_flush(struct work_struct *work)
448 {
449 	unsigned long flags;
450 	struct sdma_engine *sde =
451 		container_of(work, struct sdma_engine, flush_worker);
452 
453 	write_seqlock_irqsave(&sde->head_lock, flags);
454 	if (!__sdma_running(sde))
455 		sdma_flush(sde);
456 	write_sequnlock_irqrestore(&sde->head_lock, flags);
457 }
458 
459 static void sdma_err_halt_wait(struct work_struct *work)
460 {
461 	struct sdma_engine *sde = container_of(work, struct sdma_engine,
462 						err_halt_worker);
463 	u64 statuscsr;
464 	unsigned long timeout;
465 
466 	timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
467 	while (1) {
468 		statuscsr = read_sde_csr(sde, SD(STATUS));
469 		statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
470 		if (statuscsr)
471 			break;
472 		if (time_after(jiffies, timeout)) {
473 			dd_dev_err(sde->dd,
474 				   "SDMA engine %d - timeout waiting for engine to halt\n",
475 				   sde->this_idx);
476 			/*
477 			 * Continue anyway.  This could happen if there was
478 			 * an uncorrectable error in the wrong spot.
479 			 */
480 			break;
481 		}
482 		usleep_range(80, 120);
483 	}
484 
485 	sdma_process_event(sde, sdma_event_e15_hw_halt_done);
486 }
487 
488 static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
489 {
490 	if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
491 		unsigned index;
492 		struct hfi1_devdata *dd = sde->dd;
493 
494 		for (index = 0; index < dd->num_sdma; index++) {
495 			struct sdma_engine *curr_sdma = &dd->per_sdma[index];
496 
497 			if (curr_sdma != sde)
498 				curr_sdma->progress_check_head =
499 							curr_sdma->descq_head;
500 		}
501 		dd_dev_err(sde->dd,
502 			   "SDMA engine %d - check scheduled\n",
503 				sde->this_idx);
504 		mod_timer(&sde->err_progress_check_timer, jiffies + 10);
505 	}
506 }
507 
508 static void sdma_err_progress_check(struct timer_list *t)
509 {
510 	unsigned index;
511 	struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer);
512 
513 	dd_dev_err(sde->dd, "SDE progress check event\n");
514 	for (index = 0; index < sde->dd->num_sdma; index++) {
515 		struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
516 		unsigned long flags;
517 
518 		/* check progress on each engine except the current one */
519 		if (curr_sde == sde)
520 			continue;
521 		/*
522 		 * We must lock interrupts when acquiring sde->lock,
523 		 * to avoid a deadlock if an interrupt triggers and spins on
524 		 * the same lock on the same CPU
525 		 */
526 		spin_lock_irqsave(&curr_sde->tail_lock, flags);
527 		write_seqlock(&curr_sde->head_lock);
528 
529 		/* skip non-running queues */
530 		if (curr_sde->state.current_state != sdma_state_s99_running) {
531 			write_sequnlock(&curr_sde->head_lock);
532 			spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
533 			continue;
534 		}
535 
536 		if ((curr_sde->descq_head != curr_sde->descq_tail) &&
537 		    (curr_sde->descq_head ==
538 				curr_sde->progress_check_head))
539 			__sdma_process_event(curr_sde,
540 					     sdma_event_e90_sw_halted);
541 		write_sequnlock(&curr_sde->head_lock);
542 		spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
543 	}
544 	schedule_work(&sde->err_halt_worker);
545 }
546 
547 static void sdma_hw_clean_up_task(unsigned long opaque)
548 {
549 	struct sdma_engine *sde = (struct sdma_engine *)opaque;
550 	u64 statuscsr;
551 
552 	while (1) {
553 #ifdef CONFIG_SDMA_VERBOSITY
554 		dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
555 			   sde->this_idx, slashstrip(__FILE__), __LINE__,
556 			__func__);
557 #endif
558 		statuscsr = read_sde_csr(sde, SD(STATUS));
559 		statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
560 		if (statuscsr)
561 			break;
562 		udelay(10);
563 	}
564 
565 	sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
566 }
567 
568 static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
569 {
570 	return sde->tx_ring[sde->tx_head & sde->sdma_mask];
571 }
572 
573 /*
574  * flush ring for recovery
575  */
576 static void sdma_flush_descq(struct sdma_engine *sde)
577 {
578 	u16 head, tail;
579 	int progress = 0;
580 	struct sdma_txreq *txp = get_txhead(sde);
581 
582 	/* The reason for some of the complexity of this code is that
583 	 * not all descriptors have corresponding txps.  So, we have to
584 	 * be able to skip over descs until we wander into the range of
585 	 * the next txp on the list.
586 	 */
587 	head = sde->descq_head & sde->sdma_mask;
588 	tail = sde->descq_tail & sde->sdma_mask;
589 	while (head != tail) {
590 		/* advance head, wrap if needed */
591 		head = ++sde->descq_head & sde->sdma_mask;
592 		/* if now past this txp's descs, do the callback */
593 		if (txp && txp->next_descq_idx == head) {
594 			/* remove from list */
595 			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
596 			complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
597 			trace_hfi1_sdma_progress(sde, head, tail, txp);
598 			txp = get_txhead(sde);
599 		}
600 		progress++;
601 	}
602 	if (progress)
603 		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
604 }
605 
606 static void sdma_sw_clean_up_task(unsigned long opaque)
607 {
608 	struct sdma_engine *sde = (struct sdma_engine *)opaque;
609 	unsigned long flags;
610 
611 	spin_lock_irqsave(&sde->tail_lock, flags);
612 	write_seqlock(&sde->head_lock);
613 
614 	/*
615 	 * At this point, the following should always be true:
616 	 * - We are halted, so no more descriptors are getting retired.
617 	 * - We are not running, so no one is submitting new work.
618 	 * - Only we can send the e40_sw_cleaned, so we can't start
619 	 *   running again until we say so.  So, the active list and
620 	 *   descq are ours to play with.
621 	 */
622 
623 	/*
624 	 * In the error clean up sequence, software clean must be called
625 	 * before the hardware clean so we can use the hardware head in
626 	 * the progress routine.  A hardware clean or SPC unfreeze will
627 	 * reset the hardware head.
628 	 *
629 	 * Process all retired requests. The progress routine will use the
630 	 * latest physical hardware head - we are not running so speed does
631 	 * not matter.
632 	 */
633 	sdma_make_progress(sde, 0);
634 
635 	sdma_flush(sde);
636 
637 	/*
638 	 * Reset our notion of head and tail.
639 	 * Note that the HW registers have been reset via an earlier
640 	 * clean up.
641 	 */
642 	sde->descq_tail = 0;
643 	sde->descq_head = 0;
644 	sde->desc_avail = sdma_descq_freecnt(sde);
645 	*sde->head_dma = 0;
646 
647 	__sdma_process_event(sde, sdma_event_e40_sw_cleaned);
648 
649 	write_sequnlock(&sde->head_lock);
650 	spin_unlock_irqrestore(&sde->tail_lock, flags);
651 }
652 
653 static void sdma_sw_tear_down(struct sdma_engine *sde)
654 {
655 	struct sdma_state *ss = &sde->state;
656 
657 	/* Releasing this reference means the state machine has stopped. */
658 	sdma_put(ss);
659 
660 	/* stop waiting for all unfreeze events to complete */
661 	atomic_set(&sde->dd->sdma_unfreeze_count, -1);
662 	wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
663 }
664 
665 static void sdma_start_hw_clean_up(struct sdma_engine *sde)
666 {
667 	tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
668 }
669 
670 static void sdma_set_state(struct sdma_engine *sde,
671 			   enum sdma_states next_state)
672 {
673 	struct sdma_state *ss = &sde->state;
674 	const struct sdma_set_state_action *action = sdma_action_table;
675 	unsigned op = 0;
676 
677 	trace_hfi1_sdma_state(
678 		sde,
679 		sdma_state_names[ss->current_state],
680 		sdma_state_names[next_state]);
681 
682 	/* debugging bookkeeping */
683 	ss->previous_state = ss->current_state;
684 	ss->previous_op = ss->current_op;
685 	ss->current_state = next_state;
686 
687 	if (ss->previous_state != sdma_state_s99_running &&
688 	    next_state == sdma_state_s99_running)
689 		sdma_flush(sde);
690 
691 	if (action[next_state].op_enable)
692 		op |= SDMA_SENDCTRL_OP_ENABLE;
693 
694 	if (action[next_state].op_intenable)
695 		op |= SDMA_SENDCTRL_OP_INTENABLE;
696 
697 	if (action[next_state].op_halt)
698 		op |= SDMA_SENDCTRL_OP_HALT;
699 
700 	if (action[next_state].op_cleanup)
701 		op |= SDMA_SENDCTRL_OP_CLEANUP;
702 
703 	if (action[next_state].go_s99_running_tofalse)
704 		ss->go_s99_running = 0;
705 
706 	if (action[next_state].go_s99_running_totrue)
707 		ss->go_s99_running = 1;
708 
709 	ss->current_op = op;
710 	sdma_sendctrl(sde, ss->current_op);
711 }
712 
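/*
 * Worked example of the table-driven transition above: entering
 * sdma_state_s99_running, sdma_action_table sets .op_enable and
 * .op_intenable, so op becomes SDMA_SENDCTRL_OP_ENABLE |
 * SDMA_SENDCTRL_OP_INTENABLE (bits 0 and 1) and go_s99_running flips
 * to true; the single sdma_sendctrl() call then pushes the whole
 * transition to the CSRs.
 */
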
713 /**
714  * sdma_get_descq_cnt() - called when device probed
715  *
716  * Return a validated descq count.
717  *
718  * This is currently only used in the verbs initialization to build the tx
719  * list.
720  *
721  * This will probably be deleted in favor of a more scalable approach to
722  * alloc tx's.
723  *
724  */
725 u16 sdma_get_descq_cnt(void)
726 {
727 	u16 count = sdma_descq_cnt;
728 
729 	if (!count)
730 		return SDMA_DESCQ_CNT;
731 	/* count must be a power of 2 between 64 and 32768, inclusive.
732 	 * Otherwise return default.
733 	 */
734 	if (!is_power_of_2(count))
735 		return SDMA_DESCQ_CNT;
736 	if (count < 64 || count > 32768)
737 		return SDMA_DESCQ_CNT;
738 	return count;
739 }
740 
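/*
 * The power-of-2 constraint lets ring indices wrap with a mask instead
 * of a modulo; sdma_init() derives sdma_mask from descq_cnt.  A small
 * worked example with illustrative values:
 *
 *	descq_cnt = 2048   ->  sdma_mask = 0x7ff
 *	descq_head = 2050  ->  2050 & 0x7ff = 2  (wrapped past the end)
 */
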
741 /**
742  * sdma_engine_get_vl() - return vl for a given sdma engine
743  * @sde: sdma engine
744  *
745  * This function returns the vl mapped to a given engine, or an error if
746  * the mapping can't be found. The mapping fields are protected by RCU.
747  */
748 int sdma_engine_get_vl(struct sdma_engine *sde)
749 {
750 	struct hfi1_devdata *dd = sde->dd;
751 	struct sdma_vl_map *m;
752 	u8 vl;
753 
754 	if (sde->this_idx >= TXE_NUM_SDMA_ENGINES)
755 		return -EINVAL;
756 
757 	rcu_read_lock();
758 	m = rcu_dereference(dd->sdma_map);
759 	if (unlikely(!m)) {
760 		rcu_read_unlock();
761 		return -EINVAL;
762 	}
763 	vl = m->engine_to_vl[sde->this_idx];
764 	rcu_read_unlock();
765 
766 	return vl;
767 }
768 
769 /**
770  * sdma_select_engine_vl() - select sdma engine
771  * @dd: devdata
772  * @selector: a spreading factor
773  * @vl: this vl
774  *
775  *
776  * This function returns an engine based on the selector and a vl.  The
777  * mapping fields are protected by RCU.
778  */
779 struct sdma_engine *sdma_select_engine_vl(
780 	struct hfi1_devdata *dd,
781 	u32 selector,
782 	u8 vl)
783 {
784 	struct sdma_vl_map *m;
785 	struct sdma_map_elem *e;
786 	struct sdma_engine *rval;
787 
788 	/* NOTE This should only happen if SC->VL changed after the initial
789 	 *      checks on the QP/AH
790 	 *      Default will return engine 0 below
791 	 */
792 	if (vl >= num_vls) {
793 		rval = NULL;
794 		goto done;
795 	}
796 
797 	rcu_read_lock();
798 	m = rcu_dereference(dd->sdma_map);
799 	if (unlikely(!m)) {
800 		rcu_read_unlock();
801 		return &dd->per_sdma[0];
802 	}
803 	e = m->map[vl & m->mask];
804 	rval = e->sde[selector & e->mask];
805 	rcu_read_unlock();
806 
807 done:
808 	rval =  !rval ? &dd->per_sdma[0] : rval;
809 	trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
810 	return rval;
811 }
812 
813 /**
814  * sdma_select_engine_sc() - select sdma engine
815  * @dd: devdata
816  * @selector: a spreading factor
817  * @sc5: the 5 bit sc
818  *
819  *
820  * This function returns an engine based on the selector and an sc.
821  */
822 struct sdma_engine *sdma_select_engine_sc(
823 	struct hfi1_devdata *dd,
824 	u32 selector,
825 	u8 sc5)
826 {
827 	u8 vl = sc_to_vlt(dd, sc5);
828 
829 	return sdma_select_engine_vl(dd, selector, vl);
830 }
831 
832 struct sdma_rht_map_elem {
833 	u32 mask;
834 	u8 ctr;
835 	struct sdma_engine *sde[0];
836 };
837 
838 struct sdma_rht_node {
839 	unsigned long cpu_id;
840 	struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED];
841 	struct rhash_head node;
842 };
843 
844 #define NR_CPUS_HINT 192
845 
846 static const struct rhashtable_params sdma_rht_params = {
847 	.nelem_hint = NR_CPUS_HINT,
848 	.head_offset = offsetof(struct sdma_rht_node, node),
849 	.key_offset = offsetof(struct sdma_rht_node, cpu_id),
850 	.key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id),
851 	.max_size = NR_CPUS,
852 	.min_size = 8,
853 	.automatic_shrinking = true,
854 };
855 
856 /*
857  * sdma_select_user_engine() - select sdma engine based on user setup
858  * @dd: devdata
859  * @selector: a spreading factor
860  * @vl: this vl
861  *
862  * This function returns an sdma engine for a user sdma request.
863  * User defined sdma engine affinity setting is honored when applicable,
864  * otherwise system default sdma engine mapping is used. To ensure correct
865  * ordering, the mapping from <selector, vl> to sde must remain unchanged.
866  */
867 struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
868 					    u32 selector, u8 vl)
869 {
870 	struct sdma_rht_node *rht_node;
871 	struct sdma_engine *sde = NULL;
872 	unsigned long cpu_id;
873 
874 	/*
875 	 * To ensure the same sdma engine(s) are always selected, make
876 	 * sure the process is pinned to this CPU only.
877 	 */
878 	if (current->nr_cpus_allowed != 1)
879 		goto out;
880 
881 	cpu_id = smp_processor_id();
882 	rcu_read_lock();
883 	rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id,
884 					  sdma_rht_params);
885 
886 	if (rht_node && rht_node->map[vl]) {
887 		struct sdma_rht_map_elem *map = rht_node->map[vl];
888 
889 		sde = map->sde[selector & map->mask];
890 	}
891 	rcu_read_unlock();
892 
893 	if (sde)
894 		return sde;
895 
896 out:
897 	return sdma_select_engine_vl(dd, selector, vl);
898 }
899 
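/*
 * Usage note (illustrative): the per-CPU override only takes effect for
 * processes pinned to a single CPU (current->nr_cpus_allowed == 1),
 * e.g. started under 'taskset -c 3 <app>'.  An unpinned process falls
 * back to the default <selector, vl> mapping, which keeps engine
 * selection, and therefore packet ordering, stable.
 */
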
900 static void sdma_populate_sde_map(struct sdma_rht_map_elem *map)
901 {
902 	int i;
903 
904 	for (i = 0; i < roundup_pow_of_two(map->ctr ? : 1) - map->ctr; i++)
905 		map->sde[map->ctr + i] = map->sde[i];
906 }
907 
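/*
 * Worked example of the padding above (illustrative): with ctr = 3
 * engines in map->sde[0..2], roundup_pow_of_two(3) = 4 leaves one
 * trailing slot, filled by wrapping to the start:
 *
 *	map->sde[3] = map->sde[0];	(and the caller sets mask = 0x3)
 *
 * 'selector & map->mask' then always lands on a populated slot without
 * a divide, while still spreading requests across all mapped engines.
 */
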
908 static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map,
909 				 struct sdma_engine *sde)
910 {
911 	unsigned int i, pow;
912 
913 	/* only need to check the first ctr entries for a match */
914 	for (i = 0; i < map->ctr; i++) {
915 		if (map->sde[i] == sde) {
916 			memmove(&map->sde[i], &map->sde[i + 1],
917 				(map->ctr - i - 1) * sizeof(map->sde[0]));
918 			map->ctr--;
919 			pow = roundup_pow_of_two(map->ctr ? : 1);
920 			map->mask = pow - 1;
921 			sdma_populate_sde_map(map);
922 			break;
923 		}
924 	}
925 }
926 
927 /*
928  * Prevents concurrent reads and writes of the sdma engine cpu_mask
929  */
930 static DEFINE_MUTEX(process_to_sde_mutex);
931 
932 ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
933 				size_t count)
934 {
935 	struct hfi1_devdata *dd = sde->dd;
936 	cpumask_var_t mask, new_mask;
937 	unsigned long cpu;
938 	int ret, vl, sz;
939 	struct sdma_rht_node *rht_node;
940 
941 	vl = sdma_engine_get_vl(sde);
942 	if (unlikely(vl < 0 || vl >= ARRAY_SIZE(rht_node->map)))
943 		return -EINVAL;
944 
945 	ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
946 	if (!ret)
947 		return -ENOMEM;
948 
949 	ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL);
950 	if (!ret) {
951 		free_cpumask_var(mask);
952 		return -ENOMEM;
953 	}
954 	ret = cpulist_parse(buf, mask);
955 	if (ret)
956 		goto out_free;
957 
958 	if (!cpumask_subset(mask, cpu_online_mask)) {
959 		dd_dev_warn(sde->dd, "Invalid CPU mask\n");
960 		ret = -EINVAL;
961 		goto out_free;
962 	}
963 
964 	sz = sizeof(struct sdma_rht_map_elem) +
965 			(TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *));
966 
967 	mutex_lock(&process_to_sde_mutex);
968 
969 	for_each_cpu(cpu, mask) {
970 		/* Check if we have this already mapped */
971 		if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
972 			cpumask_set_cpu(cpu, new_mask);
973 			continue;
974 		}
975 
976 		rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
977 						  sdma_rht_params);
978 		if (!rht_node) {
979 			rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL);
980 			if (!rht_node) {
981 				ret = -ENOMEM;
982 				goto out;
983 			}
984 
985 			rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
986 			if (!rht_node->map[vl]) {
987 				kfree(rht_node);
988 				ret = -ENOMEM;
989 				goto out;
990 			}
991 			rht_node->cpu_id = cpu;
992 			rht_node->map[vl]->mask = 0;
993 			rht_node->map[vl]->ctr = 1;
994 			rht_node->map[vl]->sde[0] = sde;
995 
996 			ret = rhashtable_insert_fast(dd->sdma_rht,
997 						     &rht_node->node,
998 						     sdma_rht_params);
999 			if (ret) {
1000 				kfree(rht_node->map[vl]);
1001 				kfree(rht_node);
1002 				dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n",
1003 					   cpu);
1004 				goto out;
1005 			}
1006 
1007 		} else {
1008 			int ctr, pow;
1009 
1010 			/* Add new user mappings */
1011 			if (!rht_node->map[vl])
1012 				rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
1013 
1014 			if (!rht_node->map[vl]) {
1015 				ret = -ENOMEM;
1016 				goto out;
1017 			}
1018 
1019 			rht_node->map[vl]->ctr++;
1020 			ctr = rht_node->map[vl]->ctr;
1021 			rht_node->map[vl]->sde[ctr - 1] = sde;
1022 			pow = roundup_pow_of_two(ctr);
1023 			rht_node->map[vl]->mask = pow - 1;
1024 
1025 			/* Populate the sde map table */
1026 			sdma_populate_sde_map(rht_node->map[vl]);
1027 		}
1028 		cpumask_set_cpu(cpu, new_mask);
1029 	}
1030 
1031 	/* Clean up old mappings */
1032 	for_each_cpu(cpu, cpu_online_mask) {
1033 		struct sdma_rht_node *rht_node;
1034 
1035 		/* Don't clean up sdes that are set in the new mask */
1036 		if (cpumask_test_cpu(cpu, mask))
1037 			continue;
1038 
1039 		rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
1040 						  sdma_rht_params);
1041 		if (rht_node) {
1042 			bool empty = true;
1043 			int i;
1044 
1045 			/* Remove mappings for old sde */
1046 			for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
1047 				if (rht_node->map[i])
1048 					sdma_cleanup_sde_map(rht_node->map[i],
1049 							     sde);
1050 
1051 			/* Free empty hash table entries */
1052 			for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
1053 				if (!rht_node->map[i])
1054 					continue;
1055 
1056 				if (rht_node->map[i]->ctr) {
1057 					empty = false;
1058 					break;
1059 				}
1060 			}
1061 
1062 			if (empty) {
1063 				ret = rhashtable_remove_fast(dd->sdma_rht,
1064 							     &rht_node->node,
1065 							     sdma_rht_params);
1066 				WARN_ON(ret);
1067 
1068 				for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
1069 					kfree(rht_node->map[i]);
1070 
1071 				kfree(rht_node);
1072 			}
1073 		}
1074 	}
1075 
1076 	cpumask_copy(&sde->cpu_mask, new_mask);
1077 out:
1078 	mutex_unlock(&process_to_sde_mutex);
1079 out_free:
1080 	free_cpumask_var(mask);
1081 	free_cpumask_var(new_mask);
1082 	return ret ? : strnlen(buf, PAGE_SIZE);
1083 }
1084 
1085 ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf)
1086 {
1087 	mutex_lock(&process_to_sde_mutex);
1088 	if (cpumask_empty(&sde->cpu_mask))
1089 		snprintf(buf, PAGE_SIZE, "%s\n", "empty");
1090 	else
1091 		cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask);
1092 	mutex_unlock(&process_to_sde_mutex);
1093 	return strnlen(buf, PAGE_SIZE);
1094 }
1095 
1096 static void sdma_rht_free(void *ptr, void *arg)
1097 {
1098 	struct sdma_rht_node *rht_node = ptr;
1099 	int i;
1100 
1101 	for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
1102 		kfree(rht_node->map[i]);
1103 
1104 	kfree(rht_node);
1105 }
1106 
1107 /**
1108  * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings
1109  * @s: seq file
1110  * @dd: hfi1_devdata
1111  * @cpuid: cpu id
1112  *
1113  * This routine dumps the process to sde mappings per cpu
1114  */
1115 void sdma_seqfile_dump_cpu_list(struct seq_file *s,
1116 				struct hfi1_devdata *dd,
1117 				unsigned long cpuid)
1118 {
1119 	struct sdma_rht_node *rht_node;
1120 	int i, j;
1121 
1122 	rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid,
1123 					  sdma_rht_params);
1124 	if (!rht_node)
1125 		return;
1126 
1127 	seq_printf(s, "cpu%3lu: ", cpuid);
1128 	for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
1129 		if (!rht_node->map[i] || !rht_node->map[i]->ctr)
1130 			continue;
1131 
1132 		seq_printf(s, " vl%d: [", i);
1133 
1134 		for (j = 0; j < rht_node->map[i]->ctr; j++) {
1135 			if (!rht_node->map[i]->sde[j])
1136 				continue;
1137 
1138 			if (j > 0)
1139 				seq_puts(s, ",");
1140 
1141 			seq_printf(s, " sdma%2d",
1142 				   rht_node->map[i]->sde[j]->this_idx);
1143 		}
1144 		seq_puts(s, " ]");
1145 	}
1146 
1147 	seq_puts(s, "\n");
1148 }
1149 
1150 /*
1151  * Free the indicated map struct
1152  */
1153 static void sdma_map_free(struct sdma_vl_map *m)
1154 {
1155 	int i;
1156 
1157 	for (i = 0; m && i < m->actual_vls; i++)
1158 		kfree(m->map[i]);
1159 	kfree(m);
1160 }
1161 
1162 /*
1163  * Handle RCU callback
1164  */
1165 static void sdma_map_rcu_callback(struct rcu_head *list)
1166 {
1167 	struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);
1168 
1169 	sdma_map_free(m);
1170 }
1171 
1172 /**
1173  * sdma_map_init - called when # vls change
1174  * @dd: hfi1_devdata
1175  * @port: port number
1176  * @num_vls: number of vls
1177  * @vl_engines: per vl engine mapping (optional)
1178  *
1179  * This routine changes the mapping based on the number of vls.
1180  *
1181  * vl_engines is used to specify a non-uniform vl/engine loading. NULL
1182  * implies auto computing the loading and giving each VL a uniform
1183  * distribution of engines per VL.
1184  *
1185  * The auto algorithm computes the sde_per_vl and the number of extra
1186  * engines.  Any extra engines are added from the last VL on down.
1187  *
1188  * rcu locking is used here to control access to the mapping fields.
1189  *
1190  * If either num_vls or num_sdma is not a power of 2, the array sizes
1191  * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
1192  * up to the next highest power of 2 and the first entry is reused
1193  * in a round robin fashion.
1194  *
1195  * If an error occurs the map change is not done and the mapping is
1196  * not changed.
1197  *
1198  */
1199 int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
1200 {
1201 	int i, j;
1202 	int extra, sde_per_vl;
1203 	int engine = 0;
1204 	u8 lvl_engines[OPA_MAX_VLS];
1205 	struct sdma_vl_map *oldmap, *newmap;
1206 
1207 	if (!(dd->flags & HFI1_HAS_SEND_DMA))
1208 		return 0;
1209 
1210 	if (!vl_engines) {
1211 		/* truncate divide */
1212 		sde_per_vl = dd->num_sdma / num_vls;
1213 		/* extras */
1214 		extra = dd->num_sdma % num_vls;
1215 		vl_engines = lvl_engines;
1216 		/* add extras from last vl down */
1217 		for (i = num_vls - 1; i >= 0; i--, extra--)
1218 			vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
1219 	}
1220 	/* build new map */
1221 	newmap = kzalloc(
1222 		sizeof(struct sdma_vl_map) +
1223 			roundup_pow_of_two(num_vls) *
1224 			sizeof(struct sdma_map_elem *),
1225 		GFP_KERNEL);
1226 	if (!newmap)
1227 		goto bail;
1228 	newmap->actual_vls = num_vls;
1229 	newmap->vls = roundup_pow_of_two(num_vls);
1230 	newmap->mask = (1 << ilog2(newmap->vls)) - 1;
1231 	/* initialize back-map */
1232 	for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++)
1233 		newmap->engine_to_vl[i] = -1;
1234 	for (i = 0; i < newmap->vls; i++) {
1235 		/* save for wrap around */
1236 		int first_engine = engine;
1237 
1238 		if (i < newmap->actual_vls) {
1239 			int sz = roundup_pow_of_two(vl_engines[i]);
1240 
1241 			/* only allocate once */
1242 			newmap->map[i] = kzalloc(
1243 				sizeof(struct sdma_map_elem) +
1244 					sz * sizeof(struct sdma_engine *),
1245 				GFP_KERNEL);
1246 			if (!newmap->map[i])
1247 				goto bail;
1248 			newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
1249 			/* assign engines */
1250 			for (j = 0; j < sz; j++) {
1251 				newmap->map[i]->sde[j] =
1252 					&dd->per_sdma[engine];
1253 				if (++engine >= first_engine + vl_engines[i])
1254 					/* wrap back to first engine */
1255 					engine = first_engine;
1256 			}
1257 			/* assign back-map */
1258 			for (j = 0; j < vl_engines[i]; j++)
1259 				newmap->engine_to_vl[first_engine + j] = i;
1260 		} else {
1261 			/* just re-use entry without allocating */
1262 			newmap->map[i] = newmap->map[i % num_vls];
1263 		}
1264 		engine = first_engine + vl_engines[i];
1265 	}
1266 	/* newmap in hand, save old map */
1267 	spin_lock_irq(&dd->sde_map_lock);
1268 	oldmap = rcu_dereference_protected(dd->sdma_map,
1269 					   lockdep_is_held(&dd->sde_map_lock));
1270 
1271 	/* publish newmap */
1272 	rcu_assign_pointer(dd->sdma_map, newmap);
1273 
1274 	spin_unlock_irq(&dd->sde_map_lock);
1275 	/* success, free any old map after grace period */
1276 	if (oldmap)
1277 		call_rcu(&oldmap->list, sdma_map_rcu_callback);
1278 	return 0;
1279 bail:
1280 	/* free any partial allocation */
1281 	sdma_map_free(newmap);
1282 	return -ENOMEM;
1283 }
1284 
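/*
 * Worked example of the auto distribution above (illustrative values):
 * with dd->num_sdma = 16 and num_vls = 5, sde_per_vl = 3 and extra = 1,
 * handed out from the last VL down:
 *
 *	vl_engines[] = { 3, 3, 3, 3, 4 }
 *
 * Each per-VL element array is then sized up to a power of 2 (4 here),
 * and the assignment loop wraps back to the VL's first engine so that
 * 'selector & mask' never hits an unassigned slot.
 */
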
1285 /**
1286  * sdma_clean() - clean up allocated memory
1287  * @dd:          struct hfi1_devdata
1288  * @num_engines: num sdma engines
1289  *
1290  * This routine can be called regardless of the success of
1291  * sdma_init()
1292  */
1293 void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
1294 {
1295 	size_t i;
1296 	struct sdma_engine *sde;
1297 
1298 	if (dd->sdma_pad_dma) {
1299 		dma_free_coherent(&dd->pcidev->dev, 4,
1300 				  (void *)dd->sdma_pad_dma,
1301 				  dd->sdma_pad_phys);
1302 		dd->sdma_pad_dma = NULL;
1303 		dd->sdma_pad_phys = 0;
1304 	}
1305 	if (dd->sdma_heads_dma) {
1306 		dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
1307 				  (void *)dd->sdma_heads_dma,
1308 				  dd->sdma_heads_phys);
1309 		dd->sdma_heads_dma = NULL;
1310 		dd->sdma_heads_phys = 0;
1311 	}
1312 	for (i = 0; dd->per_sdma && i < num_engines; ++i) {
1313 		sde = &dd->per_sdma[i];
1314 
1315 		sde->head_dma = NULL;
1316 		sde->head_phys = 0;
1317 
1318 		if (sde->descq) {
1319 			dma_free_coherent(
1320 				&dd->pcidev->dev,
1321 				sde->descq_cnt * sizeof(u64[2]),
1322 				sde->descq,
1323 				sde->descq_phys
1324 			);
1325 			sde->descq = NULL;
1326 			sde->descq_phys = 0;
1327 		}
1328 		kvfree(sde->tx_ring);
1329 		sde->tx_ring = NULL;
1330 	}
1331 	spin_lock_irq(&dd->sde_map_lock);
1332 	sdma_map_free(rcu_access_pointer(dd->sdma_map));
1333 	RCU_INIT_POINTER(dd->sdma_map, NULL);
1334 	spin_unlock_irq(&dd->sde_map_lock);
1335 	synchronize_rcu();
1336 	kfree(dd->per_sdma);
1337 	dd->per_sdma = NULL;
1338 
1339 	if (dd->sdma_rht) {
1340 		rhashtable_free_and_destroy(dd->sdma_rht, sdma_rht_free, NULL);
1341 		kfree(dd->sdma_rht);
1342 		dd->sdma_rht = NULL;
1343 	}
1344 }
1345 
1346 /**
1347  * sdma_init() - called when device probed
1348  * @dd: hfi1_devdata
1349  * @port: port number (currently only zero)
1350  *
1351  * Initializes each sde and its csrs.
1352  * Interrupts are not required to be enabled.
1353  *
1354  * Returns:
1355  * 0 - success, -errno on failure
1356  */
1357 int sdma_init(struct hfi1_devdata *dd, u8 port)
1358 {
1359 	unsigned this_idx;
1360 	struct sdma_engine *sde;
1361 	struct rhashtable *tmp_sdma_rht;
1362 	u16 descq_cnt;
1363 	void *curr_head;
1364 	struct hfi1_pportdata *ppd = dd->pport + port;
1365 	u32 per_sdma_credits;
1366 	uint idle_cnt = sdma_idle_cnt;
1367 	size_t num_engines = chip_sdma_engines(dd);
1368 	int ret = -ENOMEM;
1369 
1370 	if (!HFI1_CAP_IS_KSET(SDMA)) {
1371 		HFI1_CAP_CLEAR(SDMA_AHG);
1372 		return 0;
1373 	}
1374 	if (mod_num_sdma &&
1375 	    /* can't exceed chip support */
1376 	    mod_num_sdma <= chip_sdma_engines(dd) &&
1377 	    /* count must be >= vls */
1378 	    mod_num_sdma >= num_vls)
1379 		num_engines = mod_num_sdma;
1380 
1381 	dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
1382 	dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", chip_sdma_engines(dd));
1383 	dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
1384 		    chip_sdma_mem_size(dd));
1385 
1386 	per_sdma_credits =
1387 		chip_sdma_mem_size(dd) / (num_engines * SDMA_BLOCK_SIZE);
1388 
1389 	/* set up freeze waitqueue */
1390 	init_waitqueue_head(&dd->sdma_unfreeze_wq);
1391 	atomic_set(&dd->sdma_unfreeze_count, 0);
1392 
1393 	descq_cnt = sdma_get_descq_cnt();
1394 	dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
1395 		    num_engines, descq_cnt);
1396 
1397 	/* alloc memory for array of send engines */
1398 	dd->per_sdma = kcalloc_node(num_engines, sizeof(*dd->per_sdma),
1399 				    GFP_KERNEL, dd->node);
1400 	if (!dd->per_sdma)
1401 		return ret;
1402 
1403 	idle_cnt = ns_to_cclock(dd, idle_cnt);
1404 	if (idle_cnt)
1405 		dd->default_desc1 =
1406 			SDMA_DESC1_HEAD_TO_HOST_FLAG;
1407 	else
1408 		dd->default_desc1 =
1409 			SDMA_DESC1_INT_REQ_FLAG;
1410 
1411 	if (!sdma_desct_intr)
1412 		sdma_desct_intr = SDMA_DESC_INTR;
1413 
1414 	/* Allocate memory for SendDMA descriptor FIFOs */
1415 	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1416 		sde = &dd->per_sdma[this_idx];
1417 		sde->dd = dd;
1418 		sde->ppd = ppd;
1419 		sde->this_idx = this_idx;
1420 		sde->descq_cnt = descq_cnt;
1421 		sde->desc_avail = sdma_descq_freecnt(sde);
1422 		sde->sdma_shift = ilog2(descq_cnt);
1423 		sde->sdma_mask = (1 << sde->sdma_shift) - 1;
1424 
1425 		/* Create a mask specifically for each interrupt source */
1426 		sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES +
1427 					   this_idx);
1428 		sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES +
1429 						this_idx);
1430 		sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES +
1431 					    this_idx);
1432 		/* Create a combined mask to cover all 3 interrupt sources */
1433 		sde->imask = sde->int_mask | sde->progress_mask |
1434 			     sde->idle_mask;
1435 
1436 		spin_lock_init(&sde->tail_lock);
1437 		seqlock_init(&sde->head_lock);
1438 		spin_lock_init(&sde->senddmactrl_lock);
1439 		spin_lock_init(&sde->flushlist_lock);
1440 		seqlock_init(&sde->waitlock);
1441 		/* ensure there is always a zero bit */
1442 		sde->ahg_bits = 0xfffffffe00000000ULL;
1443 
1444 		sdma_set_state(sde, sdma_state_s00_hw_down);
1445 
1446 		/* set up reference counting */
1447 		kref_init(&sde->state.kref);
1448 		init_completion(&sde->state.comp);
1449 
1450 		INIT_LIST_HEAD(&sde->flushlist);
1451 		INIT_LIST_HEAD(&sde->dmawait);
1452 
1453 		sde->tail_csr =
1454 			get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
1455 
1456 		tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
1457 			     (unsigned long)sde);
1458 
1459 		tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
1460 			     (unsigned long)sde);
1461 		INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
1462 		INIT_WORK(&sde->flush_worker, sdma_field_flush);
1463 
1464 		sde->progress_check_head = 0;
1465 
1466 		timer_setup(&sde->err_progress_check_timer,
1467 			    sdma_err_progress_check, 0);
1468 
1469 		sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
1470 						descq_cnt * sizeof(u64[2]),
1471 						&sde->descq_phys, GFP_KERNEL);
1472 		if (!sde->descq)
1473 			goto bail;
1474 		sde->tx_ring =
1475 			kvzalloc_node(array_size(descq_cnt,
1476 						 sizeof(struct sdma_txreq *)),
1477 				      GFP_KERNEL, dd->node);
1478 		if (!sde->tx_ring)
1479 			goto bail;
1480 	}
1481 
1482 	dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
1483 	/* Allocate memory for DMA of head registers to memory */
1484 	dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
1485 						dd->sdma_heads_size,
1486 						&dd->sdma_heads_phys,
1487 						GFP_KERNEL);
1488 	if (!dd->sdma_heads_dma) {
1489 		dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
1490 		goto bail;
1491 	}
1492 
1493 	/* Allocate memory for pad */
1494 	dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
1495 					      &dd->sdma_pad_phys, GFP_KERNEL);
1496 	if (!dd->sdma_pad_dma) {
1497 		dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
1498 		goto bail;
1499 	}
1500 
1501 	/* assign each engine to a different cacheline and init registers */
1502 	curr_head = (void *)dd->sdma_heads_dma;
1503 	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1504 		unsigned long phys_offset;
1505 
1506 		sde = &dd->per_sdma[this_idx];
1507 
1508 		sde->head_dma = curr_head;
1509 		curr_head += L1_CACHE_BYTES;
1510 		phys_offset = (unsigned long)sde->head_dma -
1511 			      (unsigned long)dd->sdma_heads_dma;
1512 		sde->head_phys = dd->sdma_heads_phys + phys_offset;
1513 		init_sdma_regs(sde, per_sdma_credits, idle_cnt);
1514 	}
1515 	dd->flags |= HFI1_HAS_SEND_DMA;
1516 	dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
1517 	dd->num_sdma = num_engines;
1518 	ret = sdma_map_init(dd, port, ppd->vls_operational, NULL);
1519 	if (ret < 0)
1520 		goto bail;
1521 
1522 	tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL);
1523 	if (!tmp_sdma_rht) {
1524 		ret = -ENOMEM;
1525 		goto bail;
1526 	}
1527 
1528 	ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
1529 	if (ret < 0)
1530 		goto bail;
1531 	dd->sdma_rht = tmp_sdma_rht;
1532 
1533 	dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
1534 	return 0;
1535 
1536 bail:
1537 	sdma_clean(dd, num_engines);
1538 	return ret;
1539 }
1540 
1541 /**
1542  * sdma_all_running() - called when the link goes up
1543  * @dd: hfi1_devdata
1544  *
1545  * This routine moves all engines to the running state.
1546  */
1547 void sdma_all_running(struct hfi1_devdata *dd)
1548 {
1549 	struct sdma_engine *sde;
1550 	unsigned int i;
1551 
1552 	/* move all engines to running */
1553 	for (i = 0; i < dd->num_sdma; ++i) {
1554 		sde = &dd->per_sdma[i];
1555 		sdma_process_event(sde, sdma_event_e30_go_running);
1556 	}
1557 }
1558 
1559 /**
1560  * sdma_all_idle() - called when the link goes down
1561  * @dd: hfi1_devdata
1562  *
1563  * This routine moves all engines to the idle state.
1564  */
1565 void sdma_all_idle(struct hfi1_devdata *dd)
1566 {
1567 	struct sdma_engine *sde;
1568 	unsigned int i;
1569 
1570 	/* idle all engines */
1571 	for (i = 0; i < dd->num_sdma; ++i) {
1572 		sde = &dd->per_sdma[i];
1573 		sdma_process_event(sde, sdma_event_e70_go_idle);
1574 	}
1575 }
1576 
1577 /**
1578  * sdma_start() - called to kick off state processing for all engines
1579  * @dd: hfi1_devdata
1580  *
1581  * This routine is for kicking off the state processing for all required
1582  * sdma engines.  Interrupts need to be working at this point.
1583  *
1584  */
1585 void sdma_start(struct hfi1_devdata *dd)
1586 {
1587 	unsigned i;
1588 	struct sdma_engine *sde;
1589 
1590 	/* kick off the engines state processing */
1591 	for (i = 0; i < dd->num_sdma; ++i) {
1592 		sde = &dd->per_sdma[i];
1593 		sdma_process_event(sde, sdma_event_e10_go_hw_start);
1594 	}
1595 }
1596 
1597 /**
1598  * sdma_exit() - used when module is removed
1599  * @dd: hfi1_devdata
1600  */
1601 void sdma_exit(struct hfi1_devdata *dd)
1602 {
1603 	unsigned this_idx;
1604 	struct sdma_engine *sde;
1605 
1606 	for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
1607 			++this_idx) {
1608 		sde = &dd->per_sdma[this_idx];
1609 		if (!list_empty(&sde->dmawait))
1610 			dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
1611 				   sde->this_idx);
1612 		sdma_process_event(sde, sdma_event_e00_go_hw_down);
1613 
1614 		del_timer_sync(&sde->err_progress_check_timer);
1615 
1616 		/*
1617 		 * This waits for the state machine to exit so it is not
1618 		 * necessary to kill the sdma_sw_clean_up_task to make sure
1619 		 * it is not running.
1620 		 */
1621 		sdma_finalput(&sde->state);
1622 	}
1623 }
1624 
1625 /*
1626  * unmap the indicated descriptor
1627  */
1628 static inline void sdma_unmap_desc(
1629 	struct hfi1_devdata *dd,
1630 	struct sdma_desc *descp)
1631 {
1632 	switch (sdma_mapping_type(descp)) {
1633 	case SDMA_MAP_SINGLE:
1634 		dma_unmap_single(
1635 			&dd->pcidev->dev,
1636 			sdma_mapping_addr(descp),
1637 			sdma_mapping_len(descp),
1638 			DMA_TO_DEVICE);
1639 		break;
1640 	case SDMA_MAP_PAGE:
1641 		dma_unmap_page(
1642 			&dd->pcidev->dev,
1643 			sdma_mapping_addr(descp),
1644 			sdma_mapping_len(descp),
1645 			DMA_TO_DEVICE);
1646 		break;
1647 	}
1648 }
1649 
1650 /*
1651  * return the mode as indicated by the first
1652  * descriptor in the tx.
1653  */
1654 static inline u8 ahg_mode(struct sdma_txreq *tx)
1655 {
1656 	return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1657 		>> SDMA_DESC1_HEADER_MODE_SHIFT;
1658 }
1659 
1660 /**
1661  * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
1662  * @dd: hfi1_devdata for unmapping
1663  * @tx: tx request to clean
1664  *
1665  * This is used in the progress routine to clean the tx or
1666  * by the ULP to toss an in-process tx build.
1667  *
1668  * The code can be called multiple times without issue.
1669  *
1670  */
1671 void __sdma_txclean(
1672 	struct hfi1_devdata *dd,
1673 	struct sdma_txreq *tx)
1674 {
1675 	u16 i;
1676 
1677 	if (tx->num_desc) {
1678 		u8 skip = 0, mode = ahg_mode(tx);
1679 
1680 		/* unmap first */
1681 		sdma_unmap_desc(dd, &tx->descp[0]);
1682 		/* determine number of AHG descriptors to skip */
1683 		if (mode > SDMA_AHG_APPLY_UPDATE1)
1684 			skip = mode >> 1;
1685 		for (i = 1 + skip; i < tx->num_desc; i++)
1686 			sdma_unmap_desc(dd, &tx->descp[i]);
1687 		tx->num_desc = 0;
1688 	}
1689 	kfree(tx->coalesce_buf);
1690 	tx->coalesce_buf = NULL;
1691 	/* kmalloc'ed descp */
1692 	if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
1693 		tx->desc_limit = ARRAY_SIZE(tx->descs);
1694 		kfree(tx->descp);
1695 	}
1696 }
1697 
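/*
 * Illustrative note on the AHG skip above: descriptors carrying AHG
 * (automatic header generation) update words have no DMA mapping of
 * their own, so they must not be unmapped.  If mode were, say, 4
 * (assuming that exceeds SDMA_AHG_APPLY_UPDATE1), skip = 4 >> 1 = 2,
 * descp[1] and descp[2] are left alone, and unmapping resumes at
 * descp[3].
 */
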
1698 static inline u16 sdma_gethead(struct sdma_engine *sde)
1699 {
1700 	struct hfi1_devdata *dd = sde->dd;
1701 	int use_dmahead;
1702 	u16 hwhead;
1703 
1704 #ifdef CONFIG_SDMA_VERBOSITY
1705 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1706 		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1707 #endif
1708 
1709 retry:
1710 	use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
1711 					(dd->flags & HFI1_HAS_SDMA_TIMEOUT);
1712 	hwhead = use_dmahead ?
1713 		(u16)le64_to_cpu(*sde->head_dma) :
1714 		(u16)read_sde_csr(sde, SD(HEAD));
1715 
1716 	if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
1717 		u16 cnt;
1718 		u16 swtail;
1719 		u16 swhead;
1720 		int sane;
1721 
1722 		swhead = sde->descq_head & sde->sdma_mask;
1723 		/* this code is really bad for cache line trading */
1724 		swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
1725 		cnt = sde->descq_cnt;
1726 
1727 		if (swhead < swtail)
1728 			/* not wrapped */
1729 			sane = (hwhead >= swhead) & (hwhead <= swtail);
1730 		else if (swhead > swtail)
1731 			/* wrapped around */
1732 			sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
1733 				(hwhead <= swtail);
1734 		else
1735 			/* empty */
1736 			sane = (hwhead == swhead);
1737 
1738 		if (unlikely(!sane)) {
1739 			dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
1740 				   sde->this_idx,
1741 				   use_dmahead ? "dma" : "kreg",
1742 				   hwhead, swhead, swtail, cnt);
1743 			if (use_dmahead) {
1744 				/* try one more time, using csr */
1745 				use_dmahead = 0;
1746 				goto retry;
1747 			}
1748 			/* proceed as if no progress */
1749 			hwhead = swhead;
1750 		}
1751 	}
1752 	return hwhead;
1753 }
1754 
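/*
 * Worked example of the head sanity check above (illustrative numbers,
 * cnt = 2048): with swhead = 2000 and swtail = 16 the ring is wrapped,
 * so hwhead = 2040 (>= swhead, < cnt) or hwhead = 8 (<= swtail) is
 * sane, while hwhead = 1000 lands in the free region and takes the
 * "bad head" recovery path.
 */
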
1755 /*
1756  * This is called when there are send DMA descriptors that might be
1757  * available.
1758  *
1759  * This is called with head_lock held.
1760  */
1761 static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
1762 {
1763 	struct iowait *wait, *nw, *twait;
1764 	struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
1765 	uint i, n = 0, seq, tidx = 0;
1766 
1767 #ifdef CONFIG_SDMA_VERBOSITY
1768 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
1769 		   slashstrip(__FILE__), __LINE__, __func__);
1770 	dd_dev_err(sde->dd, "avail: %u\n", avail);
1771 #endif
1772 
1773 	do {
1774 		seq = read_seqbegin(&sde->waitlock);
1775 		if (!list_empty(&sde->dmawait)) {
1776 			/* at least one item */
1777 			write_seqlock(&sde->waitlock);
1778 			/* Harvest waiters wanting DMA descriptors */
1779 			list_for_each_entry_safe(
1780 					wait,
1781 					nw,
1782 					&sde->dmawait,
1783 					list) {
1784 				u32 num_desc;
1785 
1786 				if (!wait->wakeup)
1787 					continue;
1788 				if (n == ARRAY_SIZE(waits))
1789 					break;
1790 				iowait_init_priority(wait);
1791 				num_desc = iowait_get_all_desc(wait);
1792 				if (num_desc > avail)
1793 					break;
1794 				avail -= num_desc;
1795 				/* Find the top-priority wait member */
1796 				if (n) {
1797 					twait = waits[tidx];
1798 					tidx =
1799 					    iowait_priority_update_top(wait,
1800 								       twait,
1801 								       n,
1802 								       tidx);
1803 				}
1804 				list_del_init(&wait->list);
1805 				waits[n++] = wait;
1806 			}
1807 			write_sequnlock(&sde->waitlock);
1808 			break;
1809 		}
1810 	} while (read_seqretry(&sde->waitlock, seq));
1811 
1812 	/* Schedule the top-priority entry first */
1813 	if (n)
1814 		waits[tidx]->wakeup(waits[tidx], SDMA_AVAIL_REASON);
1815 
1816 	for (i = 0; i < n; i++)
1817 		if (i != tidx)
1818 			waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
1819 }
1820 
1821 /* head_lock must be held */
1822 static void sdma_make_progress(struct sdma_engine *sde, u64 status)
1823 {
1824 	struct sdma_txreq *txp = NULL;
1825 	int progress = 0;
1826 	u16 hwhead, swhead;
1827 	int idle_check_done = 0;
1828 
1829 	hwhead = sdma_gethead(sde);
1830 
1831 	/* The reason for some of the complexity of this code is that
1832 	 * not all descriptors have corresponding txps.  So, we have to
1833 	 * be able to skip over descs until we wander into the range of
1834 	 * the next txp on the list.
1835 	 */
1836 
1837 retry:
1838 	txp = get_txhead(sde);
1839 	swhead = sde->descq_head & sde->sdma_mask;
1840 	trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1841 	while (swhead != hwhead) {
1842 		/* advance head, wrap if needed */
1843 		swhead = ++sde->descq_head & sde->sdma_mask;
1844 
1845 		/* if now past this txp's descs, do the callback */
1846 		if (txp && txp->next_descq_idx == swhead) {
1847 			/* remove from list */
1848 			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
1849 			complete_tx(sde, txp, SDMA_TXREQ_S_OK);
1850 			/* see if there is another txp */
1851 			txp = get_txhead(sde);
1852 		}
1853 		trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1854 		progress++;
1855 	}
1856 
1857 	/*
1858 	 * The SDMA idle interrupt is not guaranteed to be ordered with respect
1859 	 * to updates to the dma_head location in host memory. The head
1860 	 * value read might not be fully up to date. If there are pending
1861 	 * descriptors and the SDMA idle interrupt fired then read from the
1862 	 * CSR SDMA head instead to get the latest value from the hardware.
1863 	 * The hardware SDMA head should be read at most once in this invocation
1864 	 * of sdma_make_progress(..), which is ensured by the idle_check_done flag.
1865 	 */
1866 	if ((status & sde->idle_mask) && !idle_check_done) {
1867 		u16 swtail;
1868 
1869 		swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
1870 		if (swtail != hwhead) {
1871 			hwhead = (u16)read_sde_csr(sde, SD(HEAD));
1872 			idle_check_done = 1;
1873 			goto retry;
1874 		}
1875 	}
1876 
1877 	sde->last_status = status;
1878 	if (progress)
1879 		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
1880 }
1881 
1882 /**
1883  * sdma_engine_interrupt() - interrupt handler for engine
1884  * @sde: sdma engine
1885  * @status: sdma interrupt reason
1886  *
1887  * Status is a mask of the 3 possible interrupts for this engine.  It will
1888  * contain bits _only_ for this SDMA engine.  It will contain at least one
1889  * bit; it may contain more.
1890  */
1891 void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
1892 {
1893 	trace_hfi1_sdma_engine_interrupt(sde, status);
1894 	write_seqlock(&sde->head_lock);
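	/* re-arm the descriptor-count interrupt threshold */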
1895 	sdma_set_desc_cnt(sde, sdma_desct_intr);
1896 	if (status & sde->idle_mask)
1897 		sde->idle_int_cnt++;
1898 	else if (status & sde->progress_mask)
1899 		sde->progress_int_cnt++;
1900 	else if (status & sde->int_mask)
1901 		sde->sdma_int_cnt++;
1902 	sdma_make_progress(sde, status);
1903 	write_sequnlock(&sde->head_lock);
1904 }
1905 
1906 /**
1907  * sdma_engine_error() - error handler for engine
1908  * @sde: sdma engine
1909  * @status: sdma interrupt reason
1910  */
1911 void sdma_engine_error(struct sdma_engine *sde, u64 status)
1912 {
1913 	unsigned long flags;
1914 
1915 #ifdef CONFIG_SDMA_VERBOSITY
1916 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
1917 		   sde->this_idx,
1918 		   (unsigned long long)status,
1919 		   sdma_state_names[sde->state.current_state]);
1920 #endif
1921 	spin_lock_irqsave(&sde->tail_lock, flags);
1922 	write_seqlock(&sde->head_lock);
1923 	if (status & ALL_SDMA_ENG_HALT_ERRS)
1924 		__sdma_process_event(sde, sdma_event_e60_hw_halted);
1925 	if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
1926 		dd_dev_err(sde->dd,
1927 			   "SDMA (%u) engine error: 0x%llx state %s\n",
1928 			   sde->this_idx,
1929 			   (unsigned long long)status,
1930 			   sdma_state_names[sde->state.current_state]);
1931 		dump_sdma_state(sde);
1932 	}
1933 	write_sequnlock(&sde->head_lock);
1934 	spin_unlock_irqrestore(&sde->tail_lock, flags);
1935 }
1936 
1937 static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
1938 {
1939 	u64 set_senddmactrl = 0;
1940 	u64 clr_senddmactrl = 0;
1941 	unsigned long flags;
1942 
1943 #ifdef CONFIG_SDMA_VERBOSITY
1944 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
1945 		   sde->this_idx,
1946 		   (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
1947 		   (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
1948 		   (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
1949 		   (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
1950 #endif
1951 
1952 	if (op & SDMA_SENDCTRL_OP_ENABLE)
1953 		set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1954 	else
1955 		clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1956 
1957 	if (op & SDMA_SENDCTRL_OP_INTENABLE)
1958 		set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1959 	else
1960 		clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1961 
1962 	if (op & SDMA_SENDCTRL_OP_HALT)
1963 		set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1964 	else
1965 		clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1966 
1967 	spin_lock_irqsave(&sde->senddmactrl_lock, flags);
1968 
1969 	sde->p_senddmactrl |= set_senddmactrl;
1970 	sde->p_senddmactrl &= ~clr_senddmactrl;
1971 
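	/*
	 * CLEANUP is a one-shot: it is OR'd into this single write and
	 * deliberately not recorded in the p_senddmactrl shadow.
	 */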
1972 	if (op & SDMA_SENDCTRL_OP_CLEANUP)
1973 		write_sde_csr(sde, SD(CTRL),
1974 			      sde->p_senddmactrl |
1975 			      SD(CTRL_SDMA_CLEANUP_SMASK));
1976 	else
1977 		write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);
1978 
1979 	spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);
1980 
1981 #ifdef CONFIG_SDMA_VERBOSITY
1982 	sdma_dumpstate(sde);
1983 #endif
1984 }
1985 
1986 static void sdma_setlengen(struct sdma_engine *sde)
1987 {
1988 #ifdef CONFIG_SDMA_VERBOSITY
1989 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1990 		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1991 #endif
1992 
1993 	/*
1994 	 * Set SendDmaLenGen and clear-then-set the MSB of the generation
1995 	 * count to enable generation checking and load the internal
1996 	 * generation counter.
1997 	 */
1998 	write_sde_csr(sde, SD(LEN_GEN),
1999 		      (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT));
2000 	write_sde_csr(sde, SD(LEN_GEN),
2001 		      ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) |
2002 		      (4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
2003 }
2004 
2005 static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
2006 {
2007 	/* Commit writes to memory and advance the tail on the chip */
2008 	smp_wmb(); /* see get_txhead() */
2009 	writeq(tail, sde->tail_csr);
2010 }
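
/*
 * A sketch of the typical submit path, for orientation: under
 * tail_lock, submit_tx() fills ring slots and returns the new tail,
 * which the caller then publishes with sdma_update_tail(); see
 * sdma_send_txreq() below.
 */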
2011 
2012 /*
2013  * This is called when changing to state s10_hw_start_up_halt_wait as
2014  * a result of send buffer errors or send DMA descriptor errors.
2015  */
2016 static void sdma_hw_start_up(struct sdma_engine *sde)
2017 {
2018 	u64 reg;
2019 
2020 #ifdef CONFIG_SDMA_VERBOSITY
2021 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
2022 		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
2023 #endif
2024 
2025 	sdma_setlengen(sde);
2026 	sdma_update_tail(sde, 0); /* Set SendDmaTail */
2027 	*sde->head_dma = 0;
2028 
2029 	reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
2030 	      SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
2031 	write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
2032 }
2033 
2034 /*
2035  * set_sdma_integrity
2036  *
2037  * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
2038  */
2039 static void set_sdma_integrity(struct sdma_engine *sde)
2040 {
2041 	struct hfi1_devdata *dd = sde->dd;
2042 
2043 	write_sde_csr(sde, SD(CHECK_ENABLE),
2044 		      hfi1_pkt_base_sdma_integrity(dd));
2045 }
2046 
2047 static void init_sdma_regs(
2048 	struct sdma_engine *sde,
2049 	u32 credits,
2050 	uint idle_cnt)
2051 {
2052 	u8 opval, opmask;
2053 #ifdef CONFIG_SDMA_VERBOSITY
2054 	struct hfi1_devdata *dd = sde->dd;
2055 
2056 	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
2057 		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
2058 #endif
2059 
2060 	write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
2061 	sdma_setlengen(sde);
2062 	sdma_update_tail(sde, 0); /* Set SendDmaTail */
2063 	write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
2064 	write_sde_csr(sde, SD(DESC_CNT), 0);
2065 	write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
2066 	write_sde_csr(sde, SD(MEMORY),
2067 		      ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
2068 		      ((u64)(credits * sde->this_idx) <<
2069 		       SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
2070 	write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
2071 	set_sdma_integrity(sde);
2072 	opmask = OPCODE_CHECK_MASK_DISABLED;
2073 	opval = OPCODE_CHECK_VAL_DISABLED;
2074 	write_sde_csr(sde, SD(CHECK_OPCODE),
2075 		      (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
2076 		      (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
2077 }
2078 
2079 #ifdef CONFIG_SDMA_VERBOSITY
2080 
2081 #define sdma_dumpstate_helper0(reg) do { \
2082 		csr = read_csr(sde->dd, reg); \
2083 		dd_dev_err(sde->dd, "%36s     0x%016llx\n", #reg, csr); \
2084 	} while (0)
2085 
2086 #define sdma_dumpstate_helper(reg) do { \
2087 		csr = read_sde_csr(sde, reg); \
2088 		dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
2089 			#reg, sde->this_idx, csr); \
2090 	} while (0)
2091 
2092 #define sdma_dumpstate_helper2(reg) do { \
2093 		csr = read_csr(sde->dd, reg + (8 * i)); \
2094 		dd_dev_err(sde->dd, "%33s_%02u     0x%016llx\n", \
2095 				#reg, i, csr); \
2096 	} while (0)
2097 
2098 void sdma_dumpstate(struct sdma_engine *sde)
2099 {
2100 	u64 csr;
2101 	unsigned i;
2102 
2103 	sdma_dumpstate_helper(SD(CTRL));
2104 	sdma_dumpstate_helper(SD(STATUS));
2105 	sdma_dumpstate_helper0(SD(ERR_STATUS));
2106 	sdma_dumpstate_helper0(SD(ERR_MASK));
2107 	sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
2108 	sdma_dumpstate_helper(SD(ENG_ERR_MASK));
2109 
2110 	for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
2111 		sdma_dumpstate_helper2(CCE_INT_STATUS);
2112 		sdma_dumpstate_helper2(CCE_INT_MASK);
2113 		sdma_dumpstate_helper2(CCE_INT_BLOCKED);
2114 	}
2115 
2116 	sdma_dumpstate_helper(SD(TAIL));
2117 	sdma_dumpstate_helper(SD(HEAD));
2118 	sdma_dumpstate_helper(SD(PRIORITY_THLD));
2119 	sdma_dumpstate_helper(SD(IDLE_CNT));
2120 	sdma_dumpstate_helper(SD(RELOAD_CNT));
2121 	sdma_dumpstate_helper(SD(DESC_CNT));
2122 	sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
2123 	sdma_dumpstate_helper(SD(MEMORY));
2124 	sdma_dumpstate_helper0(SD(ENGINES));
2125 	sdma_dumpstate_helper0(SD(MEM_SIZE));
2126 	/* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS);  */
2127 	sdma_dumpstate_helper(SD(BASE_ADDR));
2128 	sdma_dumpstate_helper(SD(LEN_GEN));
2129 	sdma_dumpstate_helper(SD(HEAD_ADDR));
2130 	sdma_dumpstate_helper(SD(CHECK_ENABLE));
2131 	sdma_dumpstate_helper(SD(CHECK_VL));
2132 	sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
2133 	sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
2134 	sdma_dumpstate_helper(SD(CHECK_SLID));
2135 	sdma_dumpstate_helper(SD(CHECK_OPCODE));
2136 }
2137 #endif
2138 
2139 static void dump_sdma_state(struct sdma_engine *sde)
2140 {
2141 	struct hw_sdma_desc *descqp;
2142 	u64 desc[2];
2143 	u64 addr;
2144 	u8 gen;
2145 	u16 len;
2146 	u16 head, tail, cnt;
2147 
2148 	head = sde->descq_head & sde->sdma_mask;
2149 	tail = sde->descq_tail & sde->sdma_mask;
2150 	cnt = sdma_descq_freecnt(sde);
2151 
2152 	dd_dev_err(sde->dd,
2153 		   "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
2154 		   sde->this_idx, head, tail, cnt,
2155 		   !list_empty(&sde->flushlist));
2156 
2157 	/* print info for each entry in the descriptor queue */
2158 	while (head != tail) {
2159 		char flags[6] = { 'x', 'x', 'x', 'x', 0 };
2160 
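		/*
		 * Flag legend: I = INT_REQ, H = HEAD_TO_HOST,
		 * F = FIRST_DESC, L = LAST_DESC; '-' means not set.
		 */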
2161 		descqp = &sde->descq[head];
2162 		desc[0] = le64_to_cpu(descqp->qw[0]);
2163 		desc[1] = le64_to_cpu(descqp->qw[1]);
2164 		flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
2165 		flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
2166 				'H' : '-';
2167 		flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
2168 		flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
2169 		addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
2170 			& SDMA_DESC0_PHY_ADDR_MASK;
2171 		gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
2172 			& SDMA_DESC1_GENERATION_MASK;
2173 		len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
2174 			& SDMA_DESC0_BYTE_COUNT_MASK;
2175 		dd_dev_err(sde->dd,
2176 			   "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
2177 			   head, flags, addr, gen, len);
2178 		dd_dev_err(sde->dd,
2179 			   "\tdesc0:0x%016llx desc1 0x%016llx\n",
2180 			   desc[0], desc[1]);
2181 		if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
2182 			dd_dev_err(sde->dd,
2183 				   "\taidx: %u amode: %u alen: %u\n",
2184 				   (u8)((desc[1] &
2185 					 SDMA_DESC1_HEADER_INDEX_SMASK) >>
2186 					SDMA_DESC1_HEADER_INDEX_SHIFT),
2187 				   (u8)((desc[1] &
2188 					 SDMA_DESC1_HEADER_MODE_SMASK) >>
2189 					SDMA_DESC1_HEADER_MODE_SHIFT),
2190 				   (u8)((desc[1] &
2191 					 SDMA_DESC1_HEADER_DWS_SMASK) >>
2192 					SDMA_DESC1_HEADER_DWS_SHIFT));
2193 		head++;
2194 		head &= sde->sdma_mask;
2195 	}
2196 }
2197 
2198 #define SDE_FMT \
2199 	"SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
2200 /**
2201  * sdma_seqfile_dump_sde() - debugfs dump of sde
2202  * @s: seq file
2203  * @sde: send dma engine to dump
2204  *
2205  * This routine dumps the sde to the indicated seq file.
2206  */
2207 void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
2208 {
2209 	u16 head, tail;
2210 	struct hw_sdma_desc *descqp;
2211 	u64 desc[2];
2212 	u64 addr;
2213 	u8 gen;
2214 	u16 len;
2215 
2216 	head = sde->descq_head & sde->sdma_mask;
2217 	tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
2218 	seq_printf(s, SDE_FMT, sde->this_idx,
2219 		   sde->cpu,
2220 		   sdma_state_name(sde->state.current_state),
2221 		   (unsigned long long)read_sde_csr(sde, SD(CTRL)),
2222 		   (unsigned long long)read_sde_csr(sde, SD(STATUS)),
2223 		   (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)),
2224 		   (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail,
2225 		   (unsigned long long)read_sde_csr(sde, SD(HEAD)), head,
2226 		   (unsigned long long)le64_to_cpu(*sde->head_dma),
2227 		   (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
2228 		   (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
2229 		   (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
2230 		   (unsigned long long)sde->last_status,
2231 		   (unsigned long long)sde->ahg_bits,
2232 		   sde->tx_tail,
2233 		   sde->tx_head,
2234 		   sde->descq_tail,
2235 		   sde->descq_head,
2236 		   !list_empty(&sde->flushlist),
2237 		   sde->descq_full_count,
2238 		   (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
2239 
2240 	/* print info for each entry in the descriptor queue */
2241 	while (head != tail) {
2242 		char flags[6] = { 'x', 'x', 'x', 'x', 0 };
2243 
2244 		descqp = &sde->descq[head];
2245 		desc[0] = le64_to_cpu(descqp->qw[0]);
2246 		desc[1] = le64_to_cpu(descqp->qw[1]);
2247 		flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
2248 		flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
2249 				'H' : '-';
2250 		flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
2251 		flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
2252 		addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
2253 			& SDMA_DESC0_PHY_ADDR_MASK;
2254 		gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
2255 			& SDMA_DESC1_GENERATION_MASK;
2256 		len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
2257 			& SDMA_DESC0_BYTE_COUNT_MASK;
2258 		seq_printf(s,
2259 			   "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
2260 			   head, flags, addr, gen, len);
2261 		if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
2262 			seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
2263 				   (u8)((desc[1] &
2264 					 SDMA_DESC1_HEADER_INDEX_SMASK) >>
2265 					SDMA_DESC1_HEADER_INDEX_SHIFT),
2266 				   (u8)((desc[1] &
2267 					 SDMA_DESC1_HEADER_MODE_SMASK) >>
2268 					SDMA_DESC1_HEADER_MODE_SHIFT));
2269 		head = (head + 1) & sde->sdma_mask;
2270 	}
2271 }
2272 
2273 /*
2274  * add the generation number into
2275  * the qw1 and return
2276  */
2277 static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
2278 {
2279 	u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;
2280 
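	/*
	 * The 2-bit generation effectively counts how many times the
	 * tail has wrapped the ring (descq_tail >> sdma_shift); the
	 * hardware checks it to reject stale descriptors (see
	 * sdma_setlengen()).
	 */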
2281 	qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
2282 	qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
2283 			<< SDMA_DESC1_GENERATION_SHIFT;
2284 	return qw1;
2285 }
2286 
2287 /*
2288  * This routine submits the indicated tx
2289  *
2290  * Space has already been guaranteed and
2291  * tail side of ring is locked.
2292  *
2293  * The hardware tail update is done
2294  * in the caller and that is facilitated
2295  * by returning the new tail.
2296  *
2297  * There is special case logic for ahg
2298  * to not add the generation number for
2299  * up to 2 descriptors that follow the
2300  * first descriptor.
2301  *
2302  */
2303 static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
2304 {
2305 	int i;
2306 	u16 tail;
2307 	struct sdma_desc *descp = tx->descp;
2308 	u8 skip = 0, mode = ahg_mode(tx);
2309 
2310 	tail = sde->descq_tail & sde->sdma_mask;
2311 	sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
2312 	sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
2313 	trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
2314 				   tail, &sde->descq[tail]);
2315 	tail = ++sde->descq_tail & sde->sdma_mask;
2316 	descp++;
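	/*
	 * For AHG modes above APPLY_UPDATE1, the next mode >> 1
	 * descriptors carry header-update words rather than normal
	 * generation-stamped descriptors, so skip re-stamping them.
	 */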
2317 	if (mode > SDMA_AHG_APPLY_UPDATE1)
2318 		skip = mode >> 1;
2319 	for (i = 1; i < tx->num_desc; i++, descp++) {
2320 		u64 qw1;
2321 
2322 		sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
2323 		if (skip) {
2324 			/* edits don't have generation */
2325 			qw1 = descp->qw[1];
2326 			skip--;
2327 		} else {
2328 			/* replace generation with real one for non-edits */
2329 			qw1 = add_gen(sde, descp->qw[1]);
2330 		}
2331 		sde->descq[tail].qw[1] = cpu_to_le64(qw1);
2332 		trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
2333 					   tail, &sde->descq[tail]);
2334 		tail = ++sde->descq_tail & sde->sdma_mask;
2335 	}
2336 	tx->next_descq_idx = tail;
2337 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2338 	tx->sn = sde->tail_sn++;
2339 	trace_hfi1_sdma_in_sn(sde, tx->sn);
2340 	WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
2341 #endif
2342 	sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
2343 	sde->desc_avail -= tx->num_desc;
2344 	return tail;
2345 }
2346 
2347 /*
2348  * Check for progress
2349  */
2350 static int sdma_check_progress(
2351 	struct sdma_engine *sde,
2352 	struct iowait_work *wait,
2353 	struct sdma_txreq *tx,
2354 	bool pkts_sent)
2355 {
2356 	int ret;
2357 
2358 	sde->desc_avail = sdma_descq_freecnt(sde);
2359 	if (tx->num_desc <= sde->desc_avail)
2360 		return -EAGAIN;
2361 	/* pulse the head_lock */
2362 	if (wait && iowait_ioww_to_iow(wait)->sleep) {
2363 		unsigned seq;
2364 
2365 		seq = raw_seqcount_begin(
2366 			(const seqcount_t *)&sde->head_lock.seqcount);
2367 		ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent);
2368 		if (ret == -EAGAIN)
2369 			sde->desc_avail = sdma_descq_freecnt(sde);
2370 	} else {
2371 		ret = -EBUSY;
2372 	}
2373 	return ret;
2374 }
2375 
2376 /**
2377  * sdma_send_txreq() - submit a tx req to ring
2378  * @sde: sdma engine to use
2379  * @wait: SE wait structure to use when full (may be NULL)
2380  * @tx: sdma_txreq to submit
2381  * @pkts_sent: has any packet been sent yet?
2382  *
2383  * The call submits the tx into the ring.  If an iowait structure is
2384  * non-NULL, the packet will be queued to the list in wait.
2385  *
2386  * Return:
2387  * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
2388  * ring (wait == NULL)
2389  * -EIOCBQUEUED - tx queued to iowait, -ECOMM - bad sdma state
2390  */
2391 int sdma_send_txreq(struct sdma_engine *sde,
2392 		    struct iowait_work *wait,
2393 		    struct sdma_txreq *tx,
2394 		    bool pkts_sent)
2395 {
2396 	int ret = 0;
2397 	u16 tail;
2398 	unsigned long flags;
2399 
2400 	/* user should have supplied entire packet */
2401 	if (unlikely(tx->tlen))
2402 		return -EINVAL;
2403 	tx->wait = iowait_ioww_to_iow(wait);
2404 	spin_lock_irqsave(&sde->tail_lock, flags);
2405 retry:
2406 	if (unlikely(!__sdma_running(sde)))
2407 		goto unlock_noconn;
2408 	if (unlikely(tx->num_desc > sde->desc_avail))
2409 		goto nodesc;
2410 	tail = submit_tx(sde, tx);
2411 	if (wait)
2412 		iowait_sdma_inc(iowait_ioww_to_iow(wait));
2413 	sdma_update_tail(sde, tail);
2414 unlock:
2415 	spin_unlock_irqrestore(&sde->tail_lock, flags);
2416 	return ret;
2417 unlock_noconn:
2418 	if (wait)
2419 		iowait_sdma_inc(iowait_ioww_to_iow(wait));
2420 	tx->next_descq_idx = 0;
2421 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2422 	tx->sn = sde->tail_sn++;
2423 	trace_hfi1_sdma_in_sn(sde, tx->sn);
2424 #endif
2425 	spin_lock(&sde->flushlist_lock);
2426 	list_add_tail(&tx->list, &sde->flushlist);
2427 	spin_unlock(&sde->flushlist_lock);
2428 	iowait_inc_wait_count(wait, tx->num_desc);
2429 	queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
2430 	ret = -ECOMM;
2431 	goto unlock;
2432 nodesc:
2433 	ret = sdma_check_progress(sde, wait, tx, pkts_sent);
2434 	if (ret == -EAGAIN) {
2435 		ret = 0;
2436 		goto retry;
2437 	}
2438 	sde->descq_full_count++;
2439 	goto unlock;
2440 }
2441 
2442 /**
2443  * sdma_send_txlist() - submit a list of tx req to ring
2444  * @sde: sdma engine to use
2445  * @wait: SE wait structure to use when full (may be NULL)
2446  * @tx_list: list of sdma_txreqs to submit
2447  * @count_out: pointer to a u16 which, after return, will contain the total number of
2448  *         sdma_txreqs removed from the tx_list. This will include sdma_txreqs
2449  *         whose SDMA descriptors are submitted to the ring and the sdma_txreqs
2450  *         which are added to SDMA engine flush list if the SDMA engine state is
2451  *         not running.
2452  *
2453  * The call submits the list into the ring.
2454  *
2455  * If the iowait structure is non-NULL and not equal to the iowait list,
2456  * the unprocessed part of the list will be appended to the list in wait.
2457  *
2458  * In all cases, the tx_list will be updated so the head of the tx_list is
2459  * the list of descriptors that have yet to be transmitted.
2460  *
2461  * The intent of this call is to provide a more efficient
2462  * way of submitting multiple packets to SDMA while holding the
2463  * tail-side lock.
2464  *
2465  * Return:
2466  * 0 - Success,
2467  * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
2468  * -EIOCBQUEUED - tx queued to iowait, -ECOMM - bad sdma state
2469  */
2470 int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait,
2471 		     struct list_head *tx_list, u16 *count_out)
2472 {
2473 	struct sdma_txreq *tx, *tx_next;
2474 	int ret = 0;
2475 	unsigned long flags;
2476 	u16 tail = INVALID_TAIL;
2477 	u32 submit_count = 0, flush_count = 0, total_count;
2478 
2479 	spin_lock_irqsave(&sde->tail_lock, flags);
2480 retry:
2481 	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2482 		tx->wait = iowait_ioww_to_iow(wait);
2483 		if (unlikely(!__sdma_running(sde)))
2484 			goto unlock_noconn;
2485 		if (unlikely(tx->num_desc > sde->desc_avail))
2486 			goto nodesc;
2487 		if (unlikely(tx->tlen)) {
2488 			ret = -EINVAL;
2489 			goto update_tail;
2490 		}
2491 		list_del_init(&tx->list);
2492 		tail = submit_tx(sde, tx);
2493 		submit_count++;
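		/*
		 * Tail CSR writes are MMIO, so batch them: only publish
		 * the tail once per SDMA_TAIL_UPDATE_THRESH-sized run of
		 * submissions; any remainder is flushed at update_tail.
		 */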
2494 		if (tail != INVALID_TAIL &&
2495 		    (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) {
2496 			sdma_update_tail(sde, tail);
2497 			tail = INVALID_TAIL;
2498 		}
2499 	}
2500 update_tail:
2501 	total_count = submit_count + flush_count;
2502 	if (wait) {
2503 		iowait_sdma_add(iowait_ioww_to_iow(wait), total_count);
2504 		iowait_starve_clear(submit_count > 0,
2505 				    iowait_ioww_to_iow(wait));
2506 	}
2507 	if (tail != INVALID_TAIL)
2508 		sdma_update_tail(sde, tail);
2509 	spin_unlock_irqrestore(&sde->tail_lock, flags);
2510 	*count_out = total_count;
2511 	return ret;
2512 unlock_noconn:
2513 	spin_lock(&sde->flushlist_lock);
2514 	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2515 		tx->wait = iowait_ioww_to_iow(wait);
2516 		list_del_init(&tx->list);
2517 		tx->next_descq_idx = 0;
2518 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2519 		tx->sn = sde->tail_sn++;
2520 		trace_hfi1_sdma_in_sn(sde, tx->sn);
2521 #endif
2522 		list_add_tail(&tx->list, &sde->flushlist);
2523 		flush_count++;
2524 		iowait_inc_wait_count(wait, tx->num_desc);
2525 	}
2526 	spin_unlock(&sde->flushlist_lock);
2527 	queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
2528 	ret = -ECOMM;
2529 	goto update_tail;
2530 nodesc:
2531 	ret = sdma_check_progress(sde, wait, tx, submit_count > 0);
2532 	if (ret == -EAGAIN) {
2533 		ret = 0;
2534 		goto retry;
2535 	}
2536 	sde->descq_full_count++;
2537 	goto update_tail;
2538 }
2539 
2540 static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
2541 {
2542 	unsigned long flags;
2543 
2544 	spin_lock_irqsave(&sde->tail_lock, flags);
2545 	write_seqlock(&sde->head_lock);
2546 
2547 	__sdma_process_event(sde, event);
2548 
2549 	if (sde->state.current_state == sdma_state_s99_running)
2550 		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
2551 
2552 	write_sequnlock(&sde->head_lock);
2553 	spin_unlock_irqrestore(&sde->tail_lock, flags);
2554 }
2555 
2556 static void __sdma_process_event(struct sdma_engine *sde,
2557 				 enum sdma_events event)
2558 {
2559 	struct sdma_state *ss = &sde->state;
2560 	int need_progress = 0;
2561 
2562 	/* CONFIG SDMA temporary */
2563 #ifdef CONFIG_SDMA_VERBOSITY
2564 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
2565 		   sdma_state_names[ss->current_state],
2566 		   sdma_event_names[event]);
2567 #endif
2568 
2569 	switch (ss->current_state) {
2570 	case sdma_state_s00_hw_down:
2571 		switch (event) {
2572 		case sdma_event_e00_go_hw_down:
2573 			break;
2574 		case sdma_event_e30_go_running:
2575 			/*
2576 			 * If down, but running requested (usually the result
2577 			 * of link up), then we need to start up.
2578 			 * This can happen when hw down is requested while
2579 			 * bringing the link up with traffic active on
2580 			 * 7220, e.g.
2581 			 */
2582 			ss->go_s99_running = 1;
2583 			/* fall through -- and start dma engine */
2584 		case sdma_event_e10_go_hw_start:
2585 			/* This reference means the state machine is started */
2586 			sdma_get(&sde->state);
2587 			sdma_set_state(sde,
2588 				       sdma_state_s10_hw_start_up_halt_wait);
2589 			break;
2590 		case sdma_event_e15_hw_halt_done:
2591 			break;
2592 		case sdma_event_e25_hw_clean_up_done:
2593 			break;
2594 		case sdma_event_e40_sw_cleaned:
2595 			sdma_sw_tear_down(sde);
2596 			break;
2597 		case sdma_event_e50_hw_cleaned:
2598 			break;
2599 		case sdma_event_e60_hw_halted:
2600 			break;
2601 		case sdma_event_e70_go_idle:
2602 			break;
2603 		case sdma_event_e80_hw_freeze:
2604 			break;
2605 		case sdma_event_e81_hw_frozen:
2606 			break;
2607 		case sdma_event_e82_hw_unfreeze:
2608 			break;
2609 		case sdma_event_e85_link_down:
2610 			break;
2611 		case sdma_event_e90_sw_halted:
2612 			break;
2613 		}
2614 		break;
2615 
2616 	case sdma_state_s10_hw_start_up_halt_wait:
2617 		switch (event) {
2618 		case sdma_event_e00_go_hw_down:
2619 			sdma_set_state(sde, sdma_state_s00_hw_down);
2620 			sdma_sw_tear_down(sde);
2621 			break;
2622 		case sdma_event_e10_go_hw_start:
2623 			break;
2624 		case sdma_event_e15_hw_halt_done:
2625 			sdma_set_state(sde,
2626 				       sdma_state_s15_hw_start_up_clean_wait);
2627 			sdma_start_hw_clean_up(sde);
2628 			break;
2629 		case sdma_event_e25_hw_clean_up_done:
2630 			break;
2631 		case sdma_event_e30_go_running:
2632 			ss->go_s99_running = 1;
2633 			break;
2634 		case sdma_event_e40_sw_cleaned:
2635 			break;
2636 		case sdma_event_e50_hw_cleaned:
2637 			break;
2638 		case sdma_event_e60_hw_halted:
2639 			schedule_work(&sde->err_halt_worker);
2640 			break;
2641 		case sdma_event_e70_go_idle:
2642 			ss->go_s99_running = 0;
2643 			break;
2644 		case sdma_event_e80_hw_freeze:
2645 			break;
2646 		case sdma_event_e81_hw_frozen:
2647 			break;
2648 		case sdma_event_e82_hw_unfreeze:
2649 			break;
2650 		case sdma_event_e85_link_down:
2651 			break;
2652 		case sdma_event_e90_sw_halted:
2653 			break;
2654 		}
2655 		break;
2656 
2657 	case sdma_state_s15_hw_start_up_clean_wait:
2658 		switch (event) {
2659 		case sdma_event_e00_go_hw_down:
2660 			sdma_set_state(sde, sdma_state_s00_hw_down);
2661 			sdma_sw_tear_down(sde);
2662 			break;
2663 		case sdma_event_e10_go_hw_start:
2664 			break;
2665 		case sdma_event_e15_hw_halt_done:
2666 			break;
2667 		case sdma_event_e25_hw_clean_up_done:
2668 			sdma_hw_start_up(sde);
2669 			sdma_set_state(sde, ss->go_s99_running ?
2670 				       sdma_state_s99_running :
2671 				       sdma_state_s20_idle);
2672 			break;
2673 		case sdma_event_e30_go_running:
2674 			ss->go_s99_running = 1;
2675 			break;
2676 		case sdma_event_e40_sw_cleaned:
2677 			break;
2678 		case sdma_event_e50_hw_cleaned:
2679 			break;
2680 		case sdma_event_e60_hw_halted:
2681 			break;
2682 		case sdma_event_e70_go_idle:
2683 			ss->go_s99_running = 0;
2684 			break;
2685 		case sdma_event_e80_hw_freeze:
2686 			break;
2687 		case sdma_event_e81_hw_frozen:
2688 			break;
2689 		case sdma_event_e82_hw_unfreeze:
2690 			break;
2691 		case sdma_event_e85_link_down:
2692 			break;
2693 		case sdma_event_e90_sw_halted:
2694 			break;
2695 		}
2696 		break;
2697 
2698 	case sdma_state_s20_idle:
2699 		switch (event) {
2700 		case sdma_event_e00_go_hw_down:
2701 			sdma_set_state(sde, sdma_state_s00_hw_down);
2702 			sdma_sw_tear_down(sde);
2703 			break;
2704 		case sdma_event_e10_go_hw_start:
2705 			break;
2706 		case sdma_event_e15_hw_halt_done:
2707 			break;
2708 		case sdma_event_e25_hw_clean_up_done:
2709 			break;
2710 		case sdma_event_e30_go_running:
2711 			sdma_set_state(sde, sdma_state_s99_running);
2712 			ss->go_s99_running = 1;
2713 			break;
2714 		case sdma_event_e40_sw_cleaned:
2715 			break;
2716 		case sdma_event_e50_hw_cleaned:
2717 			break;
2718 		case sdma_event_e60_hw_halted:
2719 			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
2720 			schedule_work(&sde->err_halt_worker);
2721 			break;
2722 		case sdma_event_e70_go_idle:
2723 			break;
2724 		case sdma_event_e85_link_down:
2725 			/* fall through */
2726 		case sdma_event_e80_hw_freeze:
2727 			sdma_set_state(sde, sdma_state_s80_hw_freeze);
2728 			atomic_dec(&sde->dd->sdma_unfreeze_count);
2729 			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2730 			break;
2731 		case sdma_event_e81_hw_frozen:
2732 			break;
2733 		case sdma_event_e82_hw_unfreeze:
2734 			break;
2735 		case sdma_event_e90_sw_halted:
2736 			break;
2737 		}
2738 		break;
2739 
2740 	case sdma_state_s30_sw_clean_up_wait:
2741 		switch (event) {
2742 		case sdma_event_e00_go_hw_down:
2743 			sdma_set_state(sde, sdma_state_s00_hw_down);
2744 			break;
2745 		case sdma_event_e10_go_hw_start:
2746 			break;
2747 		case sdma_event_e15_hw_halt_done:
2748 			break;
2749 		case sdma_event_e25_hw_clean_up_done:
2750 			break;
2751 		case sdma_event_e30_go_running:
2752 			ss->go_s99_running = 1;
2753 			break;
2754 		case sdma_event_e40_sw_cleaned:
2755 			sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
2756 			sdma_start_hw_clean_up(sde);
2757 			break;
2758 		case sdma_event_e50_hw_cleaned:
2759 			break;
2760 		case sdma_event_e60_hw_halted:
2761 			break;
2762 		case sdma_event_e70_go_idle:
2763 			ss->go_s99_running = 0;
2764 			break;
2765 		case sdma_event_e80_hw_freeze:
2766 			break;
2767 		case sdma_event_e81_hw_frozen:
2768 			break;
2769 		case sdma_event_e82_hw_unfreeze:
2770 			break;
2771 		case sdma_event_e85_link_down:
2772 			ss->go_s99_running = 0;
2773 			break;
2774 		case sdma_event_e90_sw_halted:
2775 			break;
2776 		}
2777 		break;
2778 
2779 	case sdma_state_s40_hw_clean_up_wait:
2780 		switch (event) {
2781 		case sdma_event_e00_go_hw_down:
2782 			sdma_set_state(sde, sdma_state_s00_hw_down);
2783 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2784 			break;
2785 		case sdma_event_e10_go_hw_start:
2786 			break;
2787 		case sdma_event_e15_hw_halt_done:
2788 			break;
2789 		case sdma_event_e25_hw_clean_up_done:
2790 			sdma_hw_start_up(sde);
2791 			sdma_set_state(sde, ss->go_s99_running ?
2792 				       sdma_state_s99_running :
2793 				       sdma_state_s20_idle);
2794 			break;
2795 		case sdma_event_e30_go_running:
2796 			ss->go_s99_running = 1;
2797 			break;
2798 		case sdma_event_e40_sw_cleaned:
2799 			break;
2800 		case sdma_event_e50_hw_cleaned:
2801 			break;
2802 		case sdma_event_e60_hw_halted:
2803 			break;
2804 		case sdma_event_e70_go_idle:
2805 			ss->go_s99_running = 0;
2806 			break;
2807 		case sdma_event_e80_hw_freeze:
2808 			break;
2809 		case sdma_event_e81_hw_frozen:
2810 			break;
2811 		case sdma_event_e82_hw_unfreeze:
2812 			break;
2813 		case sdma_event_e85_link_down:
2814 			ss->go_s99_running = 0;
2815 			break;
2816 		case sdma_event_e90_sw_halted:
2817 			break;
2818 		}
2819 		break;
2820 
2821 	case sdma_state_s50_hw_halt_wait:
2822 		switch (event) {
2823 		case sdma_event_e00_go_hw_down:
2824 			sdma_set_state(sde, sdma_state_s00_hw_down);
2825 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2826 			break;
2827 		case sdma_event_e10_go_hw_start:
2828 			break;
2829 		case sdma_event_e15_hw_halt_done:
2830 			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
2831 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2832 			break;
2833 		case sdma_event_e25_hw_clean_up_done:
2834 			break;
2835 		case sdma_event_e30_go_running:
2836 			ss->go_s99_running = 1;
2837 			break;
2838 		case sdma_event_e40_sw_cleaned:
2839 			break;
2840 		case sdma_event_e50_hw_cleaned:
2841 			break;
2842 		case sdma_event_e60_hw_halted:
2843 			schedule_work(&sde->err_halt_worker);
2844 			break;
2845 		case sdma_event_e70_go_idle:
2846 			ss->go_s99_running = 0;
2847 			break;
2848 		case sdma_event_e80_hw_freeze:
2849 			break;
2850 		case sdma_event_e81_hw_frozen:
2851 			break;
2852 		case sdma_event_e82_hw_unfreeze:
2853 			break;
2854 		case sdma_event_e85_link_down:
2855 			ss->go_s99_running = 0;
2856 			break;
2857 		case sdma_event_e90_sw_halted:
2858 			break;
2859 		}
2860 		break;
2861 
2862 	case sdma_state_s60_idle_halt_wait:
2863 		switch (event) {
2864 		case sdma_event_e00_go_hw_down:
2865 			sdma_set_state(sde, sdma_state_s00_hw_down);
2866 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2867 			break;
2868 		case sdma_event_e10_go_hw_start:
2869 			break;
2870 		case sdma_event_e15_hw_halt_done:
2871 			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
2872 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2873 			break;
2874 		case sdma_event_e25_hw_clean_up_done:
2875 			break;
2876 		case sdma_event_e30_go_running:
2877 			ss->go_s99_running = 1;
2878 			break;
2879 		case sdma_event_e40_sw_cleaned:
2880 			break;
2881 		case sdma_event_e50_hw_cleaned:
2882 			break;
2883 		case sdma_event_e60_hw_halted:
2884 			schedule_work(&sde->err_halt_worker);
2885 			break;
2886 		case sdma_event_e70_go_idle:
2887 			ss->go_s99_running = 0;
2888 			break;
2889 		case sdma_event_e80_hw_freeze:
2890 			break;
2891 		case sdma_event_e81_hw_frozen:
2892 			break;
2893 		case sdma_event_e82_hw_unfreeze:
2894 			break;
2895 		case sdma_event_e85_link_down:
2896 			break;
2897 		case sdma_event_e90_sw_halted:
2898 			break;
2899 		}
2900 		break;
2901 
2902 	case sdma_state_s80_hw_freeze:
2903 		switch (event) {
2904 		case sdma_event_e00_go_hw_down:
2905 			sdma_set_state(sde, sdma_state_s00_hw_down);
2906 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2907 			break;
2908 		case sdma_event_e10_go_hw_start:
2909 			break;
2910 		case sdma_event_e15_hw_halt_done:
2911 			break;
2912 		case sdma_event_e25_hw_clean_up_done:
2913 			break;
2914 		case sdma_event_e30_go_running:
2915 			ss->go_s99_running = 1;
2916 			break;
2917 		case sdma_event_e40_sw_cleaned:
2918 			break;
2919 		case sdma_event_e50_hw_cleaned:
2920 			break;
2921 		case sdma_event_e60_hw_halted:
2922 			break;
2923 		case sdma_event_e70_go_idle:
2924 			ss->go_s99_running = 0;
2925 			break;
2926 		case sdma_event_e80_hw_freeze:
2927 			break;
2928 		case sdma_event_e81_hw_frozen:
2929 			sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
2930 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2931 			break;
2932 		case sdma_event_e82_hw_unfreeze:
2933 			break;
2934 		case sdma_event_e85_link_down:
2935 			break;
2936 		case sdma_event_e90_sw_halted:
2937 			break;
2938 		}
2939 		break;
2940 
2941 	case sdma_state_s82_freeze_sw_clean:
2942 		switch (event) {
2943 		case sdma_event_e00_go_hw_down:
2944 			sdma_set_state(sde, sdma_state_s00_hw_down);
2945 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2946 			break;
2947 		case sdma_event_e10_go_hw_start:
2948 			break;
2949 		case sdma_event_e15_hw_halt_done:
2950 			break;
2951 		case sdma_event_e25_hw_clean_up_done:
2952 			break;
2953 		case sdma_event_e30_go_running:
2954 			ss->go_s99_running = 1;
2955 			break;
2956 		case sdma_event_e40_sw_cleaned:
2957 			/* notify caller this engine is done cleaning */
2958 			atomic_dec(&sde->dd->sdma_unfreeze_count);
2959 			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2960 			break;
2961 		case sdma_event_e50_hw_cleaned:
2962 			break;
2963 		case sdma_event_e60_hw_halted:
2964 			break;
2965 		case sdma_event_e70_go_idle:
2966 			ss->go_s99_running = 0;
2967 			break;
2968 		case sdma_event_e80_hw_freeze:
2969 			break;
2970 		case sdma_event_e81_hw_frozen:
2971 			break;
2972 		case sdma_event_e82_hw_unfreeze:
2973 			sdma_hw_start_up(sde);
2974 			sdma_set_state(sde, ss->go_s99_running ?
2975 				       sdma_state_s99_running :
2976 				       sdma_state_s20_idle);
2977 			break;
2978 		case sdma_event_e85_link_down:
2979 			break;
2980 		case sdma_event_e90_sw_halted:
2981 			break;
2982 		}
2983 		break;
2984 
2985 	case sdma_state_s99_running:
2986 		switch (event) {
2987 		case sdma_event_e00_go_hw_down:
2988 			sdma_set_state(sde, sdma_state_s00_hw_down);
2989 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2990 			break;
2991 		case sdma_event_e10_go_hw_start:
2992 			break;
2993 		case sdma_event_e15_hw_halt_done:
2994 			break;
2995 		case sdma_event_e25_hw_clean_up_done:
2996 			break;
2997 		case sdma_event_e30_go_running:
2998 			break;
2999 		case sdma_event_e40_sw_cleaned:
3000 			break;
3001 		case sdma_event_e50_hw_cleaned:
3002 			break;
3003 		case sdma_event_e60_hw_halted:
3004 			need_progress = 1;
3005 			sdma_err_progress_check_schedule(sde);
3006 			/* fall through */
3007 		case sdma_event_e90_sw_halted:
3008 			/*
3009 			 * A SW-initiated halt does not perform the engine
3010 			 * progress check.
3011 			 */
3012 			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
3013 			schedule_work(&sde->err_halt_worker);
3014 			break;
3015 		case sdma_event_e70_go_idle:
3016 			sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
3017 			break;
3018 		case sdma_event_e85_link_down:
3019 			ss->go_s99_running = 0;
3020 			/* fall through */
3021 		case sdma_event_e80_hw_freeze:
3022 			sdma_set_state(sde, sdma_state_s80_hw_freeze);
3023 			atomic_dec(&sde->dd->sdma_unfreeze_count);
3024 			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
3025 			break;
3026 		case sdma_event_e81_hw_frozen:
3027 			break;
3028 		case sdma_event_e82_hw_unfreeze:
3029 			break;
3030 		}
3031 		break;
3032 	}
3033 
3034 	ss->last_event = event;
3035 	if (need_progress)
3036 		sdma_make_progress(sde, 0);
3037 }
3038 
3039 /*
3040  * _extend_sdma_tx_descs() - helper to extend txreq
3041  *
3042  * This is called once the initial nominal allocation
3043  * of descriptors in the sdma_txreq is exhausted.
3044  *
3045  * The code will bump the allocation up to the max
3046  * of MAX_DESC (64) descriptors. There doesn't seem to be
3047  * much point in an interim step. The last descriptor
3048  * is reserved for the coalesce buffer in order to support
3049  * cases where the input packet has >MAX_DESC iovecs.
3050  *
3051  */
3052 static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
3053 {
3054 	int i;
3055 
3056 	/* Handle last descriptor */
3057 	if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
3058 		/* if tlen is 0, it is for padding; release the last descriptor */
3059 		if (!tx->tlen) {
3060 			tx->desc_limit = MAX_DESC;
3061 		} else if (!tx->coalesce_buf) {
3062 			/* allocate coalesce buffer with space for padding */
3063 			tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
3064 						   GFP_ATOMIC);
3065 			if (!tx->coalesce_buf)
3066 				goto enomem;
3067 			tx->coalesce_idx = 0;
3068 		}
3069 		return 0;
3070 	}
3071 
3072 	if (unlikely(tx->num_desc == MAX_DESC))
3073 		goto enomem;
3074 
3075 	tx->descp = kmalloc_array(
3076 			MAX_DESC,
3077 			sizeof(struct sdma_desc),
3078 			GFP_ATOMIC);
3079 	if (!tx->descp)
3080 		goto enomem;
3081 
3082 	/* reserve last descriptor for coalescing */
3083 	tx->desc_limit = MAX_DESC - 1;
3084 	/* copy ones already built */
3085 	for (i = 0; i < tx->num_desc; i++)
3086 		tx->descp[i] = tx->descs[i];
3087 	return 0;
3088 enomem:
3089 	__sdma_txclean(dd, tx);
3090 	return -ENOMEM;
3091 }
3092 
3093 /*
3094  * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
3095  *
3096  * This is called once the initial nominal allocation of descriptors
3097  * in the sdma_txreq is exhausted.
3098  *
3099  * This function calls _extend_sdma_tx_descs to extend or allocate
3100  * a coalesce buffer. If there is an allocated coalesce buffer, it will
3101  * copy the input packet data into the coalesce buffer. It also adds the
3102  * coalesce buffer descriptor once the whole packet has been received.
3103  *
3104  * Return:
3105  * <0 - error
3106  * 0 - coalescing, don't populate descriptor
3107  * 1 - continue with populating descriptor
3108  */
3109 int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
3110 			   int type, void *kvaddr, struct page *page,
3111 			   unsigned long offset, u16 len)
3112 {
3113 	int pad_len, rval;
3114 	dma_addr_t addr;
3115 
3116 	rval = _extend_sdma_tx_descs(dd, tx);
3117 	if (rval) {
3118 		__sdma_txclean(dd, tx);
3119 		return rval;
3120 	}
3121 
3122 	/* If coalesce buffer is allocated, copy data into it */
3123 	if (tx->coalesce_buf) {
3124 		if (type == SDMA_MAP_NONE) {
3125 			__sdma_txclean(dd, tx);
3126 			return -EINVAL;
3127 		}
3128 
3129 		if (type == SDMA_MAP_PAGE) {
3130 			kvaddr = kmap(page);
3131 			kvaddr += offset;
3132 		} else if (WARN_ON(!kvaddr)) {
3133 			__sdma_txclean(dd, tx);
3134 			return -EINVAL;
3135 		}
3136 
3137 		memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
3138 		tx->coalesce_idx += len;
3139 		if (type == SDMA_MAP_PAGE)
3140 			kunmap(page);
3141 
3142 		/* If there is more data, return */
3143 		if (tx->tlen - tx->coalesce_idx)
3144 			return 0;
3145 
3146 		/* Whole packet is received; add any padding */
3147 		pad_len = tx->packet_len & (sizeof(u32) - 1);
3148 		if (pad_len) {
3149 			pad_len = sizeof(u32) - pad_len;
3150 			memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len);
3151 			/* padding is taken care of for coalescing case */
3152 			tx->packet_len += pad_len;
3153 			tx->tlen += pad_len;
3154 		}
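		/*
		 * e.g. packet_len == 61 gives pad_len == 3, rounding the
		 * coalesced payload up to a dword boundary before mapping.
		 */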
3155 
3156 		/* dma map the coalesce buffer */
3157 		addr = dma_map_single(&dd->pcidev->dev,
3158 				      tx->coalesce_buf,
3159 				      tx->tlen,
3160 				      DMA_TO_DEVICE);
3161 
3162 		if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
3163 			__sdma_txclean(dd, tx);
3164 			return -ENOSPC;
3165 		}
3166 
3167 		/* Add descriptor for coalesce buffer */
3168 		tx->desc_limit = MAX_DESC;
3169 		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
3170 					 addr, tx->tlen);
3171 	}
3172 
3173 	return 1;
3174 }
3175 
3176 /* Update sdes when the lmc changes */
3177 void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
3178 {
3179 	struct sdma_engine *sde;
3180 	int i;
3181 	u64 sreg;
3182 
3183 	sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
3184 		SD(CHECK_SLID_MASK_SHIFT)) |
3185 		(((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
3186 		SD(CHECK_SLID_VALUE_SHIFT));
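
	/*
	 * A sketch of the per-packet check this register presumably
	 * implements (assuming the usual mask/value semantics):
	 *     pass if (packet_slid & mask) == (lid & mask)
	 * so with an LMC of n, the low n bits of the SLID float.
	 */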
3187 
3188 	for (i = 0; i < dd->num_sdma; i++) {
3189 		hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
3190 			  i, (u32)sreg);
3191 		sde = &dd->per_sdma[i];
3192 		write_sde_csr(sde, SD(CHECK_SLID), sreg);
3193 	}
3194 }
3195 
3196 /* tx not dword sized - pad */
3197 int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
3198 {
3199 	int rval = 0;
3200 
3201 	tx->num_desc++;
3202 	if ((unlikely(tx->num_desc == tx->desc_limit))) {
3203 		rval = _extend_sdma_tx_descs(dd, tx);
3204 		if (rval) {
3205 			__sdma_txclean(dd, tx);
3206 			return rval;
3207 		}
3208 	}
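	/*
	 * The pad descriptor points at the device's shared pad buffer
	 * (sdma_pad_phys) and supplies the 1-3 bytes needed to round
	 * packet_len up to a dword boundary.
	 */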
3209 	/* finish the one just added */
3210 	make_tx_sdma_desc(
3211 		tx,
3212 		SDMA_MAP_NONE,
3213 		dd->sdma_pad_phys,
3214 		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
3215 	_sdma_close_tx(dd, tx);
3216 	return rval;
3217 }
3218 
3219 /*
3220  * Add ahg to the sdma_txreq
3221  *
3222  * The logic will consume up to 3
3223  * descriptors at the beginning of
3224  * sdma_txreq.
3225  */
3226 void _sdma_txreq_ahgadd(
3227 	struct sdma_txreq *tx,
3228 	u8 num_ahg,
3229 	u8 ahg_entry,
3230 	u32 *ahg,
3231 	u8 ahg_hlen)
3232 {
3233 	u32 i, shift = 0, desc = 0;
3234 	u8 mode;
3235 
3236 	WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
3237 	/* compute mode */
3238 	if (num_ahg == 1)
3239 		mode = SDMA_AHG_APPLY_UPDATE1;
3240 	else if (num_ahg <= 5)
3241 		mode = SDMA_AHG_APPLY_UPDATE2;
3242 	else
3243 		mode = SDMA_AHG_APPLY_UPDATE3;
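	/*
	 * One update word fits in descriptor 0 (UPDATE1), up to five fit
	 * in descriptors 0-1 (UPDATE2), and more require a third
	 * descriptor (UPDATE3); num_ahg is capped at 9 by the WARN below.
	 */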
3244 	tx->num_desc++;
3245 	/* initialize the consumed descriptors to zero */
3246 	switch (mode) {
3247 	case SDMA_AHG_APPLY_UPDATE3:
3248 		tx->num_desc++;
3249 		tx->descs[2].qw[0] = 0;
3250 		tx->descs[2].qw[1] = 0;
3251 		/* FALLTHROUGH */
3252 	case SDMA_AHG_APPLY_UPDATE2:
3253 		tx->num_desc++;
3254 		tx->descs[1].qw[0] = 0;
3255 		tx->descs[1].qw[1] = 0;
3256 		break;
3257 	}
3258 	ahg_hlen >>= 2;
3259 	tx->descs[0].qw[1] |=
3260 		(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
3261 			<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
3262 		(((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
3263 			<< SDMA_DESC1_HEADER_DWS_SHIFT) |
3264 		(((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
3265 			<< SDMA_DESC1_HEADER_MODE_SHIFT) |
3266 		(((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
3267 			<< SDMA_DESC1_HEADER_UPDATE1_SHIFT);
3268 	for (i = 0; i < (num_ahg - 1); i++) {
3269 		if (!shift && !(i & 2))
3270 			desc++;
3271 		tx->descs[desc].qw[!!(i & 2)] |=
3272 			(((u64)ahg[i + 1])
3273 				<< shift);
3274 		shift = (shift + 32) & 63;
3275 	}
3276 }
3277 
3278 /**
3279  * sdma_ahg_alloc - allocate an AHG entry
3280  * @sde: engine to allocate from
3281  *
3282  * Return:
3283  * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled,
3284  * -ENOSPC if an entry is not available
3285  */
3286 int sdma_ahg_alloc(struct sdma_engine *sde)
3287 {
3288 	int nr;
3289 	int oldbit;
3290 
3291 	if (!sde) {
3292 		trace_hfi1_ahg_allocate(sde, -EINVAL);
3293 		return -EINVAL;
3294 	}
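	/*
	 * Lock-free allocation: scan for the first clear bit, then try
	 * to claim it atomically; retry if another context took it
	 * between the scan and the test_and_set_bit().
	 */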
3295 	while (1) {
3296 		nr = ffz(READ_ONCE(sde->ahg_bits));
3297 		if (nr > 31) {
3298 			trace_hfi1_ahg_allocate(sde, -ENOSPC);
3299 			return -ENOSPC;
3300 		}
3301 		oldbit = test_and_set_bit(nr, &sde->ahg_bits);
3302 		if (!oldbit)
3303 			break;
3304 		cpu_relax();
3305 	}
3306 	trace_hfi1_ahg_allocate(sde, nr);
3307 	return nr;
3308 }
3309 
3310 /**
3311  * sdma_ahg_free - free an AHG entry
3312  * @sde: engine to return AHG entry
3313  * @ahg_index: index to free
3314  *
3315  * This routine frees the indicated AHG entry.
3316  */
3317 void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
3318 {
3319 	if (!sde)
3320 		return;
3321 	trace_hfi1_ahg_deallocate(sde, ahg_index);
3322 	if (ahg_index < 0 || ahg_index > 31)
3323 		return;
3324 	clear_bit(ahg_index, &sde->ahg_bits);
3325 }
3326 
3327 /*
3328  * SPC freeze handling for SDMA engines.  Called when the driver knows
3329  * the SPC is going into a freeze but before the freeze is fully
3330  * settled.  Generally an error interrupt.
3331  *
3332  * This event will pull the engine out of running so no more entries can be
3333  * added to the engine's queue.
3334  */
3335 void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
3336 {
3337 	int i;
3338 	enum sdma_events event = link_down ? sdma_event_e85_link_down :
3339 					     sdma_event_e80_hw_freeze;
3340 
3341 	/* set up the wait but do not wait here */
3342 	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
3343 
3344 	/* tell all engines to stop running and wait */
3345 	for (i = 0; i < dd->num_sdma; i++)
3346 		sdma_process_event(&dd->per_sdma[i], event);
3347 
3348 	/* sdma_freeze() will wait for all engines to have stopped */
3349 }
3350 
3351 /*
3352  * SPC freeze handling for SDMA engines.  Called when the driver knows
3353  * the SPC is fully frozen.
3354  */
3355 void sdma_freeze(struct hfi1_devdata *dd)
3356 {
3357 	int i;
3358 	int ret;
3359 
3360 	/*
3361 	 * Make sure all engines have moved out of the running state before
3362 	 * continuing.
3363 	 */
3364 	ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
3365 				       atomic_read(&dd->sdma_unfreeze_count) <=
3366 				       0);
3367 	/* if interrupted, or the count went negative (unloading), just exit */
3368 	if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
3369 		return;
3370 
3371 	/* set up the count for the next wait */
3372 	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
3373 
3374 	/* tell all engines that the SPC is frozen, they can start cleaning */
3375 	for (i = 0; i < dd->num_sdma; i++)
3376 		sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);
3377 
3378 	/*
3379 	 * Wait for everyone to finish software clean before exiting.  The
3380 	 * software clean will read engine CSRs, so must be completed before
3381 	 * the next step, which will clear the engine CSRs.
3382 	 */
3383 	(void)wait_event_interruptible(dd->sdma_unfreeze_wq,
3384 				atomic_read(&dd->sdma_unfreeze_count) <= 0);
3385 	/* no need to check results - done no matter what */
3386 }
3387 
3388 /*
3389  * SPC freeze handling for the SDMA engines.  Called after the SPC is unfrozen.
3390  *
3391  * The SPC freeze acts like a SDMA halt and a hardware clean combined.  All
3392  * that is left is a software clean.  We could do it after the SPC is fully
3393  * frozen, but then we'd have to add another state to wait for the unfreeze.
3394  * Instead, just defer the software clean until the unfreeze step.
3395  */
3396 void sdma_unfreeze(struct hfi1_devdata *dd)
3397 {
3398 	int i;
3399 
3400 	/* tell all engines to start freeze clean up */
3401 	for (i = 0; i < dd->num_sdma; i++)
3402 		sdma_process_event(&dd->per_sdma[i],
3403 				   sdma_event_e82_hw_unfreeze);
3404 }
3405 
3406 /**
3407  * _sdma_engine_progress_schedule() - schedule progress on engine
3408  * @sde: sdma_engine to schedule progress
3409  *
3410  */
3411 void _sdma_engine_progress_schedule(
3412 	struct sdma_engine *sde)
3413 {
3414 	trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
3415 	/* assume we have selected a good cpu */
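	/*
	 * Writing the engine's progress bit to CCE_INT_FORCE asks the
	 * chip to raise that interrupt, re-driving the progress path
	 * without waiting for new hardware activity.
	 */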
3416 	write_csr(sde->dd,
3417 		  CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
3418 		  sde->progress_mask);
3419 }
3420