xref: /openbmc/linux/drivers/infiniband/hw/hfi1/sdma.c (revision 711aab1d)
1 /*
2  * Copyright(c) 2015, 2016 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 
48 #include <linux/spinlock.h>
49 #include <linux/seqlock.h>
50 #include <linux/netdevice.h>
51 #include <linux/moduleparam.h>
52 #include <linux/bitops.h>
53 #include <linux/timer.h>
54 #include <linux/vmalloc.h>
55 #include <linux/highmem.h>
56 
57 #include "hfi.h"
58 #include "common.h"
59 #include "qp.h"
60 #include "sdma.h"
61 #include "iowait.h"
62 #include "trace.h"
63 
64 /* must be a power of 2, >= 64 and <= 32768 */
65 #define SDMA_DESCQ_CNT 2048
66 #define SDMA_DESC_INTR 64
67 #define INVALID_TAIL 0xffff
68 
69 static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
70 module_param(sdma_descq_cnt, uint, S_IRUGO);
71 MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
72 
73 static uint sdma_idle_cnt = 250;
74 module_param(sdma_idle_cnt, uint, S_IRUGO);
75 MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns, default 250)");
76 
77 uint mod_num_sdma;
78 module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
79 MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use");
80 
81 static uint sdma_desct_intr = SDMA_DESC_INTR;
82 module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
83 MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before interrupt");
84 
85 #define SDMA_WAIT_BATCH_SIZE 20
86 /* max wait time for an SDMA engine to indicate it has halted */
87 #define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
88 /* all SDMA engine errors that cause a halt */
89 
90 #define SD(name) SEND_DMA_##name
91 #define ALL_SDMA_ENG_HALT_ERRS \
92 	(SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
93 	| SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
94 	| SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
95 	| SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
96 	| SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
97 	| SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
98 	| SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
99 	| SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
100 	| SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
101 	| SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
102 	| SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
103 	| SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
104 	| SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
105 	| SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
106 	| SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
107 	| SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
108 	| SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
109 	| SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
110 
111 /* sdma_sendctrl operations */
112 #define SDMA_SENDCTRL_OP_ENABLE    BIT(0)
113 #define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
114 #define SDMA_SENDCTRL_OP_HALT      BIT(2)
115 #define SDMA_SENDCTRL_OP_CLEANUP   BIT(3)
116 
117 /* handle long defines */
118 #define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
119 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
120 #define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
121 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
122 
123 static const char * const sdma_state_names[] = {
124 	[sdma_state_s00_hw_down]                = "s00_HwDown",
125 	[sdma_state_s10_hw_start_up_halt_wait]  = "s10_HwStartUpHaltWait",
126 	[sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
127 	[sdma_state_s20_idle]                   = "s20_Idle",
128 	[sdma_state_s30_sw_clean_up_wait]       = "s30_SwCleanUpWait",
129 	[sdma_state_s40_hw_clean_up_wait]       = "s40_HwCleanUpWait",
130 	[sdma_state_s50_hw_halt_wait]           = "s50_HwHaltWait",
131 	[sdma_state_s60_idle_halt_wait]         = "s60_IdleHaltWait",
132 	[sdma_state_s80_hw_freeze]		= "s80_HwFreeze",
133 	[sdma_state_s82_freeze_sw_clean]	= "s82_FreezeSwClean",
134 	[sdma_state_s99_running]                = "s99_Running",
135 };
136 
137 #ifdef CONFIG_SDMA_VERBOSITY
138 static const char * const sdma_event_names[] = {
139 	[sdma_event_e00_go_hw_down]   = "e00_GoHwDown",
140 	[sdma_event_e10_go_hw_start]  = "e10_GoHwStart",
141 	[sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
142 	[sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
143 	[sdma_event_e30_go_running]   = "e30_GoRunning",
144 	[sdma_event_e40_sw_cleaned]   = "e40_SwCleaned",
145 	[sdma_event_e50_hw_cleaned]   = "e50_HwCleaned",
146 	[sdma_event_e60_hw_halted]    = "e60_HwHalted",
147 	[sdma_event_e70_go_idle]      = "e70_GoIdle",
148 	[sdma_event_e80_hw_freeze]    = "e80_HwFreeze",
149 	[sdma_event_e81_hw_frozen]    = "e81_HwFrozen",
150 	[sdma_event_e82_hw_unfreeze]  = "e82_HwUnfreeze",
151 	[sdma_event_e85_link_down]    = "e85_LinkDown",
152 	[sdma_event_e90_sw_halted]    = "e90_SwHalted",
153 };
154 #endif
155 
156 static const struct sdma_set_state_action sdma_action_table[] = {
157 	[sdma_state_s00_hw_down] = {
158 		.go_s99_running_tofalse = 1,
159 		.op_enable = 0,
160 		.op_intenable = 0,
161 		.op_halt = 0,
162 		.op_cleanup = 0,
163 	},
164 	[sdma_state_s10_hw_start_up_halt_wait] = {
165 		.op_enable = 0,
166 		.op_intenable = 0,
167 		.op_halt = 1,
168 		.op_cleanup = 0,
169 	},
170 	[sdma_state_s15_hw_start_up_clean_wait] = {
171 		.op_enable = 0,
172 		.op_intenable = 1,
173 		.op_halt = 0,
174 		.op_cleanup = 1,
175 	},
176 	[sdma_state_s20_idle] = {
177 		.op_enable = 0,
178 		.op_intenable = 1,
179 		.op_halt = 0,
180 		.op_cleanup = 0,
181 	},
182 	[sdma_state_s30_sw_clean_up_wait] = {
183 		.op_enable = 0,
184 		.op_intenable = 0,
185 		.op_halt = 0,
186 		.op_cleanup = 0,
187 	},
188 	[sdma_state_s40_hw_clean_up_wait] = {
189 		.op_enable = 0,
190 		.op_intenable = 0,
191 		.op_halt = 0,
192 		.op_cleanup = 1,
193 	},
194 	[sdma_state_s50_hw_halt_wait] = {
195 		.op_enable = 0,
196 		.op_intenable = 0,
197 		.op_halt = 0,
198 		.op_cleanup = 0,
199 	},
200 	[sdma_state_s60_idle_halt_wait] = {
201 		.go_s99_running_tofalse = 1,
202 		.op_enable = 0,
203 		.op_intenable = 0,
204 		.op_halt = 1,
205 		.op_cleanup = 0,
206 	},
207 	[sdma_state_s80_hw_freeze] = {
208 		.op_enable = 0,
209 		.op_intenable = 0,
210 		.op_halt = 0,
211 		.op_cleanup = 0,
212 	},
213 	[sdma_state_s82_freeze_sw_clean] = {
214 		.op_enable = 0,
215 		.op_intenable = 0,
216 		.op_halt = 0,
217 		.op_cleanup = 0,
218 	},
219 	[sdma_state_s99_running] = {
220 		.op_enable = 1,
221 		.op_intenable = 1,
222 		.op_halt = 0,
223 		.op_cleanup = 0,
224 		.go_s99_running_totrue = 1,
225 	},
226 };
227 
228 #define SDMA_TAIL_UPDATE_THRESH 0x1F
229 
230 /* declare all statics here rather than keep sorting */
231 static void sdma_complete(struct kref *);
232 static void sdma_finalput(struct sdma_state *);
233 static void sdma_get(struct sdma_state *);
234 static void sdma_hw_clean_up_task(unsigned long);
235 static void sdma_put(struct sdma_state *);
236 static void sdma_set_state(struct sdma_engine *, enum sdma_states);
237 static void sdma_start_hw_clean_up(struct sdma_engine *);
238 static void sdma_sw_clean_up_task(unsigned long);
239 static void sdma_sendctrl(struct sdma_engine *, unsigned);
240 static void init_sdma_regs(struct sdma_engine *, u32, uint);
241 static void sdma_process_event(
242 	struct sdma_engine *sde,
243 	enum sdma_events event);
244 static void __sdma_process_event(
245 	struct sdma_engine *sde,
246 	enum sdma_events event);
247 static void dump_sdma_state(struct sdma_engine *sde);
248 static void sdma_make_progress(struct sdma_engine *sde, u64 status);
249 static void sdma_desc_avail(struct sdma_engine *sde, uint avail);
250 static void sdma_flush_descq(struct sdma_engine *sde);
251 
252 /**
253  * sdma_state_name() - return state string from enum
254  * @state: state
255  */
256 static const char *sdma_state_name(enum sdma_states state)
257 {
258 	return sdma_state_names[state];
259 }
260 
261 static void sdma_get(struct sdma_state *ss)
262 {
263 	kref_get(&ss->kref);
264 }
265 
266 static void sdma_complete(struct kref *kref)
267 {
268 	struct sdma_state *ss =
269 		container_of(kref, struct sdma_state, kref);
270 
271 	complete(&ss->comp);
272 }
273 
274 static void sdma_put(struct sdma_state *ss)
275 {
276 	kref_put(&ss->kref, sdma_complete);
277 }
278 
279 static void sdma_finalput(struct sdma_state *ss)
280 {
281 	sdma_put(ss);
282 	wait_for_completion(&ss->comp);
283 }
284 
285 static inline void write_sde_csr(
286 	struct sdma_engine *sde,
287 	u32 offset0,
288 	u64 value)
289 {
290 	write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
291 }
292 
293 static inline u64 read_sde_csr(
294 	struct sdma_engine *sde,
295 	u32 offset0)
296 {
297 	return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
298 }
299 
300 /*
301  * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
302  * sdma engine 'sde' to drop to 0.
303  */
304 static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
305 					int pause)
306 {
307 	u64 off = 8 * sde->this_idx;
308 	struct hfi1_devdata *dd = sde->dd;
309 	int lcnt = 0;
310 	u64 reg_prev;
311 	u64 reg = 0;
312 
313 	while (1) {
314 		reg_prev = reg;
315 		reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);
316 
317 		reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
318 		reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
319 		if (reg == 0)
320 			break;
321 		/* counter is reset if occupancy count changes */
322 		if (reg != reg_prev)
323 			lcnt = 0;
324 		if (lcnt++ > 500) {
325 			/* timed out - bounce the link */
326 			dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
327 				   __func__, sde->this_idx, (u32)reg);
328 			queue_work(dd->pport->link_wq,
329 				   &dd->pport->link_bounce_work);
330 			break;
331 		}
332 		udelay(1);
333 	}
334 }
335 
336 /*
337  * sdma_wait() - wait for packet egress to complete for all SDMA engines,
338  * and pause for credit return.
339  */
340 void sdma_wait(struct hfi1_devdata *dd)
341 {
342 	int i;
343 
344 	for (i = 0; i < dd->num_sdma; i++) {
345 		struct sdma_engine *sde = &dd->per_sdma[i];
346 
347 		sdma_wait_for_packet_egress(sde, 0);
348 	}
349 }
350 
351 static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
352 {
353 	u64 reg;
354 
355 	if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
356 		return;
357 	reg = cnt;
358 	reg &= SD(DESC_CNT_CNT_MASK);
359 	reg <<= SD(DESC_CNT_CNT_SHIFT);
360 	write_sde_csr(sde, SD(DESC_CNT), reg);
361 }
362 
363 static inline void complete_tx(struct sdma_engine *sde,
364 			       struct sdma_txreq *tx,
365 			       int res)
366 {
367 	/* cache wait and complete: the callback may modify or free tx */
368 	struct iowait *wait = tx->wait;
369 	callback_t complete = tx->complete;
370 
371 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
372 	trace_hfi1_sdma_out_sn(sde, tx->sn);
373 	if (WARN_ON_ONCE(sde->head_sn != tx->sn))
374 		dd_dev_err(sde->dd, "expected %llu got %llu\n",
375 			   sde->head_sn, tx->sn);
376 	sde->head_sn++;
377 #endif
378 	__sdma_txclean(sde->dd, tx);
379 	if (complete)
380 		(*complete)(tx, res);
381 	if (wait && iowait_sdma_dec(wait))
382 		iowait_drain_wakeup(wait);
383 }
384 
385 /*
386  * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
387  * Complete all the sdma requests with an SDMA_TXREQ_S_ABORTED status
388  * Depending on timing there can be txreqs in two places:
389  * - in the descq ring
390  * - in the flush list
391  *
392  * To avoid ordering issues the descq ring needs to be flushed
393  * first followed by the flush list.
394  *
395  * This routine is called from two places
396  * - From a work queue item
397  * - Directly from the state machine just before setting the
398  *   state to running
399  *
400  * Must be called with head_lock held
401  *
402  */
403 static void sdma_flush(struct sdma_engine *sde)
404 {
405 	struct sdma_txreq *txp, *txp_next;
406 	LIST_HEAD(flushlist);
407 	unsigned long flags;
408 
409 	/* flush from head to tail */
410 	sdma_flush_descq(sde);
411 	spin_lock_irqsave(&sde->flushlist_lock, flags);
412 	/* copy flush list */
413 	list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
414 		list_del_init(&txp->list);
415 		list_add_tail(&txp->list, &flushlist);
416 	}
417 	spin_unlock_irqrestore(&sde->flushlist_lock, flags);
418 	/* flush from flush list */
419 	list_for_each_entry_safe(txp, txp_next, &flushlist, list)
420 		complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
421 }
422 
423 /*
424  * Fields a work request for flushing the descq ring
425  * and the flush list
426  *
427  * If the engine has been brought to running during
428  * If the engine has been brought to running during
429  * the scheduling delay, the flush is skipped on the
430  * assumption that the transition to running already
431  * performed this flush.
432  */
433 static void sdma_field_flush(struct work_struct *work)
434 {
435 	unsigned long flags;
436 	struct sdma_engine *sde =
437 		container_of(work, struct sdma_engine, flush_worker);
438 
439 	write_seqlock_irqsave(&sde->head_lock, flags);
440 	if (!__sdma_running(sde))
441 		sdma_flush(sde);
442 	write_sequnlock_irqrestore(&sde->head_lock, flags);
443 }
444 
445 static void sdma_err_halt_wait(struct work_struct *work)
446 {
447 	struct sdma_engine *sde = container_of(work, struct sdma_engine,
448 						err_halt_worker);
449 	u64 statuscsr;
450 	unsigned long timeout;
451 
452 	timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
453 	while (1) {
454 		statuscsr = read_sde_csr(sde, SD(STATUS));
455 		statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
456 		if (statuscsr)
457 			break;
458 		if (time_after(jiffies, timeout)) {
459 			dd_dev_err(sde->dd,
460 				   "SDMA engine %d - timeout waiting for engine to halt\n",
461 				   sde->this_idx);
462 			/*
463 			 * Continue anyway.  This could happen if there was
464 			 * an uncorrectable error in the wrong spot.
465 			 */
466 			break;
467 		}
468 		usleep_range(80, 120);
469 	}
470 
471 	sdma_process_event(sde, sdma_event_e15_hw_halt_done);
472 }
473 
474 static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
475 {
476 	if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
477 		unsigned index;
478 		struct hfi1_devdata *dd = sde->dd;
479 
480 		for (index = 0; index < dd->num_sdma; index++) {
481 			struct sdma_engine *curr_sdma = &dd->per_sdma[index];
482 
483 			if (curr_sdma != sde)
484 				curr_sdma->progress_check_head =
485 							curr_sdma->descq_head;
486 		}
487 		dd_dev_err(sde->dd,
488 			   "SDMA engine %d - check scheduled\n",
489 				sde->this_idx);
490 		mod_timer(&sde->err_progress_check_timer, jiffies + 10);
491 	}
492 }
493 
494 static void sdma_err_progress_check(unsigned long data)
495 {
496 	unsigned index;
497 	struct sdma_engine *sde = (struct sdma_engine *)data;
498 
499 	dd_dev_err(sde->dd, "SDE progress check event\n");
500 	for (index = 0; index < sde->dd->num_sdma; index++) {
501 		struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
502 		unsigned long flags;
503 
504 		/* check progress on each engine except the current one */
505 		if (curr_sde == sde)
506 			continue;
507 		/*
508 		 * We must lock interrupts when acquiring sde->lock,
509 		 * to avoid a deadlock if interrupt triggers and spins on
510 		 * the same lock on same CPU
511 		 */
512 		spin_lock_irqsave(&curr_sde->tail_lock, flags);
513 		write_seqlock(&curr_sde->head_lock);
514 
515 		/* skip non-running queues */
516 		if (curr_sde->state.current_state != sdma_state_s99_running) {
517 			write_sequnlock(&curr_sde->head_lock);
518 			spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
519 			continue;
520 		}
521 
522 		if ((curr_sde->descq_head != curr_sde->descq_tail) &&
523 		    (curr_sde->descq_head ==
524 				curr_sde->progress_check_head))
525 			__sdma_process_event(curr_sde,
526 					     sdma_event_e90_sw_halted);
527 		write_sequnlock(&curr_sde->head_lock);
528 		spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
529 	}
530 	schedule_work(&sde->err_halt_worker);
531 }
532 
533 static void sdma_hw_clean_up_task(unsigned long opaque)
534 {
535 	struct sdma_engine *sde = (struct sdma_engine *)opaque;
536 	u64 statuscsr;
537 
538 	while (1) {
539 #ifdef CONFIG_SDMA_VERBOSITY
540 		dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
541 			   sde->this_idx, slashstrip(__FILE__), __LINE__,
542 			__func__);
543 #endif
544 		statuscsr = read_sde_csr(sde, SD(STATUS));
545 		statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
546 		if (statuscsr)
547 			break;
548 		udelay(10);
549 	}
550 
551 	sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
552 }
553 
554 static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
555 {
556 	smp_read_barrier_depends(); /* see sdma_update_tail() */
557 	return sde->tx_ring[sde->tx_head & sde->sdma_mask];
558 }
559 
560 /*
561  * flush ring for recovery
562  */
563 static void sdma_flush_descq(struct sdma_engine *sde)
564 {
565 	u16 head, tail;
566 	int progress = 0;
567 	struct sdma_txreq *txp = get_txhead(sde);
568 
569 	/* The reason for some of the complexity of this code is that
570 	 * not all descriptors have corresponding txps.  So, we have to
571 	 * be able to skip over descs until we wander into the range of
572 	 * the next txp on the list.
573 	 */
574 	head = sde->descq_head & sde->sdma_mask;
575 	tail = sde->descq_tail & sde->sdma_mask;
576 	while (head != tail) {
577 		/* advance head, wrap if needed */
578 		head = ++sde->descq_head & sde->sdma_mask;
579 		/* if now past this txp's descs, do the callback */
580 		if (txp && txp->next_descq_idx == head) {
581 			/* remove from list */
582 			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
583 			complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
584 			trace_hfi1_sdma_progress(sde, head, tail, txp);
585 			txp = get_txhead(sde);
586 		}
587 		progress++;
588 	}
589 	if (progress)
590 		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
591 }
592 
593 static void sdma_sw_clean_up_task(unsigned long opaque)
594 {
595 	struct sdma_engine *sde = (struct sdma_engine *)opaque;
596 	unsigned long flags;
597 
598 	spin_lock_irqsave(&sde->tail_lock, flags);
599 	write_seqlock(&sde->head_lock);
600 
601 	/*
602 	 * At this point, the following should always be true:
603 	 * - We are halted, so no more descriptors are getting retired.
604 	 * - We are not running, so no one is submitting new work.
605 	 * - Only we can send the e40_sw_cleaned, so we can't start
606 	 *   running again until we say so.  So, the active list and
607 	 *   descq are ours to play with.
608 	 */
609 
610 	/*
611 	 * In the error clean up sequence, software clean must be called
612 	 * before the hardware clean so we can use the hardware head in
613 	 * the progress routine.  A hardware clean or SPC unfreeze will
614 	 * reset the hardware head.
615 	 *
616 	 * Process all retired requests. The progress routine will use the
617 	 * latest physical hardware head - we are not running so speed does
618 	 * not matter.
619 	 */
620 	sdma_make_progress(sde, 0);
621 
622 	sdma_flush(sde);
623 
624 	/*
625 	 * Reset our notion of head and tail.
626 	 * Note that the HW registers have been reset via an earlier
627 	 * clean up.
628 	 */
629 	sde->descq_tail = 0;
630 	sde->descq_head = 0;
631 	sde->desc_avail = sdma_descq_freecnt(sde);
632 	*sde->head_dma = 0;
633 
634 	__sdma_process_event(sde, sdma_event_e40_sw_cleaned);
635 
636 	write_sequnlock(&sde->head_lock);
637 	spin_unlock_irqrestore(&sde->tail_lock, flags);
638 }
639 
640 static void sdma_sw_tear_down(struct sdma_engine *sde)
641 {
642 	struct sdma_state *ss = &sde->state;
643 
644 	/* Releasing this reference means the state machine has stopped. */
645 	sdma_put(ss);
646 
647 	/* stop waiting for all unfreeze events to complete */
648 	atomic_set(&sde->dd->sdma_unfreeze_count, -1);
649 	wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
650 }
651 
652 static void sdma_start_hw_clean_up(struct sdma_engine *sde)
653 {
654 	tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
655 }
656 
657 static void sdma_set_state(struct sdma_engine *sde,
658 			   enum sdma_states next_state)
659 {
660 	struct sdma_state *ss = &sde->state;
661 	const struct sdma_set_state_action *action = sdma_action_table;
662 	unsigned op = 0;
663 
664 	trace_hfi1_sdma_state(
665 		sde,
666 		sdma_state_names[ss->current_state],
667 		sdma_state_names[next_state]);
668 
669 	/* debugging bookkeeping */
670 	ss->previous_state = ss->current_state;
671 	ss->previous_op = ss->current_op;
672 	ss->current_state = next_state;
673 
674 	if (ss->previous_state != sdma_state_s99_running &&
675 	    next_state == sdma_state_s99_running)
676 		sdma_flush(sde);
677 
678 	if (action[next_state].op_enable)
679 		op |= SDMA_SENDCTRL_OP_ENABLE;
680 
681 	if (action[next_state].op_intenable)
682 		op |= SDMA_SENDCTRL_OP_INTENABLE;
683 
684 	if (action[next_state].op_halt)
685 		op |= SDMA_SENDCTRL_OP_HALT;
686 
687 	if (action[next_state].op_cleanup)
688 		op |= SDMA_SENDCTRL_OP_CLEANUP;
689 
690 	if (action[next_state].go_s99_running_tofalse)
691 		ss->go_s99_running = 0;
692 
693 	if (action[next_state].go_s99_running_totrue)
694 		ss->go_s99_running = 1;
695 
696 	ss->current_op = op;
697 	sdma_sendctrl(sde, ss->current_op);
698 }
699 
700 /**
701  * sdma_get_descq_cnt() - called when device probed
702  *
703  * Return a validated descq count.
704  *
705  * This is currently only used in the verbs initialization to build the tx
706  * list.
707  *
708  * This will probably be deleted in favor of a more scalable approach to
709  * alloc tx's.
710  *
711  */
712 u16 sdma_get_descq_cnt(void)
713 {
714 	u16 count = sdma_descq_cnt;
715 
716 	if (!count)
717 		return SDMA_DESCQ_CNT;
718 	/* count must be a power of 2, at least 64 and at most
719 	 * 32768.  Otherwise return the default.
720 	 */
721 	if (!is_power_of_2(count))
722 		return SDMA_DESCQ_CNT;
723 	if (count < 64 || count > 32768)
724 		return SDMA_DESCQ_CNT;
725 	return count;
726 }
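/*
 * Worked example (illustrative values, not from this file): loading the
 * module with sdma_descq_cnt=1024 passes both checks above (a power of 2
 * within [64, 32768]) and is used as-is; sdma_descq_cnt=1000 fails
 * is_power_of_2() and silently falls back to SDMA_DESCQ_CNT (2048).
 */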
727 
728 /**
729  * sdma_engine_get_vl() - return vl for a given sdma engine
730  * @sde: sdma engine
731  *
732  * This function returns the vl mapped to a given engine, or an error if
733  * the mapping can't be found. The mapping fields are protected by RCU.
734  */
735 int sdma_engine_get_vl(struct sdma_engine *sde)
736 {
737 	struct hfi1_devdata *dd = sde->dd;
738 	struct sdma_vl_map *m;
739 	u8 vl;
740 
741 	if (sde->this_idx >= TXE_NUM_SDMA_ENGINES)
742 		return -EINVAL;
743 
744 	rcu_read_lock();
745 	m = rcu_dereference(dd->sdma_map);
746 	if (unlikely(!m)) {
747 		rcu_read_unlock();
748 		return -EINVAL;
749 	}
750 	vl = m->engine_to_vl[sde->this_idx];
751 	rcu_read_unlock();
752 
753 	return vl;
754 }
755 
756 /**
757  * sdma_select_engine_vl() - select sdma engine
758  * @dd: devdata
759  * @selector: a spreading factor
760  * @vl: this vl
761  *
762  *
763  * This function returns an engine based on the selector and a vl.  The
764  * mapping fields are protected by RCU.
765  */
766 struct sdma_engine *sdma_select_engine_vl(
767 	struct hfi1_devdata *dd,
768 	u32 selector,
769 	u8 vl)
770 {
771 	struct sdma_vl_map *m;
772 	struct sdma_map_elem *e;
773 	struct sdma_engine *rval;
774 
775 	/* NOTE This should only happen if SC->VL changed after the initial
776 	 *      checks on the QP/AH
777 	 *      Default will return engine 0 below
778 	 */
779 	if (vl >= num_vls) {
780 		rval = NULL;
781 		goto done;
782 	}
783 
784 	rcu_read_lock();
785 	m = rcu_dereference(dd->sdma_map);
786 	if (unlikely(!m)) {
787 		rcu_read_unlock();
788 		return &dd->per_sdma[0];
789 	}
790 	e = m->map[vl & m->mask];
791 	rval = e->sde[selector & e->mask];
792 	rcu_read_unlock();
793 
794 done:
795 	rval = !rval ? &dd->per_sdma[0] : rval;
796 	trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
797 	return rval;
798 }
799 
800 /**
801  * sdma_select_engine_sc() - select sdma engine
802  * @dd: devdata
803  * @selector: a spreading factor
804  * @sc5: the 5 bit sc
805  *
806  *
807  * This function returns an engine based on the selector and an sc.
808  */
809 struct sdma_engine *sdma_select_engine_sc(
810 	struct hfi1_devdata *dd,
811 	u32 selector,
812 	u8 sc5)
813 {
814 	u8 vl = sc_to_vlt(dd, sc5);
815 
816 	return sdma_select_engine_vl(dd, selector, vl);
817 }
818 
819 struct sdma_rht_map_elem {
820 	u32 mask;
821 	u8 ctr;
822 	struct sdma_engine *sde[0];
823 };
824 
825 struct sdma_rht_node {
826 	unsigned long cpu_id;
827 	struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED];
828 	struct rhash_head node;
829 };
830 
831 #define NR_CPUS_HINT 192
832 
833 static const struct rhashtable_params sdma_rht_params = {
834 	.nelem_hint = NR_CPUS_HINT,
835 	.head_offset = offsetof(struct sdma_rht_node, node),
836 	.key_offset = offsetof(struct sdma_rht_node, cpu_id),
837 	.key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id),
838 	.max_size = NR_CPUS,
839 	.min_size = 8,
840 	.automatic_shrinking = true,
841 };
842 
843 /**
844  * sdma_select_user_engine() - select sdma engine based on user setup
845  * @dd: devdata
846  * @selector: a spreading factor
847  * @vl: this vl
848  *
849  * This function returns an sdma engine for a user sdma request.
850  * User defined sdma engine affinity setting is honored when applicable,
851  * otherwise system default sdma engine mapping is used. To ensure correct
852  * ordering, the mapping from <selector, vl> to sde must remain unchanged.
853  */
854 struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
855 					    u32 selector, u8 vl)
856 {
857 	struct sdma_rht_node *rht_node;
858 	struct sdma_engine *sde = NULL;
859 	const struct cpumask *current_mask = &current->cpus_allowed;
860 	unsigned long cpu_id;
861 
862 	/*
863 	 * To ensure that the same sdma engine(s) are always
864 	 * selected, make sure the process is pinned to this CPU only.
865 	 */
866 	if (cpumask_weight(current_mask) != 1)
867 		goto out;
868 
869 	cpu_id = smp_processor_id();
870 	rcu_read_lock();
871 	rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id,
872 					  sdma_rht_params);
873 
874 	if (rht_node && rht_node->map[vl]) {
875 		struct sdma_rht_map_elem *map = rht_node->map[vl];
876 
877 		sde = map->sde[selector & map->mask];
878 	}
879 	rcu_read_unlock();
880 
881 	if (sde)
882 		return sde;
883 
884 out:
885 	return sdma_select_engine_vl(dd, selector, vl);
886 }
887 
888 static void sdma_populate_sde_map(struct sdma_rht_map_elem *map)
889 {
890 	int i;
891 
892 	for (i = 0; i < roundup_pow_of_two(map->ctr ? : 1) - map->ctr; i++)
893 		map->sde[map->ctr + i] = map->sde[i];
894 }
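/*
 * Worked example (illustrative): with map->ctr = 3 entries {A, B, C},
 * roundup_pow_of_two(3) is 4, so the loop above copies one extra entry,
 * yielding sde[] = {A, B, C, A}.  Any "selector & mask" value in 0..3
 * then lands on a valid engine without needing a modulo operation.
 */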
895 
896 static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map,
897 				 struct sdma_engine *sde)
898 {
899 	unsigned int i, pow;
900 
901 	/* only need to check the first ctr entries for a match */
902 	for (i = 0; i < map->ctr; i++) {
903 		if (map->sde[i] == sde) {
904 			memmove(&map->sde[i], &map->sde[i + 1],
905 				(map->ctr - i - 1) * sizeof(map->sde[0]));
906 			map->ctr--;
907 			pow = roundup_pow_of_two(map->ctr ? : 1);
908 			map->mask = pow - 1;
909 			sdma_populate_sde_map(map);
910 			break;
911 		}
912 	}
913 }
914 
915 /*
916  * Prevents concurrent reads and writes of the sdma engine cpu_mask
917  */
918 static DEFINE_MUTEX(process_to_sde_mutex);
919 
920 ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
921 				size_t count)
922 {
923 	struct hfi1_devdata *dd = sde->dd;
924 	cpumask_var_t mask, new_mask;
925 	unsigned long cpu;
926 	int ret, vl, sz;
927 
928 	vl = sdma_engine_get_vl(sde);
929 	if (unlikely(vl < 0))
930 		return -EINVAL;
931 
932 	ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
933 	if (!ret)
934 		return -ENOMEM;
935 
936 	ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL);
937 	if (!ret) {
938 		free_cpumask_var(mask);
939 		return -ENOMEM;
940 	}
941 	ret = cpulist_parse(buf, mask);
942 	if (ret)
943 		goto out_free;
944 
945 	if (!cpumask_subset(mask, cpu_online_mask)) {
946 		dd_dev_warn(sde->dd, "Invalid CPU mask\n");
947 		ret = -EINVAL;
948 		goto out_free;
949 	}
950 
951 	sz = sizeof(struct sdma_rht_map_elem) +
952 			(TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *));
953 
954 	mutex_lock(&process_to_sde_mutex);
955 
956 	for_each_cpu(cpu, mask) {
957 		struct sdma_rht_node *rht_node;
958 
959 		/* Check if we have this already mapped */
960 		if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
961 			cpumask_set_cpu(cpu, new_mask);
962 			continue;
963 		}
964 
965 		if (vl >= ARRAY_SIZE(rht_node->map)) {
966 			ret = -EINVAL;
967 			goto out;
968 		}
969 
970 		rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
971 						  sdma_rht_params);
972 		if (!rht_node) {
973 			rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL);
974 			if (!rht_node) {
975 				ret = -ENOMEM;
976 				goto out;
977 			}
978 
979 			rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
980 			if (!rht_node->map[vl]) {
981 				kfree(rht_node);
982 				ret = -ENOMEM;
983 				goto out;
984 			}
985 			rht_node->cpu_id = cpu;
986 			rht_node->map[vl]->mask = 0;
987 			rht_node->map[vl]->ctr = 1;
988 			rht_node->map[vl]->sde[0] = sde;
989 
990 			ret = rhashtable_insert_fast(dd->sdma_rht,
991 						     &rht_node->node,
992 						     sdma_rht_params);
993 			if (ret) {
994 				kfree(rht_node->map[vl]);
995 				kfree(rht_node);
996 				dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n",
997 					   cpu);
998 				goto out;
999 			}
1000 
1001 		} else {
1002 			int ctr, pow;
1003 
1004 			/* Add new user mappings */
1005 			if (!rht_node->map[vl])
1006 				rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
1007 
1008 			if (!rht_node->map[vl]) {
1009 				ret = -ENOMEM;
1010 				goto out;
1011 			}
1012 
1013 			rht_node->map[vl]->ctr++;
1014 			ctr = rht_node->map[vl]->ctr;
1015 			rht_node->map[vl]->sde[ctr - 1] = sde;
1016 			pow = roundup_pow_of_two(ctr);
1017 			rht_node->map[vl]->mask = pow - 1;
1018 
1019 			/* Populate the sde map table */
1020 			sdma_populate_sde_map(rht_node->map[vl]);
1021 		}
1022 		cpumask_set_cpu(cpu, new_mask);
1023 	}
1024 
1025 	/* Clean up old mappings */
1026 	for_each_cpu(cpu, cpu_online_mask) {
1027 		struct sdma_rht_node *rht_node;
1028 
1029 		/* Don't cleanup sdes that are set in the new mask */
1030 		/* Don't clean up sdes that are set in the new mask */
1031 			continue;
1032 
1033 		rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
1034 						  sdma_rht_params);
1035 		if (rht_node) {
1036 			bool empty = true;
1037 			int i;
1038 
1039 			/* Remove mappings for old sde */
1040 			for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
1041 				if (rht_node->map[i])
1042 					sdma_cleanup_sde_map(rht_node->map[i],
1043 							     sde);
1044 
1045 			/* Free empty hash table entries */
1046 			for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
1047 				if (!rht_node->map[i])
1048 					continue;
1049 
1050 				if (rht_node->map[i]->ctr) {
1051 					empty = false;
1052 					break;
1053 				}
1054 			}
1055 
1056 			if (empty) {
1057 				ret = rhashtable_remove_fast(dd->sdma_rht,
1058 							     &rht_node->node,
1059 							     sdma_rht_params);
1060 				WARN_ON(ret);
1061 
1062 				for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
1063 					kfree(rht_node->map[i]);
1064 
1065 				kfree(rht_node);
1066 			}
1067 		}
1068 	}
1069 
1070 	cpumask_copy(&sde->cpu_mask, new_mask);
1071 out:
1072 	mutex_unlock(&process_to_sde_mutex);
1073 out_free:
1074 	free_cpumask_var(mask);
1075 	free_cpumask_var(new_mask);
1076 	return ret ? : strnlen(buf, PAGE_SIZE);
1077 }
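/*
 * Usage sketch (the exact sysfs path below is an assumption for
 * illustration, not taken from this file):
 *
 *	echo "0-3" > /sys/class/infiniband/hfi1_0/sdma3/cpu_list
 *
 * would route user SDMA requests from processes pinned to CPUs 0-3 to
 * engine 3 for this engine's VL, subject to the single-CPU pinning
 * check in sdma_select_user_engine().
 */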
1078 
1079 ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf)
1080 {
1081 	mutex_lock(&process_to_sde_mutex);
1082 	if (cpumask_empty(&sde->cpu_mask))
1083 		snprintf(buf, PAGE_SIZE, "%s\n", "empty");
1084 	else
1085 		cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask);
1086 	mutex_unlock(&process_to_sde_mutex);
1087 	return strnlen(buf, PAGE_SIZE);
1088 }
1089 
1090 static void sdma_rht_free(void *ptr, void *arg)
1091 {
1092 	struct sdma_rht_node *rht_node = ptr;
1093 	int i;
1094 
1095 	for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
1096 		kfree(rht_node->map[i]);
1097 
1098 	kfree(rht_node);
1099 }
1100 
1101 /**
1102  * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings
1103  * @s: seq file
1104  * @dd: hfi1_devdata
1105  * @cpuid: cpu id
1106  *
1107  * This routine dumps the process-to-sde mappings per cpu.
1108  */
1109 void sdma_seqfile_dump_cpu_list(struct seq_file *s,
1110 				struct hfi1_devdata *dd,
1111 				unsigned long cpuid)
1112 {
1113 	struct sdma_rht_node *rht_node;
1114 	int i, j;
1115 
1116 	rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid,
1117 					  sdma_rht_params);
1118 	if (!rht_node)
1119 		return;
1120 
1121 	seq_printf(s, "cpu%3lu: ", cpuid);
1122 	for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
1123 		if (!rht_node->map[i] || !rht_node->map[i]->ctr)
1124 			continue;
1125 
1126 		seq_printf(s, " vl%d: [", i);
1127 
1128 		for (j = 0; j < rht_node->map[i]->ctr; j++) {
1129 			if (!rht_node->map[i]->sde[j])
1130 				continue;
1131 
1132 			if (j > 0)
1133 				seq_puts(s, ",");
1134 
1135 			seq_printf(s, " sdma%2d",
1136 				   rht_node->map[i]->sde[j]->this_idx);
1137 		}
1138 		seq_puts(s, " ]");
1139 	}
1140 
1141 	seq_puts(s, "\n");
1142 }
1143 
1144 /*
1145  * Free the indicated map struct
1146  */
1147 static void sdma_map_free(struct sdma_vl_map *m)
1148 {
1149 	int i;
1150 
1151 	for (i = 0; m && i < m->actual_vls; i++)
1152 		kfree(m->map[i]);
1153 	kfree(m);
1154 }
1155 
1156 /*
1157  * Handle RCU callback
1158  */
1159 static void sdma_map_rcu_callback(struct rcu_head *list)
1160 {
1161 	struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);
1162 
1163 	sdma_map_free(m);
1164 }
1165 
1166 /**
1167  * sdma_map_init - called when # vls change
1168  * @dd: hfi1_devdata
1169  * @port: port number
1170  * @num_vls: number of vls
1171  * @vl_engines: per vl engine mapping (optional)
1172  *
1173  * This routine changes the mapping based on the number of vls.
1174  *
1175  * vl_engines is used to specify a non-uniform vl/engine loading. NULL
1176  * implies auto computing the loading and giving each VLs a uniform
1177  * implies auto-computing the loading and giving each VL a uniform
1178  *
1179  * The auto algorithm computes the sde_per_vl and the number of extra
1180  * engines.  Any extra engines are added from the last VL on down.
1181  *
1182  * rcu locking is used here to control access to the mapping fields.
1183  *
1184  * If either the num_vls or num_sdma are non-power of 2, the array sizes
1185  * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
1186  * up to the next highest power of 2 and the first entry is reused
1187  * in a round robin fashion.
1188  *
1189  * If an error occurs the map change is abandoned and the previous
1190  * mapping is left unchanged.
1191  *
1192  */
1193 int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
1194 {
1195 	int i, j;
1196 	int extra, sde_per_vl;
1197 	int engine = 0;
1198 	u8 lvl_engines[OPA_MAX_VLS];
1199 	struct sdma_vl_map *oldmap, *newmap;
1200 
1201 	if (!(dd->flags & HFI1_HAS_SEND_DMA))
1202 		return 0;
1203 
1204 	if (!vl_engines) {
1205 		/* truncate divide */
1206 		sde_per_vl = dd->num_sdma / num_vls;
1207 		/* extras */
1208 		extra = dd->num_sdma % num_vls;
1209 		vl_engines = lvl_engines;
1210 		/* add extras from last vl down */
1211 		for (i = num_vls - 1; i >= 0; i--, extra--)
1212 			vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
1213 	}
1214 	/* build new map */
1215 	newmap = kzalloc(
1216 		sizeof(struct sdma_vl_map) +
1217 			roundup_pow_of_two(num_vls) *
1218 			sizeof(struct sdma_map_elem *),
1219 		GFP_KERNEL);
1220 	if (!newmap)
1221 		goto bail;
1222 	newmap->actual_vls = num_vls;
1223 	newmap->vls = roundup_pow_of_two(num_vls);
1224 	newmap->mask = (1 << ilog2(newmap->vls)) - 1;
1225 	/* initialize back-map */
1226 	for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++)
1227 		newmap->engine_to_vl[i] = -1;
1228 	for (i = 0; i < newmap->vls; i++) {
1229 		/* save for wrap around */
1230 		int first_engine = engine;
1231 
1232 		if (i < newmap->actual_vls) {
1233 			int sz = roundup_pow_of_two(vl_engines[i]);
1234 
1235 			/* only allocate once */
1236 			newmap->map[i] = kzalloc(
1237 				sizeof(struct sdma_map_elem) +
1238 					sz * sizeof(struct sdma_engine *),
1239 				GFP_KERNEL);
1240 			if (!newmap->map[i])
1241 				goto bail;
1242 			newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
1243 			/* assign engines */
1244 			for (j = 0; j < sz; j++) {
1245 				newmap->map[i]->sde[j] =
1246 					&dd->per_sdma[engine];
1247 				if (++engine >= first_engine + vl_engines[i])
1248 					/* wrap back to first engine */
1249 					engine = first_engine;
1250 			}
1251 			/* assign back-map */
1252 			for (j = 0; j < vl_engines[i]; j++)
1253 				newmap->engine_to_vl[first_engine + j] = i;
1254 		} else {
1255 			/* just re-use entry without allocating */
1256 			newmap->map[i] = newmap->map[i % num_vls];
1257 		}
1258 		engine = first_engine + vl_engines[i];
1259 	}
1260 	/* newmap in hand, save old map */
1261 	spin_lock_irq(&dd->sde_map_lock);
1262 	oldmap = rcu_dereference_protected(dd->sdma_map,
1263 					   lockdep_is_held(&dd->sde_map_lock));
1264 
1265 	/* publish newmap */
1266 	rcu_assign_pointer(dd->sdma_map, newmap);
1267 
1268 	spin_unlock_irq(&dd->sde_map_lock);
1269 	/* success, free any old map after grace period */
1270 	if (oldmap)
1271 		call_rcu(&oldmap->list, sdma_map_rcu_callback);
1272 	return 0;
1273 bail:
1274 	/* free any partial allocation */
1275 	sdma_map_free(newmap);
1276 	return -ENOMEM;
1277 }
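/*
 * Worked example of the auto-distribution in sdma_map_init()
 * (illustrative numbers): with dd->num_sdma = 16 and num_vls = 5,
 * sde_per_vl = 16 / 5 = 3 and extra = 16 % 5 = 1.  Walking from the
 * last VL down gives vl_engines[] = {3, 3, 3, 3, 4}; the single extra
 * engine is absorbed by VL4.  num_vls is then rounded up to 8 map
 * slots, with slots 5-7 reusing entries 0-2 in round-robin fashion.
 */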
1278 
1279 /*
1280  * Clean up allocated memory.
1281  *
1282  * This routine can be called regardless of the success of sdma_init().
1283  *
1284  */
1285 static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
1286 {
1287 	size_t i;
1288 	struct sdma_engine *sde;
1289 
1290 	if (dd->sdma_pad_dma) {
1291 		dma_free_coherent(&dd->pcidev->dev, 4,
1292 				  (void *)dd->sdma_pad_dma,
1293 				  dd->sdma_pad_phys);
1294 		dd->sdma_pad_dma = NULL;
1295 		dd->sdma_pad_phys = 0;
1296 	}
1297 	if (dd->sdma_heads_dma) {
1298 		dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
1299 				  (void *)dd->sdma_heads_dma,
1300 				  dd->sdma_heads_phys);
1301 		dd->sdma_heads_dma = NULL;
1302 		dd->sdma_heads_phys = 0;
1303 	}
1304 	for (i = 0; dd->per_sdma && i < num_engines; ++i) {
1305 		sde = &dd->per_sdma[i];
1306 
1307 		sde->head_dma = NULL;
1308 		sde->head_phys = 0;
1309 
1310 		if (sde->descq) {
1311 			dma_free_coherent(
1312 				&dd->pcidev->dev,
1313 				sde->descq_cnt * sizeof(u64[2]),
1314 				sde->descq,
1315 				sde->descq_phys
1316 			);
1317 			sde->descq = NULL;
1318 			sde->descq_phys = 0;
1319 		}
1320 		kvfree(sde->tx_ring);
1321 		sde->tx_ring = NULL;
1322 	}
1323 	spin_lock_irq(&dd->sde_map_lock);
1324 	sdma_map_free(rcu_access_pointer(dd->sdma_map));
1325 	RCU_INIT_POINTER(dd->sdma_map, NULL);
1326 	spin_unlock_irq(&dd->sde_map_lock);
1327 	synchronize_rcu();
1328 	kfree(dd->per_sdma);
1329 	dd->per_sdma = NULL;
1330 
1331 	if (dd->sdma_rht) {
1332 		rhashtable_free_and_destroy(dd->sdma_rht, sdma_rht_free, NULL);
1333 		kfree(dd->sdma_rht);
1334 		dd->sdma_rht = NULL;
1335 	}
1336 }
1337 
1338 /**
1339  * sdma_init() - called when device probed
1340  * @dd: hfi1_devdata
1341  * @port: port number (currently only zero)
1342  *
1343  * Initializes each sde and its csrs.
1344  * Interrupts are not required to be enabled.
1345  *
1346  * Returns:
1347  * 0 - success, -errno on failure
1348  */
1349 int sdma_init(struct hfi1_devdata *dd, u8 port)
1350 {
1351 	unsigned this_idx;
1352 	struct sdma_engine *sde;
1353 	struct rhashtable *tmp_sdma_rht;
1354 	u16 descq_cnt;
1355 	void *curr_head;
1356 	struct hfi1_pportdata *ppd = dd->pport + port;
1357 	u32 per_sdma_credits;
1358 	uint idle_cnt = sdma_idle_cnt;
1359 	size_t num_engines = dd->chip_sdma_engines;
1360 	int ret = -ENOMEM;
1361 
1362 	if (!HFI1_CAP_IS_KSET(SDMA)) {
1363 		HFI1_CAP_CLEAR(SDMA_AHG);
1364 		return 0;
1365 	}
1366 	if (mod_num_sdma &&
1367 	    /* can't exceed chip support */
1368 	    mod_num_sdma <= dd->chip_sdma_engines &&
1369 	    /* count must be >= vls */
1370 	    mod_num_sdma >= num_vls)
1371 		num_engines = mod_num_sdma;
1372 
1373 	dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
1374 	dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
1375 	dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
1376 		    dd->chip_sdma_mem_size);
1377 
1378 	per_sdma_credits =
1379 		dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE);
1380 
1381 	/* set up freeze waitqueue */
1382 	init_waitqueue_head(&dd->sdma_unfreeze_wq);
1383 	atomic_set(&dd->sdma_unfreeze_count, 0);
1384 
1385 	descq_cnt = sdma_get_descq_cnt();
1386 	dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
1387 		    num_engines, descq_cnt);
1388 
1389 	/* alloc memory for array of send engines */
1390 	dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL);
1391 	if (!dd->per_sdma)
1392 		return ret;
1393 
1394 	idle_cnt = ns_to_cclock(dd, idle_cnt);
1395 	if (!sdma_desct_intr)
1396 		sdma_desct_intr = SDMA_DESC_INTR;
1397 
1398 	/* Allocate memory for SendDMA descriptor FIFOs */
1399 	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1400 		sde = &dd->per_sdma[this_idx];
1401 		sde->dd = dd;
1402 		sde->ppd = ppd;
1403 		sde->this_idx = this_idx;
1404 		sde->descq_cnt = descq_cnt;
1405 		sde->desc_avail = sdma_descq_freecnt(sde);
1406 		sde->sdma_shift = ilog2(descq_cnt);
1407 		sde->sdma_mask = (1 << sde->sdma_shift) - 1;
1408 
1409 		/* Create a mask specifically for each interrupt source */
1410 		sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES +
1411 					   this_idx);
1412 		sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES +
1413 						this_idx);
1414 		sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES +
1415 					    this_idx);
1416 		/* Create a combined mask to cover all 3 interrupt sources */
1417 		sde->imask = sde->int_mask | sde->progress_mask |
1418 			     sde->idle_mask;
1419 
1420 		spin_lock_init(&sde->tail_lock);
1421 		seqlock_init(&sde->head_lock);
1422 		spin_lock_init(&sde->senddmactrl_lock);
1423 		spin_lock_init(&sde->flushlist_lock);
1424 		/* ensure there is always a zero bit */
1425 		sde->ahg_bits = 0xfffffffe00000000ULL;
1426 
1427 		sdma_set_state(sde, sdma_state_s00_hw_down);
1428 
1429 		/* set up reference counting */
1430 		kref_init(&sde->state.kref);
1431 		init_completion(&sde->state.comp);
1432 
1433 		INIT_LIST_HEAD(&sde->flushlist);
1434 		INIT_LIST_HEAD(&sde->dmawait);
1435 
1436 		sde->tail_csr =
1437 			get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
1438 
1439 		if (idle_cnt)
1440 			dd->default_desc1 =
1441 				SDMA_DESC1_HEAD_TO_HOST_FLAG;
1442 		else
1443 			dd->default_desc1 =
1444 				SDMA_DESC1_INT_REQ_FLAG;
1445 
1446 		tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
1447 			     (unsigned long)sde);
1448 
1449 		tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
1450 			     (unsigned long)sde);
1451 		INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
1452 		INIT_WORK(&sde->flush_worker, sdma_field_flush);
1453 
1454 		sde->progress_check_head = 0;
1455 
1456 		setup_timer(&sde->err_progress_check_timer,
1457 			    sdma_err_progress_check, (unsigned long)sde);
1458 
1459 		sde->descq = dma_zalloc_coherent(
1460 			&dd->pcidev->dev,
1461 			descq_cnt * sizeof(u64[2]),
1462 			&sde->descq_phys,
1463 			GFP_KERNEL
1464 		);
1465 		if (!sde->descq)
1466 			goto bail;
1467 		sde->tx_ring =
1468 			kcalloc(descq_cnt, sizeof(struct sdma_txreq *),
1469 				GFP_KERNEL);
1470 		if (!sde->tx_ring)
1471 			sde->tx_ring =
1472 				vzalloc(
1473 					sizeof(struct sdma_txreq *) *
1474 					descq_cnt);
1475 		if (!sde->tx_ring)
1476 			goto bail;
1477 	}
1478 
1479 	dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
1480 	/* Allocate memory for DMA of head registers to memory */
1481 	dd->sdma_heads_dma = dma_zalloc_coherent(
1482 		&dd->pcidev->dev,
1483 		dd->sdma_heads_size,
1484 		&dd->sdma_heads_phys,
1485 		GFP_KERNEL
1486 	);
1487 	if (!dd->sdma_heads_dma) {
1488 		dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
1489 		goto bail;
1490 	}
1491 
1492 	/* Allocate memory for pad */
1493 	dd->sdma_pad_dma = dma_zalloc_coherent(
1494 		&dd->pcidev->dev,
1495 		sizeof(u32),
1496 		&dd->sdma_pad_phys,
1497 		GFP_KERNEL
1498 	);
1499 	if (!dd->sdma_pad_dma) {
1500 		dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
1501 		goto bail;
1502 	}
1503 
1504 	/* assign each engine to different cacheline and init registers */
1505 	curr_head = (void *)dd->sdma_heads_dma;
1506 	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1507 		unsigned long phys_offset;
1508 
1509 		sde = &dd->per_sdma[this_idx];
1510 
1511 		sde->head_dma = curr_head;
1512 		curr_head += L1_CACHE_BYTES;
1513 		phys_offset = (unsigned long)sde->head_dma -
1514 			      (unsigned long)dd->sdma_heads_dma;
1515 		sde->head_phys = dd->sdma_heads_phys + phys_offset;
1516 		init_sdma_regs(sde, per_sdma_credits, idle_cnt);
1517 	}
1518 	dd->flags |= HFI1_HAS_SEND_DMA;
1519 	dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
1520 	dd->num_sdma = num_engines;
1521 	ret = sdma_map_init(dd, port, ppd->vls_operational, NULL);
1522 	if (ret < 0)
1523 		goto bail;
1524 
1525 	tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL);
1526 	if (!tmp_sdma_rht) {
1527 		ret = -ENOMEM;
1528 		goto bail;
1529 	}
1530 
1531 	ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
1532 	if (ret < 0)
1533 		goto bail;
1534 	dd->sdma_rht = tmp_sdma_rht;
1535 
1536 	dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
1537 	return 0;
1538 
1539 bail:
1540 	sdma_clean(dd, num_engines);
1541 	return ret;
1542 }
1543 
1544 /**
1545  * sdma_all_running() - called when the link goes up
1546  * @dd: hfi1_devdata
1547  *
1548  * This routine moves all engines to the running state.
1549  */
1550 void sdma_all_running(struct hfi1_devdata *dd)
1551 {
1552 	struct sdma_engine *sde;
1553 	unsigned int i;
1554 
1555 	/* move all engines to running */
1556 	for (i = 0; i < dd->num_sdma; ++i) {
1557 		sde = &dd->per_sdma[i];
1558 		sdma_process_event(sde, sdma_event_e30_go_running);
1559 	}
1560 }
1561 
1562 /**
1563  * sdma_all_idle() - called when the link goes down
1564  * @dd: hfi1_devdata
1565  *
1566  * This routine moves all engines to the idle state.
1567  */
1568 void sdma_all_idle(struct hfi1_devdata *dd)
1569 {
1570 	struct sdma_engine *sde;
1571 	unsigned int i;
1572 
1573 	/* idle all engines */
1574 	for (i = 0; i < dd->num_sdma; ++i) {
1575 		sde = &dd->per_sdma[i];
1576 		sdma_process_event(sde, sdma_event_e70_go_idle);
1577 	}
1578 }
1579 
1580 /**
1581  * sdma_start() - called to kick off state processing for all engines
1582  * @dd: hfi1_devdata
1583  *
1584  * This routine is for kicking off the state processing for all required
1585  * sdma engines.  Interrupts need to be working at this point.
1586  *
1587  */
1588 void sdma_start(struct hfi1_devdata *dd)
1589 {
1590 	unsigned i;
1591 	struct sdma_engine *sde;
1592 
1593 	/* kick off the engines state processing */
1594 	for (i = 0; i < dd->num_sdma; ++i) {
1595 		sde = &dd->per_sdma[i];
1596 		sdma_process_event(sde, sdma_event_e10_go_hw_start);
1597 	}
1598 }
1599 
1600 /**
1601  * sdma_exit() - used when module is removed
1602  * @dd: hfi1_devdata
1603  */
1604 void sdma_exit(struct hfi1_devdata *dd)
1605 {
1606 	unsigned this_idx;
1607 	struct sdma_engine *sde;
1608 
1609 	for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
1610 			++this_idx) {
1611 		sde = &dd->per_sdma[this_idx];
1612 		if (!list_empty(&sde->dmawait))
1613 			dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
1614 				   sde->this_idx);
1615 		sdma_process_event(sde, sdma_event_e00_go_hw_down);
1616 
1617 		del_timer_sync(&sde->err_progress_check_timer);
1618 
1619 		/*
1620 		 * This waits for the state machine to exit so it is not
1621 		 * necessary to kill the sdma_sw_clean_up_task to make sure
1622 		 * it is not running.
1623 		 */
1624 		sdma_finalput(&sde->state);
1625 	}
1626 	sdma_clean(dd, dd->num_sdma);
1627 }
1628 
1629 /*
1630  * unmap the indicated descriptor
1631  */
1632 static inline void sdma_unmap_desc(
1633 	struct hfi1_devdata *dd,
1634 	struct sdma_desc *descp)
1635 {
1636 	switch (sdma_mapping_type(descp)) {
1637 	case SDMA_MAP_SINGLE:
1638 		dma_unmap_single(
1639 			&dd->pcidev->dev,
1640 			sdma_mapping_addr(descp),
1641 			sdma_mapping_len(descp),
1642 			DMA_TO_DEVICE);
1643 		break;
1644 	case SDMA_MAP_PAGE:
1645 		dma_unmap_page(
1646 			&dd->pcidev->dev,
1647 			sdma_mapping_addr(descp),
1648 			sdma_mapping_len(descp),
1649 			DMA_TO_DEVICE);
1650 		break;
1651 	}
1652 }
1653 
1654 /*
1655  * return the mode as indicated by the first
1656  * descriptor in the tx.
1657  */
1658 static inline u8 ahg_mode(struct sdma_txreq *tx)
1659 {
1660 	return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1661 		>> SDMA_DESC1_HEADER_MODE_SHIFT;
1662 }
1663 
1664 /**
1665  * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
1666  * @dd: hfi1_devdata for unmapping
1667  * @tx: tx request to clean
1668  *
1669  * This is used in the progress routine to clean the tx or
1670  * by the ULP to toss an in-process tx build.
1671  *
1672  * The code can be called multiple times without issue.
1673  *
1674  */
1675 void __sdma_txclean(
1676 	struct hfi1_devdata *dd,
1677 	struct sdma_txreq *tx)
1678 {
1679 	u16 i;
1680 
1681 	if (tx->num_desc) {
1682 		u8 skip = 0, mode = ahg_mode(tx);
1683 
1684 		/* unmap first */
1685 		sdma_unmap_desc(dd, &tx->descp[0]);
1686 		/* determine number of AHG descriptors to skip */
1687 		if (mode > SDMA_AHG_APPLY_UPDATE1)
1688 			skip = mode >> 1;
1689 		for (i = 1 + skip; i < tx->num_desc; i++)
1690 			sdma_unmap_desc(dd, &tx->descp[i]);
1691 		tx->num_desc = 0;
1692 	}
1693 	kfree(tx->coalesce_buf);
1694 	tx->coalesce_buf = NULL;
1695 	/* kmalloc'ed descp */
1696 	if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
1697 		tx->desc_limit = ARRAY_SIZE(tx->descs);
1698 		kfree(tx->descp);
1699 	}
1700 }
1701 
1702 static inline u16 sdma_gethead(struct sdma_engine *sde)
1703 {
1704 	struct hfi1_devdata *dd = sde->dd;
1705 	int use_dmahead;
1706 	u16 hwhead;
1707 
1708 #ifdef CONFIG_SDMA_VERBOSITY
1709 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1710 		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1711 #endif
1712 
1713 retry:
1714 	use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
1715 					(dd->flags & HFI1_HAS_SDMA_TIMEOUT);
1716 	hwhead = use_dmahead ?
1717 		(u16)le64_to_cpu(*sde->head_dma) :
1718 		(u16)read_sde_csr(sde, SD(HEAD));
1719 
1720 	if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
1721 		u16 cnt;
1722 		u16 swtail;
1723 		u16 swhead;
1724 		int sane;
1725 
1726 		swhead = sde->descq_head & sde->sdma_mask;
1727 		/* this code is really bad for cache line trading */
1728 		swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
1729 		cnt = sde->descq_cnt;
1730 
1731 		if (swhead < swtail)
1732 			/* not wrapped */
1733 			sane = (hwhead >= swhead) && (hwhead <= swtail);
1734 		else if (swhead > swtail)
1735 			/* wrapped around */
1736 			sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
1737 				(hwhead <= swtail);
1738 		else
1739 			/* empty */
1740 			sane = (hwhead == swhead);
1741 
1742 		if (unlikely(!sane)) {
1743 			dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
1744 				   sde->this_idx,
1745 				   use_dmahead ? "dma" : "kreg",
1746 				   hwhead, swhead, swtail, cnt);
1747 			if (use_dmahead) {
1748 				/* try one more time, using csr */
1749 				use_dmahead = 0;
1750 				goto retry;
1751 			}
1752 			/* proceed as if no progress */
1753 			hwhead = swhead;
1754 		}
1755 	}
1756 	return hwhead;
1757 }
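/*
 * Sanity-check example (illustrative): with descq_cnt = 8, swhead = 6
 * and swtail = 2 the ring has wrapped, so a hardware head of 6, 7 or
 * 0..2 is sane, while hwhead = 4 (strictly between tail and head)
 * indicates a bogus value and triggers the CSR retry or the
 * no-progress fallback above.
 */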
1758 
1759 /*
1760  * This is called when there are send DMA descriptors that might be
1761  * available.
1762  *
1763  * This is called with head_lock held.
1764  */
1765 static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
1766 {
1767 	struct iowait *wait, *nw;
1768 	struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
1769 	uint i, n = 0, seq, max_idx = 0;
1770 	struct sdma_txreq *stx;
1771 	struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
1772 	u8 max_starved_cnt = 0;
1773 
1774 #ifdef CONFIG_SDMA_VERBOSITY
1775 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
1776 		   slashstrip(__FILE__), __LINE__, __func__);
1777 	dd_dev_err(sde->dd, "avail: %u\n", avail);
1778 #endif
1779 
1780 	do {
1781 		seq = read_seqbegin(&dev->iowait_lock);
1782 		if (!list_empty(&sde->dmawait)) {
1783 			/* at least one item */
1784 			write_seqlock(&dev->iowait_lock);
1785 			/* Harvest waiters wanting DMA descriptors */
1786 			list_for_each_entry_safe(
1787 					wait,
1788 					nw,
1789 					&sde->dmawait,
1790 					list) {
1791 				u16 num_desc = 0;
1792 
1793 				if (!wait->wakeup)
1794 					continue;
1795 				if (n == ARRAY_SIZE(waits))
1796 					break;
1797 				if (!list_empty(&wait->tx_head)) {
1798 					stx = list_first_entry(
1799 						&wait->tx_head,
1800 						struct sdma_txreq,
1801 						list);
1802 					num_desc = stx->num_desc;
1803 				}
1804 				if (num_desc > avail)
1805 					break;
1806 				avail -= num_desc;
1807 				/* Find the most starved wait member */
1808 				iowait_starve_find_max(wait, &max_starved_cnt,
1809 						       n, &max_idx);
1810 				list_del_init(&wait->list);
1811 				waits[n++] = wait;
1812 			}
1813 			write_sequnlock(&dev->iowait_lock);
1814 			break;
1815 		}
1816 	} while (read_seqretry(&dev->iowait_lock, seq));
1817 
1818 	/* Schedule the most starved one first */
1819 	if (n)
1820 		waits[max_idx]->wakeup(waits[max_idx], SDMA_AVAIL_REASON);
1821 
1822 	for (i = 0; i < n; i++)
1823 		if (i != max_idx)
1824 			waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
1825 }
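/*
 * Example (illustrative): if three waiters are harvested with starve
 * counts {2, 5, 1}, iowait_starve_find_max() leaves max_idx at the
 * count-5 waiter, which is woken first; the remaining waiters are then
 * woken in harvest order.
 */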
1826 
1827 /* head_lock must be held */
1828 static void sdma_make_progress(struct sdma_engine *sde, u64 status)
1829 {
1830 	struct sdma_txreq *txp = NULL;
1831 	int progress = 0;
1832 	u16 hwhead, swhead;
1833 	int idle_check_done = 0;
1834 
1835 	hwhead = sdma_gethead(sde);
1836 
1837 	/* The reason for some of the complexity of this code is that
1838 	 * not all descriptors have corresponding txps.  So, we have to
1839 	 * be able to skip over descs until we wander into the range of
1840 	 * the next txp on the list.
1841 	 */
1842 
1843 retry:
1844 	txp = get_txhead(sde);
1845 	swhead = sde->descq_head & sde->sdma_mask;
1846 	trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1847 	while (swhead != hwhead) {
1848 		/* advance head, wrap if needed */
1849 		swhead = ++sde->descq_head & sde->sdma_mask;
1850 
1851 		/* if now past this txp's descs, do the callback */
1852 		if (txp && txp->next_descq_idx == swhead) {
1853 			/* remove from list */
1854 			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
1855 			complete_tx(sde, txp, SDMA_TXREQ_S_OK);
1856 			/* see if there is another txp */
1857 			txp = get_txhead(sde);
1858 		}
1859 		trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1860 		progress++;
1861 	}
1862 
1863 	/*
1864 	 * The SDMA idle interrupt is not guaranteed to be ordered with respect
1865 	 * to updates to the dma_head location in host memory. The head
1866 	 * value read might not be fully up to date. If there are pending
1867 	 * descriptors and the SDMA idle interrupt fired, then read from the
1868 	 * CSR SDMA head instead to get the latest value from the hardware.
1869 	 * The hardware SDMA head should be read at most once per invocation
1870 	 * of sdma_make_progress(), which the idle_check_done flag ensures.
1871 	 */
1872 	if ((status & sde->idle_mask) && !idle_check_done) {
1873 		u16 swtail;
1874 
1875 		swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
1876 		if (swtail != hwhead) {
1877 			hwhead = (u16)read_sde_csr(sde, SD(HEAD));
1878 			idle_check_done = 1;
1879 			goto retry;
1880 		}
1881 	}
1882 
1883 	sde->last_status = status;
1884 	if (progress)
1885 		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
1886 }
1887 
1888 /**
1889  * sdma_engine_interrupt() - interrupt handler for engine
1890  * @sde: sdma engine
1891  * @status: sdma interrupt reason
1892  *
1893  * Status is a mask of the 3 possible interrupts for this engine.  It will
1894  * contain bits _only_ for this SDMA engine.  It will contain at least one
1895  * bit and may contain more.
1896  */
1897 void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
1898 {
1899 	trace_hfi1_sdma_engine_interrupt(sde, status);
1900 	write_seqlock(&sde->head_lock);
1901 	sdma_set_desc_cnt(sde, sdma_desct_intr);
1902 	if (status & sde->idle_mask)
1903 		sde->idle_int_cnt++;
1904 	else if (status & sde->progress_mask)
1905 		sde->progress_int_cnt++;
1906 	else if (status & sde->int_mask)
1907 		sde->sdma_int_cnt++;
1908 	sdma_make_progress(sde, status);
1909 	write_sequnlock(&sde->head_lock);
1910 }
1911 
1912 /**
1913  * sdma_engine_error() - error handler for engine
1914  * @sde: sdma engine
1915  * @status: sdma interrupt reason
1916  */
1917 void sdma_engine_error(struct sdma_engine *sde, u64 status)
1918 {
1919 	unsigned long flags;
1920 
1921 #ifdef CONFIG_SDMA_VERBOSITY
1922 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
1923 		   sde->this_idx,
1924 		   (unsigned long long)status,
1925 		   sdma_state_names[sde->state.current_state]);
1926 #endif
1927 	spin_lock_irqsave(&sde->tail_lock, flags);
1928 	write_seqlock(&sde->head_lock);
1929 	if (status & ALL_SDMA_ENG_HALT_ERRS)
1930 		__sdma_process_event(sde, sdma_event_e60_hw_halted);
1931 	if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
1932 		dd_dev_err(sde->dd,
1933 			   "SDMA (%u) engine error: 0x%llx state %s\n",
1934 			   sde->this_idx,
1935 			   (unsigned long long)status,
1936 			   sdma_state_names[sde->state.current_state]);
1937 		dump_sdma_state(sde);
1938 	}
1939 	write_sequnlock(&sde->head_lock);
1940 	spin_unlock_irqrestore(&sde->tail_lock, flags);
1941 }
1942 
1943 static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
1944 {
1945 	u64 set_senddmactrl = 0;
1946 	u64 clr_senddmactrl = 0;
1947 	unsigned long flags;
1948 
1949 #ifdef CONFIG_SDMA_VERBOSITY
1950 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
1951 		   sde->this_idx,
1952 		   (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
1953 		   (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
1954 		   (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
1955 		   (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
1956 #endif
1957 
1958 	if (op & SDMA_SENDCTRL_OP_ENABLE)
1959 		set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1960 	else
1961 		clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1962 
1963 	if (op & SDMA_SENDCTRL_OP_INTENABLE)
1964 		set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1965 	else
1966 		clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1967 
1968 	if (op & SDMA_SENDCTRL_OP_HALT)
1969 		set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1970 	else
1971 		clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1972 
1973 	spin_lock_irqsave(&sde->senddmactrl_lock, flags);
1974 
1975 	sde->p_senddmactrl |= set_senddmactrl;
1976 	sde->p_senddmactrl &= ~clr_senddmactrl;
1977 
1978 	if (op & SDMA_SENDCTRL_OP_CLEANUP)
1979 		write_sde_csr(sde, SD(CTRL),
1980 			      sde->p_senddmactrl |
1981 			      SD(CTRL_SDMA_CLEANUP_SMASK));
1982 	else
1983 		write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);
1984 
1985 	spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);
1986 
1987 #ifdef CONFIG_SDMA_VERBOSITY
1988 	sdma_dumpstate(sde);
1989 #endif
1990 }
1991 
1992 static void sdma_setlengen(struct sdma_engine *sde)
1993 {
1994 #ifdef CONFIG_SDMA_VERBOSITY
1995 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1996 		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1997 #endif
1998 
1999 	/*
2000 	 * Set SendDmaLenGen and clear-then-set the MSB of the generation
2001 	 * count to enable generation checking and load the internal
2002 	 * generation counter.
2003 	 */
2004 	write_sde_csr(sde, SD(LEN_GEN),
2005 		      (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT));
2006 	write_sde_csr(sde, SD(LEN_GEN),
2007 		      ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) |
2008 		      (4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
2009 }
2010 
2011 static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
2012 {
2013 	/* Commit writes to memory and advance the tail on the chip */
2014 	smp_wmb(); /* see get_txhead() */
2015 	writeq(tail, sde->tail_csr);
2016 }
2017 
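/*
 * Editorial sketch (not driver code) of the ordering contract behind
 * sdma_update_tail(): descriptor stores must be globally visible
 * before the tail write that publishes them to the hardware; the
 * consumer side pairs this with a read barrier (see get_txhead()).
 * Generation bits and tracing are omitted for brevity.
 */
static void __maybe_unused demo_publish_descriptor(struct sdma_engine *sde,
						   u64 qw0, u64 qw1)
{
	u16 tail = sde->descq_tail & sde->sdma_mask;

	/* 1. fill the descriptor slot in host memory */
	sde->descq[tail].qw[0] = cpu_to_le64(qw0);
	sde->descq[tail].qw[1] = cpu_to_le64(qw1);
	/* 2. order the fill before the publish */
	smp_wmb();
	/* 3. publish: the engine may fetch the descriptor from here on */
	writeq(++sde->descq_tail & sde->sdma_mask, sde->tail_csr);
}
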
2018 /*
2019  * This is called when changing to state s10_hw_start_up_halt_wait as
2020  * a result of send buffer errors or send DMA descriptor errors.
2021  */
2022 static void sdma_hw_start_up(struct sdma_engine *sde)
2023 {
2024 	u64 reg;
2025 
2026 #ifdef CONFIG_SDMA_VERBOSITY
2027 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
2028 		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
2029 #endif
2030 
2031 	sdma_setlengen(sde);
2032 	sdma_update_tail(sde, 0); /* Set SendDmaTail */
2033 	*sde->head_dma = 0;
2034 
2035 	reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
2036 	      SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
2037 	write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
2038 }
2039 
2040 /*
2041  * set_sdma_integrity
2042  *
2043  * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
2044  */
2045 static void set_sdma_integrity(struct sdma_engine *sde)
2046 {
2047 	struct hfi1_devdata *dd = sde->dd;
2048 
2049 	write_sde_csr(sde, SD(CHECK_ENABLE),
2050 		      hfi1_pkt_base_sdma_integrity(dd));
2051 }
2052 
2053 static void init_sdma_regs(
2054 	struct sdma_engine *sde,
2055 	u32 credits,
2056 	uint idle_cnt)
2057 {
2058 	u8 opval, opmask;
2059 #ifdef CONFIG_SDMA_VERBOSITY
2060 	struct hfi1_devdata *dd = sde->dd;
2061 
2062 	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
2063 		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
2064 #endif
2065 
2066 	write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
2067 	sdma_setlengen(sde);
2068 	sdma_update_tail(sde, 0); /* Set SendDmaTail */
2069 	write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
2070 	write_sde_csr(sde, SD(DESC_CNT), 0);
2071 	write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
2072 	write_sde_csr(sde, SD(MEMORY),
2073 		      ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
2074 		      ((u64)(credits * sde->this_idx) <<
2075 		       SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
2076 	write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
2077 	set_sdma_integrity(sde);
2078 	opmask = OPCODE_CHECK_MASK_DISABLED;
2079 	opval = OPCODE_CHECK_VAL_DISABLED;
2080 	write_sde_csr(sde, SD(CHECK_OPCODE),
2081 		      (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
2082 		      (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
2083 }
2084 
2085 #ifdef CONFIG_SDMA_VERBOSITY
2086 
2087 #define sdma_dumpstate_helper0(reg) do { \
2088 		csr = read_csr(sde->dd, reg); \
2089 		dd_dev_err(sde->dd, "%36s     0x%016llx\n", #reg, csr); \
2090 	} while (0)
2091 
2092 #define sdma_dumpstate_helper(reg) do { \
2093 		csr = read_sde_csr(sde, reg); \
2094 		dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
2095 			#reg, sde->this_idx, csr); \
2096 	} while (0)
2097 
2098 #define sdma_dumpstate_helper2(reg) do { \
2099 		csr = read_csr(sde->dd, reg + (8 * i)); \
2100 		dd_dev_err(sde->dd, "%33s_%02u     0x%016llx\n", \
2101 				#reg, i, csr); \
2102 	} while (0)
2103 
2104 void sdma_dumpstate(struct sdma_engine *sde)
2105 {
2106 	u64 csr;
2107 	unsigned i;
2108 
2109 	sdma_dumpstate_helper(SD(CTRL));
2110 	sdma_dumpstate_helper(SD(STATUS));
2111 	sdma_dumpstate_helper0(SD(ERR_STATUS));
2112 	sdma_dumpstate_helper0(SD(ERR_MASK));
2113 	sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
2114 	sdma_dumpstate_helper(SD(ENG_ERR_MASK));
2115 
2116 	for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
2117 		sdma_dumpstate_helper2(CCE_INT_STATUS);
2118 		sdma_dumpstate_helper2(CCE_INT_MASK);
2119 		sdma_dumpstate_helper2(CCE_INT_BLOCKED);
2120 	}
2121 
2122 	sdma_dumpstate_helper(SD(TAIL));
2123 	sdma_dumpstate_helper(SD(HEAD));
2124 	sdma_dumpstate_helper(SD(PRIORITY_THLD));
2125 	sdma_dumpstate_helper(SD(IDLE_CNT));
2126 	sdma_dumpstate_helper(SD(RELOAD_CNT));
2127 	sdma_dumpstate_helper(SD(DESC_CNT));
2128 	sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
2129 	sdma_dumpstate_helper(SD(MEMORY));
2130 	sdma_dumpstate_helper0(SD(ENGINES));
2131 	sdma_dumpstate_helper0(SD(MEM_SIZE));
2132 	/* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS);  */
2133 	sdma_dumpstate_helper(SD(BASE_ADDR));
2134 	sdma_dumpstate_helper(SD(LEN_GEN));
2135 	sdma_dumpstate_helper(SD(HEAD_ADDR));
2136 	sdma_dumpstate_helper(SD(CHECK_ENABLE));
2137 	sdma_dumpstate_helper(SD(CHECK_VL));
2138 	sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
2139 	sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
2140 	sdma_dumpstate_helper(SD(CHECK_SLID));
2141 	sdma_dumpstate_helper(SD(CHECK_OPCODE));
2142 }
2143 #endif
2144 
2145 static void dump_sdma_state(struct sdma_engine *sde)
2146 {
2147 	struct hw_sdma_desc *descq;
2148 	struct hw_sdma_desc *descqp;
2149 	u64 desc[2];
2150 	u64 addr;
2151 	u8 gen;
2152 	u16 len;
2153 	u16 head, tail, cnt;
2154 
2155 	head = sde->descq_head & sde->sdma_mask;
2156 	tail = sde->descq_tail & sde->sdma_mask;
2157 	cnt = sdma_descq_freecnt(sde);
2158 	descq = sde->descq;
2159 
2160 	dd_dev_err(sde->dd,
2161 		   "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
2162 		   sde->this_idx, head, tail, cnt,
2163 		   !list_empty(&sde->flushlist));
2164 
2165 	/* print info for each entry in the descriptor queue */
2166 	while (head != tail) {
2167 		char flags[6] = { 'x', 'x', 'x', 'x', 0 };
2168 
2169 		descqp = &sde->descq[head];
2170 		desc[0] = le64_to_cpu(descqp->qw[0]);
2171 		desc[1] = le64_to_cpu(descqp->qw[1]);
2172 		flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
2173 		flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
2174 				'H' : '-';
2175 		flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
2176 		flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
2177 		addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
2178 			& SDMA_DESC0_PHY_ADDR_MASK;
2179 		gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
2180 			& SDMA_DESC1_GENERATION_MASK;
2181 		len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
2182 			& SDMA_DESC0_BYTE_COUNT_MASK;
2183 		dd_dev_err(sde->dd,
2184 			   "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
2185 			   head, flags, addr, gen, len);
2186 		dd_dev_err(sde->dd,
2187 			   "\tdesc0:0x%016llx desc1 0x%016llx\n",
2188 			   desc[0], desc[1]);
2189 		if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
2190 			dd_dev_err(sde->dd,
2191 				   "\taidx: %u amode: %u alen: %u\n",
2192 				   (u8)((desc[1] &
2193 					 SDMA_DESC1_HEADER_INDEX_SMASK) >>
2194 					SDMA_DESC1_HEADER_INDEX_SHIFT),
2195 				   (u8)((desc[1] &
2196 					 SDMA_DESC1_HEADER_MODE_SMASK) >>
2197 					SDMA_DESC1_HEADER_MODE_SHIFT),
2198 				   (u8)((desc[1] &
2199 					 SDMA_DESC1_HEADER_DWS_SMASK) >>
2200 					SDMA_DESC1_HEADER_DWS_SHIFT));
2201 		head++;
2202 		head &= sde->sdma_mask;
2203 	}
2204 }
2205 
2206 #define SDE_FMT \
2207 	"SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
2208 /**
2209  * sdma_seqfile_dump_sde() - debugfs dump of sde
2210  * @s: seq file
2211  * @sde: send dma engine to dump
2212  *
2213  * This routine dumps the sde to the indicated seq file.
2214  */
2215 void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
2216 {
2217 	u16 head, tail;
2218 	struct hw_sdma_desc *descqp;
2219 	u64 desc[2];
2220 	u64 addr;
2221 	u8 gen;
2222 	u16 len;
2223 
2224 	head = sde->descq_head & sde->sdma_mask;
2225 	tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
2226 	seq_printf(s, SDE_FMT, sde->this_idx,
2227 		   sde->cpu,
2228 		   sdma_state_name(sde->state.current_state),
2229 		   (unsigned long long)read_sde_csr(sde, SD(CTRL)),
2230 		   (unsigned long long)read_sde_csr(sde, SD(STATUS)),
2231 		   (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)),
2232 		   (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail,
2233 		   (unsigned long long)read_sde_csr(sde, SD(HEAD)), head,
2234 		   (unsigned long long)le64_to_cpu(*sde->head_dma),
2235 		   (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
2236 		   (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
2237 		   (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
2238 		   (unsigned long long)sde->last_status,
2239 		   (unsigned long long)sde->ahg_bits,
2240 		   sde->tx_tail,
2241 		   sde->tx_head,
2242 		   sde->descq_tail,
2243 		   sde->descq_head,
2244 		   !list_empty(&sde->flushlist),
2245 		   sde->descq_full_count,
2246 		   (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
2247 
2248 	/* print info for each entry in the descriptor queue */
2249 	while (head != tail) {
2250 		char flags[6] = { 'x', 'x', 'x', 'x', 0 };
2251 
2252 		descqp = &sde->descq[head];
2253 		desc[0] = le64_to_cpu(descqp->qw[0]);
2254 		desc[1] = le64_to_cpu(descqp->qw[1]);
2255 		flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
2256 		flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
2257 				'H' : '-';
2258 		flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
2259 		flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
2260 		addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
2261 			& SDMA_DESC0_PHY_ADDR_MASK;
2262 		gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
2263 			& SDMA_DESC1_GENERATION_MASK;
2264 		len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
2265 			& SDMA_DESC0_BYTE_COUNT_MASK;
2266 		seq_printf(s,
2267 			   "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
2268 			   head, flags, addr, gen, len);
2269 		if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
2270 			seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
2271 				   (u8)((desc[1] &
2272 					 SDMA_DESC1_HEADER_INDEX_SMASK) >>
2273 					SDMA_DESC1_HEADER_INDEX_SHIFT),
2274 				   (u8)((desc[1] &
2275 					 SDMA_DESC1_HEADER_MODE_SMASK) >>
2276 					SDMA_DESC1_HEADER_MODE_SHIFT));
2277 		head = (head + 1) & sde->sdma_mask;
2278 	}
2279 }
2280 
2281 /*
2282  * add the generation number into
2283  * the qw1 and return
2284  */
2285 static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
2286 {
2287 	u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;
2288 
2289 	qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
2290 	qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
2291 			<< SDMA_DESC1_GENERATION_SHIFT;
2292 	return qw1;
2293 }
2294 
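/*
 * Editorial example (not driver code) of the arithmetic in add_gen():
 * assuming sdma_shift is log2 of the ring size (11 for a 2048-entry
 * descq), the 2-bit generation advances each time the free-running
 * tail wraps the ring, cycling 0..3.
 */
static void __maybe_unused demo_generation_bits(void)
{
	const u16 shift = 11;		/* log2(2048), illustrative */
	const u16 tails[] = { 0, 2047, 2048, 8191, 8192 };
	int i;

	for (i = 0; i < ARRAY_SIZE(tails); i++) {
		u8 gen = (tails[i] >> shift) & 3;
		/* yields 0, 0, 1, 3, then 0 again after four wraps */
		(void)gen;
	}
}
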
2295 /*
2296  * This routine submits the indicated tx.
2297  *
2298  * Space has already been guaranteed and
2299  * the tail side of the ring is locked.
2300  *
2301  * The hardware tail update is done
2302  * in the caller and that is facilitated
2303  * by returning the new tail.
2304  *
2305  * There is special case logic for ahg
2306  * to not add the generation number for
2307  * up to 2 descriptors that follow the
2308  * first descriptor.
2309  *
2310  */
2311 static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
2312 {
2313 	int i;
2314 	u16 tail;
2315 	struct sdma_desc *descp = tx->descp;
2316 	u8 skip = 0, mode = ahg_mode(tx);
2317 
2318 	tail = sde->descq_tail & sde->sdma_mask;
2319 	sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
2320 	sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
2321 	trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
2322 				   tail, &sde->descq[tail]);
2323 	tail = ++sde->descq_tail & sde->sdma_mask;
2324 	descp++;
2325 	if (mode > SDMA_AHG_APPLY_UPDATE1)
2326 		skip = mode >> 1;
2327 	for (i = 1; i < tx->num_desc; i++, descp++) {
2328 		u64 qw1;
2329 
2330 		sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
2331 		if (skip) {
2332 			/* edits don't have generation */
2333 			qw1 = descp->qw[1];
2334 			skip--;
2335 		} else {
2336 			/* replace generation with real one for non-edits */
2337 			qw1 = add_gen(sde, descp->qw[1]);
2338 		}
2339 		sde->descq[tail].qw[1] = cpu_to_le64(qw1);
2340 		trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
2341 					   tail, &sde->descq[tail]);
2342 		tail = ++sde->descq_tail & sde->sdma_mask;
2343 	}
2344 	tx->next_descq_idx = tail;
2345 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2346 	tx->sn = sde->tail_sn++;
2347 	trace_hfi1_sdma_in_sn(sde, tx->sn);
2348 	WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
2349 #endif
2350 	sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
2351 	sde->desc_avail -= tx->num_desc;
2352 	return tail;
2353 }
2354 
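/*
 * Editorial example (not driver code) of the free-running index idiom
 * submit_tx() relies on: head/tail are unmasked u16 counters, masked
 * only when indexing, so their difference stays correct across u16
 * wraparound as long as the ring size is a power of two.
 */
static void __maybe_unused demo_ring_indexing(void)
{
	const u16 mask = 2048 - 1;	/* ring size must be a power of 2 */
	u16 head = 65530;		/* free-running, about to wrap */
	u16 tail = 10;			/* already wrapped past head */
	u16 in_flight = tail - head;	/* 16, thanks to u16 arithmetic */
	u16 slot = tail & mask;		/* 10, the actual ring slot */

	(void)in_flight;
	(void)slot;
}
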
2355 /*
2356  * Check for progress
2357  */
2358 static int sdma_check_progress(
2359 	struct sdma_engine *sde,
2360 	struct iowait *wait,
2361 	struct sdma_txreq *tx,
2362 	bool pkts_sent)
2363 {
2364 	int ret;
2365 
2366 	sde->desc_avail = sdma_descq_freecnt(sde);
2367 	if (tx->num_desc <= sde->desc_avail)
2368 		return -EAGAIN;
2369 	/* pulse the head_lock */
2370 	if (wait && wait->sleep) {
2371 		unsigned seq;
2372 
2373 		seq = raw_seqcount_begin(
2374 			(const seqcount_t *)&sde->head_lock.seqcount);
2375 		ret = wait->sleep(sde, wait, tx, seq, pkts_sent);
2376 		if (ret == -EAGAIN)
2377 			sde->desc_avail = sdma_descq_freecnt(sde);
2378 	} else {
2379 		ret = -EBUSY;
2380 	}
2381 	return ret;
2382 }
2383 
2384 /**
2385  * sdma_send_txreq() - submit a tx req to ring
2386  * @sde: sdma engine to use
2387  * @wait: wait structure to use when full (may be NULL)
2388  * @tx: sdma_txreq to submit
2389  * @pkts_sent: has any packet been sent yet?
2390  *
2391  * The call submits the tx into the ring.  If an iowait structure is non-NULL,
2392  * the packet will be queued to the list in wait.
2393  *
2394  * Return:
2395  * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
2396  * ring (wait == NULL)
2397  * -EIOCBQUEUED - tx queued to iowait, -ECOMM - bad sdma state
2398  */
2399 int sdma_send_txreq(struct sdma_engine *sde,
2400 		    struct iowait *wait,
2401 		    struct sdma_txreq *tx,
2402 		    bool pkts_sent)
2403 {
2404 	int ret = 0;
2405 	u16 tail;
2406 	unsigned long flags;
2407 
2408 	/* user should have supplied entire packet */
2409 	if (unlikely(tx->tlen))
2410 		return -EINVAL;
2411 	tx->wait = wait;
2412 	spin_lock_irqsave(&sde->tail_lock, flags);
2413 retry:
2414 	if (unlikely(!__sdma_running(sde)))
2415 		goto unlock_noconn;
2416 	if (unlikely(tx->num_desc > sde->desc_avail))
2417 		goto nodesc;
2418 	tail = submit_tx(sde, tx);
2419 	if (wait)
2420 		iowait_sdma_inc(wait);
2421 	sdma_update_tail(sde, tail);
2422 unlock:
2423 	spin_unlock_irqrestore(&sde->tail_lock, flags);
2424 	return ret;
2425 unlock_noconn:
2426 	if (wait)
2427 		iowait_sdma_inc(wait);
2428 	tx->next_descq_idx = 0;
2429 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2430 	tx->sn = sde->tail_sn++;
2431 	trace_hfi1_sdma_in_sn(sde, tx->sn);
2432 #endif
2433 	spin_lock(&sde->flushlist_lock);
2434 	list_add_tail(&tx->list, &sde->flushlist);
2435 	spin_unlock(&sde->flushlist_lock);
2436 	if (wait) {
2437 		wait->tx_count++;
2438 		wait->count += tx->num_desc;
2439 	}
2440 	schedule_work(&sde->flush_worker);
2441 	ret = -ECOMM;
2442 	goto unlock;
2443 nodesc:
2444 	ret = sdma_check_progress(sde, wait, tx, pkts_sent);
2445 	if (ret == -EAGAIN) {
2446 		ret = 0;
2447 		goto retry;
2448 	}
2449 	sde->descq_full_count++;
2450 	goto unlock;
2451 }
2452 
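/*
 * Editorial usage sketch (not driver code) for sdma_send_txreq(): the
 * txreq must be fully built (tx->tlen == 0) before the call.  The
 * return-code handling below follows the kernel-doc above; how tx and
 * wait were constructed is assumed and not shown.
 */
static int __maybe_unused demo_send_one_txreq(struct sdma_engine *sde,
					      struct iowait *wait,
					      struct sdma_txreq *tx)
{
	int ret = sdma_send_txreq(sde, wait, tx, false);

	if (ret == -EIOCBQUEUED) {
		/*
		 * Ring was full: tx now sits on wait's list and the
		 * engine will wake the iowait when space frees up.
		 */
		return 0;
	}
	/* 0 on success; -EINVAL, -EBUSY or -ECOMM otherwise */
	return ret;
}
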
2453 /**
2454  * sdma_send_txlist() - submit a list of tx req to ring
2455  * @sde: sdma engine to use
2456  * @wait: wait structure to use when full (may be NULL)
2457  * @tx_list: list of sdma_txreqs to submit
2458  * @count_out: pointer to a u32 which, after return, will contain the total
2459  *         number of sdma_txreqs removed from the tx_list. This includes
2460  *         sdma_txreqs whose SDMA descriptors are submitted to the ring and
2461  *         sdma_txreqs that are added to the SDMA engine flush list when the
2462  *         SDMA engine state is not running.
2463  *
2464  * The call submits the list into the ring.
2465  *
2466  * If the iowait structure is non-NULL and not equal to the iowait list,
2467  * the unprocessed part of the list will be appended to the list in wait.
2468  *
2469  * In all cases, the tx_list will be updated so the head of the tx_list is
2470  * the list of descriptors that have yet to be transmitted.
2471  *
2472  * The intent of this call is to provide a more efficient
2473  * way of submitting multiple packets to SDMA while holding the
2474  * tail-side lock.
2475  *
2476  * Return:
2477  * 0 - Success,
2478  * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
2479  * -EIOCBQUEUED - tx queued to iowait, -ECOMM - bad sdma state
2480  */
2481 int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
2482 		     struct list_head *tx_list, u32 *count_out)
2483 {
2484 	struct sdma_txreq *tx, *tx_next;
2485 	int ret = 0;
2486 	unsigned long flags;
2487 	u16 tail = INVALID_TAIL;
2488 	u32 submit_count = 0, flush_count = 0, total_count;
2489 
2490 	spin_lock_irqsave(&sde->tail_lock, flags);
2491 retry:
2492 	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2493 		tx->wait = wait;
2494 		if (unlikely(!__sdma_running(sde)))
2495 			goto unlock_noconn;
2496 		if (unlikely(tx->num_desc > sde->desc_avail))
2497 			goto nodesc;
2498 		if (unlikely(tx->tlen)) {
2499 			ret = -EINVAL;
2500 			goto update_tail;
2501 		}
2502 		list_del_init(&tx->list);
2503 		tail = submit_tx(sde, tx);
2504 		submit_count++;
2505 		if (tail != INVALID_TAIL &&
2506 		    (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) {
2507 			sdma_update_tail(sde, tail);
2508 			tail = INVALID_TAIL;
2509 		}
2510 	}
2511 update_tail:
2512 	total_count = submit_count + flush_count;
2513 	if (wait) {
2514 		iowait_sdma_add(wait, total_count);
2515 		iowait_starve_clear(submit_count > 0, wait);
2516 	}
2517 	if (tail != INVALID_TAIL)
2518 		sdma_update_tail(sde, tail);
2519 	spin_unlock_irqrestore(&sde->tail_lock, flags);
2520 	*count_out = total_count;
2521 	return ret;
2522 unlock_noconn:
2523 	spin_lock(&sde->flushlist_lock);
2524 	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2525 		tx->wait = wait;
2526 		list_del_init(&tx->list);
2527 		tx->next_descq_idx = 0;
2528 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2529 		tx->sn = sde->tail_sn++;
2530 		trace_hfi1_sdma_in_sn(sde, tx->sn);
2531 #endif
2532 		list_add_tail(&tx->list, &sde->flushlist);
2533 		flush_count++;
2534 		if (wait) {
2535 			wait->tx_count++;
2536 			wait->count += tx->num_desc;
2537 		}
2538 	}
2539 	spin_unlock(&sde->flushlist_lock);
2540 	schedule_work(&sde->flush_worker);
2541 	ret = -ECOMM;
2542 	goto update_tail;
2543 nodesc:
2544 	ret = sdma_check_progress(sde, wait, tx, submit_count > 0);
2545 	if (ret == -EAGAIN) {
2546 		ret = 0;
2547 		goto retry;
2548 	}
2549 	sde->descq_full_count++;
2550 	goto update_tail;
2551 }
2552 
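/*
 * Editorial usage sketch (not driver code) for sdma_send_txlist():
 * batch several already-built txreqs under one tail lock.  The txreq
 * construction is assumed; only the list plumbing is shown.
 */
static void __maybe_unused demo_send_txreq_batch(struct sdma_engine *sde,
						 struct iowait *wait,
						 struct sdma_txreq **txs,
						 int n)
{
	LIST_HEAD(batch);
	u32 count = 0;
	int i, ret;

	for (i = 0; i < n; i++)
		list_add_tail(&txs[i]->list, &batch);

	ret = sdma_send_txlist(sde, wait, &batch, &count);
	/*
	 * count holds how many txreqs left the list (submitted or
	 * flushed); on -EIOCBQUEUED the rest were queued on wait.
	 */
	(void)ret;
}
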
2553 static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
2554 {
2555 	unsigned long flags;
2556 
2557 	spin_lock_irqsave(&sde->tail_lock, flags);
2558 	write_seqlock(&sde->head_lock);
2559 
2560 	__sdma_process_event(sde, event);
2561 
2562 	if (sde->state.current_state == sdma_state_s99_running)
2563 		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
2564 
2565 	write_sequnlock(&sde->head_lock);
2566 	spin_unlock_irqrestore(&sde->tail_lock, flags);
2567 }
2568 
2569 static void __sdma_process_event(struct sdma_engine *sde,
2570 				 enum sdma_events event)
2571 {
2572 	struct sdma_state *ss = &sde->state;
2573 	int need_progress = 0;
2574 
2575 	/* CONFIG SDMA temporary */
2576 #ifdef CONFIG_SDMA_VERBOSITY
2577 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
2578 		   sdma_state_names[ss->current_state],
2579 		   sdma_event_names[event]);
2580 #endif
2581 
2582 	switch (ss->current_state) {
2583 	case sdma_state_s00_hw_down:
2584 		switch (event) {
2585 		case sdma_event_e00_go_hw_down:
2586 			break;
2587 		case sdma_event_e30_go_running:
2588 			/*
2589 			 * If down, but running requested (usually the
2590 			 * result of link up), then we need to start up.
2591 			 * This can happen when hw down is requested while
2592 			 * bringing the link up with traffic active on
2593 			 * the 7220, e.g.
2594 			 */
2595 			ss->go_s99_running = 1;
2596 			/* fall through and start dma engine */
2597 		case sdma_event_e10_go_hw_start:
2598 			/* This reference means the state machine is started */
2599 			sdma_get(&sde->state);
2600 			sdma_set_state(sde,
2601 				       sdma_state_s10_hw_start_up_halt_wait);
2602 			break;
2603 		case sdma_event_e15_hw_halt_done:
2604 			break;
2605 		case sdma_event_e25_hw_clean_up_done:
2606 			break;
2607 		case sdma_event_e40_sw_cleaned:
2608 			sdma_sw_tear_down(sde);
2609 			break;
2610 		case sdma_event_e50_hw_cleaned:
2611 			break;
2612 		case sdma_event_e60_hw_halted:
2613 			break;
2614 		case sdma_event_e70_go_idle:
2615 			break;
2616 		case sdma_event_e80_hw_freeze:
2617 			break;
2618 		case sdma_event_e81_hw_frozen:
2619 			break;
2620 		case sdma_event_e82_hw_unfreeze:
2621 			break;
2622 		case sdma_event_e85_link_down:
2623 			break;
2624 		case sdma_event_e90_sw_halted:
2625 			break;
2626 		}
2627 		break;
2628 
2629 	case sdma_state_s10_hw_start_up_halt_wait:
2630 		switch (event) {
2631 		case sdma_event_e00_go_hw_down:
2632 			sdma_set_state(sde, sdma_state_s00_hw_down);
2633 			sdma_sw_tear_down(sde);
2634 			break;
2635 		case sdma_event_e10_go_hw_start:
2636 			break;
2637 		case sdma_event_e15_hw_halt_done:
2638 			sdma_set_state(sde,
2639 				       sdma_state_s15_hw_start_up_clean_wait);
2640 			sdma_start_hw_clean_up(sde);
2641 			break;
2642 		case sdma_event_e25_hw_clean_up_done:
2643 			break;
2644 		case sdma_event_e30_go_running:
2645 			ss->go_s99_running = 1;
2646 			break;
2647 		case sdma_event_e40_sw_cleaned:
2648 			break;
2649 		case sdma_event_e50_hw_cleaned:
2650 			break;
2651 		case sdma_event_e60_hw_halted:
2652 			schedule_work(&sde->err_halt_worker);
2653 			break;
2654 		case sdma_event_e70_go_idle:
2655 			ss->go_s99_running = 0;
2656 			break;
2657 		case sdma_event_e80_hw_freeze:
2658 			break;
2659 		case sdma_event_e81_hw_frozen:
2660 			break;
2661 		case sdma_event_e82_hw_unfreeze:
2662 			break;
2663 		case sdma_event_e85_link_down:
2664 			break;
2665 		case sdma_event_e90_sw_halted:
2666 			break;
2667 		}
2668 		break;
2669 
2670 	case sdma_state_s15_hw_start_up_clean_wait:
2671 		switch (event) {
2672 		case sdma_event_e00_go_hw_down:
2673 			sdma_set_state(sde, sdma_state_s00_hw_down);
2674 			sdma_sw_tear_down(sde);
2675 			break;
2676 		case sdma_event_e10_go_hw_start:
2677 			break;
2678 		case sdma_event_e15_hw_halt_done:
2679 			break;
2680 		case sdma_event_e25_hw_clean_up_done:
2681 			sdma_hw_start_up(sde);
2682 			sdma_set_state(sde, ss->go_s99_running ?
2683 				       sdma_state_s99_running :
2684 				       sdma_state_s20_idle);
2685 			break;
2686 		case sdma_event_e30_go_running:
2687 			ss->go_s99_running = 1;
2688 			break;
2689 		case sdma_event_e40_sw_cleaned:
2690 			break;
2691 		case sdma_event_e50_hw_cleaned:
2692 			break;
2693 		case sdma_event_e60_hw_halted:
2694 			break;
2695 		case sdma_event_e70_go_idle:
2696 			ss->go_s99_running = 0;
2697 			break;
2698 		case sdma_event_e80_hw_freeze:
2699 			break;
2700 		case sdma_event_e81_hw_frozen:
2701 			break;
2702 		case sdma_event_e82_hw_unfreeze:
2703 			break;
2704 		case sdma_event_e85_link_down:
2705 			break;
2706 		case sdma_event_e90_sw_halted:
2707 			break;
2708 		}
2709 		break;
2710 
2711 	case sdma_state_s20_idle:
2712 		switch (event) {
2713 		case sdma_event_e00_go_hw_down:
2714 			sdma_set_state(sde, sdma_state_s00_hw_down);
2715 			sdma_sw_tear_down(sde);
2716 			break;
2717 		case sdma_event_e10_go_hw_start:
2718 			break;
2719 		case sdma_event_e15_hw_halt_done:
2720 			break;
2721 		case sdma_event_e25_hw_clean_up_done:
2722 			break;
2723 		case sdma_event_e30_go_running:
2724 			sdma_set_state(sde, sdma_state_s99_running);
2725 			ss->go_s99_running = 1;
2726 			break;
2727 		case sdma_event_e40_sw_cleaned:
2728 			break;
2729 		case sdma_event_e50_hw_cleaned:
2730 			break;
2731 		case sdma_event_e60_hw_halted:
2732 			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
2733 			schedule_work(&sde->err_halt_worker);
2734 			break;
2735 		case sdma_event_e70_go_idle:
2736 			break;
2737 		case sdma_event_e85_link_down:
2738 			/* fall through */
2739 		case sdma_event_e80_hw_freeze:
2740 			sdma_set_state(sde, sdma_state_s80_hw_freeze);
2741 			atomic_dec(&sde->dd->sdma_unfreeze_count);
2742 			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2743 			break;
2744 		case sdma_event_e81_hw_frozen:
2745 			break;
2746 		case sdma_event_e82_hw_unfreeze:
2747 			break;
2748 		case sdma_event_e90_sw_halted:
2749 			break;
2750 		}
2751 		break;
2752 
2753 	case sdma_state_s30_sw_clean_up_wait:
2754 		switch (event) {
2755 		case sdma_event_e00_go_hw_down:
2756 			sdma_set_state(sde, sdma_state_s00_hw_down);
2757 			break;
2758 		case sdma_event_e10_go_hw_start:
2759 			break;
2760 		case sdma_event_e15_hw_halt_done:
2761 			break;
2762 		case sdma_event_e25_hw_clean_up_done:
2763 			break;
2764 		case sdma_event_e30_go_running:
2765 			ss->go_s99_running = 1;
2766 			break;
2767 		case sdma_event_e40_sw_cleaned:
2768 			sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
2769 			sdma_start_hw_clean_up(sde);
2770 			break;
2771 		case sdma_event_e50_hw_cleaned:
2772 			break;
2773 		case sdma_event_e60_hw_halted:
2774 			break;
2775 		case sdma_event_e70_go_idle:
2776 			ss->go_s99_running = 0;
2777 			break;
2778 		case sdma_event_e80_hw_freeze:
2779 			break;
2780 		case sdma_event_e81_hw_frozen:
2781 			break;
2782 		case sdma_event_e82_hw_unfreeze:
2783 			break;
2784 		case sdma_event_e85_link_down:
2785 			ss->go_s99_running = 0;
2786 			break;
2787 		case sdma_event_e90_sw_halted:
2788 			break;
2789 		}
2790 		break;
2791 
2792 	case sdma_state_s40_hw_clean_up_wait:
2793 		switch (event) {
2794 		case sdma_event_e00_go_hw_down:
2795 			sdma_set_state(sde, sdma_state_s00_hw_down);
2796 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2797 			break;
2798 		case sdma_event_e10_go_hw_start:
2799 			break;
2800 		case sdma_event_e15_hw_halt_done:
2801 			break;
2802 		case sdma_event_e25_hw_clean_up_done:
2803 			sdma_hw_start_up(sde);
2804 			sdma_set_state(sde, ss->go_s99_running ?
2805 				       sdma_state_s99_running :
2806 				       sdma_state_s20_idle);
2807 			break;
2808 		case sdma_event_e30_go_running:
2809 			ss->go_s99_running = 1;
2810 			break;
2811 		case sdma_event_e40_sw_cleaned:
2812 			break;
2813 		case sdma_event_e50_hw_cleaned:
2814 			break;
2815 		case sdma_event_e60_hw_halted:
2816 			break;
2817 		case sdma_event_e70_go_idle:
2818 			ss->go_s99_running = 0;
2819 			break;
2820 		case sdma_event_e80_hw_freeze:
2821 			break;
2822 		case sdma_event_e81_hw_frozen:
2823 			break;
2824 		case sdma_event_e82_hw_unfreeze:
2825 			break;
2826 		case sdma_event_e85_link_down:
2827 			ss->go_s99_running = 0;
2828 			break;
2829 		case sdma_event_e90_sw_halted:
2830 			break;
2831 		}
2832 		break;
2833 
2834 	case sdma_state_s50_hw_halt_wait:
2835 		switch (event) {
2836 		case sdma_event_e00_go_hw_down:
2837 			sdma_set_state(sde, sdma_state_s00_hw_down);
2838 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2839 			break;
2840 		case sdma_event_e10_go_hw_start:
2841 			break;
2842 		case sdma_event_e15_hw_halt_done:
2843 			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
2844 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2845 			break;
2846 		case sdma_event_e25_hw_clean_up_done:
2847 			break;
2848 		case sdma_event_e30_go_running:
2849 			ss->go_s99_running = 1;
2850 			break;
2851 		case sdma_event_e40_sw_cleaned:
2852 			break;
2853 		case sdma_event_e50_hw_cleaned:
2854 			break;
2855 		case sdma_event_e60_hw_halted:
2856 			schedule_work(&sde->err_halt_worker);
2857 			break;
2858 		case sdma_event_e70_go_idle:
2859 			ss->go_s99_running = 0;
2860 			break;
2861 		case sdma_event_e80_hw_freeze:
2862 			break;
2863 		case sdma_event_e81_hw_frozen:
2864 			break;
2865 		case sdma_event_e82_hw_unfreeze:
2866 			break;
2867 		case sdma_event_e85_link_down:
2868 			ss->go_s99_running = 0;
2869 			break;
2870 		case sdma_event_e90_sw_halted:
2871 			break;
2872 		}
2873 		break;
2874 
2875 	case sdma_state_s60_idle_halt_wait:
2876 		switch (event) {
2877 		case sdma_event_e00_go_hw_down:
2878 			sdma_set_state(sde, sdma_state_s00_hw_down);
2879 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2880 			break;
2881 		case sdma_event_e10_go_hw_start:
2882 			break;
2883 		case sdma_event_e15_hw_halt_done:
2884 			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
2885 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2886 			break;
2887 		case sdma_event_e25_hw_clean_up_done:
2888 			break;
2889 		case sdma_event_e30_go_running:
2890 			ss->go_s99_running = 1;
2891 			break;
2892 		case sdma_event_e40_sw_cleaned:
2893 			break;
2894 		case sdma_event_e50_hw_cleaned:
2895 			break;
2896 		case sdma_event_e60_hw_halted:
2897 			schedule_work(&sde->err_halt_worker);
2898 			break;
2899 		case sdma_event_e70_go_idle:
2900 			ss->go_s99_running = 0;
2901 			break;
2902 		case sdma_event_e80_hw_freeze:
2903 			break;
2904 		case sdma_event_e81_hw_frozen:
2905 			break;
2906 		case sdma_event_e82_hw_unfreeze:
2907 			break;
2908 		case sdma_event_e85_link_down:
2909 			break;
2910 		case sdma_event_e90_sw_halted:
2911 			break;
2912 		}
2913 		break;
2914 
2915 	case sdma_state_s80_hw_freeze:
2916 		switch (event) {
2917 		case sdma_event_e00_go_hw_down:
2918 			sdma_set_state(sde, sdma_state_s00_hw_down);
2919 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2920 			break;
2921 		case sdma_event_e10_go_hw_start:
2922 			break;
2923 		case sdma_event_e15_hw_halt_done:
2924 			break;
2925 		case sdma_event_e25_hw_clean_up_done:
2926 			break;
2927 		case sdma_event_e30_go_running:
2928 			ss->go_s99_running = 1;
2929 			break;
2930 		case sdma_event_e40_sw_cleaned:
2931 			break;
2932 		case sdma_event_e50_hw_cleaned:
2933 			break;
2934 		case sdma_event_e60_hw_halted:
2935 			break;
2936 		case sdma_event_e70_go_idle:
2937 			ss->go_s99_running = 0;
2938 			break;
2939 		case sdma_event_e80_hw_freeze:
2940 			break;
2941 		case sdma_event_e81_hw_frozen:
2942 			sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
2943 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2944 			break;
2945 		case sdma_event_e82_hw_unfreeze:
2946 			break;
2947 		case sdma_event_e85_link_down:
2948 			break;
2949 		case sdma_event_e90_sw_halted:
2950 			break;
2951 		}
2952 		break;
2953 
2954 	case sdma_state_s82_freeze_sw_clean:
2955 		switch (event) {
2956 		case sdma_event_e00_go_hw_down:
2957 			sdma_set_state(sde, sdma_state_s00_hw_down);
2958 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2959 			break;
2960 		case sdma_event_e10_go_hw_start:
2961 			break;
2962 		case sdma_event_e15_hw_halt_done:
2963 			break;
2964 		case sdma_event_e25_hw_clean_up_done:
2965 			break;
2966 		case sdma_event_e30_go_running:
2967 			ss->go_s99_running = 1;
2968 			break;
2969 		case sdma_event_e40_sw_cleaned:
2970 			/* notify caller this engine is done cleaning */
2971 			atomic_dec(&sde->dd->sdma_unfreeze_count);
2972 			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2973 			break;
2974 		case sdma_event_e50_hw_cleaned:
2975 			break;
2976 		case sdma_event_e60_hw_halted:
2977 			break;
2978 		case sdma_event_e70_go_idle:
2979 			ss->go_s99_running = 0;
2980 			break;
2981 		case sdma_event_e80_hw_freeze:
2982 			break;
2983 		case sdma_event_e81_hw_frozen:
2984 			break;
2985 		case sdma_event_e82_hw_unfreeze:
2986 			sdma_hw_start_up(sde);
2987 			sdma_set_state(sde, ss->go_s99_running ?
2988 				       sdma_state_s99_running :
2989 				       sdma_state_s20_idle);
2990 			break;
2991 		case sdma_event_e85_link_down:
2992 			break;
2993 		case sdma_event_e90_sw_halted:
2994 			break;
2995 		}
2996 		break;
2997 
2998 	case sdma_state_s99_running:
2999 		switch (event) {
3000 		case sdma_event_e00_go_hw_down:
3001 			sdma_set_state(sde, sdma_state_s00_hw_down);
3002 			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
3003 			break;
3004 		case sdma_event_e10_go_hw_start:
3005 			break;
3006 		case sdma_event_e15_hw_halt_done:
3007 			break;
3008 		case sdma_event_e25_hw_clean_up_done:
3009 			break;
3010 		case sdma_event_e30_go_running:
3011 			break;
3012 		case sdma_event_e40_sw_cleaned:
3013 			break;
3014 		case sdma_event_e50_hw_cleaned:
3015 			break;
3016 		case sdma_event_e60_hw_halted:
3017 			need_progress = 1;
3018 			sdma_err_progress_check_schedule(sde);
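			/* fall through */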
3019 		case sdma_event_e90_sw_halted:
3020 			/*
3021 			 * A SW-initiated halt does not perform the engine
3022 			 * progress check.
3023 			 */
3024 			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
3025 			schedule_work(&sde->err_halt_worker);
3026 			break;
3027 		case sdma_event_e70_go_idle:
3028 			sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
3029 			break;
3030 		case sdma_event_e85_link_down:
3031 			ss->go_s99_running = 0;
3032 			/* fall through */
3033 		case sdma_event_e80_hw_freeze:
3034 			sdma_set_state(sde, sdma_state_s80_hw_freeze);
3035 			atomic_dec(&sde->dd->sdma_unfreeze_count);
3036 			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
3037 			break;
3038 		case sdma_event_e81_hw_frozen:
3039 			break;
3040 		case sdma_event_e82_hw_unfreeze:
3041 			break;
3042 		}
3043 		break;
3044 	}
3045 
3046 	ss->last_event = event;
3047 	if (need_progress)
3048 		sdma_make_progress(sde, 0);
3049 }
3050 
3051 /*
3052  * _extend_sdma_tx_descs() - helper to extend txreq
3053  *
3054  * This is called once the initial nominal allocation
3055  * of descriptors in the sdma_txreq is exhausted.
3056  *
3057  * The code will bump the allocation up to the max
3058  * of MAX_DESC (64) descriptors. There doesn't seem to be
3059  * much point in an interim step. The last descriptor
3060  * is reserved for the coalesce buffer in order to support
3061  * cases where the input packet has >MAX_DESC iovecs.
3062  *
3063  */
3064 static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
3065 {
3066 	int i;
3067 
3068 	/* Handle last descriptor */
3069 	if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
3070 		/* if tlen is 0, it is for padding, release last descriptor */
3071 		if (!tx->tlen) {
3072 			tx->desc_limit = MAX_DESC;
3073 		} else if (!tx->coalesce_buf) {
3074 			/* allocate coalesce buffer with space for padding */
3075 			tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
3076 						   GFP_ATOMIC);
3077 			if (!tx->coalesce_buf)
3078 				goto enomem;
3079 			tx->coalesce_idx = 0;
3080 		}
3081 		return 0;
3082 	}
3083 
3084 	if (unlikely(tx->num_desc == MAX_DESC))
3085 		goto enomem;
3086 
3087 	tx->descp = kmalloc_array(
3088 			MAX_DESC,
3089 			sizeof(struct sdma_desc),
3090 			GFP_ATOMIC);
3091 	if (!tx->descp)
3092 		goto enomem;
3093 
3094 	/* reserve last descriptor for coalescing */
3095 	tx->desc_limit = MAX_DESC - 1;
3096 	/* copy ones already built */
3097 	for (i = 0; i < tx->num_desc; i++)
3098 		tx->descp[i] = tx->descs[i];
3099 	return 0;
3100 enomem:
3101 	__sdma_txclean(dd, tx);
3102 	return -ENOMEM;
3103 }
3104 
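/*
 * Editorial sketch (not driver code) of the grow-to-heap pattern in
 * _extend_sdma_tx_descs(): a small inline array covers the common
 * case, and the first overflow switches to a single worst-case heap
 * allocation with no interim doubling.  The demo_* type is illustrative.
 */
struct demo_grow {
	int cnt;
	u64 inline_buf[4];	/* nominal inline allocation */
	u64 *bufp;		/* inline_buf, or the heap copy */
};

static int __maybe_unused demo_grow_to_heap(struct demo_grow *g, int max)
{
	u64 *heap;
	int i;

	heap = kmalloc_array(max, sizeof(*heap), GFP_ATOMIC);
	if (!heap)
		return -ENOMEM;
	for (i = 0; i < g->cnt; i++)	/* copy entries already built */
		heap[i] = g->inline_buf[i];
	g->bufp = heap;
	return 0;
}
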
3105 /*
3106  * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
3107  *
3108  * This is called once the initial nominal allocation of descriptors
3109  * in the sdma_txreq is exhausted.
3110  *
3111  * This function calls _extend_sdma_tx_descs to extend or allocate
3112  * a coalesce buffer. If there is an allocated coalesce buffer, it will
3113  * copy the input packet data into the coalesce buffer. It also adds
3114  * the coalesce buffer descriptor once the whole packet is received.
3115  *
3116  * Return:
3117  * <0 - error
3118  * 0 - coalescing, don't populate descriptor
3119  * 1 - continue with populating descriptor
3120  */
3121 int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
3122 			   int type, void *kvaddr, struct page *page,
3123 			   unsigned long offset, u16 len)
3124 {
3125 	int pad_len, rval;
3126 	dma_addr_t addr;
3127 
3128 	rval = _extend_sdma_tx_descs(dd, tx);
3129 	if (rval) {
3130 		__sdma_txclean(dd, tx);
3131 		return rval;
3132 	}
3133 
3134 	/* If coalesce buffer is allocated, copy data into it */
3135 	if (tx->coalesce_buf) {
3136 		if (type == SDMA_MAP_NONE) {
3137 			__sdma_txclean(dd, tx);
3138 			return -EINVAL;
3139 		}
3140 
3141 		if (type == SDMA_MAP_PAGE) {
3142 			kvaddr = kmap(page);
3143 			kvaddr += offset;
3144 		} else if (WARN_ON(!kvaddr)) {
3145 			__sdma_txclean(dd, tx);
3146 			return -EINVAL;
3147 		}
3148 
3149 		memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
3150 		tx->coalesce_idx += len;
3151 		if (type == SDMA_MAP_PAGE)
3152 			kunmap(page);
3153 
3154 		/* If there is more data, return */
3155 		if (tx->tlen - tx->coalesce_idx)
3156 			return 0;
3157 
3158 		/* Whole packet is received; add any padding */
3159 		pad_len = tx->packet_len & (sizeof(u32) - 1);
3160 		if (pad_len) {
3161 			pad_len = sizeof(u32) - pad_len;
3162 			memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len);
3163 			/* padding is taken care of for the coalescing case */
3164 			tx->packet_len += pad_len;
3165 			tx->tlen += pad_len;
3166 		}
3167 
3168 		/* dma map the coalesce buffer */
3169 		addr = dma_map_single(&dd->pcidev->dev,
3170 				      tx->coalesce_buf,
3171 				      tx->tlen,
3172 				      DMA_TO_DEVICE);
3173 
3174 		if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
3175 			__sdma_txclean(dd, tx);
3176 			return -ENOSPC;
3177 		}
3178 
3179 		/* Add descriptor for coalesce buffer */
3180 		tx->desc_limit = MAX_DESC;
3181 		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
3182 					 addr, tx->tlen);
3183 	}
3184 
3185 	return 1;
3186 }
3187 
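/*
 * Editorial example (not driver code) of the dword padding arithmetic
 * used above: lengths are rounded up to the next multiple of
 * sizeof(u32), and an already aligned length gets no pad.
 */
static void __maybe_unused demo_dword_padding(void)
{
	const u16 lens[] = { 61, 62, 63, 64 };
	int i;

	for (i = 0; i < ARRAY_SIZE(lens); i++) {
		int pad = lens[i] & (sizeof(u32) - 1);

		if (pad)
			pad = sizeof(u32) - pad;
		/* yields pads of 3, 2, 1 and 0 respectively */
		(void)pad;
	}
}
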
3188 /* Update sdes when the lmc changes */
3189 void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
3190 {
3191 	struct sdma_engine *sde;
3192 	int i;
3193 	u64 sreg;
3194 
3195 	sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
3196 		SD(CHECK_SLID_MASK_SHIFT)) |
3197 		(((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
3198 		SD(CHECK_SLID_VALUE_SHIFT));
3199 
3200 	for (i = 0; i < dd->num_sdma; i++) {
3201 		hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
3202 			  i, (u32)sreg);
3203 		sde = &dd->per_sdma[i];
3204 		write_sde_csr(sde, SD(CHECK_SLID), sreg);
3205 	}
3206 }
3207 
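/*
 * Editorial sketch (not driver code) of how the SLID check value above
 * is typically derived: with an LMC of 'lmc' the low lmc LID bits are
 * wildcarded, so the mask clears them and the value keeps the rest.
 * Deriving the mask from lmc this way is an assumption for illustration.
 */
static u64 __maybe_unused demo_slid_check(u32 lid, u8 lmc)
{
	u64 mask = ~((1ULL << lmc) - 1);

	return ((mask & SD(CHECK_SLID_MASK_MASK)) <<
		SD(CHECK_SLID_MASK_SHIFT)) |
	       (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
		SD(CHECK_SLID_VALUE_SHIFT));
}
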
3208 /* tx not dword sized - pad */
3209 int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
3210 {
3211 	int rval = 0;
3212 
3213 	tx->num_desc++;
3214 	if ((unlikely(tx->num_desc == tx->desc_limit))) {
3215 		rval = _extend_sdma_tx_descs(dd, tx);
3216 		if (rval) {
3217 			__sdma_txclean(dd, tx);
3218 			return rval;
3219 		}
3220 	}
3221 	/* finish the one just added */
3222 	make_tx_sdma_desc(
3223 		tx,
3224 		SDMA_MAP_NONE,
3225 		dd->sdma_pad_phys,
3226 		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
3227 	_sdma_close_tx(dd, tx);
3228 	return rval;
3229 }
3230 
3231 /*
3232  * Add ahg to the sdma_txreq
3233  *
3234  * The logic will consume up to 3
3235  * descriptors at the beginning of
3236  * sdma_txreq.
3237  */
3238 void _sdma_txreq_ahgadd(
3239 	struct sdma_txreq *tx,
3240 	u8 num_ahg,
3241 	u8 ahg_entry,
3242 	u32 *ahg,
3243 	u8 ahg_hlen)
3244 {
3245 	u32 i, shift = 0, desc = 0;
3246 	u8 mode;
3247 
3248 	WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
3249 	/* compute mode */
3250 	if (num_ahg == 1)
3251 		mode = SDMA_AHG_APPLY_UPDATE1;
3252 	else if (num_ahg <= 5)
3253 		mode = SDMA_AHG_APPLY_UPDATE2;
3254 	else
3255 		mode = SDMA_AHG_APPLY_UPDATE3;
3256 	tx->num_desc++;
3257 	/* initialize the consumed descriptors to zero */
3258 	switch (mode) {
3259 	case SDMA_AHG_APPLY_UPDATE3:
3260 		tx->num_desc++;
3261 		tx->descs[2].qw[0] = 0;
3262 		tx->descs[2].qw[1] = 0;
3263 		/* FALLTHROUGH */
3264 	case SDMA_AHG_APPLY_UPDATE2:
3265 		tx->num_desc++;
3266 		tx->descs[1].qw[0] = 0;
3267 		tx->descs[1].qw[1] = 0;
3268 		break;
3269 	}
3270 	ahg_hlen >>= 2;
3271 	tx->descs[0].qw[1] |=
3272 		(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
3273 			<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
3274 		(((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
3275 			<< SDMA_DESC1_HEADER_DWS_SHIFT) |
3276 		(((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
3277 			<< SDMA_DESC1_HEADER_MODE_SHIFT) |
3278 		(((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
3279 			<< SDMA_DESC1_HEADER_UPDATE1_SHIFT);
3280 	for (i = 0; i < (num_ahg - 1); i++) {
3281 		if (!shift && !(i & 2))
3282 			desc++;
3283 		tx->descs[desc].qw[!!(i & 2)] |=
3284 			(((u64)ahg[i + 1])
3285 				<< shift);
3286 		shift = (shift + 32) & 63;
3287 	}
3288 }
3289 
3290 /**
3291  * sdma_ahg_alloc - allocate an AHG entry
3292  * @sde: engine to allocate from
3293  *
3294  * Return:
3295  * 0-31 when successful, -EINVAL if @sde is NULL,
3296  * -ENOSPC if an entry is not available
3297  */
3298 int sdma_ahg_alloc(struct sdma_engine *sde)
3299 {
3300 	int nr;
3301 	int oldbit;
3302 
3303 	if (!sde) {
3304 		trace_hfi1_ahg_allocate(sde, -EINVAL);
3305 		return -EINVAL;
3306 	}
3307 	while (1) {
3308 		nr = ffz(ACCESS_ONCE(sde->ahg_bits));
3309 		if (nr > 31) {
3310 			trace_hfi1_ahg_allocate(sde, -ENOSPC);
3311 			return -ENOSPC;
3312 		}
3313 		oldbit = test_and_set_bit(nr, &sde->ahg_bits);
3314 		if (!oldbit)
3315 			break;
3316 		cpu_relax();
3317 	}
3318 	trace_hfi1_ahg_allocate(sde, nr);
3319 	return nr;
3320 }
3321 
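/*
 * Editorial usage sketch (not driver code) for the AHG allocator:
 * claim an entry, program headers against it, and return it when the
 * flow is torn down.  The programming step is elided.
 */
static int __maybe_unused demo_ahg_cycle(struct sdma_engine *sde)
{
	int idx = sdma_ahg_alloc(sde);

	if (idx < 0)
		return idx;	/* -EINVAL or -ENOSPC */
	/* ... build AHG headers referencing entry 'idx' ... */
	sdma_ahg_free(sde, idx);
	return 0;
}
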
3322 /**
3323  * sdma_ahg_free - free an AHG entry
3324  * @sde: engine to return AHG entry
3325  * @ahg_index: index to free
3326  *
3327  * This routine frees the indicate AHG entry.
3328  */
3329 void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
3330 {
3331 	if (!sde)
3332 		return;
3333 	trace_hfi1_ahg_deallocate(sde, ahg_index);
3334 	if (ahg_index < 0 || ahg_index > 31)
3335 		return;
3336 	clear_bit(ahg_index, &sde->ahg_bits);
3337 }
3338 
3339 /*
3340  * SPC freeze handling for SDMA engines.  Called when the driver knows
3341  * the SPC is going into a freeze but before the freeze is fully
3342  * settled.  Generally an error interrupt.
3343  *
3344  * This event will pull the engine out of running so no more entries can be
3345  * added to the engine's queue.
3346  */
3347 void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
3348 {
3349 	int i;
3350 	enum sdma_events event = link_down ? sdma_event_e85_link_down :
3351 					     sdma_event_e80_hw_freeze;
3352 
3353 	/* set up the wait but do not wait here */
3354 	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
3355 
3356 	/* tell all engines to stop running and wait */
3357 	for (i = 0; i < dd->num_sdma; i++)
3358 		sdma_process_event(&dd->per_sdma[i], event);
3359 
3360 	/* sdma_freeze() will wait for all engines to have stopped */
3361 }
3362 
3363 /*
3364  * SPC freeze handling for SDMA engines.  Called when the driver knows
3365  * the SPC is fully frozen.
3366  */
3367 void sdma_freeze(struct hfi1_devdata *dd)
3368 {
3369 	int i;
3370 	int ret;
3371 
3372 	/*
3373 	 * Make sure all engines have moved out of the running state before
3374 	 * continuing.
3375 	 */
3376 	ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
3377 				       atomic_read(&dd->sdma_unfreeze_count) <=
3378 				       0);
3379 	/* if interrupted, or the count went negative (unloading), just exit */
3380 	if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
3381 		return;
3382 
3383 	/* set up the count for the next wait */
3384 	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
3385 
3386 	/* tell all engines that the SPC is frozen, they can start cleaning */
3387 	for (i = 0; i < dd->num_sdma; i++)
3388 		sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);
3389 
3390 	/*
3391 	 * Wait for everyone to finish software clean before exiting.  The
3392 	 * software clean will read engine CSRs, so must be completed before
3393 	 * the next step, which will clear the engine CSRs.
3394 	 */
3395 	(void)wait_event_interruptible(dd->sdma_unfreeze_wq,
3396 				atomic_read(&dd->sdma_unfreeze_count) <= 0);
3397 	/* no need to check results - done no matter what */
3398 }
3399 
3400 /*
3401  * SPC freeze handling for the SDMA engines.  Called after the SPC is unfrozen.
3402  *
3403  * The SPC freeze acts like an SDMA halt and a hardware clean combined.  All
3404  * that is left is a software clean.  We could do it after the SPC is fully
3405  * frozen, but then we'd have to add another state to wait for the unfreeze.
3406  * Instead, just defer the software clean until the unfreeze step.
3407  */
3408 void sdma_unfreeze(struct hfi1_devdata *dd)
3409 {
3410 	int i;
3411 
3412 	/* tell all engines to start freeze clean up */
3413 	for (i = 0; i < dd->num_sdma; i++)
3414 		sdma_process_event(&dd->per_sdma[i],
3415 				   sdma_event_e82_hw_unfreeze);
3416 }
3417 
3418 /**
3419  * _sdma_engine_progress_schedule() - schedule progress on engine
3420  * @sde: sdma_engine to schedule progress
3421  *
3422  */
3423 void _sdma_engine_progress_schedule(
3424 	struct sdma_engine *sde)
3425 {
3426 	trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
3427 	/* assume we have selected a good cpu */
3428 	write_csr(sde->dd,
3429 		  CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
3430 		  sde->progress_mask);
3431 }
3432