xref: /openbmc/linux/drivers/infiniband/hw/hfi1/pio.c (revision 293d5b43)
1 /*
2  * Copyright(c) 2015, 2016 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 
48 #include <linux/delay.h>
49 #include "hfi.h"
50 #include "qp.h"
51 #include "trace.h"
52 
53 #define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */
54 
55 #define SC(name) SEND_CTXT_##name
56 /*
57  * Send Context functions
58  */
59 static void sc_wait_for_packet_egress(struct send_context *sc, int pause);
60 
61 /*
62  * Set the CM reset bit and wait for it to clear.  Use the provided
63  * sendctrl register.  This routine has no locking.
64  */
65 void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
66 {
67 	write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
68 	while (1) {
69 		udelay(1);
70 		sendctrl = read_csr(dd, SEND_CTRL);
71 		if ((sendctrl & SEND_CTRL_CM_RESET_SMASK) == 0)
72 			break;
73 	}
74 }
75 
76 /* defined in header release 48 and higher */
77 #ifndef SEND_CTRL_UNSUPPORTED_VL_SHIFT
78 #define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
79 #define SEND_CTRL_UNSUPPORTED_VL_MASK 0xffull
80 #define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
81 		<< SEND_CTRL_UNSUPPORTED_VL_SHIFT)
82 #endif
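
/*
 * Illustrative example of the PSC_DATA_VL_ENABLE mask computed in
 * pio_send_control() below: with num_vls == 4,
 * ((~0ull << 4) & 0xff) == 0xf0, so the UnsupportedVL field flags
 * VL4-VL7 and only VL0-VL3 may be used for data sends; with
 * num_vls == 8 the mask is 0 and all eight data VLs are allowed.
 */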
83 
84 /* global control of PIO send */
85 void pio_send_control(struct hfi1_devdata *dd, int op)
86 {
87 	u64 reg, mask;
88 	unsigned long flags;
89 	int write = 1;	/* write sendctrl back */
90 	int flush = 0;	/* re-read sendctrl to make sure it is flushed */
91 
92 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
93 
94 	reg = read_csr(dd, SEND_CTRL);
95 	switch (op) {
96 	case PSC_GLOBAL_ENABLE:
97 		reg |= SEND_CTRL_SEND_ENABLE_SMASK;
98 	/* Fall through */
99 	case PSC_DATA_VL_ENABLE:
100 		/* Disallow sending on VLs not enabled */
101 		mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
102 				SEND_CTRL_UNSUPPORTED_VL_SHIFT;
103 		reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
104 		break;
105 	case PSC_GLOBAL_DISABLE:
106 		reg &= ~SEND_CTRL_SEND_ENABLE_SMASK;
107 		break;
108 	case PSC_GLOBAL_VLARB_ENABLE:
109 		reg |= SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
110 		break;
111 	case PSC_GLOBAL_VLARB_DISABLE:
112 		reg &= ~SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
113 		break;
114 	case PSC_CM_RESET:
115 		__cm_reset(dd, reg);
116 		write = 0; /* CSR already written (and flushed) */
117 		break;
118 	case PSC_DATA_VL_DISABLE:
119 		reg |= SEND_CTRL_UNSUPPORTED_VL_SMASK;
120 		flush = 1;
121 		break;
122 	default:
123 		dd_dev_err(dd, "%s: invalid control %d\n", __func__, op);
124 		break;
125 	}
126 
127 	if (write) {
128 		write_csr(dd, SEND_CTRL, reg);
129 		if (flush)
130 			(void)read_csr(dd, SEND_CTRL); /* flush write */
131 	}
132 
133 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
134 }
135 
136 /* number of send context memory pools */
137 #define NUM_SC_POOLS 2
138 
139 /* Send Context Size (SCS) wildcards */
140 #define SCS_POOL_0 -1
141 #define SCS_POOL_1 -2
142 
143 /* Send Context Count (SCC) wildcards */
144 #define SCC_PER_VL -1
145 #define SCC_PER_CPU  -2
146 #define SCC_PER_KRCVQ  -3
147 
148 /* Send Context Size (SCS) constants */
149 #define SCS_ACK_CREDITS  32
150 #define SCS_VL15_CREDITS 102	/* 3 pkts of 2048B data + 128B header */
151 
152 #define PIO_THRESHOLD_CEILING 4096
153 
154 #define PIO_WAIT_BATCH_SIZE 5
155 
156 /* default send context sizes */
157 static struct sc_config_sizes sc_config_sizes[SC_MAX] = {
158 	[SC_KERNEL] = { .size  = SCS_POOL_0,	/* even divide, pool 0 */
159 			.count = SCC_PER_VL },	/* one per NUMA */
160 	[SC_ACK]    = { .size  = SCS_ACK_CREDITS,
161 			.count = SCC_PER_KRCVQ },
162 	[SC_USER]   = { .size  = SCS_POOL_0,	/* even divide, pool 0 */
163 			.count = SCC_PER_CPU },	/* one per CPU */
164 	[SC_VL15]   = { .size  = SCS_VL15_CREDITS,
165 			.count = 1 },
166 
167 };
168 
169 /* send context memory pool configuration */
170 struct mem_pool_config {
171 	int centipercent;	/* % of memory, in 100ths of 1% */
172 	int absolute_blocks;	/* absolute block count */
173 };
174 
175 /* default memory pool configuration: 100% in pool 0 */
176 static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = {
177 	/* centi%, abs blocks */
178 	{  10000,     -1 },		/* pool 0 */
179 	{      0,     -1 },		/* pool 1 */
180 };
181 
182 /* memory pool information, used when calculating final sizes */
183 struct mem_pool_info {
184 	int centipercent;	/*
185 				 * 100th of 1% of memory to use, -1 if blocks
186 				 * already set
187 				 */
188 	int count;		/* count of contexts in the pool */
189 	int blocks;		/* block size of the pool */
190 	int size;		/* context size, in blocks */
191 };
192 
193 /*
194  * Convert a pool wildcard to a valid pool index.  The wildcards
195  * start at -1 and increase negatively.  Map them as:
196  *	-1 => 0
197  *	-2 => 1
198  *	etc.
199  *
200  * Return -1 on non-wildcard input, otherwise convert to a pool number.
201  */
202 static int wildcard_to_pool(int wc)
203 {
204 	if (wc >= 0)
205 		return -1;	/* non-wildcard */
206 	return -wc - 1;
207 }
208 
209 static const char *sc_type_names[SC_MAX] = {
210 	"kernel",
211 	"ack",
212 	"user",
213 	"vl15"
214 };
215 
216 static const char *sc_type_name(int index)
217 {
218 	if (index < 0 || index >= SC_MAX)
219 		return "unknown";
220 	return sc_type_names[index];
221 }
222 
223 /*
224  * Read the send context memory pool configuration and send context
225  * size configuration.  Replace any wildcards and come up with final
226  * counts and sizes for the send context types.
227  */
228 int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
229 {
230 	struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
231 	int total_blocks = (dd->chip_pio_mem_size / PIO_BLOCK_SIZE) - 1;
232 	int total_contexts = 0;
233 	int fixed_blocks;
234 	int pool_blocks;
235 	int used_blocks;
236 	int cp_total;		/* centipercent total */
237 	int ab_total;		/* absolute block total */
238 	int extra;
239 	int i;
240 
241 	/*
242 	 * When SDMA is enabled, kernel context pio packet size is capped by
243 	 * "piothreshold". Reduce pio buffer allocation for kernel context by
244 	 * setting it to a fixed size. The allocation allows 3-deep buffering
245 	 * of the largest pio packets plus up to 128 bytes header, sufficient
246 	 * to maintain verbs performance.
247 	 *
248 	 * When SDMA is disabled, keep the default pooling allocation.
249 	 */
250 	if (HFI1_CAP_IS_KSET(SDMA)) {
251 		u16 max_pkt_size = (piothreshold < PIO_THRESHOLD_CEILING) ?
252 					 piothreshold : PIO_THRESHOLD_CEILING;
253 		sc_config_sizes[SC_KERNEL].size =
254 			3 * (max_pkt_size + 128) / PIO_BLOCK_SIZE;
255 	}
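
	/*
	 * Sketch of the sizing above, assuming PIO_BLOCK_SIZE is 64 bytes
	 * (it is defined in a header not shown here): with piothreshold at
	 * or above the 4096-byte ceiling, the kernel context size becomes
	 * 3 * (4096 + 128) / 64 = 198 blocks.
	 */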
256 
257 	/*
258 	 * Step 0:
259 	 *	- copy the centipercents/absolute sizes from the pool config
260 	 *	- sanity check these values
261 	 *	- add up centipercents, then later check for full value
262 	 *	- add up absolute blocks, then later check for over-commit
263 	 */
264 	cp_total = 0;
265 	ab_total = 0;
266 	for (i = 0; i < NUM_SC_POOLS; i++) {
267 		int cp = sc_mem_pool_config[i].centipercent;
268 		int ab = sc_mem_pool_config[i].absolute_blocks;
269 
270 		/*
271 		 * A negative value is "unused" or "invalid".  Both *can*
272 		 * be valid, but centipercent wins, so check that first.
273 		 */
274 		if (cp >= 0) {			/* centipercent valid */
275 			cp_total += cp;
276 		} else if (ab >= 0) {		/* absolute blocks valid */
277 			ab_total += ab;
278 		} else {			/* neither valid */
279 			dd_dev_err(
280 				dd,
281 				"Send context memory pool %d: both the block count and centipercent are invalid\n",
282 				i);
283 			return -EINVAL;
284 		}
285 
286 		mem_pool_info[i].centipercent = cp;
287 		mem_pool_info[i].blocks = ab;
288 	}
289 
290 	/* do not use both % and absolute blocks for different pools */
291 	if (cp_total != 0 && ab_total != 0) {
292 		dd_dev_err(
293 			dd,
294 			"All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n");
295 		return -EINVAL;
296 	}
297 
298 	/* if any percentages are present, they must add up to 100% x 100 */
299 	if (cp_total != 0 && cp_total != 10000) {
300 		dd_dev_err(
301 			dd,
302 			"Send context memory pool centipercent is %d, expecting 10000\n",
303 			cp_total);
304 		return -EINVAL;
305 	}
306 
307 	/* the absolute pool total cannot be more than the mem total */
308 	if (ab_total > total_blocks) {
309 		dd_dev_err(
310 			dd,
311 			"Send context memory pool absolute block count %d is larger than the memory size %d\n",
312 			ab_total, total_blocks);
313 		return -EINVAL;
314 	}
315 
316 	/*
317 	 * Step 2:
318 	 *	- copy from the context size config
319 	 *	- replace context type wildcard counts with real values
320 	 *	- add up non-memory pool block sizes
321 	 *	- add up memory pool user counts
322 	 */
323 	fixed_blocks = 0;
324 	for (i = 0; i < SC_MAX; i++) {
325 		int count = sc_config_sizes[i].count;
326 		int size = sc_config_sizes[i].size;
327 		int pool;
328 
329 		/*
330 		 * Sanity check count: Either a positive value or
331 		 * one of the expected wildcards is valid.  The positive
332 		 * value is checked later when we compare against total
333 		 * memory available.
334 		 */
335 		if (i == SC_ACK) {
336 			count = dd->n_krcv_queues;
337 		} else if (i == SC_KERNEL) {
338 			count = INIT_SC_PER_VL * num_vls;
339 		} else if (count == SCC_PER_CPU) {
340 			count = dd->num_rcv_contexts - dd->n_krcv_queues;
341 		} else if (count < 0) {
342 			dd_dev_err(
343 				dd,
344 				"%s send context invalid count wildcard %d\n",
345 				sc_type_name(i), count);
346 			return -EINVAL;
347 		}
348 		if (total_contexts + count > dd->chip_send_contexts)
349 			count = dd->chip_send_contexts - total_contexts;
350 
351 		total_contexts += count;
352 
353 		/*
354 		 * Sanity check pool: The conversion will return a pool
355 		 * number or -1 if a fixed (non-negative) value.  The fixed
356 		 * value is checked later when we compare against
357 		 * total memory available.
358 		 */
359 		pool = wildcard_to_pool(size);
360 		if (pool == -1) {			/* non-wildcard */
361 			fixed_blocks += size * count;
362 		} else if (pool < NUM_SC_POOLS) {	/* valid wildcard */
363 			mem_pool_info[pool].count += count;
364 		} else {				/* invalid wildcard */
365 			dd_dev_err(
366 				dd,
367 				"%s send context invalid pool wildcard %d\n",
368 				sc_type_name(i), size);
369 			return -EINVAL;
370 		}
371 
372 		dd->sc_sizes[i].count = count;
373 		dd->sc_sizes[i].size = size;
374 	}
375 	if (fixed_blocks > total_blocks) {
376 		dd_dev_err(
377 			dd,
378 			"Send context fixed block count, %u, larger than total block count %u\n",
379 			fixed_blocks, total_blocks);
380 		return -EINVAL;
381 	}
382 
383 	/* step 3: calculate the blocks in the pools, and pool context sizes */
384 	pool_blocks = total_blocks - fixed_blocks;
385 	if (ab_total > pool_blocks) {
386 		dd_dev_err(
387 			dd,
388 			"Send context fixed pool sizes, %u, larger than pool block count %u\n",
389 			ab_total, pool_blocks);
390 		return -EINVAL;
391 	}
392 	/* subtract off the fixed pool blocks */
393 	pool_blocks -= ab_total;
394 
395 	for (i = 0; i < NUM_SC_POOLS; i++) {
396 		struct mem_pool_info *pi = &mem_pool_info[i];
397 
398 		/* % beats absolute blocks */
399 		if (pi->centipercent >= 0)
400 			pi->blocks = (pool_blocks * pi->centipercent) / 10000;
401 
402 		if (pi->blocks == 0 && pi->count != 0) {
403 			dd_dev_err(
404 				dd,
405 				"Send context memory pool %d has %u contexts, but no blocks\n",
406 				i, pi->count);
407 			return -EINVAL;
408 		}
409 		if (pi->count == 0) {
410 			/* warn about wasted blocks */
411 			if (pi->blocks != 0)
412 				dd_dev_err(
413 					dd,
414 					"Send context memory pool %d has %u blocks, but zero contexts\n",
415 					i, pi->blocks);
416 			pi->size = 0;
417 		} else {
418 			pi->size = pi->blocks / pi->count;
419 		}
420 	}
421 
422 	/* step 4: fill in the context type sizes from the pool sizes */
423 	used_blocks = 0;
424 	for (i = 0; i < SC_MAX; i++) {
425 		if (dd->sc_sizes[i].size < 0) {
426 			unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);
427 
428 			WARN_ON_ONCE(pool >= NUM_SC_POOLS);
429 			dd->sc_sizes[i].size = mem_pool_info[pool].size;
430 		}
431 		/* make sure we are not larger than what is allowed by the HW */
432 #define PIO_MAX_BLOCKS 1024
433 		if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
434 			dd->sc_sizes[i].size = PIO_MAX_BLOCKS;
435 
436 		/* calculate our total usage */
437 		used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
438 	}
439 	extra = total_blocks - used_blocks;
440 	if (extra != 0)
441 		dd_dev_info(dd, "unused send context blocks: %d\n", extra);
442 
443 	return total_contexts;
444 }
445 
446 int init_send_contexts(struct hfi1_devdata *dd)
447 {
448 	u16 base;
449 	int ret, i, j, context;
450 
451 	ret = init_credit_return(dd);
452 	if (ret)
453 		return ret;
454 
455 	dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
456 					GFP_KERNEL);
457 	dd->send_contexts = kcalloc(dd->num_send_contexts,
458 					sizeof(struct send_context_info),
459 					GFP_KERNEL);
460 	if (!dd->send_contexts || !dd->hw_to_sw) {
461 		kfree(dd->hw_to_sw);
462 		kfree(dd->send_contexts);
463 		free_credit_return(dd);
464 		return -ENOMEM;
465 	}
466 
467 	/* hardware context map starts with invalid send context indices */
468 	for (i = 0; i < TXE_NUM_CONTEXTS; i++)
469 		dd->hw_to_sw[i] = INVALID_SCI;
470 
471 	/*
472 	 * All send contexts have their credit sizes.  Allocate credits
473 	 * for each context one after another from the global space.
474 	 */
475 	context = 0;
476 	base = 1;
477 	for (i = 0; i < SC_MAX; i++) {
478 		struct sc_config_sizes *scs = &dd->sc_sizes[i];
479 
480 		for (j = 0; j < scs->count; j++) {
481 			struct send_context_info *sci =
482 						&dd->send_contexts[context];
483 			sci->type = i;
484 			sci->base = base;
485 			sci->credits = scs->size;
486 
487 			context++;
488 			base += scs->size;
489 		}
490 	}
491 
492 	return 0;
493 }
494 
495 /*
496  * Allocate a software index and hardware context of the given type.
497  *
498  * Must be called with dd->sc_lock held.
499  */
500 static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
501 		       u32 *hw_context)
502 {
503 	struct send_context_info *sci;
504 	u32 index;
505 	u32 context;
506 
507 	for (index = 0, sci = &dd->send_contexts[0];
508 			index < dd->num_send_contexts; index++, sci++) {
509 		if (sci->type == type && sci->allocated == 0) {
510 			sci->allocated = 1;
511 			/* use a 1:1 mapping, but make them non-equal */
512 			context = dd->chip_send_contexts - index - 1;
513 			dd->hw_to_sw[context] = index;
514 			*sw_index = index;
515 			*hw_context = context;
516 			return 0; /* success */
517 		}
518 	}
519 	dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
520 	return -ENOSPC;
521 }
522 
523 /*
524  * Free the send context given by its software index.
525  *
526  * Must be called with dd->sc_lock held.
527  */
528 static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
529 {
530 	struct send_context_info *sci;
531 
532 	sci = &dd->send_contexts[sw_index];
533 	if (!sci->allocated) {
534 		dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
535 			   __func__, sw_index, hw_context);
536 	}
537 	sci->allocated = 0;
538 	dd->hw_to_sw[hw_context] = INVALID_SCI;
539 }
540 
541 /* return the base context of a context in a group */
542 static inline u32 group_context(u32 context, u32 group)
543 {
544 	return (context >> group) << group;
545 }
546 
547 /* return the size of a group */
548 static inline u32 group_size(u32 group)
549 {
550 	return 1 << group;
551 }
552 
553 /*
554  * Obtain the credit return addresses, kernel virtual and physical, for the
555  * given sc.
556  *
557  * To understand this routine:
558  * o va and pa are arrays of struct credit_return.  One for each physical
559  *   send context, per NUMA.
560  * o Each send context always looks in its relative location in a struct
561  *   credit_return for its credit return.
562  * o Each send context in a group must have its return address CSR programmed
563  *   with the same value.  Use the address of the first send context in the
564  *   group.
565  */
566 static void cr_group_addresses(struct send_context *sc, dma_addr_t *pa)
567 {
568 	u32 gc = group_context(sc->hw_context, sc->group);
569 	u32 index = sc->hw_context & 0x7;
570 
571 	sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
572 	*pa = (unsigned long)
573 	       &((struct credit_return *)sc->dd->cr_base[sc->node].pa)[gc];
574 }
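
/*
 * Illustrative example: with sc->group == 3 (eight contexts per group),
 * hw_contexts 8-15 all compute gc == 8 and program the same credit
 * return address; each still reads its own cr[] slot selected by the
 * low three bits of its hw_context.
 */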
575 
576 /*
577  * Work queue function triggered in error interrupt routine for
578  * kernel contexts.
579  */
580 static void sc_halted(struct work_struct *work)
581 {
582 	struct send_context *sc;
583 
584 	sc = container_of(work, struct send_context, halt_work);
585 	sc_restart(sc);
586 }
587 
588 /*
589  * Calculate PIO block threshold for this send context using the given MTU.
590  * Trigger a return when one MTU plus an optional header's worth of credits remains.
591  *
592  * Parameter mtu is in bytes.
593  * Parameter hdrqentsize is in DWORDs.
594  *
595  * Return value is what to write into the CSR: trigger return when
596  * unreturned credits pass this count.
597  */
598 u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
599 {
600 	u32 release_credits;
601 	u32 threshold;
602 
603 	/* add in the header size, then divide by the PIO block size */
604 	mtu += hdrqentsize << 2;
605 	release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);
606 
607 	/* check against this context's credits */
608 	if (sc->credits <= release_credits)
609 		threshold = 1;
610 	else
611 		threshold = sc->credits - release_credits;
612 
613 	return threshold;
614 }
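
/*
 * Worked example (illustrative, assuming a 64-byte PIO block): for an
 * 8192-byte MTU and an hdrqentsize of 32 dwords, release_credits is
 * DIV_ROUND_UP(8192 + 128, 64) = 130, so a context with 160 credits
 * would use a threshold of 30.
 */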
615 
616 /*
617  * Calculate credit threshold in terms of percent of the allocated credits.
618  * Trigger when unreturned credits equal or exceed the percentage of the whole.
619  *
620  * Return value is what to write into the CSR: trigger return when
621  * unreturned credits pass this count.
622  */
623 u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
624 {
625 	return (sc->credits * percent) / 100;
626 }
627 
628 /*
629  * Set the credit return threshold.
630  */
631 void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
632 {
633 	unsigned long flags;
634 	u32 old_threshold;
635 	int force_return = 0;
636 
637 	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
638 
639 	old_threshold = (sc->credit_ctrl >>
640 				SC(CREDIT_CTRL_THRESHOLD_SHIFT))
641 			 & SC(CREDIT_CTRL_THRESHOLD_MASK);
642 
643 	if (new_threshold != old_threshold) {
644 		sc->credit_ctrl =
645 			(sc->credit_ctrl
646 				& ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
647 			| ((new_threshold
648 				& SC(CREDIT_CTRL_THRESHOLD_MASK))
649 			   << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
650 		write_kctxt_csr(sc->dd, sc->hw_context,
651 				SC(CREDIT_CTRL), sc->credit_ctrl);
652 
653 		/* force a credit return on change to avoid a possible stall */
654 		force_return = 1;
655 	}
656 
657 	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
658 
659 	if (force_return)
660 		sc_return_credits(sc);
661 }
662 
663 /*
664  * set_pio_integrity
665  *
666  * Set the CHECK_ENABLE register for the send context 'sc'.
667  */
668 void set_pio_integrity(struct send_context *sc)
669 {
670 	struct hfi1_devdata *dd = sc->dd;
671 	u64 reg = 0;
672 	u32 hw_context = sc->hw_context;
673 	int type = sc->type;
674 
675 	/*
676 	 * No integrity checks if HFI1_CAP_NO_INTEGRITY is set, or if
677 	 * we're snooping.
678 	 */
679 	if (likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
680 	    dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE)
681 		reg = hfi1_pkt_default_send_ctxt_mask(dd, type);
682 
683 	write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg);
684 }
685 
686 static u32 get_buffers_allocated(struct send_context *sc)
687 {
688 	int cpu;
689 	u32 ret = 0;
690 
691 	for_each_possible_cpu(cpu)
692 		ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
693 	return ret;
694 }
695 
696 static void reset_buffers_allocated(struct send_context *sc)
697 {
698 	int cpu;
699 
700 	for_each_possible_cpu(cpu)
701 		(*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
702 }
703 
704 /*
705  * Allocate a NUMA relative send context structure of the given type along
706  * with a HW context.
707  */
708 struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
709 			      uint hdrqentsize, int numa)
710 {
711 	struct send_context_info *sci;
712 	struct send_context *sc = NULL;
713 	dma_addr_t pa;
714 	unsigned long flags;
715 	u64 reg;
716 	u32 thresh;
717 	u32 sw_index;
718 	u32 hw_context;
719 	int ret;
720 	u8 opval, opmask;
721 
722 	/* do not allocate while frozen */
723 	if (dd->flags & HFI1_FROZEN)
724 		return NULL;
725 
726 	sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa);
727 	if (!sc)
728 		return NULL;
729 
730 	sc->buffers_allocated = alloc_percpu(u32);
731 	if (!sc->buffers_allocated) {
732 		kfree(sc);
733 		dd_dev_err(dd,
734 			   "Cannot allocate buffers_allocated per cpu counters\n"
735 			  );
736 		return NULL;
737 	}
738 
739 	spin_lock_irqsave(&dd->sc_lock, flags);
740 	ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
741 	if (ret) {
742 		spin_unlock_irqrestore(&dd->sc_lock, flags);
743 		free_percpu(sc->buffers_allocated);
744 		kfree(sc);
745 		return NULL;
746 	}
747 
748 	sci = &dd->send_contexts[sw_index];
749 	sci->sc = sc;
750 
751 	sc->dd = dd;
752 	sc->node = numa;
753 	sc->type = type;
754 	spin_lock_init(&sc->alloc_lock);
755 	spin_lock_init(&sc->release_lock);
756 	spin_lock_init(&sc->credit_ctrl_lock);
757 	INIT_LIST_HEAD(&sc->piowait);
758 	INIT_WORK(&sc->halt_work, sc_halted);
759 	init_waitqueue_head(&sc->halt_wait);
760 
761 	/* grouping is always single context for now */
762 	sc->group = 0;
763 
764 	sc->sw_index = sw_index;
765 	sc->hw_context = hw_context;
766 	cr_group_addresses(sc, &pa);
767 	sc->credits = sci->credits;
768 
769 /* PIO Send Memory Address details */
770 #define PIO_ADDR_CONTEXT_MASK 0xfful
771 #define PIO_ADDR_CONTEXT_SHIFT 16
772 	sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
773 					<< PIO_ADDR_CONTEXT_SHIFT);
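	/* i.e. successive hw contexts are spaced 64 KiB apart in PIO send memory */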
774 
775 	/* set base and credits */
776 	reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK))
777 					<< SC(CTRL_CTXT_DEPTH_SHIFT))
778 		| ((sci->base & SC(CTRL_CTXT_BASE_MASK))
779 					<< SC(CTRL_CTXT_BASE_SHIFT));
780 	write_kctxt_csr(dd, hw_context, SC(CTRL), reg);
781 
782 	set_pio_integrity(sc);
783 
784 	/* unmask all errors */
785 	write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1);
786 
787 	/* set the default partition key */
788 	write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
789 			(SC(CHECK_PARTITION_KEY_VALUE_MASK) &
790 			 DEFAULT_PKEY) <<
791 			SC(CHECK_PARTITION_KEY_VALUE_SHIFT));
792 
793 	/* per context type checks */
794 	if (type == SC_USER) {
795 		opval = USER_OPCODE_CHECK_VAL;
796 		opmask = USER_OPCODE_CHECK_MASK;
797 	} else {
798 		opval = OPCODE_CHECK_VAL_DISABLED;
799 		opmask = OPCODE_CHECK_MASK_DISABLED;
800 	}
801 
802 	/* set the send context check opcode mask and value */
803 	write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
804 			((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
805 			((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));
806 
807 	/* set up credit return */
808 	reg = pa & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
809 	write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);
810 
811 	/*
812 	 * Calculate the initial credit return threshold.
813 	 *
814 	 * For Ack contexts, set a threshold for half the credits.
815 	 * For User contexts use the given percentage.  This has been
816 	 * sanitized on driver start-up.
817 	 * For Kernel contexts, use the default MTU plus a header
818 	 * or half the credits, whichever is smaller. This should
819 	 * work for both the 3-deep buffering allocation and the
820 	 * pooling allocation.
821 	 */
822 	if (type == SC_ACK) {
823 		thresh = sc_percent_to_threshold(sc, 50);
824 	} else if (type == SC_USER) {
825 		thresh = sc_percent_to_threshold(sc,
826 						 user_credit_return_threshold);
827 	} else { /* kernel */
828 		thresh = min(sc_percent_to_threshold(sc, 50),
829 			     sc_mtu_to_threshold(sc, hfi1_max_mtu,
830 						 hdrqentsize));
831 	}
832 	reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);
833 	/* add in early return */
834 	if (type == SC_USER && HFI1_CAP_IS_USET(EARLY_CREDIT_RETURN))
835 		reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
836 	else if (HFI1_CAP_IS_KSET(EARLY_CREDIT_RETURN)) /* kernel, ack */
837 		reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
838 
839 	/* set up write-through credit_ctrl */
840 	sc->credit_ctrl = reg;
841 	write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg);
842 
843 	/* User send contexts should not allow sending on VL15 */
844 	if (type == SC_USER) {
845 		reg = 1ULL << 15;
846 		write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg);
847 	}
848 
849 	spin_unlock_irqrestore(&dd->sc_lock, flags);
850 
851 	/*
852 	 * Allocate shadow ring to track outstanding PIO buffers _after_
853 	 * unlocking.  We don't know the size until the lock is held and
854 	 * we can't allocate while the lock is held.  No one is using
855 	 * the context yet, so allocate it now.
856 	 *
857 	 * User contexts do not get a shadow ring.
858 	 */
859 	if (type != SC_USER) {
860 		/*
861 		 * Size the shadow ring 1 larger than the number of credits
862 		 * so head == tail can mean empty.
863 		 */
864 		sc->sr_size = sci->credits + 1;
865 		sc->sr = kzalloc_node(sizeof(union pio_shadow_ring) *
866 				sc->sr_size, GFP_KERNEL, numa);
867 		if (!sc->sr) {
868 			sc_free(sc);
869 			return NULL;
870 		}
871 	}
872 
873 	hfi1_cdbg(PIO,
874 		  "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n",
875 		  sw_index,
876 		  hw_context,
877 		  sc_type_name(type),
878 		  sc->group,
879 		  sc->credits,
880 		  sc->credit_ctrl,
881 		  thresh);
882 
883 	return sc;
884 }
885 
886 /* free a per-NUMA send context structure */
887 void sc_free(struct send_context *sc)
888 {
889 	struct hfi1_devdata *dd;
890 	unsigned long flags;
891 	u32 sw_index;
892 	u32 hw_context;
893 
894 	if (!sc)
895 		return;
896 
897 	sc->flags |= SCF_IN_FREE;	/* ensure no restarts */
898 	dd = sc->dd;
899 	if (!list_empty(&sc->piowait))
900 		dd_dev_err(dd, "piowait list not empty!\n");
901 	sw_index = sc->sw_index;
902 	hw_context = sc->hw_context;
903 	sc_disable(sc);	/* make sure the HW is disabled */
904 	flush_work(&sc->halt_work);
905 
906 	spin_lock_irqsave(&dd->sc_lock, flags);
907 	dd->send_contexts[sw_index].sc = NULL;
908 
909 	/* clear/disable all registers set in sc_alloc */
910 	write_kctxt_csr(dd, hw_context, SC(CTRL), 0);
911 	write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0);
912 	write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0);
913 	write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0);
914 	write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0);
915 	write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0);
916 	write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0);
917 
918 	/* release the index and context for re-use */
919 	sc_hw_free(dd, sw_index, hw_context);
920 	spin_unlock_irqrestore(&dd->sc_lock, flags);
921 
922 	kfree(sc->sr);
923 	free_percpu(sc->buffers_allocated);
924 	kfree(sc);
925 }
926 
927 /* disable the context */
928 void sc_disable(struct send_context *sc)
929 {
930 	u64 reg;
931 	unsigned long flags;
932 	struct pio_buf *pbuf;
933 
934 	if (!sc)
935 		return;
936 
937 	/* do all steps, even if already disabled */
938 	spin_lock_irqsave(&sc->alloc_lock, flags);
939 	reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
940 	reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
941 	sc->flags &= ~SCF_ENABLED;
942 	sc_wait_for_packet_egress(sc, 1);
943 	write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
944 	spin_unlock_irqrestore(&sc->alloc_lock, flags);
945 
946 	/*
947 	 * Flush any waiters.  Once the context is disabled,
948 	 * credit return interrupts are stopped (although there
949 	 * could be one in-process when the context is disabled).
950 	 * Wait one microsecond for any lingering interrupts, then
951 	 * proceed with the flush.
952 	 */
953 	udelay(1);
954 	spin_lock_irqsave(&sc->release_lock, flags);
955 	if (sc->sr) {	/* this context has a shadow ring */
956 		while (sc->sr_tail != sc->sr_head) {
957 			pbuf = &sc->sr[sc->sr_tail].pbuf;
958 			if (pbuf->cb)
959 				(*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
960 			sc->sr_tail++;
961 			if (sc->sr_tail >= sc->sr_size)
962 				sc->sr_tail = 0;
963 		}
964 	}
965 	spin_unlock_irqrestore(&sc->release_lock, flags);
966 }
967 
968 /* return SendEgressCtxtStatus.PacketOccupancy */
969 #define packet_occupancy(r) \
970 	(((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\
971 	>> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT)
972 
973 /* is egress halted on the context? */
974 #define egress_halted(r) \
975 	((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK)
976 
977 /* wait for packet egress, optionally pause for credit return */
978 static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
979 {
980 	struct hfi1_devdata *dd = sc->dd;
981 	u64 reg = 0;
982 	u64 reg_prev;
983 	u32 loop = 0;
984 
985 	while (1) {
986 		reg_prev = reg;
987 		reg = read_csr(dd, sc->hw_context * 8 +
988 			       SEND_EGRESS_CTXT_STATUS);
989 		/* done if egress is stopped */
990 		if (egress_halted(reg))
991 			break;
992 		reg = packet_occupancy(reg);
993 		if (reg == 0)
994 			break;
995 		/* counter is reset if occupancy count changes */
996 		if (reg != reg_prev)
997 			loop = 0;
998 		if (loop > 50000) {
999 			/* timed out - bounce the link */
1000 			dd_dev_err(dd,
1001 				   "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
1002 				   __func__, sc->sw_index,
1003 				   sc->hw_context, (u32)reg);
1004 			queue_work(dd->pport->hfi1_wq,
1005 				   &dd->pport->link_bounce_work);
1006 			break;
1007 		}
1008 		loop++;
1009 		udelay(1);
1010 	}
1011 
1012 	if (pause)
1013 		/* Add additional delay to ensure chip returns all credits */
1014 		pause_for_credit_return(dd);
1015 }
1016 
1017 void sc_wait(struct hfi1_devdata *dd)
1018 {
1019 	int i;
1020 
1021 	for (i = 0; i < dd->num_send_contexts; i++) {
1022 		struct send_context *sc = dd->send_contexts[i].sc;
1023 
1024 		if (!sc)
1025 			continue;
1026 		sc_wait_for_packet_egress(sc, 0);
1027 	}
1028 }
1029 
1030 /*
1031  * Restart a context after it has been halted due to error.
1032  *
1033  * If the first step (waiting for the halt to be asserted) fails, return
1034  * early.  Otherwise complain about timeouts but keep going.
1035  *
1036  * It is expected that allocations (enabled flag bit) have been shut off
1037  * already (only applies to kernel contexts).
1038  */
1039 int sc_restart(struct send_context *sc)
1040 {
1041 	struct hfi1_devdata *dd = sc->dd;
1042 	u64 reg;
1043 	u32 loop;
1044 	int count;
1045 
1046 	/* bounce off if not halted, or being free'd */
1047 	if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
1048 		return -EINVAL;
1049 
1050 	dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
1051 		    sc->hw_context);
1052 
1053 	/*
1054 	 * Step 1: Wait for the context to actually halt.
1055 	 *
1056 	 * The error interrupt is asynchronous to actually setting halt
1057 	 * on the context.
1058 	 */
1059 	loop = 0;
1060 	while (1) {
1061 		reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
1062 		if (reg & SC(STATUS_CTXT_HALTED_SMASK))
1063 			break;
1064 		if (loop > 100) {
1065 			dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
1066 				   __func__, sc->sw_index, sc->hw_context);
1067 			return -ETIME;
1068 		}
1069 		loop++;
1070 		udelay(1);
1071 	}
1072 
1073 	/*
1074 	 * Step 2: Ensure no users are still trying to write to PIO.
1075 	 *
1076 	 * For kernel contexts, we have already turned off buffer allocation.
1077 	 * Now wait for the buffer count to go to zero.
1078 	 *
1079 	 * For user contexts, the user handling code has cut off write access
1080 	 * to the context's PIO pages before calling this routine and will
1081 	 * restore write access after this routine returns.
1082 	 */
1083 	if (sc->type != SC_USER) {
1084 		/* kernel context */
1085 		loop = 0;
1086 		while (1) {
1087 			count = get_buffers_allocated(sc);
1088 			if (count == 0)
1089 				break;
1090 			if (loop > 100) {
1091 				dd_dev_err(dd,
1092 					   "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
1093 					   __func__, sc->sw_index,
1094 					   sc->hw_context, count);
1095 			}
1096 			loop++;
1097 			udelay(1);
1098 		}
1099 	}
1100 
1101 	/*
1102 	 * Step 3: Wait for all packets to egress.
1103 	 * This is done while disabling the send context
1104 	 *
1105 	 * Step 4: Disable the context
1106 	 *
1107 	 * This is a superset of the halt.  After the disable, the
1108 	 * errors can be cleared.
1109 	 */
1110 	sc_disable(sc);
1111 
1112 	/*
1113 	 * Step 5: Enable the context
1114 	 *
1115 	 * This enable will clear the halted flag and per-send context
1116 	 * error flags.
1117 	 */
1118 	return sc_enable(sc);
1119 }
1120 
1121 /*
1122  * PIO freeze processing.  To be called after the TXE block is fully frozen.
1123  * Go through all frozen send contexts and disable them.  The contexts are
1124  * already stopped by the freeze.
1125  */
1126 void pio_freeze(struct hfi1_devdata *dd)
1127 {
1128 	struct send_context *sc;
1129 	int i;
1130 
1131 	for (i = 0; i < dd->num_send_contexts; i++) {
1132 		sc = dd->send_contexts[i].sc;
1133 		/*
1134 		 * Don't disable unallocated, unfrozen, or user send contexts.
1135 		 * User send contexts will be disabled when the process
1136 		 * calls into the driver to reset its context.
1137 		 */
1138 		if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
1139 			continue;
1140 
1141 		/* only need to disable, the context is already stopped */
1142 		sc_disable(sc);
1143 	}
1144 }
1145 
1146 /*
1147  * Unfreeze PIO for kernel send contexts.  The precondition for calling this
1148  * is that all PIO send contexts have been disabled and the SPC freeze has
1149  * been cleared.  Now perform the last step and re-enable each kernel context.
1150  * User (PSM) processing will occur when PSM calls into the kernel to
1151  * acknowledge the freeze.
1152  */
1153 void pio_kernel_unfreeze(struct hfi1_devdata *dd)
1154 {
1155 	struct send_context *sc;
1156 	int i;
1157 
1158 	for (i = 0; i < dd->num_send_contexts; i++) {
1159 		sc = dd->send_contexts[i].sc;
1160 		if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
1161 			continue;
1162 
1163 		sc_enable(sc);	/* will clear the sc frozen flag */
1164 	}
1165 }
1166 
1167 /*
1168  * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
1169  * Returns:
1170  *	-ETIMEDOUT - if we wait too long
1171  *	-EIO	   - if there was an error
1172  */
1173 static int pio_init_wait_progress(struct hfi1_devdata *dd)
1174 {
1175 	u64 reg;
1176 	int max, count = 0;
1177 
1178 	/* max is the longest possible HW init time / delay */
1179 	max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
1180 	while (1) {
1181 		reg = read_csr(dd, SEND_PIO_INIT_CTXT);
1182 		if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))
1183 			break;
1184 		if (count >= max)
1185 			return -ETIMEDOUT;
1186 		udelay(5);
1187 		count++;
1188 	}
1189 
1190 	return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0;
1191 }
1192 
1193 /*
1194  * Reset all of the send contexts to their power-on state.  Used
1195  * only during manual init - no lock against sc_enable needed.
1196  */
1197 void pio_reset_all(struct hfi1_devdata *dd)
1198 {
1199 	int ret;
1200 
1201 	/* make sure the init engine is not busy */
1202 	ret = pio_init_wait_progress(dd);
1203 	/* ignore any timeout */
1204 	if (ret == -EIO) {
1205 		/* clear the error */
1206 		write_csr(dd, SEND_PIO_ERR_CLEAR,
1207 			  SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
1208 	}
1209 
1210 	/* reset init all */
1211 	write_csr(dd, SEND_PIO_INIT_CTXT,
1212 		  SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
1213 	udelay(2);
1214 	ret = pio_init_wait_progress(dd);
1215 	if (ret < 0) {
1216 		dd_dev_err(dd,
1217 			   "PIO send context init %s while initializing all PIO blocks\n",
1218 			   ret == -ETIMEDOUT ? "is stuck" : "had an error");
1219 	}
1220 }
1221 
1222 /* enable the context */
1223 int sc_enable(struct send_context *sc)
1224 {
1225 	u64 sc_ctrl, reg, pio;
1226 	struct hfi1_devdata *dd;
1227 	unsigned long flags;
1228 	int ret = 0;
1229 
1230 	if (!sc)
1231 		return -EINVAL;
1232 	dd = sc->dd;
1233 
1234 	/*
1235 	 * Obtain the allocator lock to guard against any allocation
1236 	 * attempts (which should not happen prior to context being
1237 	 * enabled). On the release/disable side we don't need to
1238 	 * worry about locking since the releaser will not do anything
1239 	 * if the context accounting values have not changed.
1240 	 */
1241 	spin_lock_irqsave(&sc->alloc_lock, flags);
1242 	sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
1243 	if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK)))
1244 		goto unlock; /* already enabled */
1245 
1246 	/* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */
1247 
1248 	*sc->hw_free = 0;
1249 	sc->free = 0;
1250 	sc->alloc_free = 0;
1251 	sc->fill = 0;
1252 	sc->sr_head = 0;
1253 	sc->sr_tail = 0;
1254 	sc->flags = 0;
1255 	/* the alloc lock ensures no fast path allocation */
1256 	reset_buffers_allocated(sc);
1257 
1258 	/*
1259 	 * Clear all per-context errors.  Some of these will be set when
1260 	 * we are re-enabling after a context halt.  Now that the context
1261 	 * is disabled, the halt will not clear until after the PIO init
1262 	 * engine runs below.
1263 	 */
1264 	reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
1265 	if (reg)
1266 		write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);
1267 
1268 	/*
1269 	 * The HW PIO initialization engine can handle only one init
1270 	 * request at a time. Serialize access to each device's engine.
1271 	 */
1272 	spin_lock(&dd->sc_init_lock);
1273 	/*
1274 	 * Since access to this code block is serialized and
1275 	 * each access waits for the initialization to complete
1276 	 * before releasing the lock, the PIO initialization engine
1277 	 * should not be in use, so we don't have to wait for the
1278 	 * InProgress bit to go down.
1279 	 */
1280 	pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) <<
1281 	       SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT) |
1282 		SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK;
1283 	write_csr(dd, SEND_PIO_INIT_CTXT, pio);
1284 	/*
1285 	 * Wait until the engine is done.  Give the chip the required time
1286 	 * so, hopefully, we read the register just once.
1287 	 */
1288 	udelay(2);
1289 	ret = pio_init_wait_progress(dd);
1290 	spin_unlock(&dd->sc_init_lock);
1291 	if (ret) {
1292 		dd_dev_err(dd,
1293 			   "sctxt%u(%u): Context not enabled due to init failure %d\n",
1294 			   sc->sw_index, sc->hw_context, ret);
1295 		goto unlock;
1296 	}
1297 
1298 	/*
1299 	 * All is well. Enable the context.
1300 	 */
1301 	sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK);
1302 	write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);
1303 	/*
1304 	 * Read SendCtxtCtrl to force the write out and prevent a timing
1305 	 * hazard where a PIO write may reach the context before the enable.
1306 	 */
1307 	read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
1308 	sc->flags |= SCF_ENABLED;
1309 
1310 unlock:
1311 	spin_unlock_irqrestore(&sc->alloc_lock, flags);
1312 
1313 	return ret;
1314 }
1315 
1316 /* force a credit return on the context */
1317 void sc_return_credits(struct send_context *sc)
1318 {
1319 	if (!sc)
1320 		return;
1321 
1322 	/* a 0->1 transition schedules a credit return */
1323 	write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
1324 			SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
1325 	/*
1326 	 * Ensure that the write is flushed and the credit return is
1327 	 * scheduled. We care more about the 0 -> 1 transition.
1328 	 */
1329 	read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
1330 	/* set back to 0 for next time */
1331 	write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
1332 }
1333 
1334 /* allow all in-flight packets to drain on the context */
1335 void sc_flush(struct send_context *sc)
1336 {
1337 	if (!sc)
1338 		return;
1339 
1340 	sc_wait_for_packet_egress(sc, 1);
1341 }
1342 
1343 /* drop all packets on the context, no waiting until they are sent */
1344 void sc_drop(struct send_context *sc)
1345 {
1346 	if (!sc)
1347 		return;
1348 
1349 	dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
1350 		    __func__, sc->sw_index, sc->hw_context);
1351 }
1352 
1353 /*
1354  * Start the software reaction to a context halt or SPC freeze:
1355  *	- mark the context as halted or frozen
1356  *	- stop buffer allocations
1357  *
1358  * Called from the error interrupt.  Other work is deferred until
1359  * out of the interrupt.
1360  */
1361 void sc_stop(struct send_context *sc, int flag)
1362 {
1363 	unsigned long flags;
1364 
1365 	/* mark the context */
1366 	sc->flags |= flag;
1367 
1368 	/* stop buffer allocations */
1369 	spin_lock_irqsave(&sc->alloc_lock, flags);
1370 	sc->flags &= ~SCF_ENABLED;
1371 	spin_unlock_irqrestore(&sc->alloc_lock, flags);
1372 	wake_up(&sc->halt_wait);
1373 }
1374 
1375 #define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32))
1376 #define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS)
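
/*
 * Example (illustrative, assuming a 64-byte PIO block): BLOCK_DWORDS is
 * then 16, so a 100-dword packet (PBC included) occupies
 * dwords_to_blocks(100) == 7 send blocks.
 */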
1377 
1378 /*
1379  * The send context buffer "allocator".
1380  *
1381  * @sc: the PIO send context we are allocating from
1382  * @dw_len: length of the whole packet, including the PBC, in dwords
1383  * @cb: optional callback to call when the buffer is finished sending
1384  * @arg: argument for cb
1385  *
1386  * Return a pointer to a PIO buffer if successful, NULL if not enough room.
1387  */
1388 struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
1389 				pio_release_cb cb, void *arg)
1390 {
1391 	struct pio_buf *pbuf = NULL;
1392 	unsigned long flags;
1393 	unsigned long avail;
1394 	unsigned long blocks = dwords_to_blocks(dw_len);
1395 	unsigned long start_fill;
1396 	int trycount = 0;
1397 	u32 head, next;
1398 
1399 	spin_lock_irqsave(&sc->alloc_lock, flags);
1400 	if (!(sc->flags & SCF_ENABLED)) {
1401 		spin_unlock_irqrestore(&sc->alloc_lock, flags);
1402 		goto done;
1403 	}
1404 
1405 retry:
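	/*
	 * sc->fill and sc->alloc_free are free-running block counters, so
	 * (fill - alloc_free) is the number of blocks handed out that are
	 * not yet known to have been returned.  For example (illustrative),
	 * credits == 64, fill == 100 and alloc_free == 90 leaves
	 * avail == 54 blocks.
	 */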
1406 	avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free);
1407 	if (blocks > avail) {
1408 		/* not enough room */
1409 		if (unlikely(trycount))	{ /* already tried to get more room */
1410 			spin_unlock_irqrestore(&sc->alloc_lock, flags);
1411 			goto done;
1412 		}
1413 		/* copy from receiver cache line and recalculate */
1414 		sc->alloc_free = ACCESS_ONCE(sc->free);
1415 		avail =
1416 			(unsigned long)sc->credits -
1417 			(sc->fill - sc->alloc_free);
1418 		if (blocks > avail) {
1419 			/* still no room, actively update */
1420 			spin_unlock_irqrestore(&sc->alloc_lock, flags);
1421 			sc_release_update(sc);
1422 			spin_lock_irqsave(&sc->alloc_lock, flags);
1423 			sc->alloc_free = ACCESS_ONCE(sc->free);
1424 			trycount++;
1425 			goto retry;
1426 		}
1427 	}
1428 
1429 	/* there is enough room */
1430 
1431 	preempt_disable();
1432 	this_cpu_inc(*sc->buffers_allocated);
1433 
1434 	/* read this once */
1435 	head = sc->sr_head;
1436 
1437 	/* "allocate" the buffer */
1438 	start_fill = sc->fill;
1439 	sc->fill += blocks;
1440 
1441 	/*
1442 	 * Fill the parts that the releaser looks at before moving the head.
1443 	 * The only necessary piece is the sent_at field.  The credits
1444 	 * we have just allocated cannot have been returned yet, so the
1445 	 * cb and arg will not be looked at for a "while".  Put them
1446 	 * on this side of the memory barrier anyway.
1447 	 */
1448 	pbuf = &sc->sr[head].pbuf;
1449 	pbuf->sent_at = sc->fill;
1450 	pbuf->cb = cb;
1451 	pbuf->arg = arg;
1452 	pbuf->sc = sc;	/* could be filled in at sc->sr init time */
1453 	/* make sure this is in memory before updating the head */
1454 
1455 	/* calculate next head index, do not store */
1456 	next = head + 1;
1457 	if (next >= sc->sr_size)
1458 		next = 0;
1459 	/*
1460 	 * update the head - must be last! - the releaser can look at fields
1461 	 * in pbuf once we move the head
1462 	 */
1463 	smp_wmb();
1464 	sc->sr_head = next;
1465 	spin_unlock_irqrestore(&sc->alloc_lock, flags);
1466 
1467 	/* finish filling in the buffer outside the lock */
1468 	pbuf->start = sc->base_addr + ((start_fill % sc->credits)
1469 							* PIO_BLOCK_SIZE);
1470 	pbuf->size = sc->credits * PIO_BLOCK_SIZE;
1471 	pbuf->end = sc->base_addr + pbuf->size;
1472 	pbuf->block_count = blocks;
1473 	pbuf->qw_written = 0;
1474 	pbuf->carry_bytes = 0;
1475 	pbuf->carry.val64 = 0;
1476 done:
1477 	return pbuf;
1478 }
1479 
1480 /*
1481  * There are at least two entities that can turn on credit return
1482  * interrupts and they can overlap.  Avoid problems by implementing
1483  * a count scheme that is enforced by a lock.  The lock is needed because
1484  * the count and CSR write must be paired.
1485  */
1486 
1487 /*
1488  * Start credit return interrupts.  This is managed by a count.  If already
1489  * on, just increment the count.
1490  */
1491 void sc_add_credit_return_intr(struct send_context *sc)
1492 {
1493 	unsigned long flags;
1494 
1495 	/* lock must surround both the count change and the CSR update */
1496 	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
1497 	if (sc->credit_intr_count == 0) {
1498 		sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
1499 		write_kctxt_csr(sc->dd, sc->hw_context,
1500 				SC(CREDIT_CTRL), sc->credit_ctrl);
1501 	}
1502 	sc->credit_intr_count++;
1503 	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
1504 }
1505 
1506 /*
1507  * Stop credit return interrupts.  This is managed by a count.  Decrement the
1508  * count; if this is the last user, turn the credit interrupts off.
1509  */
1510 void sc_del_credit_return_intr(struct send_context *sc)
1511 {
1512 	unsigned long flags;
1513 
1514 	WARN_ON(sc->credit_intr_count == 0);
1515 
1516 	/* lock must surround both the count change and the CSR update */
1517 	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
1518 	sc->credit_intr_count--;
1519 	if (sc->credit_intr_count == 0) {
1520 		sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
1521 		write_kctxt_csr(sc->dd, sc->hw_context,
1522 				SC(CREDIT_CTRL), sc->credit_ctrl);
1523 	}
1524 	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
1525 }
1526 
1527 /*
1528  * The caller must be careful when calling this.  All needint calls
1529  * must be paired with !needint.
1530  */
1531 void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
1532 {
1533 	if (needint)
1534 		sc_add_credit_return_intr(sc);
1535 	else
1536 		sc_del_credit_return_intr(sc);
1537 	trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
1538 	if (needint) {
1539 		mmiowb();
1540 		sc_return_credits(sc);
1541 	}
1542 }
1543 
1544 /**
1545  * sc_piobufavail - callback when a PIO buffer is available
1546  * @sc: the send context
1547  *
1548  * This is called from the interrupt handler when a PIO buffer is
1549  * available after hfi1_verbs_send() returned an error that no buffers were
1550  * available. Disable the interrupt if there are no more QPs waiting.
1551  */
1552 static void sc_piobufavail(struct send_context *sc)
1553 {
1554 	struct hfi1_devdata *dd = sc->dd;
1555 	struct hfi1_ibdev *dev = &dd->verbs_dev;
1556 	struct list_head *list;
1557 	struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
1558 	struct rvt_qp *qp;
1559 	struct hfi1_qp_priv *priv;
1560 	unsigned long flags;
1561 	unsigned i, n = 0;
1562 
1563 	if (dd->send_contexts[sc->sw_index].type != SC_KERNEL &&
1564 	    dd->send_contexts[sc->sw_index].type != SC_VL15)
1565 		return;
1566 	list = &sc->piowait;
1567 	/*
1568 	 * Note: checking that the piowait list is empty and clearing
1569 	 * the buffer available interrupt needs to be atomic or we
1570 	 * could end up with QPs on the wait list with the interrupt
1571 	 * disabled.
1572 	 */
1573 	write_seqlock_irqsave(&dev->iowait_lock, flags);
1574 	while (!list_empty(list)) {
1575 		struct iowait *wait;
1576 
1577 		if (n == ARRAY_SIZE(qps))
1578 			break;
1579 		wait = list_first_entry(list, struct iowait, list);
1580 		qp = iowait_to_qp(wait);
1581 		priv = qp->priv;
1582 		list_del_init(&priv->s_iowait.list);
1583 		/* refcount held until actual wake up */
1584 		qps[n++] = qp;
1585 	}
1586 	/*
1587 	 * If there had been waiters and there are more,
1588 	 * ensure that we redo the force to avoid a potential hang.
1589 	 */
1590 	if (n) {
1591 		hfi1_sc_wantpiobuf_intr(sc, 0);
1592 		if (!list_empty(list))
1593 			hfi1_sc_wantpiobuf_intr(sc, 1);
1594 	}
1595 	write_sequnlock_irqrestore(&dev->iowait_lock, flags);
1596 
1597 	for (i = 0; i < n; i++)
1598 		hfi1_qp_wakeup(qps[i],
1599 			       RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN);
1600 }
1601 
1602 /* translate a send credit update to a bit code of reasons */
1603 static inline int fill_code(u64 hw_free)
1604 {
1605 	int code = 0;
1606 
1607 	if (hw_free & CR_STATUS_SMASK)
1608 		code |= PRC_STATUS_ERR;
1609 	if (hw_free & CR_CREDIT_RETURN_DUE_TO_PBC_SMASK)
1610 		code |= PRC_PBC;
1611 	if (hw_free & CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK)
1612 		code |= PRC_THRESHOLD;
1613 	if (hw_free & CR_CREDIT_RETURN_DUE_TO_ERR_SMASK)
1614 		code |= PRC_FILL_ERR;
1615 	if (hw_free & CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK)
1616 		code |= PRC_SC_DISABLE;
1617 	return code;
1618 }
1619 
1620 /* use the jiffies compare to get the wrap right */
1621 #define sent_before(a, b) time_before(a, b)	/* a < b */
1622 
1623 /*
1624  * The send context buffer "releaser".
1625  */
1626 void sc_release_update(struct send_context *sc)
1627 {
1628 	struct pio_buf *pbuf;
1629 	u64 hw_free;
1630 	u32 head, tail;
1631 	unsigned long old_free;
1632 	unsigned long free;
1633 	unsigned long extra;
1634 	unsigned long flags;
1635 	int code;
1636 
1637 	if (!sc)
1638 		return;
1639 
1640 	spin_lock_irqsave(&sc->release_lock, flags);
1641 	/* update free */
1642 	hw_free = le64_to_cpu(*sc->hw_free);		/* volatile read */
1643 	old_free = sc->free;
1644 	extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
1645 			- (old_free & CR_COUNTER_MASK))
1646 				& CR_COUNTER_MASK;
1647 	free = old_free + extra;
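	/*
	 * The masked subtraction above handles hardware counter wrap.
	 * Illustrative example with a hypothetical 4-bit counter
	 * (CR_COUNTER_MASK == 0xf): old_free & 0xf == 0xe and a hardware
	 * count of 0x2 gives extra == 4, keeping sc->free monotonic.
	 */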
1648 	trace_hfi1_piofree(sc, extra);
1649 
1650 	/* call sent buffer callbacks */
1651 	code = -1;				/* code not yet set */
1652 	head = ACCESS_ONCE(sc->sr_head);	/* snapshot the head */
1653 	tail = sc->sr_tail;
1654 	while (head != tail) {
1655 		pbuf = &sc->sr[tail].pbuf;
1656 
1657 		if (sent_before(free, pbuf->sent_at)) {
1658 			/* not sent yet */
1659 			break;
1660 		}
1661 		if (pbuf->cb) {
1662 			if (code < 0) /* fill in code on first user */
1663 				code = fill_code(hw_free);
1664 			(*pbuf->cb)(pbuf->arg, code);
1665 		}
1666 
1667 		tail++;
1668 		if (tail >= sc->sr_size)
1669 			tail = 0;
1670 	}
1671 	sc->sr_tail = tail;
1672 	/* make sure tail is updated before free */
1673 	smp_wmb();
1674 	sc->free = free;
1675 	spin_unlock_irqrestore(&sc->release_lock, flags);
1676 	sc_piobufavail(sc);
1677 }
1678 
1679 /*
1680  * Send context group releaser.  The argument is the hardware context that
1681  * caused the interrupt.  Called from the send context interrupt handler.
1682  *
1683  * Call release on all contexts in the group.
1684  *
1685  * This routine takes the sc_lock without an irqsave because it is only
1686  * called from an interrupt handler.  Adjust if that changes.
1687  */
1688 void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
1689 {
1690 	struct send_context *sc;
1691 	u32 sw_index;
1692 	u32 gc, gc_end;
1693 
1694 	spin_lock(&dd->sc_lock);
1695 	sw_index = dd->hw_to_sw[hw_context];
1696 	if (unlikely(sw_index >= dd->num_send_contexts)) {
1697 		dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
1698 			   __func__, hw_context, sw_index);
1699 		goto done;
1700 	}
1701 	sc = dd->send_contexts[sw_index].sc;
1702 	if (unlikely(!sc))
1703 		goto done;
1704 
1705 	gc = group_context(hw_context, sc->group);
1706 	gc_end = gc + group_size(sc->group);
1707 	for (; gc < gc_end; gc++) {
1708 		sw_index = dd->hw_to_sw[gc];
1709 		if (unlikely(sw_index >= dd->num_send_contexts)) {
1710 			dd_dev_err(dd,
1711 				   "%s: invalid hw (%u) to sw (%u) mapping\n",
1712 				   __func__, hw_context, sw_index);
1713 			continue;
1714 		}
1715 		sc_release_update(dd->send_contexts[sw_index].sc);
1716 	}
1717 done:
1718 	spin_unlock(&dd->sc_lock);
1719 }
1720 
1721 /*
1722  * pio_select_send_context_vl() - select send context
1723  * @dd: devdata
1724  * @selector: a spreading factor
1725  * @vl: this vl
1726  *
1727  * This function returns a send context based on the selector and a vl.
1728  * The mapping fields are protected by RCU
1729  */
1730 struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
1731 						u32 selector, u8 vl)
1732 {
1733 	struct pio_vl_map *m;
1734 	struct pio_map_elem *e;
1735 	struct send_context *rval;
1736 
1737 	/*
1738 	 * NOTE: This should only happen if SC->VL changed after the initial
1739 	 * checks on the QP/AH.
1740 	 * The default below will return VL0's send context.
1741 	 */
1742 	if (unlikely(vl >= num_vls)) {
1743 		rval = NULL;
1744 		goto done;
1745 	}
1746 
1747 	rcu_read_lock();
1748 	m = rcu_dereference(dd->pio_map);
1749 	if (unlikely(!m)) {
1750 		rcu_read_unlock();
1751 		return dd->vld[0].sc;
1752 	}
1753 	e = m->map[vl & m->mask];
1754 	rval = e->ksc[selector & e->mask];
1755 	rcu_read_unlock();
1756 
1757 done:
1758 	rval = !rval ? dd->vld[0].sc : rval;
1759 	return rval;
1760 }
1761 
1762 /*
1763  * pio_select_send_context_sc() - select send context
1764  * @dd: devdata
1765  * @selector: a spreading factor
1766  * @sc5: the 5 bit sc
1767  *
1768  * This function returns a send context based on the selector and an sc.
1769  */
1770 struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
1771 						u32 selector, u8 sc5)
1772 {
1773 	u8 vl = sc_to_vlt(dd, sc5);
1774 
1775 	return pio_select_send_context_vl(dd, selector, vl);
1776 }
1777 
1778 /*
1779  * Free the indicated map struct
1780  */
1781 static void pio_map_free(struct pio_vl_map *m)
1782 {
1783 	int i;
1784 
1785 	for (i = 0; m && i < m->actual_vls; i++)
1786 		kfree(m->map[i]);
1787 	kfree(m);
1788 }
1789 
1790 /*
1791  * Handle RCU callback
1792  */
1793 static void pio_map_rcu_callback(struct rcu_head *list)
1794 {
1795 	struct pio_vl_map *m = container_of(list, struct pio_vl_map, list);
1796 
1797 	pio_map_free(m);
1798 }
1799 
1800 /*
1801  * Set credit return threshold for the kernel send context
1802  */
1803 static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
1804 {
1805 	u32 thres;
1806 
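	/*
	 * Take the smaller of the two triggers: 50 percent of the context's
	 * credits or enough credits for one full-MTU packet on this VL,
	 * presumably so a credit return is requested before either a half
	 * full context or a single large packet can stall the sender.
	 */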
1807 	thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
1808 					    50),
1809 		    sc_mtu_to_threshold(dd->kernel_send_context[scontext],
1810 					dd->vld[i].mtu,
1811 					dd->rcd[0]->rcvhdrqentsize));
1812 	sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
1813 }
1814 
1815 /*
1816  * pio_map_init - called when the number of vls changes
1817  * @dd: hfi1_devdata
1818  * @port: port number
1819  * @num_vls: number of vls
1820  * @vl_scontexts: per vl send context mapping (optional)
1821  *
1822  * This routine changes the mapping based on the number of vls.
1823  *
1824  * vl_scontexts is used to specify a non-uniform vl/send context
1825  * loading. NULL implies auto computing the loading and giving each
1826  * VL a uniform distribution of send contexts.
1827  *
1828  * The auto algorithm computes the sc_per_vl and the number of extra
1829  * send contexts. Any extra send contexts are added from the last VL
1830  * on down.
1831  *
1832  * rcu locking is used here to control access to the mapping fields.
1833  *
1834  * If either num_vls or num_send_contexts is not a power of 2, the
1835  * array sizes in the struct pio_vl_map and the struct pio_map_elem are
1836  * rounded up to the next highest power of 2 and the earlier entries are
1837  * reused in a round-robin fashion.
1838  *
1839  * If an error occurs, the map change is not done and the existing
1840  * mapping is left unchanged.
1841  *
1842  */
1843 int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
1844 {
1845 	int i, j;
1846 	int extra, sc_per_vl;
1847 	int scontext = 1;
1848 	int num_kernel_send_contexts = 0;
1849 	u8 lvl_scontexts[OPA_MAX_VLS];
1850 	struct pio_vl_map *oldmap, *newmap;
1851 
1852 	if (!vl_scontexts) {
1853 		for (i = 0; i < dd->num_send_contexts; i++)
1854 			if (dd->send_contexts[i].type == SC_KERNEL)
1855 				num_kernel_send_contexts++;
1856 		/* truncate divide */
1857 		sc_per_vl = num_kernel_send_contexts / num_vls;
1858 		/* extras */
1859 		extra = num_kernel_send_contexts % num_vls;
1860 		vl_scontexts = lvl_scontexts;
1861 		/* add extras from last vl down */
1862 		for (i = num_vls - 1; i >= 0; i--, extra--)
1863 			vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0);
1864 	}
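	/*
	 * Worked example of the auto distribution above: 16 kernel send
	 * contexts and 5 VLs give sc_per_vl = 3 and extra = 1, so
	 * vl_scontexts = { 3, 3, 3, 3, 4 } (the extra context lands on
	 * the last VL).
	 */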
1865 	/* build new map */
1866 	newmap = kzalloc(sizeof(*newmap) +
1867 			 roundup_pow_of_two(num_vls) *
1868 			 sizeof(struct pio_map_elem *),
1869 			 GFP_KERNEL);
1870 	if (!newmap)
1871 		goto bail;
1872 	newmap->actual_vls = num_vls;
1873 	newmap->vls = roundup_pow_of_two(num_vls);
1874 	newmap->mask = (1 << ilog2(newmap->vls)) - 1;
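	/*
	 * Example: num_vls = 5 rounds up to vls = 8, so mask = 0x7 and the
	 * reader can index map[vl & mask] without a bounds check.
	 */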
1875 	for (i = 0; i < newmap->vls; i++) {
1876 		/* save for wrap around */
1877 		int first_scontext = scontext;
1878 
1879 		if (i < newmap->actual_vls) {
1880 			int sz = roundup_pow_of_two(vl_scontexts[i]);
1881 
1882 			/* only allocate once */
1883 			newmap->map[i] =
1884 				kzalloc(sizeof(*newmap->map[i]) +
1885 					sz * sizeof(struct send_context *),
1886 					GFP_KERNEL);
1887 			if (!newmap->map[i])
1888 				goto bail;
1889 			newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
1890 			/*
1891 			 * assign send contexts and
1892 			 * adjust credit return threshold
1893 			 */
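			/*
			 * Example: vl_scontexts[i] = 3 gives sz = 4; slots
			 * 0..2 get the three contexts and slot 3 wraps back
			 * to the first, so any selector & mask value lands
			 * on a valid context.
			 */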
1894 			for (j = 0; j < sz; j++) {
1895 				if (dd->kernel_send_context[scontext]) {
1896 					newmap->map[i]->ksc[j] =
1897 					dd->kernel_send_context[scontext];
1898 					set_threshold(dd, scontext, i);
1899 				}
1900 				if (++scontext >= first_scontext +
1901 						  vl_scontexts[i])
1902 					/* wrap back to first send context */
1903 					scontext = first_scontext;
1904 			}
1905 		} else {
1906 			/* just re-use entry without allocating */
1907 			newmap->map[i] = newmap->map[i % num_vls];
1908 		}
1909 		scontext = first_scontext + vl_scontexts[i];
1910 	}
1911 	/* newmap in hand, save old map */
1912 	spin_lock_irq(&dd->pio_map_lock);
1913 	oldmap = rcu_dereference_protected(dd->pio_map,
1914 					   lockdep_is_held(&dd->pio_map_lock));
1915 
1916 	/* publish newmap */
1917 	rcu_assign_pointer(dd->pio_map, newmap);
1918 
1919 	spin_unlock_irq(&dd->pio_map_lock);
1920 	/* success, free any old map after grace period */
1921 	if (oldmap)
1922 		call_rcu(&oldmap->list, pio_map_rcu_callback);
1923 	return 0;
1924 bail:
1925 	/* free any partial allocation */
1926 	pio_map_free(newmap);
1927 	return -ENOMEM;
1928 }
1929 
1930 void free_pio_map(struct hfi1_devdata *dd)
1931 {
1932 	/* Free PIO map if allocated */
1933 	if (rcu_access_pointer(dd->pio_map)) {
1934 		spin_lock_irq(&dd->pio_map_lock);
1935 		pio_map_free(rcu_access_pointer(dd->pio_map));
1936 		RCU_INIT_POINTER(dd->pio_map, NULL);
1937 		spin_unlock_irq(&dd->pio_map_lock);
1938 		synchronize_rcu();
1939 	}
1940 	kfree(dd->kernel_send_context);
1941 	dd->kernel_send_context = NULL;
1942 }
1943 
1944 int init_pervl_scs(struct hfi1_devdata *dd)
1945 {
1946 	int i;
1947 	u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */
1948 	u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */
1949 	u32 ctxt;
1950 	struct hfi1_pportdata *ppd = dd->pport;
1951 
1952 	dd->vld[15].sc = sc_alloc(dd, SC_VL15,
1953 				  dd->rcd[0]->rcvhdrqentsize, dd->node);
1954 	if (!dd->vld[15].sc)
1955 		return -ENOMEM;
1956 
1957 	hfi1_init_ctxt(dd->vld[15].sc);
1958 	dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
1959 
1960 	dd->kernel_send_context = kzalloc_node(dd->num_send_contexts *
1961 					sizeof(struct send_context *),
1962 					GFP_KERNEL, dd->node);
1963 	if (!dd->kernel_send_context)
1964 		goto freesc15;
1965 
1966 	dd->kernel_send_context[0] = dd->vld[15].sc;
1967 
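	/*
	 * Layout of kernel_send_context[]: slot 0 is the VL15 context, slots
	 * 1..num_vls are the per-VL contexts allocated below, and the
	 * remaining slots up to INIT_SC_PER_VL * num_vls are extra kernel
	 * contexts that pio_map_init() distributes across the VLs.
	 */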
1968 	for (i = 0; i < num_vls; i++) {
1969 		/*
1970 		 * Since this function does not deal with a specific
1971 		 * receive context but we need the RcvHdrQ entry size,
1972 		 * use the size from rcd[0]. It is guaranteed to be
1973 		 * valid at this point and will remain the same for all
1974 		 * receive contexts.
1975 		 */
1976 		dd->vld[i].sc = sc_alloc(dd, SC_KERNEL,
1977 					 dd->rcd[0]->rcvhdrqentsize, dd->node);
1978 		if (!dd->vld[i].sc)
1979 			goto nomem;
1980 		dd->kernel_send_context[i + 1] = dd->vld[i].sc;
1981 		hfi1_init_ctxt(dd->vld[i].sc);
1982 		/* non VL15 start with the max MTU */
1983 		dd->vld[i].mtu = hfi1_max_mtu;
1984 	}
1985 	for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
1986 		dd->kernel_send_context[i + 1] =
1987 		sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node);
1988 		if (!dd->kernel_send_context[i + 1])
1989 			goto nomem;
1990 		hfi1_init_ctxt(dd->kernel_send_context[i + 1]);
1991 	}
1992 
1993 	sc_enable(dd->vld[15].sc);
1994 	ctxt = dd->vld[15].sc->hw_context;
1995 	mask = all_vl_mask & ~(1LL << 15);
1996 	write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
1997 	dd_dev_info(dd,
1998 		    "Using send context %u(%u) for VL15\n",
1999 		    dd->vld[15].sc->sw_index, ctxt);
2000 
2001 	for (i = 0; i < num_vls; i++) {
2002 		sc_enable(dd->vld[i].sc);
2003 		ctxt = dd->vld[i].sc->hw_context;
2004 		mask = all_vl_mask & ~(data_vls_mask);
2005 		write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
2006 	}
2007 	for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
2008 		sc_enable(dd->kernel_send_context[i + 1]);
2009 		ctxt = dd->kernel_send_context[i + 1]->hw_context;
2010 		mask = all_vl_mask & ~(data_vls_mask);
2011 		write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
2012 	}
2013 
2014 	if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
2015 		goto nomem;
2016 	return 0;
2017 
2018 nomem:
2019 	for (i = 0; i < num_vls; i++) {
2020 		sc_free(dd->vld[i].sc);
2021 		dd->vld[i].sc = NULL;
2022 	}
2023 
2024 	for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++)
2025 		sc_free(dd->kernel_send_context[i + 1]);
2026 
2027 	kfree(dd->kernel_send_context);
2028 	dd->kernel_send_context = NULL;
2029 
2030 freesc15:
2031 	sc_free(dd->vld[15].sc);
2032 	return -ENOMEM;
2033 }
2034 
2035 int init_credit_return(struct hfi1_devdata *dd)
2036 {
2037 	int ret;
2038 	int num_numa;
2039 	int i;
2040 
2041 	num_numa = num_online_nodes();
2042 	/* enforce the expectation that the numas are compact */
2043 	for (i = 0; i < num_numa; i++) {
2044 		if (!node_online(i)) {
2045 			dd_dev_err(dd, "NUMA nodes are not compact\n");
2046 			ret = -EINVAL;
2047 			goto done;
2048 		}
2049 	}
2050 
2051 	dd->cr_base = kcalloc(
2052 		num_numa,
2053 		sizeof(struct credit_return_base),
2054 		GFP_KERNEL);
2055 	if (!dd->cr_base) {
2056 		dd_dev_err(dd, "Unable to allocate credit return base\n");
2057 		ret = -ENOMEM;
2058 		goto done;
2059 	}
2060 	for (i = 0; i < num_numa; i++) {
2061 		int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
2062 
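		/*
		 * Point the PCI device at NUMA node i so the coherent DMA
		 * buffer for this node's credit returns is allocated on that
		 * node; the device's node is restored after the loop and on
		 * the error path.
		 */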
2063 		set_dev_node(&dd->pcidev->dev, i);
2064 		dd->cr_base[i].va = dma_zalloc_coherent(
2065 					&dd->pcidev->dev,
2066 					bytes,
2067 					&dd->cr_base[i].pa,
2068 					GFP_KERNEL);
2069 		if (!dd->cr_base[i].va) {
2070 			set_dev_node(&dd->pcidev->dev, dd->node);
2071 			dd_dev_err(dd,
2072 				   "Unable to allocate credit return DMA range for NUMA %d\n",
2073 				   i);
2074 			ret = -ENOMEM;
2075 			goto done;
2076 		}
2077 	}
2078 	set_dev_node(&dd->pcidev->dev, dd->node);
2079 
2080 	ret = 0;
2081 done:
2082 	return ret;
2083 }
2084 
2085 void free_credit_return(struct hfi1_devdata *dd)
2086 {
2087 	int num_numa;
2088 	int i;
2089 
2090 	if (!dd->cr_base)
2091 		return;
2092 
2093 	num_numa = num_online_nodes();
2094 	for (i = 0; i < num_numa; i++) {
2095 		if (dd->cr_base[i].va) {
2096 			dma_free_coherent(&dd->pcidev->dev,
2097 					  TXE_NUM_CONTEXTS *
2098 					  sizeof(struct credit_return),
2099 					  dd->cr_base[i].va,
2100 					  dd->cr_base[i].pa);
2101 		}
2102 	}
2103 	kfree(dd->cr_base);
2104 	dd->cr_base = NULL;
2105 }
2106