1 /*
2  * Copyright(c) 2015, 2016 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 
48 #include <linux/pci.h>
49 #include <linux/netdevice.h>
50 #include <linux/vmalloc.h>
51 #include <linux/delay.h>
52 #include <linux/idr.h>
53 #include <linux/module.h>
54 #include <linux/printk.h>
55 #include <linux/hrtimer.h>
56 #include <rdma/rdma_vt.h>
57 
58 #include "hfi.h"
59 #include "device.h"
60 #include "common.h"
61 #include "trace.h"
62 #include "mad.h"
63 #include "sdma.h"
64 #include "debugfs.h"
65 #include "verbs.h"
66 #include "aspm.h"
67 #include "affinity.h"
68 
69 #undef pr_fmt
70 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
71 
72 /*
73  * min buffers we want to have per context, after driver
74  */
75 #define HFI1_MIN_USER_CTXT_BUFCNT 7
76 
77 #define HFI1_MIN_HDRQ_EGRBUF_CNT 2
78 #define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
79 #define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
80 #define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */
81 
82 /*
83  * Number of user receive contexts we are configured to use (to allow for more
84  * pio buffers per ctxt, etc.).  A negative value means one user context per CPU.
85  */
86 int num_user_contexts = -1;
87 module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO);
88 MODULE_PARM_DESC(
89 	num_user_contexts, "Set max number of user contexts to use");
90 
91 uint krcvqs[RXE_NUM_DATA_VL];
92 int krcvqsset;
93 module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
94 MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
95 
96 /* computed based on above array */
97 unsigned long n_krcvqs;
98 
99 static unsigned hfi1_rcvarr_split = 25;
100 module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
101 MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");
102 
103 static uint eager_buffer_size = (2 << 20); /* 2MB */
104 module_param(eager_buffer_size, uint, S_IRUGO);
105 MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 2MB");
106 
107 static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
108 module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
109 MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");
110 
111 static uint hfi1_hdrq_entsize = 32;
112 module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
113 MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");
114 
115 unsigned int user_credit_return_threshold = 33;	/* default is 33% */
116 module_param(user_credit_return_threshold, uint, S_IRUGO);
117 MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits pass this threshold (in percent of allocated blocks, 0 is off)");
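
/*
 * Module parameter example (illustrative values only):
 *
 *   modprobe hfi1 krcvqs=2,2 rcvhdrcnt=4096 eager_buffer_size=4194304
 *
 * would request two non-control kernel receive queues on each of the first
 * two data VLs (summed into n_krcvqs by compute_krcvqs()), a 4096-entry
 * receive header queue per context, and 4MB of eager buffer space per
 * context.  All of these parameters are read-only (S_IRUGO) after load.
 */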
118 
119 static inline u64 encode_rcv_header_entry_size(u16);
120 
121 static struct idr hfi1_unit_table;
122 u32 hfi1_cpulist_count;
123 unsigned long *hfi1_cpulist;
124 
125 /*
126  * Common code for creating the receive context array.
127  */
128 int hfi1_create_ctxts(struct hfi1_devdata *dd)
129 {
130 	unsigned i;
131 	int ret;
132 
133 	/* Control context must always be context 0 */
134 	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);
135 
136 	dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd),
137 			       GFP_KERNEL, dd->node);
138 	if (!dd->rcd)
139 		goto nomem;
140 
141 	/* create one or more kernel contexts */
142 	for (i = 0; i < dd->first_user_ctxt; ++i) {
143 		struct hfi1_pportdata *ppd;
144 		struct hfi1_ctxtdata *rcd;
145 
146 		ppd = dd->pport + (i % dd->num_pports);
147 		rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
148 		if (!rcd) {
149 			dd_dev_err(dd,
150 				   "Unable to allocate kernel receive context, failing\n");
151 			goto nomem;
152 		}
153 		/*
154 		 * Set up the kernel context flags here and now because they
155 		 * use default values for all receive side memories.  User
156 		 * contexts will be handled as they are created.
157 		 */
158 		rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
159 			HFI1_CAP_KGET(NODROP_RHQ_FULL) |
160 			HFI1_CAP_KGET(NODROP_EGR_FULL) |
161 			HFI1_CAP_KGET(DMA_RTAIL);
162 
163 		/* Control context must use DMA_RTAIL */
164 		if (rcd->ctxt == HFI1_CTRL_CTXT)
165 			rcd->flags |= HFI1_CAP_DMA_RTAIL;
166 		rcd->seq_cnt = 1;
167 
168 		rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
169 		if (!rcd->sc) {
170 			dd_dev_err(dd,
171 				   "Unable to allocate kernel send context, failing\n");
172 			dd->rcd[rcd->ctxt] = NULL;
173 			hfi1_free_ctxtdata(dd, rcd);
174 			goto nomem;
175 		}
176 
177 		ret = hfi1_init_ctxt(rcd->sc);
178 		if (ret < 0) {
179 			dd_dev_err(dd,
180 				   "Failed to setup kernel receive context, failing\n");
181 			sc_free(rcd->sc);
182 			dd->rcd[rcd->ctxt] = NULL;
183 			hfi1_free_ctxtdata(dd, rcd);
184 			ret = -EFAULT;
185 			goto bail;
186 		}
187 	}
188 
189 	/*
190 	 * Initialize aspm, to be done after gen3 transition and setting up
191 	 * contexts and before enabling interrupts
192 	 */
193 	aspm_init(dd);
194 
195 	return 0;
196 nomem:
197 	ret = -ENOMEM;
198 bail:
199 	kfree(dd->rcd);
200 	dd->rcd = NULL;
201 	return ret;
202 }
203 
204 /*
205  * Common code for user and kernel context setup.
206  */
207 struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
208 					   int numa)
209 {
210 	struct hfi1_devdata *dd = ppd->dd;
211 	struct hfi1_ctxtdata *rcd;
212 	unsigned kctxt_ngroups = 0;
213 	u32 base;
214 
215 	if (dd->rcv_entries.nctxt_extra >
216 	    dd->num_rcv_contexts - dd->first_user_ctxt)
217 		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
218 				 (dd->num_rcv_contexts - dd->first_user_ctxt));
219 	rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
220 	if (rcd) {
221 		u32 rcvtids, max_entries;
222 
223 		hfi1_cdbg(PROC, "setting up context %u\n", ctxt);
224 
225 		INIT_LIST_HEAD(&rcd->qp_wait_list);
226 		rcd->ppd = ppd;
227 		rcd->dd = dd;
228 		rcd->cnt = 1;
229 		rcd->ctxt = ctxt;
230 		dd->rcd[ctxt] = rcd;
231 		rcd->numa_id = numa;
232 		rcd->rcv_array_groups = dd->rcv_entries.ngroups;
233 
234 		mutex_init(&rcd->exp_lock);
235 
236 		/*
237 		 * Calculate the context's RcvArray entry starting point.
238 		 * We do this here because we have to take into account all
239 		 * the RcvArray entries that previous contexts have taken, and
240 		 * we have to account for any extra groups assigned to the
241 		 * kernel or user contexts.
242 		 */
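		/*
		 * Worked example (illustrative numbers): with ngroups = 8 and
		 * kctxt_ngroups = 2, kernel context 0 starts at group 0 and
		 * context 1 at group 9 (each takes ngroups + 1 groups), while
		 * kernel context 2 starts at group 2 + 2 * 8 = 18.  The same
		 * scheme is applied to user contexts below, offset by the
		 * groups consumed by the kernel contexts.
		 */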
243 		if (ctxt < dd->first_user_ctxt) {
244 			if (ctxt < kctxt_ngroups) {
245 				base = ctxt * (dd->rcv_entries.ngroups + 1);
246 				rcd->rcv_array_groups++;
247 			} else
248 				base = kctxt_ngroups +
249 					(ctxt * dd->rcv_entries.ngroups);
250 		} else {
251 			u16 ct = ctxt - dd->first_user_ctxt;
252 
253 			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
254 				kctxt_ngroups);
255 			if (ct < dd->rcv_entries.nctxt_extra) {
256 				base += ct * (dd->rcv_entries.ngroups + 1);
257 				rcd->rcv_array_groups++;
258 			} else
259 				base += dd->rcv_entries.nctxt_extra +
260 					(ct * dd->rcv_entries.ngroups);
261 		}
262 		rcd->eager_base = base * dd->rcv_entries.group_size;
263 
264 		/* Validate and initialize Rcv Hdr Q variables */
265 		if (rcvhdrcnt % HDRQ_INCREMENT) {
266 			dd_dev_err(dd,
267 				   "ctxt%u: header queue count %d must be divisible by %lu\n",
268 				   rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT);
269 			goto bail;
270 		}
271 		rcd->rcvhdrq_cnt = rcvhdrcnt;
272 		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
273 		/*
274 		 * Simple Eager buffer allocation: we have already pre-allocated
275 		 * the number of RcvArray entry groups. Each ctxtdata structure
276 		 * holds the number of groups for that context.
277 		 *
278 		 * To follow CSR requirements and maintain cacheline alignment,
279 		 * make sure all sizes and bases are multiples of group_size.
280 		 *
281 		 * The expected entry count is what is left after assigning
282 		 * eager.
283 		 */
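		/*
		 * Illustrative split: 9 groups of 8 entries gives 72 RcvArray
		 * entries; with the default rcvarr_split of 25%, 72 * 25 / 100
		 * = 18, rounded down to a group multiple gives 16 eager
		 * entries, leaving the remainder for expected (TID) use.
		 */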
284 		max_entries = rcd->rcv_array_groups *
285 			dd->rcv_entries.group_size;
286 		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
287 		rcd->egrbufs.count = round_down(rcvtids,
288 						dd->rcv_entries.group_size);
289 		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
290 			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
291 				   rcd->ctxt);
292 			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
293 		}
294 		hfi1_cdbg(PROC,
295 			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
296 			  rcd->ctxt, rcd->egrbufs.count);
297 
298 		/*
299 		 * Allocate array that will hold the eager buffer accounting
300 		 * data.
301 		 * This will allocate the maximum possible buffer count based
302 		 * on the value of the RcvArray split parameter.
303 		 * The resulting value will be rounded down to the closest
304 		 * multiple of dd->rcv_entries.group_size.
305 		 */
306 		rcd->egrbufs.buffers = kcalloc(rcd->egrbufs.count,
307 					       sizeof(*rcd->egrbufs.buffers),
308 					       GFP_KERNEL);
309 		if (!rcd->egrbufs.buffers)
310 			goto bail;
311 		rcd->egrbufs.rcvtids = kcalloc(rcd->egrbufs.count,
312 					       sizeof(*rcd->egrbufs.rcvtids),
313 					       GFP_KERNEL);
314 		if (!rcd->egrbufs.rcvtids)
315 			goto bail;
316 		rcd->egrbufs.size = eager_buffer_size;
317 		/*
318 		 * The size of the buffers programmed into the RcvArray
319 		 * entries needs to be big enough to handle the highest
320 		 * MTU supported.
321 		 */
322 		if (rcd->egrbufs.size < hfi1_max_mtu) {
323 			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
324 			hfi1_cdbg(PROC,
325 				  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
326 				    rcd->ctxt, rcd->egrbufs.size);
327 		}
328 		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
329 
330 		if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
331 			rcd->opstats = kzalloc(sizeof(*rcd->opstats),
332 				GFP_KERNEL);
333 			if (!rcd->opstats)
334 				goto bail;
335 		}
336 	}
337 	return rcd;
338 bail:
339 	dd->rcd[ctxt] = NULL;
340 	kfree(rcd->egrbufs.rcvtids);
341 	kfree(rcd->egrbufs.buffers);
342 	kfree(rcd);
343 	return NULL;
344 }
345 
346 /*
347  * Convert a receive header entry size to the encoding used in the CSR.
348  *
349  * Return zero if the given size is invalid.
350  */
351 static inline u64 encode_rcv_header_entry_size(u16 size)
352 {
353 	/* there are only 3 valid receive header entry sizes */
354 	if (size == 2)
355 		return 1;
356 	if (size == 16)
357 		return 2;
358 	if (size == 32)
359 		return 4;
360 	return 0; /* invalid */
361 }
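
/*
 * For example, the default hdrq_entsize of 32 dwords (128 bytes) encodes to
 * 4; hfi1_create_rcvhdrq() writes that encoding into RCV_HDR_ENT_SIZE, and
 * init_one() uses a zero return to reject an invalid module parameter.
 */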
362 
363 /*
364  * Select the largest ccti value over all SLs to determine the intra-
365  * packet gap for the link.
366  *
367  * called with cca_timer_lock held (to protect access to cca_timer
368  * array), and rcu_read_lock() (to protect access to cc_state).
369  */
370 void set_link_ipg(struct hfi1_pportdata *ppd)
371 {
372 	struct hfi1_devdata *dd = ppd->dd;
373 	struct cc_state *cc_state;
374 	int i;
375 	u16 cce, ccti_limit, max_ccti = 0;
376 	u16 shift, mult;
377 	u64 src;
378 	u32 current_egress_rate; /* Mbits /sec */
379 	u32 max_pkt_time;
380 	/*
381 	 * max_pkt_time is the maximum packet egress time in units
382 	 * of the fabric clock period 1/(805 MHz).
383 	 */
384 
385 	cc_state = get_cc_state(ppd);
386 
387 	if (!cc_state)
388 		/*
389 		 * This should _never_ happen - rcu_read_lock() is held,
390 		 * and set_link_ipg() should not be called if cc_state
391 		 * is NULL.
392 		 */
393 		return;
394 
395 	for (i = 0; i < OPA_MAX_SLS; i++) {
396 		u16 ccti = ppd->cca_timer[i].ccti;
397 
398 		if (ccti > max_ccti)
399 			max_ccti = ccti;
400 	}
401 
402 	ccti_limit = cc_state->cct.ccti_limit;
403 	if (max_ccti > ccti_limit)
404 		max_ccti = ccti_limit;
405 
406 	cce = cc_state->cct.entries[max_ccti].entry;
407 	shift = (cce & 0xc000) >> 14;
408 	mult = (cce & 0x3fff);
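	/*
	 * Each CCT entry packs a 2-bit shift and a 14-bit multiplier, e.g.
	 * (illustrative) cce = 0x8005 gives shift = 2, mult = 5, so a
	 * 1000-cycle packet time yields src = (1000 >> 2) * 5 = 1250.
	 */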
409 
410 	current_egress_rate = active_egress_rate(ppd);
411 
412 	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);
413 
414 	src = (max_pkt_time >> shift) * mult;
415 
416 	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
417 	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;
418 
419 	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
420 }
421 
422 static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
423 {
424 	struct cca_timer *cca_timer;
425 	struct hfi1_pportdata *ppd;
426 	int sl;
427 	u16 ccti_timer, ccti_min;
428 	struct cc_state *cc_state;
429 	unsigned long flags;
430 	enum hrtimer_restart ret = HRTIMER_NORESTART;
431 
432 	cca_timer = container_of(t, struct cca_timer, hrtimer);
433 	ppd = cca_timer->ppd;
434 	sl = cca_timer->sl;
435 
436 	rcu_read_lock();
437 
438 	cc_state = get_cc_state(ppd);
439 
440 	if (!cc_state) {
441 		rcu_read_unlock();
442 		return HRTIMER_NORESTART;
443 	}
444 
445 	/*
446 	 * 1) decrement ccti for SL
447 	 * 2) calculate IPG for link (set_link_ipg())
448 	 * 3) restart timer, unless ccti is at min value
449 	 */
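	/*
	 * (The ccti values are raised elsewhere in the driver when congestion
	 * is signalled for an SL; this timer only implements the decay back
	 * toward ccti_min.)
	 */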
450 
451 	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
452 	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
453 
454 	spin_lock_irqsave(&ppd->cca_timer_lock, flags);
455 
456 	if (cca_timer->ccti > ccti_min) {
457 		cca_timer->ccti--;
458 		set_link_ipg(ppd);
459 	}
460 
461 	if (cca_timer->ccti > ccti_min) {
462 		unsigned long nsec = 1024 * ccti_timer;
463 		/* ccti_timer is in units of 1.024 usec */
464 		hrtimer_forward_now(t, ns_to_ktime(nsec));
465 		ret = HRTIMER_RESTART;
466 	}
467 
468 	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
469 	rcu_read_unlock();
470 	return ret;
471 }
472 
473 /*
474  * Common code for initializing the physical port structure.
475  */
476 void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
477 			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
478 {
479 	int i;
480 	uint default_pkey_idx;
481 	struct cc_state *cc_state;
482 
483 	ppd->dd = dd;
484 	ppd->hw_pidx = hw_pidx;
485 	ppd->port = port; /* IB port number, not index */
486 
487 	default_pkey_idx = 1;
488 
489 	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
490 	if (loopback) {
491 		hfi1_early_err(&pdev->dev,
492 			       "Faking data partition 0x8001 in idx %u\n",
493 			       !default_pkey_idx);
494 		ppd->pkeys[!default_pkey_idx] = 0x8001;
495 	}
496 
497 	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
498 	INIT_WORK(&ppd->link_up_work, handle_link_up);
499 	INIT_WORK(&ppd->link_down_work, handle_link_down);
500 	INIT_WORK(&ppd->freeze_work, handle_freeze);
501 	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
502 	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
503 	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
504 	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
505 	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
506 	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
507 
508 	mutex_init(&ppd->hls_lock);
509 	spin_lock_init(&ppd->sdma_alllock);
510 	spin_lock_init(&ppd->qsfp_info.qsfp_lock);
511 
512 	ppd->qsfp_info.ppd = ppd;
513 	ppd->sm_trap_qp = 0x0;
514 	ppd->sa_qp = 0x1;
515 
516 	ppd->hfi1_wq = NULL;
517 
518 	spin_lock_init(&ppd->cca_timer_lock);
519 
520 	for (i = 0; i < OPA_MAX_SLS; i++) {
521 		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
522 			     HRTIMER_MODE_REL);
523 		ppd->cca_timer[i].ppd = ppd;
524 		ppd->cca_timer[i].sl = i;
525 		ppd->cca_timer[i].ccti = 0;
526 		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
527 	}
528 
529 	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;
530 
531 	spin_lock_init(&ppd->cc_state_lock);
532 	spin_lock_init(&ppd->cc_log_lock);
533 	cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
534 	RCU_INIT_POINTER(ppd->cc_state, cc_state);
535 	if (!cc_state)
536 		goto bail;
537 	return;
538 
539 bail:
540 
541 	hfi1_early_err(&pdev->dev,
542 		       "Congestion Control Agent disabled for port %d\n", port);
543 }
544 
545 /*
546  * Do initialization for device that is only needed on
547  * first detect, not on resets.
548  */
549 static int loadtime_init(struct hfi1_devdata *dd)
550 {
551 	return 0;
552 }
553 
554 /**
555  * init_after_reset - re-initialize after a reset
556  * @dd: the hfi1_ib device
557  *
558  * sanity check at least some of the values after reset, and
559  * ensure no receive or transmit (explicitly, in case reset
560  * failed
561  * failed).
562 static int init_after_reset(struct hfi1_devdata *dd)
563 {
564 	int i;
565 
566 	/*
567 	 * Ensure chip does no sends or receives, tail updates, or
568 	 * pioavail updates while we re-initialize.  This is mostly
569 	 * for the driver data structures, not chip registers.
570 	 */
571 	for (i = 0; i < dd->num_rcv_contexts; i++)
572 		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
573 				  HFI1_RCVCTRL_INTRAVAIL_DIS |
574 				  HFI1_RCVCTRL_TAILUPD_DIS, i);
575 	pio_send_control(dd, PSC_GLOBAL_DISABLE);
576 	for (i = 0; i < dd->num_send_contexts; i++)
577 		sc_disable(dd->send_contexts[i].sc);
578 
579 	return 0;
580 }
581 
582 static void enable_chip(struct hfi1_devdata *dd)
583 {
584 	u32 rcvmask;
585 	u32 i;
586 
587 	/* enable PIO send */
588 	pio_send_control(dd, PSC_GLOBAL_ENABLE);
589 
590 	/*
591 	 * Enable kernel ctxts' receive and receive interrupt.
592 	 * Other ctxts done as user opens and initializes them.
593 	 */
594 	for (i = 0; i < dd->first_user_ctxt; ++i) {
595 		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
596 		rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
597 			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
598 		if (!HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, MULTI_PKT_EGR))
599 			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
600 		if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_RHQ_FULL))
601 			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
602 		if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_EGR_FULL))
603 			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
604 		hfi1_rcvctrl(dd, rcvmask, i);
605 		sc_enable(dd->rcd[i]->sc);
606 	}
607 }
608 
609 /**
610  * create_workqueues - create per port workqueues
611  * @dd: the hfi1_ib device
612  */
613 static int create_workqueues(struct hfi1_devdata *dd)
614 {
615 	int pidx;
616 	struct hfi1_pportdata *ppd;
617 
618 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
619 		ppd = dd->pport + pidx;
620 		if (!ppd->hfi1_wq) {
621 			ppd->hfi1_wq =
622 				alloc_workqueue(
623 				    "hfi%d_%d",
624 				    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
625 				    dd->num_sdma,
626 				    dd->unit, pidx);
627 			if (!ppd->hfi1_wq)
628 				goto wq_error;
629 		}
630 	}
631 	return 0;
632 wq_error:
633 	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
634 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
635 		ppd = dd->pport + pidx;
636 		if (ppd->hfi1_wq) {
637 			destroy_workqueue(ppd->hfi1_wq);
638 			ppd->hfi1_wq = NULL;
639 		}
640 	}
641 	return -ENOMEM;
642 }
643 
644 /**
645  * hfi1_init - do the actual initialization sequence on the chip
646  * @dd: the hfi1_ib device
647  * @reinit: re-initializing, so don't allocate new memory
648  *
649  * Do the actual initialization sequence on the chip.  This is done
650  * both from the init routine called from the PCI infrastructure, and
651  * when we reset the chip, or detect that it was reset internally,
652  * or it's administratively re-enabled.
653  *
654  * Memory allocation here and in called routines is only done in
655  * the first case (reinit == 0).  We have to be careful, because even
656  * without memory allocation, we need to re-write all the chip registers,
657  * TIDs, etc. after the reset or enable has completed.
658  */
659 int hfi1_init(struct hfi1_devdata *dd, int reinit)
660 {
661 	int ret = 0, pidx, lastfail = 0;
662 	unsigned i, len;
663 	struct hfi1_ctxtdata *rcd;
664 	struct hfi1_pportdata *ppd;
665 
666 	/* Set up recv low level handlers */
667 	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
668 						kdeth_process_expected;
669 	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
670 						kdeth_process_eager;
671 	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
672 	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
673 						process_receive_error;
674 	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
675 						process_receive_bypass;
676 	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
677 						process_receive_invalid;
678 	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
679 						process_receive_invalid;
680 	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
681 						process_receive_invalid;
682 	dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;
683 
684 	/* Set up send low level handlers */
685 	dd->process_pio_send = hfi1_verbs_send_pio;
686 	dd->process_dma_send = hfi1_verbs_send_dma;
687 	dd->pio_inline_send = pio_copy;
688 
689 	if (is_ax(dd)) {
690 		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
691 		dd->do_drop = 1;
692 	} else {
693 		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
694 		dd->do_drop = 0;
695 	}
696 
697 	/* make sure the link is not "up" */
698 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
699 		ppd = dd->pport + pidx;
700 		ppd->linkup = 0;
701 	}
702 
703 	if (reinit)
704 		ret = init_after_reset(dd);
705 	else
706 		ret = loadtime_init(dd);
707 	if (ret)
708 		goto done;
709 
710 	/* allocate dummy tail memory for all receive contexts */
711 	dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
712 		&dd->pcidev->dev, sizeof(u64),
713 		&dd->rcvhdrtail_dummy_dma,
714 		GFP_KERNEL);
715 
716 	if (!dd->rcvhdrtail_dummy_kvaddr) {
717 		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
718 		ret = -ENOMEM;
719 		goto done;
720 	}
721 
722 	/* dd->rcd can be NULL if early initialization failed */
723 	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
724 		/*
725 		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
726 		 * re-init, the simplest way to handle this is to free
727 		 * existing, and re-allocate.
728 		 * Need to re-create rest of ctxt 0 ctxtdata as well.
729 		 */
730 		rcd = dd->rcd[i];
731 		if (!rcd)
732 			continue;
733 
734 		rcd->do_interrupt = &handle_receive_interrupt;
735 
736 		lastfail = hfi1_create_rcvhdrq(dd, rcd);
737 		if (!lastfail)
738 			lastfail = hfi1_setup_eagerbufs(rcd);
739 		if (lastfail) {
740 			dd_dev_err(dd,
741 				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
742 			ret = lastfail;
743 		}
744 	}
745 
746 	/* Allocate enough memory for user event notification. */
747 	len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
748 			 sizeof(*dd->events));
749 	dd->events = vmalloc_user(len);
750 	if (!dd->events)
751 		dd_dev_err(dd, "Failed to allocate user events page\n");
752 	/*
753 	 * Allocate a page for device and port status.
754 	 * Page will be shared amongst all user processes.
755 	 */
756 	dd->status = vmalloc_user(PAGE_SIZE);
757 	if (!dd->status)
758 		dd_dev_err(dd, "Failed to allocate dev status page\n");
759 	else
760 		dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
761 					     sizeof(dd->status->freezemsg));
762 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
763 		ppd = dd->pport + pidx;
764 		if (dd->status)
765 			/* Currently, we only have one port */
766 			ppd->statusp = &dd->status->port;
767 
768 		set_mtu(ppd);
769 	}
770 
771 	/* enable chip even if we have an error, so we can debug cause */
772 	enable_chip(dd);
773 
774 done:
775 	/*
776 	 * Set status even if port serdes is not initialized
777 	 * so that diags will work.
778 	 */
779 	if (dd->status)
780 		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
781 			HFI1_STATUS_INITTED;
782 	if (!ret) {
783 		/* enable all interrupts from the chip */
784 		set_intr_state(dd, 1);
785 
786 		/* chip is OK for user apps; mark it as initialized */
787 		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
788 			ppd = dd->pport + pidx;
789 
790 			/*
791 			 * start the serdes - must be after interrupts are
792 			 * enabled so we are notified when the link goes up
793 			 */
794 			lastfail = bringup_serdes(ppd);
795 			if (lastfail)
796 				dd_dev_info(dd,
797 					    "Failed to bring up port %u\n",
798 					    ppd->port);
799 
800 			/*
801 			 * Set status even if port serdes is not initialized
802 			 * so that diags will work.
803 			 */
804 			if (ppd->statusp)
805 				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
806 							HFI1_STATUS_INITTED;
807 			if (!ppd->link_speed_enabled)
808 				continue;
809 		}
810 	}
811 
812 	/* if ret is non-zero, we probably should do some cleanup here... */
813 	return ret;
814 }
815 
816 static inline struct hfi1_devdata *__hfi1_lookup(int unit)
817 {
818 	return idr_find(&hfi1_unit_table, unit);
819 }
820 
821 struct hfi1_devdata *hfi1_lookup(int unit)
822 {
823 	struct hfi1_devdata *dd;
824 	unsigned long flags;
825 
826 	spin_lock_irqsave(&hfi1_devs_lock, flags);
827 	dd = __hfi1_lookup(unit);
828 	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
829 
830 	return dd;
831 }
832 
833 /*
834  * Stop the timers during unit shutdown, or after an error late
835  * in initialization.
836  */
837 static void stop_timers(struct hfi1_devdata *dd)
838 {
839 	struct hfi1_pportdata *ppd;
840 	int pidx;
841 
842 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
843 		ppd = dd->pport + pidx;
844 		if (ppd->led_override_timer.data) {
845 			del_timer_sync(&ppd->led_override_timer);
846 			atomic_set(&ppd->led_override_timer_active, 0);
847 		}
848 	}
849 }
850 
851 /**
852  * shutdown_device - shut down a device
853  * @dd: the hfi1_ib device
854  *
855  * This is called to make the device quiet when we are about to
856  * unload the driver, and also when the device is administratively
857  * disabled.  It does not free any data structures.
858  * Everything it does has to be set up again by hfi1_init(dd, 1).
859  */
860 static void shutdown_device(struct hfi1_devdata *dd)
861 {
862 	struct hfi1_pportdata *ppd;
863 	unsigned pidx;
864 	int i;
865 
866 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
867 		ppd = dd->pport + pidx;
868 
869 		ppd->linkup = 0;
870 		if (ppd->statusp)
871 			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
872 					   HFI1_STATUS_IB_READY);
873 	}
874 	dd->flags &= ~HFI1_INITTED;
875 
876 	/* mask interrupts, but not errors */
877 	set_intr_state(dd, 0);
878 
879 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
880 		ppd = dd->pport + pidx;
881 		for (i = 0; i < dd->num_rcv_contexts; i++)
882 			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
883 					  HFI1_RCVCTRL_CTXT_DIS |
884 					  HFI1_RCVCTRL_INTRAVAIL_DIS |
885 					  HFI1_RCVCTRL_PKEY_DIS |
886 					  HFI1_RCVCTRL_ONE_PKT_EGR_DIS, i);
887 		/*
888 		 * Gracefully stop all sends allowing any in progress to
889 		 * trickle out first.
890 		 */
891 		for (i = 0; i < dd->num_send_contexts; i++)
892 			sc_flush(dd->send_contexts[i].sc);
893 	}
894 
895 	/*
896 	 * Enough for anything that's going to trickle out to have actually
897 	 * done so.
898 	 */
899 	udelay(20);
900 
901 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
902 		ppd = dd->pport + pidx;
903 
904 		/* disable all contexts */
905 		for (i = 0; i < dd->num_send_contexts; i++)
906 			sc_disable(dd->send_contexts[i].sc);
907 		/* disable the send device */
908 		pio_send_control(dd, PSC_GLOBAL_DISABLE);
909 
910 		shutdown_led_override(ppd);
911 
912 		/*
913 		 * Clear SerdesEnable.
914 		 * We can't count on interrupts since we are stopping.
915 		 */
916 		hfi1_quiet_serdes(ppd);
917 
918 		if (ppd->hfi1_wq) {
919 			destroy_workqueue(ppd->hfi1_wq);
920 			ppd->hfi1_wq = NULL;
921 		}
922 	}
923 	sdma_exit(dd);
924 }
925 
926 /**
927  * hfi1_free_ctxtdata - free a context's allocated data
928  * @dd: the hfi1_ib device
929  * @rcd: the ctxtdata structure
930  *
931  * Free up any allocated data for a context.
932  * This should not touch anything that would affect a simultaneous
933  * re-allocation of context data, because it is called after hfi1_mutex
934  * is released (and can be called from reinit as well).
935  * It should never change any chip state, or global driver state.
936  */
937 void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
938 {
939 	unsigned e;
940 
941 	if (!rcd)
942 		return;
943 
944 	if (rcd->rcvhdrq) {
945 		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
946 				  rcd->rcvhdrq, rcd->rcvhdrq_dma);
947 		rcd->rcvhdrq = NULL;
948 		if (rcd->rcvhdrtail_kvaddr) {
949 			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
950 					  (void *)rcd->rcvhdrtail_kvaddr,
951 					  rcd->rcvhdrqtailaddr_dma);
952 			rcd->rcvhdrtail_kvaddr = NULL;
953 		}
954 	}
955 
956 	/* all the RcvArray entries should have been cleared by now */
957 	kfree(rcd->egrbufs.rcvtids);
958 
959 	for (e = 0; e < rcd->egrbufs.alloced; e++) {
960 		if (rcd->egrbufs.buffers[e].dma)
961 			dma_free_coherent(&dd->pcidev->dev,
962 					  rcd->egrbufs.buffers[e].len,
963 					  rcd->egrbufs.buffers[e].addr,
964 					  rcd->egrbufs.buffers[e].dma);
965 	}
966 	kfree(rcd->egrbufs.buffers);
967 
968 	sc_free(rcd->sc);
969 	vfree(rcd->user_event_mask);
970 	vfree(rcd->subctxt_uregbase);
971 	vfree(rcd->subctxt_rcvegrbuf);
972 	vfree(rcd->subctxt_rcvhdr_base);
973 	kfree(rcd->opstats);
974 	kfree(rcd);
975 }
976 
977 /*
978  * Release our hold on the shared asic data.  If we are the last one,
979  * return the structure to be finalized outside the lock.  Must be
980  * holding hfi1_devs_lock.
981  */
982 static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
983 {
984 	struct hfi1_asic_data *ad;
985 	int other;
986 
987 	if (!dd->asic_data)
988 		return NULL;
989 	dd->asic_data->dds[dd->hfi1_id] = NULL;
990 	other = dd->hfi1_id ? 0 : 1;
991 	ad = dd->asic_data;
992 	dd->asic_data = NULL;
993 	/* return NULL if the other dd still has a link */
994 	return ad->dds[other] ? NULL : ad;
995 }
996 
997 static void finalize_asic_data(struct hfi1_devdata *dd,
998 			       struct hfi1_asic_data *ad)
999 {
1000 	clean_up_i2c(dd, ad);
1001 	kfree(ad);
1002 }
1003 
1004 static void __hfi1_free_devdata(struct kobject *kobj)
1005 {
1006 	struct hfi1_devdata *dd =
1007 		container_of(kobj, struct hfi1_devdata, kobj);
1008 	struct hfi1_asic_data *ad;
1009 	unsigned long flags;
1010 
1011 	spin_lock_irqsave(&hfi1_devs_lock, flags);
1012 	idr_remove(&hfi1_unit_table, dd->unit);
1013 	list_del(&dd->list);
1014 	ad = release_asic_data(dd);
1015 	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
1016 	if (ad)
1017 		finalize_asic_data(dd, ad);
1018 	free_platform_config(dd);
1019 	rcu_barrier(); /* wait for rcu callbacks to complete */
1020 	free_percpu(dd->int_counter);
1021 	free_percpu(dd->rcv_limit);
1022 	free_percpu(dd->send_schedule);
1023 	rvt_dealloc_device(&dd->verbs_dev.rdi);
1024 }
1025 
1026 static struct kobj_type hfi1_devdata_type = {
1027 	.release = __hfi1_free_devdata,
1028 };
1029 
1030 void hfi1_free_devdata(struct hfi1_devdata *dd)
1031 {
1032 	kobject_put(&dd->kobj);
1033 }
1034 
1035 /*
1036  * Allocate our primary per-unit data structure.  Must be done via verbs
1037  * allocator, because the verbs cleanup process both does cleanup and
1038  * free of the data structure.
1039  * "extra" is for chip-specific data.
1040  *
1041  * Use the idr mechanism to get a unit number for this unit.
1042  */
1043 struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
1044 {
1045 	unsigned long flags;
1046 	struct hfi1_devdata *dd;
1047 	int ret, nports;
1048 
1049 	/* extra is sizeof(struct hfi1_pportdata) * number of ports */
1050 	nports = extra / sizeof(struct hfi1_pportdata);
1051 
1052 	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
1053 						     nports);
1054 	if (!dd)
1055 		return ERR_PTR(-ENOMEM);
1056 	dd->num_pports = nports;
1057 	dd->pport = (struct hfi1_pportdata *)(dd + 1);
1058 
1059 	INIT_LIST_HEAD(&dd->list);
1060 	idr_preload(GFP_KERNEL);
1061 	spin_lock_irqsave(&hfi1_devs_lock, flags);
1062 
1063 	ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
1064 	if (ret >= 0) {
1065 		dd->unit = ret;
1066 		list_add(&dd->list, &hfi1_dev_list);
1067 	}
1068 
1069 	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
1070 	idr_preload_end();
1071 
1072 	if (ret < 0) {
1073 		hfi1_early_err(&pdev->dev,
1074 			       "Could not allocate unit ID: error %d\n", -ret);
1075 		goto bail;
1076 	}
1077 	/*
1078 	 * Initialize all locks for the device. This needs to be as early as
1079 	 * possible so locks are usable.
1080 	 */
1081 	spin_lock_init(&dd->sc_lock);
1082 	spin_lock_init(&dd->sendctrl_lock);
1083 	spin_lock_init(&dd->rcvctrl_lock);
1084 	spin_lock_init(&dd->uctxt_lock);
1085 	spin_lock_init(&dd->hfi1_diag_trans_lock);
1086 	spin_lock_init(&dd->sc_init_lock);
1087 	spin_lock_init(&dd->dc8051_lock);
1088 	spin_lock_init(&dd->dc8051_memlock);
1089 	seqlock_init(&dd->sc2vl_lock);
1090 	spin_lock_init(&dd->sde_map_lock);
1091 	spin_lock_init(&dd->pio_map_lock);
1092 	init_waitqueue_head(&dd->event_queue);
1093 
1094 	dd->int_counter = alloc_percpu(u64);
1095 	if (!dd->int_counter) {
1096 		ret = -ENOMEM;
1097 		hfi1_early_err(&pdev->dev,
1098 			       "Could not allocate per-cpu int_counter\n");
1099 		goto bail;
1100 	}
1101 
1102 	dd->rcv_limit = alloc_percpu(u64);
1103 	if (!dd->rcv_limit) {
1104 		ret = -ENOMEM;
1105 		hfi1_early_err(&pdev->dev,
1106 			       "Could not allocate per-cpu rcv_limit\n");
1107 		goto bail;
1108 	}
1109 
1110 	dd->send_schedule = alloc_percpu(u64);
1111 	if (!dd->send_schedule) {
1112 		ret = -ENOMEM;
1113 		hfi1_early_err(&pdev->dev,
1114 			       "Could not allocate per-cpu send_schedule\n");
1115 		goto bail;
1116 	}
1117 
1118 	if (!hfi1_cpulist_count) {
1119 		u32 count = num_online_cpus();
1120 
1121 		hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
1122 				       GFP_KERNEL);
1123 		if (hfi1_cpulist)
1124 			hfi1_cpulist_count = count;
1125 		else
1126 			hfi1_early_err(
1127 			&pdev->dev,
1128 			"Could not alloc cpulist info, cpu affinity might be wrong\n");
1129 	}
1130 	kobject_init(&dd->kobj, &hfi1_devdata_type);
1131 	return dd;
1132 
1133 bail:
1134 	if (!list_empty(&dd->list))
1135 		list_del_init(&dd->list);
1136 	rvt_dealloc_device(&dd->verbs_dev.rdi);
1137 	return ERR_PTR(ret);
1138 }
1139 
1140 /*
1141  * Called from freeze mode handlers, and from PCI error
1142  * reporting code.  Should be paranoid about state of
1143  * system and data structures.
1144  */
1145 void hfi1_disable_after_error(struct hfi1_devdata *dd)
1146 {
1147 	if (dd->flags & HFI1_INITTED) {
1148 		u32 pidx;
1149 
1150 		dd->flags &= ~HFI1_INITTED;
1151 		if (dd->pport)
1152 			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1153 				struct hfi1_pportdata *ppd;
1154 
1155 				ppd = dd->pport + pidx;
1156 				if (dd->flags & HFI1_PRESENT)
1157 					set_link_state(ppd, HLS_DN_DISABLE);
1158 
1159 				if (ppd->statusp)
1160 					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
1161 			}
1162 	}
1163 
1164 	/*
1165 	 * Mark as having had an error for driver, and also
1166 	 * for /sys and status word mapped to user programs.
1167 	 * This marks unit as not usable, until reset.
1168 	 */
1169 	if (dd->status)
1170 		dd->status->dev |= HFI1_STATUS_HWERROR;
1171 }
1172 
1173 static void remove_one(struct pci_dev *);
1174 static int init_one(struct pci_dev *, const struct pci_device_id *);
1175 
1176 #define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
1177 #define PFX DRIVER_NAME ": "
1178 
1179 const struct pci_device_id hfi1_pci_tbl[] = {
1180 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
1181 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
1182 	{ 0, }
1183 };
1184 
1185 MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);
1186 
1187 static struct pci_driver hfi1_pci_driver = {
1188 	.name = DRIVER_NAME,
1189 	.probe = init_one,
1190 	.remove = remove_one,
1191 	.id_table = hfi1_pci_tbl,
1192 	.err_handler = &hfi1_pci_err_handler,
1193 };
1194 
1195 static void __init compute_krcvqs(void)
1196 {
1197 	int i;
1198 
1199 	for (i = 0; i < krcvqsset; i++)
1200 		n_krcvqs += krcvqs[i];
1201 }
1202 
1203 /*
1204  * Do all the generic driver unit- and chip-independent memory
1205  * allocation and initialization.
1206  */
1207 static int __init hfi1_mod_init(void)
1208 {
1209 	int ret;
1210 
1211 	ret = dev_init();
1212 	if (ret)
1213 		goto bail;
1214 
1215 	ret = node_affinity_init();
1216 	if (ret)
1217 		goto bail;
1218 
1219 	/* validate max MTU before any devices start */
1220 	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
1221 		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
1222 		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
1223 		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
1224 	}
1225 	/* valid CUs run from 1-128 in powers of 2 */
1226 	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
1227 		hfi1_cu = 1;
1228 	/* valid credit return threshold is 0-100, variable is unsigned */
1229 	if (user_credit_return_threshold > 100)
1230 		user_credit_return_threshold = 100;
1231 
1232 	compute_krcvqs();
1233 	/*
1234 	 * sanitize the receive interrupt count now; the timeout must wait
1235 	 * until after the hardware type is known
1236 	 */
1237 	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
1238 		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
1239 	/* reject invalid combinations */
1240 	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
1241 		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
1242 		rcv_intr_count = 1;
1243 	}
1244 	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
1245 		/*
1246 		 * Avoid indefinite packet delivery by requiring a timeout
1247 		 * if count is > 1.
1248 		 */
1249 		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
1250 		rcv_intr_timeout = 1;
1251 	}
1252 	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
1253 		/*
1254 		 * The dynamic algorithm expects a non-zero timeout
1255 		 * and a count > 1.
1256 		 */
1257 		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
1258 		rcv_intr_dynamic = 0;
1259 	}
1260 
1261 	/* sanitize link CRC options */
1262 	link_crc_mask &= SUPPORTED_CRCS;
1263 
1264 	/*
1265 	 * These must be called before the driver is registered with
1266 	 * the PCI subsystem.
1267 	 */
1268 	idr_init(&hfi1_unit_table);
1269 
1270 	hfi1_dbg_init();
1271 	ret = hfi1_wss_init();
1272 	if (ret < 0)
1273 		goto bail_wss;
1274 	ret = pci_register_driver(&hfi1_pci_driver);
1275 	if (ret < 0) {
1276 		pr_err("Unable to register driver: error %d\n", -ret);
1277 		goto bail_dev;
1278 	}
1279 	goto bail; /* all OK */
1280 
1281 bail_dev:
1282 	hfi1_wss_exit();
1283 bail_wss:
1284 	hfi1_dbg_exit();
1285 	idr_destroy(&hfi1_unit_table);
1286 	dev_cleanup();
1287 bail:
1288 	return ret;
1289 }
1290 
1291 module_init(hfi1_mod_init);
1292 
1293 /*
1294  * Do the non-unit driver cleanup, memory free, etc. at unload.
1295  */
1296 static void __exit hfi1_mod_cleanup(void)
1297 {
1298 	pci_unregister_driver(&hfi1_pci_driver);
1299 	node_affinity_destroy();
1300 	hfi1_wss_exit();
1301 	hfi1_dbg_exit();
1302 	hfi1_cpulist_count = 0;
1303 	kfree(hfi1_cpulist);
1304 
1305 	idr_destroy(&hfi1_unit_table);
1306 	dispose_firmware();	/* asymmetric with obtain_firmware() */
1307 	dev_cleanup();
1308 }
1309 
1310 module_exit(hfi1_mod_cleanup);
1311 
1312 /* this can only be called after a successful initialization */
1313 static void cleanup_device_data(struct hfi1_devdata *dd)
1314 {
1315 	int ctxt;
1316 	int pidx;
1317 	struct hfi1_ctxtdata **tmp;
1318 	unsigned long flags;
1319 
1320 	/* users can't do anything more with chip */
1321 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1322 		struct hfi1_pportdata *ppd = &dd->pport[pidx];
1323 		struct cc_state *cc_state;
1324 		int i;
1325 
1326 		if (ppd->statusp)
1327 			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;
1328 
1329 		for (i = 0; i < OPA_MAX_SLS; i++)
1330 			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);
1331 
1332 		spin_lock(&ppd->cc_state_lock);
1333 		cc_state = get_cc_state_protected(ppd);
1334 		RCU_INIT_POINTER(ppd->cc_state, NULL);
1335 		spin_unlock(&ppd->cc_state_lock);
1336 
1337 		if (cc_state)
1338 			kfree_rcu(cc_state, rcu);
1339 	}
1340 
1341 	free_credit_return(dd);
1342 
1343 	/*
1344 	 * Free any resources still in use (usually just kernel contexts)
1345 	 * at unload; we do for ctxtcnt, because that's what we allocate.
1346 	 * We acquire lock to be really paranoid that rcd isn't being
1347 	 * accessed from some interrupt-related code (that should not happen,
1348 	 * but best to be sure).
1349 	 */
1350 	spin_lock_irqsave(&dd->uctxt_lock, flags);
1351 	tmp = dd->rcd;
1352 	dd->rcd = NULL;
1353 	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1354 
1355 	if (dd->rcvhdrtail_dummy_kvaddr) {
1356 		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
1357 				  (void *)dd->rcvhdrtail_dummy_kvaddr,
1358 				  dd->rcvhdrtail_dummy_dma);
1359 		dd->rcvhdrtail_dummy_kvaddr = NULL;
1360 	}
1361 
1362 	for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
1363 		struct hfi1_ctxtdata *rcd = tmp[ctxt];
1364 
1365 		tmp[ctxt] = NULL; /* debugging paranoia */
1366 		if (rcd) {
1367 			hfi1_clear_tids(rcd);
1368 			hfi1_free_ctxtdata(dd, rcd);
1369 		}
1370 	}
1371 	kfree(tmp);
1372 	free_pio_map(dd);
1373 	/* must follow rcv context free - need to remove rcv's hooks */
1374 	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
1375 		sc_free(dd->send_contexts[ctxt].sc);
1376 	dd->num_send_contexts = 0;
1377 	kfree(dd->send_contexts);
1378 	dd->send_contexts = NULL;
1379 	kfree(dd->hw_to_sw);
1380 	dd->hw_to_sw = NULL;
1381 	kfree(dd->boardname);
1382 	vfree(dd->events);
1383 	vfree(dd->status);
1384 }
1385 
1386 /*
1387  * Clean up on unit shutdown, or error during unit load after
1388  * successful initialization.
1389  */
1390 static void postinit_cleanup(struct hfi1_devdata *dd)
1391 {
1392 	hfi1_start_cleanup(dd);
1393 
1394 	hfi1_pcie_ddcleanup(dd);
1395 	hfi1_pcie_cleanup(dd->pcidev);
1396 
1397 	cleanup_device_data(dd);
1398 
1399 	hfi1_free_devdata(dd);
1400 }
1401 
1402 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1403 {
1404 	int ret = 0, j, pidx, initfail;
1405 	struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
1406 	struct hfi1_pportdata *ppd;
1407 
1408 	/* First, lock the non-writable module parameters */
1409 	HFI1_CAP_LOCK();
1410 
1411 	/* Validate some global module parameters */
1412 	if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
1413 		hfi1_early_err(&pdev->dev, "Header queue count too small\n");
1414 		ret = -EINVAL;
1415 		goto bail;
1416 	}
1417 	if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
1418 		hfi1_early_err(&pdev->dev,
1419 			       "Receive header queue count cannot be greater than %u\n",
1420 			       HFI1_MAX_HDRQ_EGRBUF_CNT);
1421 		ret = -EINVAL;
1422 		goto bail;
1423 	}
1424 	/* use the encoding function as a sanitization check */
1425 	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
1426 		hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
1427 			       hfi1_hdrq_entsize);
1428 		ret = -EINVAL;
1429 		goto bail;
1430 	}
1431 
1432 	/* The receive eager buffer size must be set before the receive
1433 	 * contexts are created.
1434 	 *
1435 	 * Set the eager buffer size.  Validate that it falls in a range
1436 	 * allowed by the hardware - all powers of 2 between the min and
1437 	 * max.  The maximum valid MTU is within the eager buffer range
1438 	 * so we do not need to cap the max_mtu by an eager buffer size
1439 	 * setting.
1440 	 */
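	/*
	 * For example (illustrative): eager_buffer_size=3145728 (3MB) is not
	 * a power of 2, so it is rounded up to 4MB and then clamped to the
	 * MIN_EAGER_BUFFER * 8 .. MAX_EAGER_BUFFER_TOTAL range before the
	 * receive contexts consume it in hfi1_create_ctxtdata().
	 */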
1441 	if (eager_buffer_size) {
1442 		if (!is_power_of_2(eager_buffer_size))
1443 			eager_buffer_size =
1444 				roundup_pow_of_two(eager_buffer_size);
1445 		eager_buffer_size =
1446 			clamp_val(eager_buffer_size,
1447 				  MIN_EAGER_BUFFER * 8,
1448 				  MAX_EAGER_BUFFER_TOTAL);
1449 		hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
1450 				eager_buffer_size);
1451 	} else {
1452 		hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
1453 		ret = -EINVAL;
1454 		goto bail;
1455 	}
1456 
1457 	/* restrict value of hfi1_rcvarr_split */
1458 	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);
1459 
1460 	ret = hfi1_pcie_init(pdev, ent);
1461 	if (ret)
1462 		goto bail;
1463 
1464 	/*
1465 	 * Do device-specific initialization, function table setup, dd
1466 	 * allocation, etc.
1467 	 */
1468 	switch (ent->device) {
1469 	case PCI_DEVICE_ID_INTEL0:
1470 	case PCI_DEVICE_ID_INTEL1:
1471 		dd = hfi1_init_dd(pdev, ent);
1472 		break;
1473 	default:
1474 		hfi1_early_err(&pdev->dev,
1475 			       "Failing on unknown Intel deviceid 0x%x\n",
1476 			       ent->device);
1477 		ret = -ENODEV;
1478 	}
1479 
1480 	if (IS_ERR(dd))
1481 		ret = PTR_ERR(dd);
1482 	if (ret)
1483 		goto clean_bail; /* error already printed */
1484 
1485 	ret = create_workqueues(dd);
1486 	if (ret)
1487 		goto clean_bail;
1488 
1489 	/* do the generic initialization */
1490 	initfail = hfi1_init(dd, 0);
1491 
1492 	ret = hfi1_register_ib_device(dd);
1493 
1494 	/*
1495 	 * Now ready for use.  This should be cleared whenever we
1496 	 * detect a reset, or initiate one.  If earlier failure,
1497 	 * we still create devices, so diags, etc. can be used
1498 	 * to determine cause of problem.
1499 	 */
1500 	if (!initfail && !ret) {
1501 		dd->flags |= HFI1_INITTED;
1502 		/* create debugfs files after init and ib register */
1503 		hfi1_dbg_ibdev_init(&dd->verbs_dev);
1504 	}
1505 
1506 	j = hfi1_device_create(dd);
1507 	if (j)
1508 		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
1509 
1510 	if (initfail || ret) {
1511 		stop_timers(dd);
1512 		flush_workqueue(ib_wq);
1513 		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1514 			hfi1_quiet_serdes(dd->pport + pidx);
1515 			ppd = dd->pport + pidx;
1516 			if (ppd->hfi1_wq) {
1517 				destroy_workqueue(ppd->hfi1_wq);
1518 				ppd->hfi1_wq = NULL;
1519 			}
1520 		}
1521 		if (!j)
1522 			hfi1_device_remove(dd);
1523 		if (!ret)
1524 			hfi1_unregister_ib_device(dd);
1525 		postinit_cleanup(dd);
1526 		if (initfail)
1527 			ret = initfail;
1528 		goto bail;	/* everything already cleaned */
1529 	}
1530 
1531 	sdma_start(dd);
1532 
1533 	return 0;
1534 
1535 clean_bail:
1536 	hfi1_pcie_cleanup(pdev);
1537 bail:
1538 	return ret;
1539 }
1540 
1541 static void remove_one(struct pci_dev *pdev)
1542 {
1543 	struct hfi1_devdata *dd = pci_get_drvdata(pdev);
1544 
1545 	/* close debugfs files before ib unregister */
1546 	hfi1_dbg_ibdev_exit(&dd->verbs_dev);
1547 	/* unregister from IB core */
1548 	hfi1_unregister_ib_device(dd);
1549 
1550 	/*
1551 	 * Disable the IB link, disable interrupts on the device,
1552 	 * clear dma engines, etc.
1553 	 */
1554 	shutdown_device(dd);
1555 
1556 	stop_timers(dd);
1557 
1558 	/* wait until all of our (qsfp) queue_work() calls complete */
1559 	flush_workqueue(ib_wq);
1560 
1561 	hfi1_device_remove(dd);
1562 
1563 	postinit_cleanup(dd);
1564 }
1565 
1566 /**
1567  * hfi1_create_rcvhdrq - create a receive header queue
1568  * @dd: the hfi1_ib device
1569  * @rcd: the context data
1570  *
1571  * This must be contiguous memory (from an i/o perspective), and must be
1572  * DMA'able (which means for some systems, it will go through an IOMMU,
1573  * or be forced into a low address range).
1574  */
1575 int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1576 {
1577 	unsigned amt;
1578 	u64 reg;
1579 
1580 	if (!rcd->rcvhdrq) {
1581 		dma_addr_t dma_hdrqtail;
1582 		gfp_t gfp_flags;
1583 
1584 		/*
1585 		 * rcvhdrqentsize is in DWs, so we have to convert to bytes
1586 		 * (* sizeof(u32)).
1587 		 */
1588 		amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
1589 				 sizeof(u32));
1590 
1591 		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
1592 			GFP_USER : GFP_KERNEL;
1593 		rcd->rcvhdrq = dma_zalloc_coherent(
1594 			&dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
1595 			gfp_flags | __GFP_COMP);
1596 
1597 		if (!rcd->rcvhdrq) {
1598 			dd_dev_err(dd,
1599 				   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
1600 				   amt, rcd->ctxt);
1601 			goto bail;
1602 		}
1603 
1604 		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
1605 			rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
1606 				&dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
1607 				gfp_flags);
1608 			if (!rcd->rcvhdrtail_kvaddr)
1609 				goto bail_free;
1610 			rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
1611 		}
1612 
1613 		rcd->rcvhdrq_size = amt;
1614 	}
1615 	/*
1616 	 * These values are per-context:
1617 	 *	RcvHdrCnt
1618 	 *	RcvHdrEntSize
1619 	 *	RcvHdrSize
1620 	 */
1621 	reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
1622 			& RCV_HDR_CNT_CNT_MASK)
1623 		<< RCV_HDR_CNT_CNT_SHIFT;
1624 	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
1625 	reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
1626 			& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
1627 		<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
1628 	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
1629 	reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
1630 		<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
1631 	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);
1632 
1633 	/*
1634 	 * Program dummy tail address for every receive context
1635 	 * before enabling any receive context
1636 	 */
1637 	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
1638 			dd->rcvhdrtail_dummy_dma);
1639 
1640 	return 0;
1641 
1642 bail_free:
1643 	dd_dev_err(dd,
1644 		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
1645 		   rcd->ctxt);
1646 	vfree(rcd->user_event_mask);
1647 	rcd->user_event_mask = NULL;
1648 	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
1649 			  rcd->rcvhdrq_dma);
1650 	rcd->rcvhdrq = NULL;
1651 bail:
1652 	return -ENOMEM;
1653 }
1654 
1655 /**
1656  * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
1657  * @rcd: the context we are setting up.
1658  *
1659  * Allocate the eager TID buffers and program them into the chip.
1660  * They are no longer completely contiguous; we do multiple allocation
1661  * calls.  Otherwise we would get the OOM code involved by asking for too
1662  * much per call, with disastrous results on some kernels.
1663  */
1664 int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
1665 {
1666 	struct hfi1_devdata *dd = rcd->dd;
1667 	u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
1668 	gfp_t gfp_flags;
1669 	u16 order;
1670 	int ret = 0;
1671 	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);
1672 
1673 	/*
1674 	 * GFP_USER, but without GFP_FS, so buffer cache can be
1675 	 * coalesced (we hope); otherwise, even at order 4,
1676 	 * heavy filesystem activity makes these fail, and we can
1677 	 * use compound pages.
1678 	 */
1679 	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
1680 
1681 	/*
1682 	 * The minimum size of the eager buffers is a groups of MTU-sized
1683 	 * The minimum size of the eager buffers is a group of MTU-sized
1684 	 * The global eager_buffer_size parameter is checked against the
1685 	 * theoretical lower limit of the value. Here, we check against the
1686 	 * MTU.
1687 	 */
1688 	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
1689 		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
1690 	/*
1691 	 * If using one-pkt-per-egr-buffer, lower the eager buffer
1692 	 * size to the max MTU (page-aligned).
1693 	 */
1694 	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
1695 		rcd->egrbufs.rcvtid_size = round_mtu;
1696 
1697 	/*
1698 	 * Eager buffers sizes of 1MB or less require smaller TID sizes
1699 	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
1700 	 */
1701 	if (rcd->egrbufs.size <= (1 << 20))
1702 		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
1703 			rounddown_pow_of_two(rcd->egrbufs.size / 8));
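	/*
	 * Illustrative sizing: a 512KB eager buffer with a 16KB round_mtu
	 * gives rcvtid_size = max(16KB, rounddown_pow_of_two(512KB / 8)) =
	 * 64KB, i.e. eight equally sized RcvArray entries for this context.
	 */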
1704 
1705 	while (alloced_bytes < rcd->egrbufs.size &&
1706 	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
1707 		rcd->egrbufs.buffers[idx].addr =
1708 			dma_zalloc_coherent(&dd->pcidev->dev,
1709 					    rcd->egrbufs.rcvtid_size,
1710 					    &rcd->egrbufs.buffers[idx].dma,
1711 					    gfp_flags);
1712 		if (rcd->egrbufs.buffers[idx].addr) {
1713 			rcd->egrbufs.buffers[idx].len =
1714 				rcd->egrbufs.rcvtid_size;
1715 			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
1716 				rcd->egrbufs.buffers[idx].addr;
1717 			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
1718 				rcd->egrbufs.buffers[idx].dma;
1719 			rcd->egrbufs.alloced++;
1720 			alloced_bytes += rcd->egrbufs.rcvtid_size;
1721 			idx++;
1722 		} else {
1723 			u32 new_size, i, j;
1724 			u64 offset = 0;
1725 
1726 			/*
1727 			 * Fail the eager buffer allocation if:
1728 			 *   - we are already using the lowest acceptable size
1729 			 *   - we are using one-pkt-per-egr-buffer (this implies
1730 			 *     that we are accepting only one size)
1731 			 */
1732 			if (rcd->egrbufs.rcvtid_size == round_mtu ||
1733 			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
1734 				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
1735 					   rcd->ctxt);
1736 				goto bail_rcvegrbuf_phys;
1737 			}
1738 
1739 			new_size = rcd->egrbufs.rcvtid_size / 2;
1740 
1741 			/*
1742 			 * If the first attempt to allocate memory failed, don't
1743 			 * fail everything but continue with the next lower
1744 			 * size.
1745 			 */
1746 			if (idx == 0) {
1747 				rcd->egrbufs.rcvtid_size = new_size;
1748 				continue;
1749 			}
1750 
1751 			/*
1752 			 * Re-partition already allocated buffers to a smaller
1753 			 * size.
1754 			 */
1755 			rcd->egrbufs.alloced = 0;
1756 			for (i = 0, j = 0, offset = 0; j < idx; i++) {
1757 				if (i >= rcd->egrbufs.count)
1758 					break;
1759 				rcd->egrbufs.rcvtids[i].dma =
1760 					rcd->egrbufs.buffers[j].dma + offset;
1761 				rcd->egrbufs.rcvtids[i].addr =
1762 					rcd->egrbufs.buffers[j].addr + offset;
1763 				rcd->egrbufs.alloced++;
1764 				if ((rcd->egrbufs.buffers[j].dma + offset +
1765 				     new_size) ==
1766 				    (rcd->egrbufs.buffers[j].dma +
1767 				     rcd->egrbufs.buffers[j].len)) {
1768 					j++;
1769 					offset = 0;
1770 				} else {
1771 					offset += new_size;
1772 				}
1773 			}
1774 			rcd->egrbufs.rcvtid_size = new_size;
1775 		}
1776 	}
1777 	rcd->egrbufs.numbufs = idx;
1778 	rcd->egrbufs.size = alloced_bytes;
1779 
1780 	hfi1_cdbg(PROC,
1781 		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
1782 		  rcd->ctxt, rcd->egrbufs.alloced,
1783 		  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
1784 
1785 	/*
1786 	 * Set the context's rcv array head update threshold to the closest
1787 	 * power of 2 (so we can use a mask instead of modulo) below half
1788 	 * the allocated entries.
1789 	 */
1790 	rcd->egrbufs.threshold =
1791 		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
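	/*
	 * E.g. (illustrative) 72 allocated entries gives a threshold of
	 * rounddown_pow_of_two(72 / 2) = 32.
	 */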
1792 	/*
1793 	 * Compute the expected RcvArray entry base. This is done after
1794 	 * allocating the eager buffers in order to maximize the
1795 	 * expected RcvArray entries for the context.
1796 	 */
1797 	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
1798 	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
1799 	rcd->expected_count = max_entries - egrtop;
1800 	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
1801 		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;
1802 
1803 	rcd->expected_base = rcd->eager_base + egrtop;
1804 	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
1805 		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
1806 		  rcd->eager_base, rcd->expected_base);
1807 
1808 	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
1809 		hfi1_cdbg(PROC,
1810 			  "ctxt%u: current Eager buffer size is invalid %u\n",
1811 			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
1812 		ret = -EINVAL;
1813 		goto bail;
1814 	}
1815 
1816 	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
1817 		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
1818 			     rcd->egrbufs.rcvtids[idx].dma, order);
1819 		cond_resched();
1820 	}
1821 	goto bail;
1822 
1823 bail_rcvegrbuf_phys:
1824 	for (idx = 0; idx < rcd->egrbufs.alloced &&
1825 	     rcd->egrbufs.buffers[idx].addr;
1826 	     idx++) {
1827 		dma_free_coherent(&dd->pcidev->dev,
1828 				  rcd->egrbufs.buffers[idx].len,
1829 				  rcd->egrbufs.buffers[idx].addr,
1830 				  rcd->egrbufs.buffers[idx].dma);
1831 		rcd->egrbufs.buffers[idx].addr = NULL;
1832 		rcd->egrbufs.buffers[idx].dma = 0;
1833 		rcd->egrbufs.buffers[idx].len = 0;
1834 	}
1835 bail:
1836 	return ret;
1837 }
1838