1 /*
2  * Copyright (c) 2012 Intel Corporation.  All rights reserved.
3  * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 /*
35  * This file contains all of the code that is specific to the
36  * InfiniPath 7322 chip
37  */
38 
39 #include <linux/interrupt.h>
40 #include <linux/pci.h>
41 #include <linux/delay.h>
42 #include <linux/io.h>
43 #include <linux/jiffies.h>
44 #include <linux/module.h>
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_smi.h>
47 #ifdef CONFIG_INFINIBAND_QIB_DCA
48 #include <linux/dca.h>
49 #endif
50 
51 #include "qib.h"
52 #include "qib_7322_regs.h"
53 #include "qib_qsfp.h"
54 
55 #include "qib_mad.h"
56 #include "qib_verbs.h"
57 
58 #undef pr_fmt
59 #define pr_fmt(fmt) QIB_DRV_NAME " " fmt
60 
61 static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
62 static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
63 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
64 static irqreturn_t qib_7322intr(int irq, void *data);
65 static irqreturn_t qib_7322bufavail(int irq, void *data);
66 static irqreturn_t sdma_intr(int irq, void *data);
67 static irqreturn_t sdma_idle_intr(int irq, void *data);
68 static irqreturn_t sdma_progress_intr(int irq, void *data);
69 static irqreturn_t sdma_cleanup_intr(int irq, void *data);
70 static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
71 				  struct qib_ctxtdata *rcd);
72 static u8 qib_7322_phys_portstate(u64);
73 static u32 qib_7322_iblink_state(u64);
74 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
75 				   u16 linitcmd);
76 static void force_h1(struct qib_pportdata *);
77 static void adj_tx_serdes(struct qib_pportdata *);
78 static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
79 static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
80 
81 static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
82 static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
83 static void serdes_7322_los_enable(struct qib_pportdata *, int);
84 static int serdes_7322_init_old(struct qib_pportdata *);
85 static int serdes_7322_init_new(struct qib_pportdata *);
86 static void dump_sdma_7322_state(struct qib_pportdata *);
87 
88 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
89 
90 /* LE2 serdes values for different cases */
91 #define LE2_DEFAULT 5
92 #define LE2_5m 4
93 #define LE2_QME 0
94 
95 /* Below is special-purpose, so only really works for the IB SerDes blocks. */
96 #define IBSD(hw_pidx) (hw_pidx + 2)
97 
98 /* these are variables for documentation and experimentation purposes */
99 static const unsigned rcv_int_timeout = 375;
100 static const unsigned rcv_int_count = 16;
101 static const unsigned sdma_idle_cnt = 64;
102 
103 /* Time to stop altering Rx Equalization parameters, after link up. */
104 #define RXEQ_DISABLE_MSECS 2500
105 
106 /*
107  * Number of VLs we are configured to use (to allow for more
108  * credits per vl, etc.)
109  */
110 ushort qib_num_cfg_vls = 2;
111 module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
112 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
113 
114 static ushort qib_chase = 1;
115 module_param_named(chase, qib_chase, ushort, S_IRUGO);
116 MODULE_PARM_DESC(chase, "Enable state chase handling");
117 
118 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
119 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
120 MODULE_PARM_DESC(long_attenuation, \
121 		 "attenuation cutoff (dB) for long copper cable setup");
122 
123 static ushort qib_singleport;
124 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
125 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
126 
127 static ushort qib_krcvq01_no_msi;
128 module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
129 MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
130 
131 /*
132  * Receive header queue sizes
133  */
134 static unsigned qib_rcvhdrcnt;
135 module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
136 MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
137 
138 static unsigned qib_rcvhdrsize;
139 module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
140 MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
141 
142 static unsigned qib_rcvhdrentsize;
143 module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
144 MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
145 
146 #define MAX_ATTEN_LEN 64 /* plenty for any real system */
147 /* for read back, default index is ~5m copper cable */
148 static char txselect_list[MAX_ATTEN_LEN] = "10";
149 static struct kparam_string kp_txselect = {
150 	.string = txselect_list,
151 	.maxlen = MAX_ATTEN_LEN
152 };
153 static int  setup_txselect(const char *, struct kernel_param *);
154 module_param_call(txselect, setup_txselect, param_get_string,
155 		  &kp_txselect, S_IWUSR | S_IRUGO);
156 MODULE_PARM_DESC(txselect, \
157 		 "Tx serdes indices (for no QSFP or invalid QSFP data)");
158 
159 #define BOARD_QME7342 5
160 #define BOARD_QMH7342 6
161 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
162 		    BOARD_QMH7342)
163 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
164 		    BOARD_QME7342)
165 
166 #define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))
167 
168 #define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
169 
170 #define MASK_ACROSS(lsb, msb) \
171 	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
172 
173 #define SYM_RMASK(regname, fldname) ((u64)              \
174 	QIB_7322_##regname##_##fldname##_RMASK)
175 
176 #define SYM_MASK(regname, fldname) ((u64)               \
177 	QIB_7322_##regname##_##fldname##_RMASK <<       \
178 	 QIB_7322_##regname##_##fldname##_LSB)
179 
180 #define SYM_FIELD(value, regname, fldname) ((u64)	\
181 	(((value) >> SYM_LSB(regname, fldname)) &	\
182 	 SYM_RMASK(regname, fldname)))
183 
184 /* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
185 #define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
186 	(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
187 
188 #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
189 #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
190 #define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
191 #define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
192 #define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
193 /* Below because most, but not all, fields of IntMask have that full suffix */
194 #define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
195 
196 
197 #define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
198 
199 /*
200  * the size bits give us 2^N, in KB units.  0 marks as invalid,
201  * and 7 is reserved.  We currently use only 2KB and 4KB
202  */
203 #define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
204 #define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
205 #define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
206 #define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
207 
208 #define SendIBSLIDAssignMask \
209 	QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
210 #define SendIBSLMCMask \
211 	QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
212 
213 #define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
214 #define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
215 #define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
216 #define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
217 #define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
218 #define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
219 
220 #define _QIB_GPIO_SDA_NUM 1
221 #define _QIB_GPIO_SCL_NUM 0
222 #define QIB_EEPROM_WEN_NUM 14
223 #define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
224 
225 /* HW counter clock is at 4nsec */
226 #define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
227 
228 /* full speed IB port 1 only */
229 #define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
230 #define PORT_SPD_CAP_SHIFT 3
231 
232 /* full speed featuremask, both ports */
233 #define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
234 
235 /*
236  * This file contains almost all the chip-specific register information and
237  * access functions for the FAKED QLogic InfiniPath 7322 PCI-Express chip.
238  */
239 
240 /* Use defines to tie machine-generated names to lower-case names */
241 #define kr_contextcnt KREG_IDX(ContextCnt)
242 #define kr_control KREG_IDX(Control)
243 #define kr_counterregbase KREG_IDX(CntrRegBase)
244 #define kr_errclear KREG_IDX(ErrClear)
245 #define kr_errmask KREG_IDX(ErrMask)
246 #define kr_errstatus KREG_IDX(ErrStatus)
247 #define kr_extctrl KREG_IDX(EXTCtrl)
248 #define kr_extstatus KREG_IDX(EXTStatus)
249 #define kr_gpio_clear KREG_IDX(GPIOClear)
250 #define kr_gpio_mask KREG_IDX(GPIOMask)
251 #define kr_gpio_out KREG_IDX(GPIOOut)
252 #define kr_gpio_status KREG_IDX(GPIOStatus)
253 #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
254 #define kr_debugportval KREG_IDX(DebugPortValueReg)
255 #define kr_fmask KREG_IDX(feature_mask)
256 #define kr_act_fmask KREG_IDX(active_feature_mask)
257 #define kr_hwerrclear KREG_IDX(HwErrClear)
258 #define kr_hwerrmask KREG_IDX(HwErrMask)
259 #define kr_hwerrstatus KREG_IDX(HwErrStatus)
260 #define kr_intclear KREG_IDX(IntClear)
261 #define kr_intmask KREG_IDX(IntMask)
262 #define kr_intredirect KREG_IDX(IntRedirect0)
263 #define kr_intstatus KREG_IDX(IntStatus)
264 #define kr_pagealign KREG_IDX(PageAlign)
265 #define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
266 #define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
267 #define kr_rcvegrbase KREG_IDX(RcvEgrBase)
268 #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
269 #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
270 #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
271 #define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
272 #define kr_rcvtidbase KREG_IDX(RcvTIDBase)
273 #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
274 #define kr_revision KREG_IDX(Revision)
275 #define kr_scratch KREG_IDX(Scratch)
276 #define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
277 #define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
278 #define kr_sendctrl KREG_IDX(SendCtrl)
279 #define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
280 #define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
281 #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
282 #define kr_sendpiobufbase KREG_IDX(SendBufBase)
283 #define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
284 #define kr_sendpiosize KREG_IDX(SendBufSize)
285 #define kr_sendregbase KREG_IDX(SendRegBase)
286 #define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
287 #define kr_userregbase KREG_IDX(UserRegBase)
288 #define kr_intgranted KREG_IDX(Int_Granted)
289 #define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
290 #define kr_intblocked KREG_IDX(IntBlocked)
291 #define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
292 
293 /*
294  * per-port kernel registers.  Access only with qib_read_kreg_port()
295  * or qib_write_kreg_port()
296  */
297 #define krp_errclear KREG_IBPORT_IDX(ErrClear)
298 #define krp_errmask KREG_IBPORT_IDX(ErrMask)
299 #define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
300 #define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
301 #define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
302 #define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
303 #define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
304 #define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
305 #define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
306 #define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
307 #define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
308 #define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
309 #define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
310 #define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
311 #define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
312 #define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
313 #define krp_psinterval KREG_IBPORT_IDX(PSInterval)
314 #define krp_psstart KREG_IBPORT_IDX(PSStart)
315 #define krp_psstat KREG_IBPORT_IDX(PSStat)
316 #define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
317 #define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
318 #define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
319 #define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
320 #define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
321 #define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
322 #define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
323 #define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
324 #define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
325 #define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
326 #define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
327 #define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
328 #define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
329 #define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
330 #define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
331 #define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
332 #define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
333 #define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
334 #define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
335 #define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
336 #define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
337 #define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
338 #define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
339 #define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
340 #define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
341 #define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
342 #define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
343 #define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
344 #define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
345 #define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
346 #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
347 
348 /*
349  * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
350  * or qib_write_kreg_ctxt()
351  */
352 #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
353 #define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
354 
355 /*
356  * TID Flow table, per context.  Reduces
357  * number of hdrq updates to one per flow (or on errors).
358  * context 0 and 1 share same memory, but have distinct
359  * addresses.  Since for now, we never use expected sends
360  * on kernel contexts, we don't worry about that (we initialize
361  * those entries for ctxt 0/1 on driver load twice, for example).
362  */
363 #define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
364 #define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
365 
366 /* these are the error bits in the tid flows, and are W1C */
367 #define TIDFLOW_ERRBITS  ( \
368 	(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
369 	SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
370 	(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
371 	SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
372 
373 /* Most (not all) Counters are per-IBport.
374  * Requires LBIntCnt is at offset 0 in the group
375  */
376 #define CREG_IDX(regname) \
377 ((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
378 
379 #define crp_badformat CREG_IDX(RxVersionErrCnt)
380 #define crp_err_rlen CREG_IDX(RxLenErrCnt)
381 #define crp_erricrc CREG_IDX(RxICRCErrCnt)
382 #define crp_errlink CREG_IDX(RxLinkMalformCnt)
383 #define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
384 #define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
385 #define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
386 #define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
387 #define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
388 #define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
389 #define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
390 #define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
391 #define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
392 #define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
393 #define crp_pktrcv CREG_IDX(RxDataPktCnt)
394 #define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
395 #define crp_pktsend CREG_IDX(TxDataPktCnt)
396 #define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
397 #define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
398 #define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
399 #define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
400 #define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
401 #define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
402 #define crp_rcvebp CREG_IDX(RxEBPCnt)
403 #define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
404 #define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
405 #define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
406 #define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
407 #define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
408 #define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
409 #define crp_rxvlerr CREG_IDX(RxVlErrCnt)
410 #define crp_sendstall CREG_IDX(TxFlowStallCnt)
411 #define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
412 #define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
413 #define crp_txlenerr CREG_IDX(TxLenErrCnt)
414 #define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
415 #define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
416 #define crp_txunderrun CREG_IDX(TxUnderrunCnt)
417 #define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
418 #define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
419 #define crp_wordrcv CREG_IDX(RxDwordCnt)
420 #define crp_wordsend CREG_IDX(TxDwordCnt)
421 #define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
422 
423 /* these are the (few) counters that are not port-specific */
424 #define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
425 			QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
426 #define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
427 #define cr_lbint CREG_DEVIDX(LBIntCnt)
428 #define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
429 #define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
430 #define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
431 #define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
432 #define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
433 
434 /* no chip register for # of IB ports supported, so define */
435 #define NUM_IB_PORTS 2
436 
437 /* 1 VL15 buffer per hardware IB port, no register for this, so define */
438 #define NUM_VL15_BUFS NUM_IB_PORTS
439 
440 /*
441  * context 0 and 1 are special, and there is no chip register that
442  * defines this value, so we have to define it here.
443  * These are all allocated to either 0 or 1 for single port
444  * hardware configuration, otherwise each gets half
445  */
446 #define KCTXT0_EGRCNT 2048
447 
448 /* values for vl and port fields in PBC, 7322-specific */
449 #define PBC_PORT_SEL_LSB 26
450 #define PBC_PORT_SEL_RMASK 1
451 #define PBC_VL_NUM_LSB 27
452 #define PBC_VL_NUM_RMASK 7
453 #define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
454 #define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
455 
456 static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
457 	[IB_RATE_2_5_GBPS] = 16,
458 	[IB_RATE_5_GBPS] = 8,
459 	[IB_RATE_10_GBPS] = 4,
460 	[IB_RATE_20_GBPS] = 2,
461 	[IB_RATE_30_GBPS] = 2,
462 	[IB_RATE_40_GBPS] = 1
463 };
464 
465 #define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
466 #define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
467 
468 /* link training states, from IBC */
469 #define IB_7322_LT_STATE_DISABLED        0x00
470 #define IB_7322_LT_STATE_LINKUP          0x01
471 #define IB_7322_LT_STATE_POLLACTIVE      0x02
472 #define IB_7322_LT_STATE_POLLQUIET       0x03
473 #define IB_7322_LT_STATE_SLEEPDELAY      0x04
474 #define IB_7322_LT_STATE_SLEEPQUIET      0x05
475 #define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
476 #define IB_7322_LT_STATE_CFGRCVFCFG      0x09
477 #define IB_7322_LT_STATE_CFGWAITRMT      0x0a
478 #define IB_7322_LT_STATE_CFGIDLE         0x0b
479 #define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
480 #define IB_7322_LT_STATE_TXREVLANES      0x0d
481 #define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
482 #define IB_7322_LT_STATE_RECOVERIDLE     0x0f
483 #define IB_7322_LT_STATE_CFGENH          0x10
484 #define IB_7322_LT_STATE_CFGTEST         0x11
485 #define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
486 #define IB_7322_LT_STATE_CFGWAITENH      0x13
487 
488 /* link state machine states from IBC */
489 #define IB_7322_L_STATE_DOWN             0x0
490 #define IB_7322_L_STATE_INIT             0x1
491 #define IB_7322_L_STATE_ARM              0x2
492 #define IB_7322_L_STATE_ACTIVE           0x3
493 #define IB_7322_L_STATE_ACT_DEFER        0x4
494 
495 static const u8 qib_7322_physportstate[0x20] = {
496 	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
497 	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
498 	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
499 	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
500 	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
501 	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
502 	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
503 	[IB_7322_LT_STATE_CFGRCVFCFG] =
504 		IB_PHYSPORTSTATE_CFG_TRAIN,
505 	[IB_7322_LT_STATE_CFGWAITRMT] =
506 		IB_PHYSPORTSTATE_CFG_TRAIN,
507 	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
508 	[IB_7322_LT_STATE_RECOVERRETRAIN] =
509 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
510 	[IB_7322_LT_STATE_RECOVERWAITRMT] =
511 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
512 	[IB_7322_LT_STATE_RECOVERIDLE] =
513 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
514 	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
515 	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
516 	[IB_7322_LT_STATE_CFGWAITRMTTEST] =
517 		IB_PHYSPORTSTATE_CFG_TRAIN,
518 	[IB_7322_LT_STATE_CFGWAITENH] =
519 		IB_PHYSPORTSTATE_CFG_WAIT_ENH,
520 	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
521 	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
522 	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
523 	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
524 };
525 
526 #ifdef CONFIG_INFINIBAND_QIB_DCA
527 struct qib_irq_notify {
528 	int rcv;
529 	void *arg;
530 	struct irq_affinity_notify notify;
531 };
532 #endif
533 
534 struct qib_chip_specific {
535 	u64 __iomem *cregbase;
536 	u64 *cntrs;
537 	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
538 	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
539 	u64 main_int_mask;      /* clear bits which have dedicated handlers */
540 	u64 int_enable_mask;  /* for per port interrupts in single port mode */
541 	u64 errormask;
542 	u64 hwerrmask;
543 	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
544 	u64 gpio_mask; /* shadow the gpio mask register */
545 	u64 extctrl; /* shadow the gpio output enable, etc... */
546 	u32 ncntrs;
547 	u32 nportcntrs;
548 	u32 cntrnamelen;
549 	u32 portcntrnamelen;
550 	u32 numctxts;
551 	u32 rcvegrcnt;
552 	u32 updthresh; /* current AvailUpdThld */
553 	u32 updthresh_dflt; /* default AvailUpdThld */
554 	u32 r1;
555 	int irq;
556 	u32 num_msix_entries;
557 	u32 sdmabufcnt;
558 	u32 lastbuf_for_pio;
559 	u32 stay_in_freeze;
560 	u32 recovery_ports_initted;
561 #ifdef CONFIG_INFINIBAND_QIB_DCA
562 	u32 dca_ctrl;
563 	int rhdr_cpu[18];
564 	int sdma_cpu[2];
565 	u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
566 #endif
567 	struct qib_msix_entry *msix_entries;
568 	unsigned long *sendchkenable;
569 	unsigned long *sendgrhchk;
570 	unsigned long *sendibchk;
571 	u32 rcvavail_timeout[18];
572 	char emsgbuf[128]; /* for device error interrupt msg buffer */
573 };
574 
575 /* Table of entries in "human readable" form Tx Emphasis. */
576 struct txdds_ent {
577 	u8 amp;
578 	u8 pre;
579 	u8 main;
580 	u8 post;
581 };
582 
583 struct vendor_txdds_ent {
584 	u8 oui[QSFP_VOUI_LEN];
585 	u8 *partnum;
586 	struct txdds_ent sdr;
587 	struct txdds_ent ddr;
588 	struct txdds_ent qdr;
589 };
590 
591 static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
592 
593 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
594 #define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
595 #define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
596 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
597 
598 #define H1_FORCE_VAL 8
599 #define H1_FORCE_QME 1 /*  may be overridden via setup_txselect() */
600 #define H1_FORCE_QMH 7 /*  may be overridden via setup_txselect() */
601 
602 /* The static and dynamic registers are paired, and the pairs indexed by spd */
603 #define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
604 	+ ((spd) * 2))
605 
606 #define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
607 #define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
608 #define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
609 #define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
610 #define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
611 
612 struct qib_chippport_specific {
613 	u64 __iomem *kpregbase;
614 	u64 __iomem *cpregbase;
615 	u64 *portcntrs;
616 	struct qib_pportdata *ppd;
617 	wait_queue_head_t autoneg_wait;
618 	struct delayed_work autoneg_work;
619 	struct delayed_work ipg_work;
620 	struct timer_list chase_timer;
621 	/*
622 	 * these 5 fields are used to establish deltas for IB symbol
623 	 * errors and linkrecovery errors.  They can be reported on
624 	 * some chips during link negotiation prior to INIT, and with
625 	 * DDR when faking DDR negotiations with non-IBTA switches.
626 	 * The chip counters are adjusted at driver unload if there is
627 	 * a non-zero delta.
628 	 */
629 	u64 ibdeltainprog;
630 	u64 ibsymdelta;
631 	u64 ibsymsnap;
632 	u64 iblnkerrdelta;
633 	u64 iblnkerrsnap;
634 	u64 iblnkdownsnap;
635 	u64 iblnkdowndelta;
636 	u64 ibmalfdelta;
637 	u64 ibmalfsnap;
638 	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
639 	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
640 	unsigned long qdr_dfe_time;
641 	unsigned long chase_end;
642 	u32 autoneg_tries;
643 	u32 recovery_init;
644 	u32 qdr_dfe_on;
645 	u32 qdr_reforce;
646 	/*
647 	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
648 	 * entry zero is unused, to simplify indexing
649 	 */
650 	u8 h1_val;
651 	u8 no_eep;  /* txselect table index to use if no qsfp info */
652 	u8 ipg_tries;
653 	u8 ibmalfusesnap;
654 	struct qib_qsfp_data qsfp_data;
655 	char epmsgbuf[192]; /* for port error interrupt msg buffer */
656 	char sdmamsgbuf[192]; /* for per-port sdma error messages */
657 };
658 
659 static struct {
660 	const char *name;
661 	irq_handler_t handler;
662 	int lsb;
663 	int port; /* 0 if not port-specific, else port # */
664 	int dca;
665 } irq_table[] = {
666 	{ "", qib_7322intr, -1, 0, 0 },
667 	{ " (buf avail)", qib_7322bufavail,
668 		SYM_LSB(IntStatus, SendBufAvail), 0, 0},
669 	{ " (sdma 0)", sdma_intr,
670 		SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
671 	{ " (sdma 1)", sdma_intr,
672 		SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
673 	{ " (sdmaI 0)", sdma_idle_intr,
674 		SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1},
675 	{ " (sdmaI 1)", sdma_idle_intr,
676 		SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1},
677 	{ " (sdmaP 0)", sdma_progress_intr,
678 		SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
679 	{ " (sdmaP 1)", sdma_progress_intr,
680 		SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
681 	{ " (sdmaC 0)", sdma_cleanup_intr,
682 		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
683 	{ " (sdmaC 1)", sdma_cleanup_intr,
684 		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0},
685 };
686 
687 #ifdef CONFIG_INFINIBAND_QIB_DCA
688 
689 static const struct dca_reg_map {
690 	int     shadow_inx;
691 	int     lsb;
692 	u64     mask;
693 	u16     regno;
694 } dca_rcvhdr_reg_map[] = {
695 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
696 	   ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
697 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
698 	   ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
699 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
700 	   ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
701 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
702 	   ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
703 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
704 	   ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
705 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
706 	   ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
707 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
708 	   ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
709 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
710 	   ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
711 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
712 	   ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
713 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
714 	   ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
715 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
716 	   ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
717 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
718 	   ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
719 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
720 	   ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
721 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
722 	   ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
723 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
724 	   ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
725 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
726 	   ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
727 	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
728 	   ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
729 	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
730 	   ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
731 };
732 #endif
733 
734 /* ibcctrl bits */
735 #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
736 /* cycle through TS1/TS2 till OK */
737 #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
738 /* wait for TS1, then go on */
739 #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
740 #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
741 
742 #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
743 #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
744 #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
745 
746 #define BLOB_7322_IBCHG 0x101
747 
748 static inline void qib_write_kreg(const struct qib_devdata *dd,
749 				  const u32 regno, u64 value);
750 static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
751 static void write_7322_initregs(struct qib_devdata *);
752 static void write_7322_init_portregs(struct qib_pportdata *);
753 static void setup_7322_link_recovery(struct qib_pportdata *, u32);
754 static void check_7322_rxe_status(struct qib_pportdata *);
755 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
756 #ifdef CONFIG_INFINIBAND_QIB_DCA
757 static void qib_setup_dca(struct qib_devdata *dd);
758 static void setup_dca_notifier(struct qib_devdata *dd,
759 			       struct qib_msix_entry *m);
760 static void reset_dca_notifier(struct qib_devdata *dd,
761 			       struct qib_msix_entry *m);
762 #endif
763 
764 /**
765  * qib_read_ureg32 - read 32-bit virtualized per-context register
766  * @dd: device
767  * @regno: register number
768  * @ctxt: context number
769  *
770  * Return the contents of a register that is virtualized to be per context.
771  * Returns -1 on errors (not distinguishable from valid contents at
772  * runtime; we may add a separate error variable at some point).
773  */
774 static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
775 				  enum qib_ureg regno, int ctxt)
776 {
777 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
778 		return 0;
779 	return readl(regno + (u64 __iomem *)(
780 		(dd->ureg_align * ctxt) + (dd->userbase ?
781 		 (char __iomem *)dd->userbase :
782 		 (char __iomem *)dd->kregbase + dd->uregbase)));
783 }
784 
785 /**
786  * qib_read_ureg - read virtualized per-context register
787  * @dd: device
788  * @regno: register number
789  * @ctxt: context number
790  *
791  * Return the contents of a register that is virtualized to be per context.
792  * Returns -1 on errors (not distinguishable from valid contents at
793  * runtime; we may add a separate error variable at some point).
794  */
795 static inline u64 qib_read_ureg(const struct qib_devdata *dd,
796 				enum qib_ureg regno, int ctxt)
797 {
798 
799 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
800 		return 0;
801 	return readq(regno + (u64 __iomem *)(
802 		(dd->ureg_align * ctxt) + (dd->userbase ?
803 		 (char __iomem *)dd->userbase :
804 		 (char __iomem *)dd->kregbase + dd->uregbase)));
805 }
806 
807 /**
808  * qib_write_ureg - write virtualized per-context register
809  * @dd: device
810  * @regno: register number
811  * @value: value
812  * @ctxt: context
813  *
814  * Write the contents of a register that is virtualized to be per context.
815  */
816 static inline void qib_write_ureg(const struct qib_devdata *dd,
817 				  enum qib_ureg regno, u64 value, int ctxt)
818 {
819 	u64 __iomem *ubase;
820 	if (dd->userbase)
821 		ubase = (u64 __iomem *)
822 			((char __iomem *) dd->userbase +
823 			 dd->ureg_align * ctxt);
824 	else
825 		ubase = (u64 __iomem *)
826 			(dd->uregbase +
827 			 (char __iomem *) dd->kregbase +
828 			 dd->ureg_align * ctxt);
829 
830 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
831 		writeq(value, &ubase[regno]);
832 }
833 
834 static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
835 				  const u32 regno)
836 {
837 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
838 		return -1;
839 	return readl((u32 __iomem *) &dd->kregbase[regno]);
840 }
841 
842 static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
843 				  const u32 regno)
844 {
845 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
846 		return -1;
847 	return readq(&dd->kregbase[regno]);
848 }
849 
850 static inline void qib_write_kreg(const struct qib_devdata *dd,
851 				  const u32 regno, u64 value)
852 {
853 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
854 		writeq(value, &dd->kregbase[regno]);
855 }
856 
857 /*
858  * not many sanity checks for the port-specific kernel register routines,
859  * since they are only used when it's known to be safe.
860 */
861 static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
862 				     const u16 regno)
863 {
864 	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
865 		return 0ULL;
866 	return readq(&ppd->cpspec->kpregbase[regno]);
867 }
868 
869 static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
870 				       const u16 regno, u64 value)
871 {
872 	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
873 	    (ppd->dd->flags & QIB_PRESENT))
874 		writeq(value, &ppd->cpspec->kpregbase[regno]);
875 }
876 
877 /**
878  * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
879  * @dd: the qlogic_ib device
880  * @regno: the register number to write
881  * @ctxt: the context containing the register
882  * @value: the value to write
883  */
884 static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
885 				       const u16 regno, unsigned ctxt,
886 				       u64 value)
887 {
888 	qib_write_kreg(dd, regno + ctxt, value);
889 }
890 
891 static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
892 {
893 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
894 		return 0;
895 	return readq(&dd->cspec->cregbase[regno]);
896 
897 
898 }
899 
900 static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
901 {
902 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
903 		return 0;
904 	return readl(&dd->cspec->cregbase[regno]);
905 
906 
907 }
908 
909 static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
910 					u16 regno, u64 value)
911 {
912 	if (ppd->cpspec && ppd->cpspec->cpregbase &&
913 	    (ppd->dd->flags & QIB_PRESENT))
914 		writeq(value, &ppd->cpspec->cpregbase[regno]);
915 }
916 
917 static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
918 				      u16 regno)
919 {
920 	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
921 	    !(ppd->dd->flags & QIB_PRESENT))
922 		return 0;
923 	return readq(&ppd->cpspec->cpregbase[regno]);
924 }
925 
926 static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
927 					u16 regno)
928 {
929 	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
930 	    !(ppd->dd->flags & QIB_PRESENT))
931 		return 0;
932 	return readl(&ppd->cpspec->cpregbase[regno]);
933 }
934 
935 /* bits in Control register */
936 #define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
937 #define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
938 
939 /* bits in general interrupt regs */
940 #define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
941 #define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
942 #define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
943 #define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
944 #define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
945 #define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
946 #define QIB_I_C_ERROR INT_MASK(Err)
947 
948 #define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
949 #define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
950 #define QIB_I_GPIO INT_MASK(AssertGPIO)
951 #define QIB_I_P_SDMAINT(pidx) \
952 	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
953 	 INT_MASK_P(SDmaProgress, pidx) | \
954 	 INT_MASK_PM(SDmaCleanupDone, pidx))
955 
956 /* Interrupt bits that are "per port" */
957 #define QIB_I_P_BITSEXTANT(pidx) \
958 	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
959 	INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
960 	INT_MASK_P(SDmaProgress, pidx) | \
961 	INT_MASK_PM(SDmaCleanupDone, pidx))
962 
963 /* Interrupt bits that are common to a device */
964 /* currently unused: QIB_I_SPIOSENT */
965 #define QIB_I_C_BITSEXTANT \
966 	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
967 	QIB_I_SPIOSENT | \
968 	QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
969 
970 #define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
971 	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
972 
973 /*
974  * Error bits that are "per port".
975  */
976 #define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
977 #define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
978 #define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
979 #define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
980 #define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
981 #define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
982 #define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
983 #define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
984 #define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
985 #define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
986 #define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
987 #define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
988 #define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
989 #define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
990 #define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
991 #define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
992 #define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
993 #define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
994 #define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
995 #define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
996 #define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
997 #define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
998 #define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
999 #define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
1000 #define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
1001 #define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
1002 #define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
1003 #define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
1004 
1005 #define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
1006 #define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
1007 #define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
1008 #define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
1009 #define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
1010 #define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
1011 #define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
1012 #define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
1013 #define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
1014 #define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
1015 #define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
1016 
1017 /* Error bits that are common to a device */
1018 #define QIB_E_RESET ERR_MASK(ResetNegated)
1019 #define QIB_E_HARDWARE ERR_MASK(HardwareErr)
1020 #define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
1021 
1022 
1023 /*
1024  * Per chip (rather than per-port) errors.  Most either do
1025  * nothing but trigger a print (because they self-recover, or
1026  * always occur in tandem with other errors that handle the
1027  * issue), or because they indicate errors with no recovery,
1028  * but we want to know that they happened.
1029  */
1030 #define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
1031 #define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
1032 #define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
1033 #define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
1034 #define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
1035 #define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
1036 #define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
1037 #define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
1038 
1039 /* SDMA chip errors (not per port)
1040  * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
1041  * the SDMAHALT error immediately, so we just print the dup error via the
1042  * E_AUTO mechanism.  This is true of most of the per-port fatal errors
1043  * as well, but since this is port-independent, by definition, it's
1044  * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
1045  * packet send errors, and so are handled in the same manner as other
1046  * per-packet errors.
1047  */
1048 #define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
1049 #define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
1050 #define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
1051 
1052 /*
1053  * Below functionally equivalent to legacy QLOGIC_IB_E_PKTERRS
1054  * it is used to print "common" packet errors.
1055  */
1056 #define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
1057 	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
1058 	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
1059 	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1060 	QIB_E_P_REBP)
1061 
1062 /* Error Bits that Packet-related (Receive, per-port) */
1063 #define QIB_E_P_RPKTERRS (\
1064 	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
1065 	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
1066 	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
1067 	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
1068 	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
1069 	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
1070 
1071 /*
1072  * Error bits that are Send-related (per port)
1073  * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
1074  * All of these potentially need to have a buffer disarmed
1075  */
1076 #define QIB_E_P_SPKTERRS (\
1077 	QIB_E_P_SUNEXP_PKTNUM |\
1078 	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1079 	QIB_E_P_SMAXPKTLEN |\
1080 	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1081 	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
1082 	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
1083 
1084 #define QIB_E_SPKTERRS ( \
1085 		QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
1086 		ERR_MASK_N(SendUnsupportedVLErr) |			\
1087 		QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
1088 
1089 #define QIB_E_P_SDMAERRS ( \
1090 	QIB_E_P_SDMAHALT | \
1091 	QIB_E_P_SDMADESCADDRMISALIGN | \
1092 	QIB_E_P_SDMAUNEXPDATA | \
1093 	QIB_E_P_SDMAMISSINGDW | \
1094 	QIB_E_P_SDMADWEN | \
1095 	QIB_E_P_SDMARPYTAG | \
1096 	QIB_E_P_SDMA1STDESC | \
1097 	QIB_E_P_SDMABASE | \
1098 	QIB_E_P_SDMATAILOUTOFBOUND | \
1099 	QIB_E_P_SDMAOUTOFBOUND | \
1100 	QIB_E_P_SDMAGENMISMATCH)
1101 
1102 /*
1103  * This sets some bits more than once, but makes it more obvious which
1104  * bits are not handled under other categories, and the repeat definition
1105  * is not a problem.
1106  */
1107 #define QIB_E_P_BITSEXTANT ( \
1108 	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1109 	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1110 	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1111 	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1112 	)
1113 
1114 /*
1115  * These are errors that can occur when the link
1116  * changes state while a packet is being sent or received.  This doesn't
1117  * cover things like EBP or VCRC that can be the result of a sending
1118  * having the link change state, so we receive a "known bad" packet.
1119  * All of these are "per port", so renamed:
1120  */
1121 #define QIB_E_P_LINK_PKTERRS (\
1122 	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1123 	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1124 	QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1125 	QIB_E_P_RUNEXPCHAR)
1126 
1127 /*
1128  * This sets some bits more than once, but makes it more obvious which
1129  * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1130  * and the repeat definition is not a problem.
1131  */
1132 #define QIB_E_C_BITSEXTANT (\
1133 	QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1134 	QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1135 	QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1136 
1137 /* Likewise Neuter E_SPKT_ERRS_IGNORE */
1138 #define E_SPKT_ERRS_IGNORE 0
1139 
1140 #define QIB_EXTS_MEMBIST_DISABLED \
1141 	SYM_MASK(EXTStatus, MemBISTDisabled)
1142 #define QIB_EXTS_MEMBIST_ENDTEST \
1143 	SYM_MASK(EXTStatus, MemBISTEndTest)
1144 
1145 #define QIB_E_SPIOARMLAUNCH \
1146 	ERR_MASK(SendArmLaunchErr)
1147 
1148 #define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1149 #define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1150 
1151 /*
1152  * IBTA_1_2 is set when multiple speeds are enabled (normal),
1153  * and also if forced QDR (only QDR enabled).  It's enabled for the
1154  * forced QDR case so that scrambling will be enabled by the TS3
1155  * exchange, when supported by both sides of the link.
1156  */
1157 #define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1158 #define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1159 #define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1160 #define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1161 #define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1162 #define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1163 	SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1164 #define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1165 
1166 #define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1167 #define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1168 
1169 #define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1170 #define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1171 #define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1172 
1173 #define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1174 #define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1175 #define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1176 	SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1177 #define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1178 	SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1179 #define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1180 
1181 #define IBA7322_REDIRECT_VEC_PER_REG 12
1182 
1183 #define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1184 #define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1185 #define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1186 #define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1187 #define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1188 
1189 #define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1190 
1191 #define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1192 	.msg = #fldname , .sz = sizeof(#fldname) }
1193 #define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1194 	fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
1195 static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1196 	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1197 	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1198 	HWE_AUTO(PCIESerdesPClkNotDetect),
1199 	HWE_AUTO(PowerOnBISTFailed),
1200 	HWE_AUTO(TempsenseTholdReached),
1201 	HWE_AUTO(MemoryErr),
1202 	HWE_AUTO(PCIeBusParityErr),
1203 	HWE_AUTO(PcieCplTimeout),
1204 	HWE_AUTO(PciePoisonedTLP),
1205 	HWE_AUTO_P(SDmaMemReadErr, 1),
1206 	HWE_AUTO_P(SDmaMemReadErr, 0),
1207 	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1208 	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
1209 	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1210 	HWE_AUTO(statusValidNoEop),
1211 	HWE_AUTO(LATriggered),
1212 	{ .mask = 0, .sz = 0 }
1213 };
1214 
1215 #define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1216 	.msg = #fldname, .sz = sizeof(#fldname) }
1217 #define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1218 	.msg = #fldname, .sz = sizeof(#fldname) }
1219 static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1220 	E_AUTO(RcvEgrFullErr),
1221 	E_AUTO(RcvHdrFullErr),
1222 	E_AUTO(ResetNegated),
1223 	E_AUTO(HardwareErr),
1224 	E_AUTO(InvalidAddrErr),
1225 	E_AUTO(SDmaVL15Err),
1226 	E_AUTO(SBufVL15MisUseErr),
1227 	E_AUTO(InvalidEEPCmd),
1228 	E_AUTO(RcvContextShareErr),
1229 	E_AUTO(SendVLMismatchErr),
1230 	E_AUTO(SendArmLaunchErr),
1231 	E_AUTO(SendSpecialTriggerErr),
1232 	E_AUTO(SDmaWrongPortErr),
1233 	E_AUTO(SDmaBufMaskDuplicateErr),
1234 	{ .mask = 0, .sz = 0 }
1235 };
1236 
1237 static const struct  qib_hwerror_msgs qib_7322p_error_msgs[] = {
1238 	E_P_AUTO(IBStatusChanged),
1239 	E_P_AUTO(SHeadersErr),
1240 	E_P_AUTO(VL15BufMisuseErr),
1241 	/*
1242 	 * SDmaHaltErr is not really an error, make it clearer;
1243 	 */
1244 	{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
1245 		.sz = 11},
1246 	E_P_AUTO(SDmaDescAddrMisalignErr),
1247 	E_P_AUTO(SDmaUnexpDataErr),
1248 	E_P_AUTO(SDmaMissingDwErr),
1249 	E_P_AUTO(SDmaDwEnErr),
1250 	E_P_AUTO(SDmaRpyTagErr),
1251 	E_P_AUTO(SDma1stDescErr),
1252 	E_P_AUTO(SDmaBaseErr),
1253 	E_P_AUTO(SDmaTailOutOfBoundErr),
1254 	E_P_AUTO(SDmaOutOfBoundErr),
1255 	E_P_AUTO(SDmaGenMismatchErr),
1256 	E_P_AUTO(SendBufMisuseErr),
1257 	E_P_AUTO(SendUnsupportedVLErr),
1258 	E_P_AUTO(SendUnexpectedPktNumErr),
1259 	E_P_AUTO(SendDroppedDataPktErr),
1260 	E_P_AUTO(SendDroppedSmpPktErr),
1261 	E_P_AUTO(SendPktLenErr),
1262 	E_P_AUTO(SendUnderRunErr),
1263 	E_P_AUTO(SendMaxPktLenErr),
1264 	E_P_AUTO(SendMinPktLenErr),
1265 	E_P_AUTO(RcvIBLostLinkErr),
1266 	E_P_AUTO(RcvHdrErr),
1267 	E_P_AUTO(RcvHdrLenErr),
1268 	E_P_AUTO(RcvBadTidErr),
1269 	E_P_AUTO(RcvBadVersionErr),
1270 	E_P_AUTO(RcvIBFlowErr),
1271 	E_P_AUTO(RcvEBPErr),
1272 	E_P_AUTO(RcvUnsupportedVLErr),
1273 	E_P_AUTO(RcvUnexpectedCharErr),
1274 	E_P_AUTO(RcvShortPktLenErr),
1275 	E_P_AUTO(RcvLongPktLenErr),
1276 	E_P_AUTO(RcvMaxPktLenErr),
1277 	E_P_AUTO(RcvMinPktLenErr),
1278 	E_P_AUTO(RcvICRCErr),
1279 	E_P_AUTO(RcvVCRCErr),
1280 	E_P_AUTO(RcvFormatErr),
1281 	{ .mask = 0, .sz = 0 }
1282 };
1283 
1284 /*
1285  * Below generates "auto-message" for interrupts not specific to any port or
1286  * context
1287  */
1288 #define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1289 	.msg = #fldname, .sz = sizeof(#fldname) }
1290 /* Below generates "auto-message" for interrupts specific to a port */
1291 #define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1292 	SYM_LSB(IntMask, fldname##Mask##_0), \
1293 	SYM_LSB(IntMask, fldname##Mask##_1)), \
1294 	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1295 /* For some reason, the SerDesTrimDone bits are reversed */
1296 #define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1297 	SYM_LSB(IntMask, fldname##Mask##_1), \
1298 	SYM_LSB(IntMask, fldname##Mask##_0)), \
1299 	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1300 /*
1301  * Below generates "auto-message" for interrupts specific to a context,
1302  * with ctxt-number appended
1303  */
1304 #define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1305 	SYM_LSB(IntMask, fldname##0IntMask), \
1306 	SYM_LSB(IntMask, fldname##17IntMask)), \
1307 	.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
1308 
1309 static const struct  qib_hwerror_msgs qib_7322_intr_msgs[] = {
1310 	INTR_AUTO_P(SDmaInt),
1311 	INTR_AUTO_P(SDmaProgressInt),
1312 	INTR_AUTO_P(SDmaIdleInt),
1313 	INTR_AUTO_P(SDmaCleanupDone),
1314 	INTR_AUTO_C(RcvUrg),
1315 	INTR_AUTO_P(ErrInt),
1316 	INTR_AUTO(ErrInt),      /* non-port-specific errs */
1317 	INTR_AUTO(AssertGPIOInt),
1318 	INTR_AUTO_P(SendDoneInt),
1319 	INTR_AUTO(SendBufAvailInt),
1320 	INTR_AUTO_C(RcvAvail),
1321 	{ .mask = 0, .sz = 0 }
1322 };
1323 
1324 #define TXSYMPTOM_AUTO_P(fldname) \
1325 	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1326 	.msg = #fldname, .sz = sizeof(#fldname) }
1327 static const struct  qib_hwerror_msgs hdrchk_msgs[] = {
1328 	TXSYMPTOM_AUTO_P(NonKeyPacket),
1329 	TXSYMPTOM_AUTO_P(GRHFail),
1330 	TXSYMPTOM_AUTO_P(PkeyFail),
1331 	TXSYMPTOM_AUTO_P(QPFail),
1332 	TXSYMPTOM_AUTO_P(SLIDFail),
1333 	TXSYMPTOM_AUTO_P(RawIPV6),
1334 	TXSYMPTOM_AUTO_P(PacketTooSmall),
1335 	{ .mask = 0, .sz = 0 }
1336 };
1337 
1338 #define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1339 
1340 /*
1341  * Called when we might have an error that is specific to a particular
1342  * PIO buffer, and may need to cancel that buffer, so it can be re-used,
1343  * because we don't need to force the update of pioavail
1344  */
1345 static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1346 {
1347 	struct qib_devdata *dd = ppd->dd;
1348 	u32 i;
1349 	int any;
1350 	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1351 	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1352 	unsigned long sbuf[4];
1353 
1354 	/*
1355 	 * It's possible that sendbuffererror could have bits set; might
1356 	 * have already done this as a result of hardware error handling.
1357 	 */
1358 	any = 0;
1359 	for (i = 0; i < regcnt; ++i) {
1360 		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1361 		if (sbuf[i]) {
1362 			any = 1;
1363 			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1364 		}
1365 	}
1366 
1367 	if (any)
1368 		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1369 }
1370 
1371 /* No txe_recover yet, if ever */
1372 
/* No decode_errors yet */
1374 static void err_decode(char *msg, size_t len, u64 errs,
1375 		       const struct qib_hwerror_msgs *msp)
1376 {
1377 	u64 these, lmask;
1378 	int took, multi, n = 0;
1379 
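	/*
	 * For each message entry, peel off the error bits one at a time:
	 * (these & (these - 1)) ^ these isolates the lowest set bit of
	 * "these".  When a mask covers more than one bit (multi != 0),
	 * append "_<offset of the bit within the mask>" (typically the
	 * port or context number) so the instance is identifiable.
	 */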
1380 	while (errs && msp && msp->mask) {
1381 		multi = (msp->mask & (msp->mask - 1));
1382 		while (errs & msp->mask) {
1383 			these = (errs & msp->mask);
1384 			lmask = (these & (these - 1)) ^ these;
1385 			if (len) {
1386 				if (n++) {
1387 					/* separate the strings */
1388 					*msg++ = ',';
1389 					len--;
1390 				}
1391 				BUG_ON(!msp->sz);
1392 				/* msp->sz counts the nul */
1393 				took = min_t(size_t, msp->sz - (size_t)1, len);
1394 				memcpy(msg,  msp->msg, took);
1395 				len -= took;
1396 				msg += took;
1397 				if (len)
1398 					*msg = '\0';
1399 			}
1400 			errs &= ~lmask;
1401 			if (len && multi) {
1402 				/* More than one bit this mask */
1403 				int idx = -1;
1404 
1405 				while (lmask & msp->mask) {
1406 					++idx;
1407 					lmask >>= 1;
1408 				}
1409 				took = scnprintf(msg, len, "_%d", idx);
1410 				len -= took;
1411 				msg += took;
1412 			}
1413 		}
1414 		++msp;
1415 	}
1416 	/* If some bits are left, show in hex. */
1417 	if (len && errs)
1418 		snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1419 			(unsigned long long) errs);
1420 }
1421 
1422 /* only called if r1 set */
1423 static void flush_fifo(struct qib_pportdata *ppd)
1424 {
1425 	struct qib_devdata *dd = ppd->dd;
1426 	u32 __iomem *piobuf;
1427 	u32 bufn;
1428 	u32 *hdr;
1429 	u64 pbc;
1430 	const unsigned hdrwords = 7;
1431 	static struct qib_ib_header ibhdr = {
1432 		.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1433 		.lrh[1] = IB_LID_PERMISSIVE,
1434 		.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1435 		.lrh[3] = IB_LID_PERMISSIVE,
1436 		.u.oth.bth[0] = cpu_to_be32(
1437 			(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1438 		.u.oth.bth[1] = cpu_to_be32(0),
1439 		.u.oth.bth[2] = cpu_to_be32(0),
1440 		.u.oth.u.ud.deth[0] = cpu_to_be32(0),
1441 		.u.oth.u.ud.deth[1] = cpu_to_be32(0),
1442 	};
1443 
1444 	/*
1445 	 * Send a dummy VL15 packet to flush the launch FIFO.
1446 	 * This will not actually be sent since the TxeBypassIbc bit is set.
1447 	 */
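	/*
	 * PBC word as built here: the VL15-send flag, the port select
	 * field in the upper 32 bits, and the packet length (header plus
	 * CRC, in 32-bit words) in the low bits.
	 */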
1448 	pbc = PBC_7322_VL15_SEND |
1449 		(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1450 		(hdrwords + SIZE_OF_CRC);
1451 	piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1452 	if (!piobuf)
1453 		return;
1454 	writeq(pbc, piobuf);
1455 	hdr = (u32 *) &ibhdr;
1456 	if (dd->flags & QIB_PIO_FLUSH_WC) {
1457 		qib_flush_wc();
1458 		qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1459 		qib_flush_wc();
1460 		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1461 		qib_flush_wc();
1462 	} else
1463 		qib_pio_copy(piobuf + 2, hdr, hdrwords);
1464 	qib_sendbuf_done(dd, bufn);
1465 }
1466 
1467 /*
1468  * This is called with interrupts disabled and sdma_lock held.
1469  */
1470 static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1471 {
1472 	struct qib_devdata *dd = ppd->dd;
1473 	u64 set_sendctrl = 0;
1474 	u64 clr_sendctrl = 0;
1475 
1476 	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1477 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1478 	else
1479 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1480 
1481 	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1482 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1483 	else
1484 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1485 
1486 	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1487 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1488 	else
1489 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1490 
1491 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1492 		set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1493 				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1494 				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1495 	else
1496 		clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1497 				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1498 				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1499 
1500 	spin_lock(&dd->sendctrl_lock);
1501 
1502 	/* If we are draining everything, block sends first */
1503 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1504 		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1505 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1506 		qib_write_kreg(dd, kr_scratch, 0);
1507 	}
1508 
1509 	ppd->p_sendctrl |= set_sendctrl;
1510 	ppd->p_sendctrl &= ~clr_sendctrl;
1511 
1512 	if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1513 		qib_write_kreg_port(ppd, krp_sendctrl,
1514 				    ppd->p_sendctrl |
1515 				    SYM_MASK(SendCtrl_0, SDmaCleanup));
1516 	else
1517 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1518 	qib_write_kreg(dd, kr_scratch, 0);
1519 
1520 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1521 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1522 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1523 		qib_write_kreg(dd, kr_scratch, 0);
1524 	}
1525 
1526 	spin_unlock(&dd->sendctrl_lock);
1527 
1528 	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1529 		flush_fifo(ppd);
1530 }
1531 
1532 static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1533 {
1534 	__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1535 }
1536 
1537 static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1538 {
1539 	/*
	 * Set SendDmaLenGen, then clear and set the MSB of the generation
	 * count, to enable generation checking and to load the internal
	 * generation counter.
1543 	 */
1544 	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1545 	qib_write_kreg_port(ppd, krp_senddmalengen,
1546 			    ppd->sdma_descq_cnt |
1547 			    (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1548 }
1549 
1550 /*
1551  * Must be called with sdma_lock held, or before init finished.
1552  */
1553 static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1554 {
1555 	/* Commit writes to memory and advance the tail on the chip */
1556 	wmb();
1557 	ppd->sdma_descq_tail = tail;
1558 	qib_write_kreg_port(ppd, krp_senddmatail, tail);
1559 }
1560 
1561 /*
1562  * This is called with interrupts disabled and sdma_lock held.
1563  */
1564 static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1565 {
1566 	/*
1567 	 * Drain all FIFOs.
1568 	 * The hardware doesn't require this but we do it so that verbs
1569 	 * and user applications don't wait for link active to send stale
1570 	 * data.
1571 	 */
1572 	sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1573 
1574 	qib_sdma_7322_setlengen(ppd);
1575 	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1576 	ppd->sdma_head_dma[0] = 0;
1577 	qib_7322_sdma_sendctrl(ppd,
1578 		ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1579 }
1580 
1581 #define DISABLES_SDMA ( \
1582 	QIB_E_P_SDMAHALT | \
1583 	QIB_E_P_SDMADESCADDRMISALIGN | \
1584 	QIB_E_P_SDMAMISSINGDW | \
1585 	QIB_E_P_SDMADWEN | \
1586 	QIB_E_P_SDMARPYTAG | \
1587 	QIB_E_P_SDMA1STDESC | \
1588 	QIB_E_P_SDMABASE | \
1589 	QIB_E_P_SDMATAILOUTOFBOUND | \
1590 	QIB_E_P_SDMAOUTOFBOUND | \
1591 	QIB_E_P_SDMAGENMISMATCH)
1592 
1593 static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1594 {
1595 	unsigned long flags;
1596 	struct qib_devdata *dd = ppd->dd;
1597 
1598 	errs &= QIB_E_P_SDMAERRS;
1599 	err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
1600 		   errs, qib_7322p_error_msgs);
1601 
1602 	if (errs & QIB_E_P_SDMAUNEXPDATA)
1603 		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1604 			    ppd->port);
1605 
1606 	spin_lock_irqsave(&ppd->sdma_lock, flags);
1607 
1608 	if (errs != QIB_E_P_SDMAHALT) {
1609 		/* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
1610 		qib_dev_porterr(dd, ppd->port,
1611 			"SDMA %s 0x%016llx %s\n",
1612 			qib_sdma_state_names[ppd->sdma_state.current_state],
1613 			errs, ppd->cpspec->sdmamsgbuf);
1614 		dump_sdma_7322_state(ppd);
1615 	}
1616 
1617 	switch (ppd->sdma_state.current_state) {
1618 	case qib_sdma_state_s00_hw_down:
1619 		break;
1620 
1621 	case qib_sdma_state_s10_hw_start_up_wait:
1622 		if (errs & QIB_E_P_SDMAHALT)
1623 			__qib_sdma_process_event(ppd,
1624 				qib_sdma_event_e20_hw_started);
1625 		break;
1626 
1627 	case qib_sdma_state_s20_idle:
1628 		break;
1629 
1630 	case qib_sdma_state_s30_sw_clean_up_wait:
1631 		break;
1632 
1633 	case qib_sdma_state_s40_hw_clean_up_wait:
1634 		if (errs & QIB_E_P_SDMAHALT)
1635 			__qib_sdma_process_event(ppd,
1636 				qib_sdma_event_e50_hw_cleaned);
1637 		break;
1638 
1639 	case qib_sdma_state_s50_hw_halt_wait:
1640 		if (errs & QIB_E_P_SDMAHALT)
1641 			__qib_sdma_process_event(ppd,
1642 				qib_sdma_event_e60_hw_halted);
1643 		break;
1644 
1645 	case qib_sdma_state_s99_running:
1646 		__qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1647 		__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1648 		break;
1649 	}
1650 
1651 	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1652 }
1653 
1654 /*
1655  * handle per-device errors (not per-port errors)
1656  */
1657 static noinline void handle_7322_errors(struct qib_devdata *dd)
1658 {
1659 	char *msg;
1660 	u64 iserr = 0;
1661 	u64 errs;
1662 	u64 mask;
1663 	int log_idx;
1664 
1665 	qib_stats.sps_errints++;
1666 	errs = qib_read_kreg64(dd, kr_errstatus);
1667 	if (!errs) {
1668 		qib_devinfo(dd->pcidev,
1669 			"device error interrupt, but no error bits set!\n");
1670 		goto done;
1671 	}
1672 
1673 	/* don't report errors that are masked */
1674 	errs &= dd->cspec->errormask;
1675 	msg = dd->cspec->emsgbuf;
1676 
1677 	/* do these first, they are most important */
1678 	if (errs & QIB_E_HARDWARE) {
1679 		*msg = '\0';
1680 		qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
1681 	} else
1682 		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1683 			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1684 				qib_inc_eeprom_err(dd, log_idx, 1);
1685 
1686 	if (errs & QIB_E_SPKTERRS) {
1687 		qib_disarm_7322_senderrbufs(dd->pport);
1688 		qib_stats.sps_txerrs++;
1689 	} else if (errs & QIB_E_INVALIDADDR)
1690 		qib_stats.sps_txerrs++;
1691 	else if (errs & QIB_E_ARMLAUNCH) {
1692 		qib_stats.sps_txerrs++;
1693 		qib_disarm_7322_senderrbufs(dd->pport);
1694 	}
1695 	qib_write_kreg(dd, kr_errclear, errs);
1696 
1697 	/*
1698 	 * The ones we mask off are handled specially below
1699 	 * or above.  Also mask SDMADISABLED by default as it
1700 	 * is too chatty.
1701 	 */
1702 	mask = QIB_E_HARDWARE;
1703 	*msg = '\0';
1704 
1705 	err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
1706 		   qib_7322error_msgs);
1707 
1708 	/*
1709 	 * Getting reset is a tragedy for all ports. Mark the device
	 * _and_ the ports as "offline" in a way meaningful to each.
1711 	 */
1712 	if (errs & QIB_E_RESET) {
1713 		int pidx;
1714 
1715 		qib_dev_err(dd,
1716 			"Got reset, requires re-init (unload and reload driver)\n");
1717 		dd->flags &= ~QIB_INITTED;  /* needs re-init */
1718 		/* mark as having had error */
1719 		*dd->devstatusp |= QIB_STATUS_HWERROR;
1720 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
1721 			if (dd->pport[pidx].link_speed_supported)
1722 				*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1723 	}
1724 
1725 	if (*msg && iserr)
1726 		qib_dev_err(dd, "%s error\n", msg);
1727 
1728 	/*
1729 	 * If there were hdrq or egrfull errors, wake up any processes
1730 	 * waiting in poll.  We used to try to check which contexts had
1731 	 * the overflow, but given the cost of that and the chip reads
1732 	 * to support it, it's better to just wake everybody up if we
1733 	 * get an overflow; waiters can poll again if it's not them.
1734 	 */
1735 	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1736 		qib_handle_urcv(dd, ~0U);
1737 		if (errs & ERR_MASK(RcvEgrFullErr))
1738 			qib_stats.sps_buffull++;
1739 		else
1740 			qib_stats.sps_hdrfull++;
1741 	}
1742 
1743 done:
1744 	return;
1745 }
1746 
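/*
 * Tasklet bottom half for device error interrupts: decode and handle the
 * errors, then restore the error interrupt mask that was cleared before
 * this tasklet was scheduled (see unlikely_7322_intr()).
 */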
1747 static void qib_error_tasklet(unsigned long data)
1748 {
1749 	struct qib_devdata *dd = (struct qib_devdata *)data;
1750 
1751 	handle_7322_errors(dd);
1752 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1753 }
1754 
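/*
 * Timer callback armed by disable_chase(): after the chase-disable period
 * expires, take the link back to DOWN/POLL so training can resume.
 */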
1755 static void reenable_chase(unsigned long opaque)
1756 {
1757 	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1758 
1759 	ppd->cpspec->chase_timer.expires = 0;
1760 	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1761 		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1762 }
1763 
1764 static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1765 		u8 ibclt)
1766 {
1767 	ppd->cpspec->chase_end = 0;
1768 
1769 	if (!qib_chase)
1770 		return;
1771 
1772 	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1773 		QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1774 	ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1775 	add_timer(&ppd->cpspec->chase_timer);
1776 }
1777 
1778 static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1779 {
1780 	u8 ibclt;
1781 	unsigned long tnow;
1782 
1783 	ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1784 
1785 	/*
1786 	 * Detect and handle the state chase issue, where we can
1787 	 * get stuck if we are unlucky on timing on both sides of
1788 	 * the link.   If we are, we disable, set a timer, and
1789 	 * then re-enable.
1790 	 */
1791 	switch (ibclt) {
1792 	case IB_7322_LT_STATE_CFGRCVFCFG:
1793 	case IB_7322_LT_STATE_CFGWAITRMT:
1794 	case IB_7322_LT_STATE_TXREVLANES:
1795 	case IB_7322_LT_STATE_CFGENH:
1796 		tnow = jiffies;
1797 		if (ppd->cpspec->chase_end &&
1798 		     time_after(tnow, ppd->cpspec->chase_end))
1799 			disable_chase(ppd, tnow, ibclt);
1800 		else if (!ppd->cpspec->chase_end)
1801 			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1802 		break;
1803 	default:
1804 		ppd->cpspec->chase_end = 0;
1805 		break;
1806 	}
1807 
1808 	if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1809 	      ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1810 	     ibclt == IB_7322_LT_STATE_LINKUP) &&
1811 	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1812 		force_h1(ppd);
1813 		ppd->cpspec->qdr_reforce = 1;
1814 		if (!ppd->dd->cspec->r1)
1815 			serdes_7322_los_enable(ppd, 0);
1816 	} else if (ppd->cpspec->qdr_reforce &&
1817 		(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1818 		 (ibclt == IB_7322_LT_STATE_CFGENH ||
1819 		ibclt == IB_7322_LT_STATE_CFGIDLE ||
1820 		ibclt == IB_7322_LT_STATE_LINKUP))
1821 		force_h1(ppd);
1822 
1823 	if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1824 	    ppd->link_speed_enabled == QIB_IB_QDR &&
1825 	    (ibclt == IB_7322_LT_STATE_CFGTEST ||
1826 	     ibclt == IB_7322_LT_STATE_CFGENH ||
1827 	     (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1828 	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1829 		adj_tx_serdes(ppd);
1830 
1831 	if (ibclt != IB_7322_LT_STATE_LINKUP) {
1832 		u8 ltstate = qib_7322_phys_portstate(ibcst);
1833 		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1834 					  LinkTrainingState);
1835 		if (!ppd->dd->cspec->r1 &&
1836 		    pibclt == IB_7322_LT_STATE_LINKUP &&
1837 		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1838 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1839 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1840 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
			/* If the link went down (but not into recovery),
			 * turn LOS back on */
1843 			serdes_7322_los_enable(ppd, 1);
1844 		if (!ppd->cpspec->qdr_dfe_on &&
1845 		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1846 			ppd->cpspec->qdr_dfe_on = 1;
1847 			ppd->cpspec->qdr_dfe_time = 0;
1848 			/* On link down, reenable QDR adaptation */
1849 			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1850 					    ppd->dd->cspec->r1 ?
1851 					    QDR_STATIC_ADAPT_DOWN_R1 :
1852 					    QDR_STATIC_ADAPT_DOWN);
1853 			pr_info(
1854 				"IB%u:%u re-enabled QDR adaptation ibclt %x\n",
1855 				ppd->dd->unit, ppd->port, ibclt);
1856 		}
1857 	}
1858 }
1859 
1860 static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1861 
1862 /*
 * This is per-pport error handling.
 * It will likely get its own MSIx interrupt (one for each port,
 * although just a single handler).
1866  */
1867 static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1868 {
1869 	char *msg;
1870 	u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1871 	struct qib_devdata *dd = ppd->dd;
1872 
1873 	/* do this as soon as possible */
1874 	fmask = qib_read_kreg64(dd, kr_act_fmask);
1875 	if (!fmask)
1876 		check_7322_rxe_status(ppd);
1877 
1878 	errs = qib_read_kreg_port(ppd, krp_errstatus);
1879 	if (!errs)
1880 		qib_devinfo(dd->pcidev,
1881 			 "Port%d error interrupt, but no error bits set!\n",
1882 			 ppd->port);
1883 	if (!fmask)
1884 		errs &= ~QIB_E_P_IBSTATUSCHANGED;
1885 	if (!errs)
1886 		goto done;
1887 
1888 	msg = ppd->cpspec->epmsgbuf;
1889 	*msg = '\0';
1890 
1891 	if (errs & ~QIB_E_P_BITSEXTANT) {
1892 		err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1893 			   errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1894 		if (!*msg)
1895 			snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
1896 				 "no others");
1897 		qib_dev_porterr(dd, ppd->port,
1898 			"error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1899 			(errs & ~QIB_E_P_BITSEXTANT), msg);
1900 		*msg = '\0';
1901 	}
1902 
1903 	if (errs & QIB_E_P_SHDR) {
1904 		u64 symptom;
1905 
1906 		/* determine cause, then write to clear */
1907 		symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1908 		qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1909 		err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
1910 			   hdrchk_msgs);
1911 		*msg = '\0';
1912 		/* senderrbuf cleared in SPKTERRS below */
1913 	}
1914 
1915 	if (errs & QIB_E_P_SPKTERRS) {
1916 		if ((errs & QIB_E_P_LINK_PKTERRS) &&
1917 		    !(ppd->lflags & QIBL_LINKACTIVE)) {
1918 			/*
1919 			 * This can happen when trying to bring the link
1920 			 * up, but the IB link changes state at the "wrong"
1921 			 * time. The IB logic then complains that the packet
1922 			 * isn't valid.  We don't want to confuse people, so
1923 			 * we just don't print them, except at debug
1924 			 */
1925 			err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1926 				   (errs & QIB_E_P_LINK_PKTERRS),
1927 				   qib_7322p_error_msgs);
1928 			*msg = '\0';
1929 			ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1930 		}
1931 		qib_disarm_7322_senderrbufs(ppd);
1932 	} else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1933 		   !(ppd->lflags & QIBL_LINKACTIVE)) {
1934 		/*
1935 		 * This can happen when SMA is trying to bring the link
1936 		 * up, but the IB link changes state at the "wrong" time.
1937 		 * The IB logic then complains that the packet isn't
1938 		 * valid.  We don't want to confuse people, so we just
1939 		 * don't print them, except at debug
1940 		 */
1941 		err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
1942 			   qib_7322p_error_msgs);
1943 		ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1944 		*msg = '\0';
1945 	}
1946 
1947 	qib_write_kreg_port(ppd, krp_errclear, errs);
1948 
1949 	errs &= ~ignore_this_time;
1950 	if (!errs)
1951 		goto done;
1952 
1953 	if (errs & QIB_E_P_RPKTERRS)
1954 		qib_stats.sps_rcverrs++;
1955 	if (errs & QIB_E_P_SPKTERRS)
1956 		qib_stats.sps_txerrs++;
1957 
1958 	iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1959 
1960 	if (errs & QIB_E_P_SDMAERRS)
1961 		sdma_7322_p_errors(ppd, errs);
1962 
1963 	if (errs & QIB_E_P_IBSTATUSCHANGED) {
1964 		u64 ibcs;
1965 		u8 ltstate;
1966 
1967 		ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1968 		ltstate = qib_7322_phys_portstate(ibcs);
1969 
1970 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1971 			handle_serdes_issues(ppd, ibcs);
1972 		if (!(ppd->cpspec->ibcctrl_a &
1973 		      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1974 			/*
1975 			 * We got our interrupt, so init code should be
1976 			 * happy and not try alternatives. Now squelch
			 * other "chatter" from link-negotiation (pre-Init).
1978 			 */
1979 			ppd->cpspec->ibcctrl_a |=
1980 				SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1981 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
1982 					    ppd->cpspec->ibcctrl_a);
1983 		}
1984 
1985 		/* Update our picture of width and speed from chip */
1986 		ppd->link_width_active =
1987 			(ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1988 			    IB_WIDTH_4X : IB_WIDTH_1X;
1989 		ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1990 			LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1991 			  SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1992 				   QIB_IB_DDR : QIB_IB_SDR;
1993 
1994 		if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1995 		    IB_PHYSPORTSTATE_DISABLED)
1996 			qib_set_ib_7322_lstate(ppd, 0,
1997 			       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1998 		else
1999 			/*
2000 			 * Since going into a recovery state causes the link
2001 			 * state to go down and since recovery is transitory,
2002 			 * it is better if we "miss" ever seeing the link
2003 			 * training state go into recovery (i.e., ignore this
2004 			 * transition for link state special handling purposes)
2005 			 * without updating lastibcstat.
2006 			 */
2007 			if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
2008 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
2009 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
2010 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
2011 				qib_handle_e_ibstatuschanged(ppd, ibcs);
2012 	}
2013 	if (*msg && iserr)
2014 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
2015 
2016 	if (ppd->state_wanted & ppd->lflags)
2017 		wake_up_interruptible(&ppd->state_wait);
2018 done:
2019 	return;
2020 }
2021 
2022 /* enable/disable chip from delivering interrupts */
2023 static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
2024 {
2025 	if (enable) {
2026 		if (dd->flags & QIB_BADINTR)
2027 			return;
2028 		qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
2029 		/* cause any pending enabled interrupts to be re-delivered */
2030 		qib_write_kreg(dd, kr_intclear, 0ULL);
2031 		if (dd->cspec->num_msix_entries) {
2032 			/* and same for MSIx */
2033 			u64 val = qib_read_kreg64(dd, kr_intgranted);
2034 			if (val)
2035 				qib_write_kreg(dd, kr_intgranted, val);
2036 		}
2037 	} else
2038 		qib_write_kreg(dd, kr_intmask, 0ULL);
2039 }
2040 
2041 /*
2042  * Try to cleanup as much as possible for anything that might have gone
2043  * wrong while in freeze mode, such as pio buffers being written by user
2044  * processes (causing armlaunch), send errors due to going into freeze mode,
2045  * etc., and try to avoid causing extra interrupts while doing so.
2046  * Forcibly update the in-memory pioavail register copies after cleanup
2047  * because the chip won't do it while in freeze mode (the register values
2048  * themselves are kept correct).
2049  * Make sure that we don't lose any important interrupts by using the chip
2050  * feature that says that writing 0 to a bit in *clear that is set in
2051  * *status will cause an interrupt to be generated again (if allowed by
2052  * the *mask value).
2053  * This is in chip-specific code because of all of the register accesses,
2054  * even though the details are similar on most chips.
2055  */
2056 static void qib_7322_clear_freeze(struct qib_devdata *dd)
2057 {
2058 	int pidx;
2059 
2060 	/* disable error interrupts, to avoid confusion */
2061 	qib_write_kreg(dd, kr_errmask, 0ULL);
2062 
2063 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
2064 		if (dd->pport[pidx].link_speed_supported)
2065 			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2066 					    0ULL);
2067 
	/* also disable interrupts; errormask is sometimes overwritten */
2069 	qib_7322_set_intr_state(dd, 0);
2070 
2071 	/* clear the freeze, and be sure chip saw it */
2072 	qib_write_kreg(dd, kr_control, dd->control);
2073 	qib_read_kreg32(dd, kr_scratch);
2074 
2075 	/*
2076 	 * Force new interrupt if any hwerr, error or interrupt bits are
2077 	 * still set, and clear "safe" send packet errors related to freeze
2078 	 * and cancelling sends.  Re-enable error interrupts before possible
2079 	 * force of re-interrupt on pending interrupts.
2080 	 */
2081 	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2082 	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2083 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2084 	/* We need to purge per-port errs and reset mask, too */
2085 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2086 		if (!dd->pport[pidx].link_speed_supported)
2087 			continue;
		qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0ULL);
		qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0ULL);
2090 	}
2091 	qib_7322_set_intr_state(dd, 1);
2092 }
2093 
2094 /* no error handling to speak of */
2095 /**
2096  * qib_7322_handle_hwerrors - display hardware errors.
2097  * @dd: the qlogic_ib device
2098  * @msg: the output buffer
2099  * @msgl: the size of the output buffer
2100  *
 * Most hardware errors are catastrophic, but for right now,
 * we'll print them and continue.  We reuse the same message buffer as
 * qib_handle_errors() to avoid excessive stack use.
2105  */
2106 static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2107 				     size_t msgl)
2108 {
2109 	u64 hwerrs;
2110 	u32 ctrl;
2111 	int isfatal = 0;
2112 
2113 	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2114 	if (!hwerrs)
2115 		goto bail;
2116 	if (hwerrs == ~0ULL) {
2117 		qib_dev_err(dd,
2118 			"Read of hardware error status failed (all bits set); ignoring\n");
2119 		goto bail;
2120 	}
2121 	qib_stats.sps_hwerrs++;
2122 
2123 	/* Always clear the error status register, except BIST fail */
2124 	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2125 		       ~HWE_MASK(PowerOnBISTFailed));
2126 
2127 	hwerrs &= dd->cspec->hwerrmask;
2128 
2129 	/* no EEPROM logging, yet */
2130 
2131 	if (hwerrs)
2132 		qib_devinfo(dd->pcidev,
2133 			"Hardware error: hwerr=0x%llx (cleared)\n",
2134 			(unsigned long long) hwerrs);
2135 
2136 	ctrl = qib_read_kreg32(dd, kr_control);
2137 	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2138 		/*
2139 		 * No recovery yet...
2140 		 */
2141 		if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2142 		    dd->cspec->stay_in_freeze) {
2143 			/*
			 * If any bits are set that we aren't ignoring, only
			 * complain once, in case it's stuck or recurring,
			 * and we get here multiple times.
			 * Force link down, so the switch knows, and
			 * LEDs are turned off.
2149 			 */
2150 			if (dd->flags & QIB_INITTED)
2151 				isfatal = 1;
2152 		} else
2153 			qib_7322_clear_freeze(dd);
2154 	}
2155 
2156 	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2157 		isfatal = 1;
2158 		strlcpy(msg,
2159 			"[Memory BIST test failed, InfiniPath hardware unusable]",
2160 			msgl);
2161 		/* ignore from now on, so disable until driver reloaded */
2162 		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2163 		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2164 	}
2165 
2166 	err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2167 
2168 	/* Ignore esoteric PLL failures et al. */
2169 
2170 	qib_dev_err(dd, "%s hardware error\n", msg);
2171 
2172 	if (hwerrs &
2173 		   (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
2174 		    SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
2175 		int pidx = 0;
2176 		int err;
2177 		unsigned long flags;
2178 		struct qib_pportdata *ppd = dd->pport;
2179 		for (; pidx < dd->num_pports; ++pidx, ppd++) {
2180 			err = 0;
2181 			if (pidx == 0 && (hwerrs &
2182 				SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
2183 				err++;
2184 			if (pidx == 1 && (hwerrs &
2185 				SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
2186 				err++;
2187 			if (err) {
2188 				spin_lock_irqsave(&ppd->sdma_lock, flags);
2189 				dump_sdma_7322_state(ppd);
2190 				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
2191 			}
2192 		}
2193 	}
2194 
2195 	if (isfatal && !dd->diag_client) {
2196 		qib_dev_err(dd,
2197 			"Fatal Hardware Error, no longer usable, SN %.16s\n",
2198 			dd->serial);
2199 		/*
2200 		 * for /sys status file and user programs to print; if no
2201 		 * trailing brace is copied, we'll know it was truncated.
2202 		 */
2203 		if (dd->freezemsg)
2204 			snprintf(dd->freezemsg, dd->freezelen,
2205 				 "{%s}", msg);
2206 		qib_disable_after_error(dd);
2207 	}
2208 bail:;
2209 }
2210 
2211 /**
2212  * qib_7322_init_hwerrors - enable hardware errors
2213  * @dd: the qlogic_ib device
2214  *
 * Now that we have finished initializing everything that might reasonably
 * cause a hardware error, and cleared those error bits as they occur,
 * we can enable hardware errors in the mask (potentially enabling
 * freeze mode), and enable hardware errors as errors (along with
 * everything else) in errormask.
2220  */
2221 static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2222 {
2223 	int pidx;
2224 	u64 extsval;
2225 
2226 	extsval = qib_read_kreg64(dd, kr_extstatus);
2227 	if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2228 			 QIB_EXTS_MEMBIST_ENDTEST)))
2229 		qib_dev_err(dd, "MemBIST did not complete!\n");
2230 
2231 	/* never clear BIST failure, so reported on each driver load */
2232 	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2233 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2234 
2235 	/* clear all */
2236 	qib_write_kreg(dd, kr_errclear, ~0ULL);
2237 	/* enable errors that are masked, at least this first time. */
2238 	qib_write_kreg(dd, kr_errmask, ~0ULL);
2239 	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2240 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
2241 		if (dd->pport[pidx].link_speed_supported)
2242 			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2243 					    ~0ULL);
2244 }
2245 
2246 /*
2247  * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
2248  * on chips that are count-based, rather than trigger-based.  There is no
2249  * reference counting, but that's also fine, given the intended use.
2250  * Only chip-specific because it's all register accesses
2251  */
2252 static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2253 {
2254 	if (enable) {
2255 		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2256 		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2257 	} else
2258 		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2259 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2260 }
2261 
2262 /*
2263  * Formerly took parameter <which> in pre-shifted,
2264  * pre-merged form with LinkCmd and LinkInitCmd
2265  * together, and assuming the zero was NOP.
2266  */
2267 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2268 				   u16 linitcmd)
2269 {
2270 	u64 mod_wd;
2271 	struct qib_devdata *dd = ppd->dd;
2272 	unsigned long flags;
2273 
2274 	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2275 		/*
2276 		 * If we are told to disable, note that so link-recovery
2277 		 * code does not attempt to bring us back up.
2278 		 * Also reset everything that we can, so we start
2279 		 * completely clean when re-enabled (before we
2280 		 * actually issue the disable to the IBC)
2281 		 */
2282 		qib_7322_mini_pcs_reset(ppd);
2283 		spin_lock_irqsave(&ppd->lflags_lock, flags);
2284 		ppd->lflags |= QIBL_IB_LINK_DISABLED;
2285 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2286 	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2287 		/*
2288 		 * Any other linkinitcmd will lead to LINKDOWN and then
2289 		 * to INIT (if all is well), so clear flag to let
2290 		 * link-recovery code attempt to bring us back up.
2291 		 */
2292 		spin_lock_irqsave(&ppd->lflags_lock, flags);
2293 		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2294 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2295 		/*
2296 		 * Clear status change interrupt reduction so the
2297 		 * new state is seen.
2298 		 */
2299 		ppd->cpspec->ibcctrl_a &=
2300 			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2301 	}
2302 
2303 	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2304 		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2305 
2306 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2307 			    mod_wd);
2308 	/* write to chip to prevent back-to-back writes of ibc reg */
2309 	qib_write_kreg(dd, kr_scratch, 0);
2310 
2311 }
2312 
2313 /*
2314  * The total RCV buffer memory is 64KB, used for both ports, and is
2315  * in units of 64 bytes (same as IB flow control credit unit).
 * The consumedVL units in the same registers are in 32 byte units!
2317  * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2318  * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2319  * in krp_rxcreditvl15, rather than 10.
2320  */
2321 #define RCV_BUF_UNITSZ 64
2322 #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
2323 
2324 static void set_vls(struct qib_pportdata *ppd)
2325 {
2326 	int i, numvls, totcred, cred_vl, vl0extra;
2327 	struct qib_devdata *dd = ppd->dd;
2328 	u64 val;
2329 
2330 	numvls = qib_num_vls(ppd->vls_operational);
2331 
2332 	/*
	 * Set up per-VL credits. Below is a kluge based on these assumptions:
	 * 1) port is disabled at the time early_init is called.
	 * 2) give VL15 enough credits (9 units) for two max-plausible packets.
	 * 3) Give VL0-N the rest, with any rounding excess used for VL0.
2337 	 */
2338 	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
2339 	totcred = NUM_RCV_BUF_UNITS(dd);
2340 	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2341 	totcred -= cred_vl;
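	/*
	 * For example, with both ports in use: NUM_RCV_BUF_UNITS is
	 * 64K / (64 * 2) = 512 units per port, VL15 gets
	 * (2 * 288 + 63) / 64 = 9 units, and the remaining 503 units
	 * are split across the operational VLs below.
	 */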
2342 	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2343 	cred_vl = totcred / numvls;
2344 	vl0extra = totcred - cred_vl * numvls;
2345 	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2346 	for (i = 1; i < numvls; i++)
2347 		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2348 	for (; i < 8; i++) /* no buffer space for other VLs */
2349 		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2350 
2351 	/* Notify IBC that credits need to be recalculated */
2352 	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2353 	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2354 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2355 	qib_write_kreg(dd, kr_scratch, 0ULL);
2356 	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2357 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2358 
2359 	for (i = 0; i < numvls; i++)
2360 		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2361 	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2362 
2363 	/* Change the number of operational VLs */
2364 	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2365 				~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2366 		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2367 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2368 	qib_write_kreg(dd, kr_scratch, 0ULL);
2369 }
2370 
2371 /*
2372  * The code that deals with actual SerDes is in serdes_7322_init().
2373  * Compared to the code for iba7220, it is minimal.
2374  */
2375 static int serdes_7322_init(struct qib_pportdata *ppd);
2376 
2377 /**
2378  * qib_7322_bringup_serdes - bring up the serdes
2379  * @ppd: physical port on the qlogic_ib device
2380  */
2381 static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2382 {
2383 	struct qib_devdata *dd = ppd->dd;
2384 	u64 val, guid, ibc;
2385 	unsigned long flags;
2386 	int ret = 0;
2387 
2388 	/*
2389 	 * SerDes model not in Pd, but still need to
2390 	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2391 	 * eventually.
2392 	 */
2393 	/* Put IBC in reset, sends disabled (should be in reset already) */
2394 	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2395 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2396 	qib_write_kreg(dd, kr_scratch, 0ULL);
2397 
2398 	/* ensure previous Tx parameters are not still forced */
2399 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
2400 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
2401 		reset_tx_deemphasis_override));
2402 
2403 	if (qib_compat_ddr_negotiate) {
2404 		ppd->cpspec->ibdeltainprog = 1;
2405 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2406 						crp_ibsymbolerr);
2407 		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2408 						crp_iblinkerrrecov);
2409 	}
2410 
2411 	/* flowcontrolwatermark is in units of KBytes */
2412 	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2413 	/*
2414 	 * Flow control is sent this often, even if no changes in
2415 	 * buffer space occur.  Units are 128ns for this chip.
	 * Set to 3 usec (24 * 128 ns = 3.072 us).
2417 	 */
2418 	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2419 	/* max error tolerance */
2420 	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2421 	/* IB credit flow control. */
2422 	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2423 	/*
2424 	 * set initial max size pkt IBC will send, including ICRC; it's the
2425 	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2426 	 */
2427 	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2428 		SYM_LSB(IBCCtrlA_0, MaxPktLen);
2429 	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2430 
2431 	/*
2432 	 * Reset the PCS interface to the serdes (and also ibc, which is still
2433 	 * in reset from above).  Writes new value of ibcctrl_a as last step.
2434 	 */
2435 	qib_7322_mini_pcs_reset(ppd);
2436 
2437 	if (!ppd->cpspec->ibcctrl_b) {
2438 		unsigned lse = ppd->link_speed_enabled;
2439 
2440 		/*
		 * Not a re-init after reset, so establish the shadow
		 * and force the initial config.
2443 		 */
2444 		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2445 							     krp_ibcctrl_b);
2446 		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2447 				IBA7322_IBC_SPEED_DDR |
2448 				IBA7322_IBC_SPEED_SDR |
2449 				IBA7322_IBC_WIDTH_AUTONEG |
2450 				SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
		if (lse & (lse - 1)) /* Multiple speeds enabled */
2452 			ppd->cpspec->ibcctrl_b |=
2453 				(lse << IBA7322_IBC_SPEED_LSB) |
2454 				IBA7322_IBC_IBTA_1_2_MASK |
2455 				IBA7322_IBC_MAX_SPEED_MASK;
2456 		else
2457 			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2458 				IBA7322_IBC_SPEED_QDR |
2459 				 IBA7322_IBC_IBTA_1_2_MASK :
2460 				(lse == QIB_IB_DDR) ?
2461 					IBA7322_IBC_SPEED_DDR :
2462 					IBA7322_IBC_SPEED_SDR;
2463 		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2464 		    (IB_WIDTH_1X | IB_WIDTH_4X))
2465 			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2466 		else
2467 			ppd->cpspec->ibcctrl_b |=
2468 				ppd->link_width_enabled == IB_WIDTH_4X ?
2469 				IBA7322_IBC_WIDTH_4X_ONLY :
2470 				IBA7322_IBC_WIDTH_1X_ONLY;
2471 
2472 		/* always enable these on driver reload, not sticky */
2473 		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2474 			IBA7322_IBC_HRTBT_MASK);
2475 	}
2476 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2477 
2478 	/* setup so we have more time at CFGTEST to change H1 */
2479 	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2480 	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2481 	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2482 	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2483 
2484 	serdes_7322_init(ppd);
2485 
2486 	guid = be64_to_cpu(ppd->guid);
2487 	if (!guid) {
2488 		if (dd->base_guid)
2489 			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2490 		ppd->guid = cpu_to_be64(guid);
2491 	}
2492 
2493 	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2494 	/* write to chip to prevent back-to-back writes of ibc reg */
2495 	qib_write_kreg(dd, kr_scratch, 0);
2496 
2497 	/* Enable port */
2498 	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2499 	set_vls(ppd);
2500 
2501 	/* initially come up DISABLED, without sending anything. */
2502 	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2503 					QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2504 	qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2505 	qib_write_kreg(dd, kr_scratch, 0ULL);
2506 	/* clear the linkinit cmds */
2507 	ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2508 
2509 	/* be paranoid against later code motion, etc. */
2510 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2511 	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2512 	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2513 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2514 
2515 	/* Also enable IBSTATUSCHG interrupt.  */
2516 	val = qib_read_kreg_port(ppd, krp_errmask);
2517 	qib_write_kreg_port(ppd, krp_errmask,
2518 		val | ERR_MASK_N(IBStatusChanged));
2519 
2520 	/* Always zero until we start messing with SerDes for real */
2521 	return ret;
2522 }
2523 
2524 /**
 * qib_7322_mini_quiet_serdes - set serdes to txidle
 * @ppd: physical port on the qlogic_ib device
2527  * Called when driver is being unloaded
2528  */
2529 static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2530 {
2531 	u64 val;
2532 	unsigned long flags;
2533 
2534 	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2535 
2536 	spin_lock_irqsave(&ppd->lflags_lock, flags);
2537 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2538 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2539 	wake_up(&ppd->cpspec->autoneg_wait);
2540 	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2541 	if (ppd->dd->cspec->r1)
2542 		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2543 
2544 	ppd->cpspec->chase_end = 0;
2545 	if (ppd->cpspec->chase_timer.data) /* if initted */
2546 		del_timer_sync(&ppd->cpspec->chase_timer);
2547 
2548 	/*
2549 	 * Despite the name, actually disables IBC as well. Do it when
2550 	 * we are as sure as possible that no more packets can be
2551 	 * received, following the down and the PCS reset.
	 * The actual disabling happens in qib_7322_mini_pcs_reset(),
2553 	 * along with the PCS being reset.
2554 	 */
2555 	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2556 	qib_7322_mini_pcs_reset(ppd);
2557 
2558 	/*
2559 	 * Update the adjusted counters so the adjustment persists
2560 	 * across driver reload.
2561 	 */
2562 	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2563 	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2564 		struct qib_devdata *dd = ppd->dd;
2565 		u64 diagc;
2566 
2567 		/* enable counter writes */
2568 		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2569 		qib_write_kreg(dd, kr_hwdiagctrl,
2570 			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2571 
2572 		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2573 			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2574 			if (ppd->cpspec->ibdeltainprog)
2575 				val -= val - ppd->cpspec->ibsymsnap;
2576 			val -= ppd->cpspec->ibsymdelta;
2577 			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2578 		}
2579 		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2580 			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2581 			if (ppd->cpspec->ibdeltainprog)
2582 				val -= val - ppd->cpspec->iblnkerrsnap;
2583 			val -= ppd->cpspec->iblnkerrdelta;
2584 			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2585 		}
2586 		if (ppd->cpspec->iblnkdowndelta) {
2587 			val = read_7322_creg32_port(ppd, crp_iblinkdown);
2588 			val += ppd->cpspec->iblnkdowndelta;
2589 			write_7322_creg_port(ppd, crp_iblinkdown, val);
2590 		}
2591 		/*
2592 		 * No need to save ibmalfdelta since IB perfcounters
2593 		 * are cleared on driver reload.
2594 		 */
2595 
2596 		/* and disable counter writes */
2597 		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2598 	}
2599 }
2600 
2601 /**
2602  * qib_setup_7322_setextled - set the state of the two external LEDs
2603  * @ppd: physical port on the qlogic_ib device
2604  * @on: whether the link is up or not
2605  *
 * The exact combo of LEDs, if @on is true, is determined by looking
2607  * at the ibcstatus.
2608  *
2609  * These LEDs indicate the physical and logical state of IB link.
2610  * For this chip (at least with recommended board pinouts), LED1
 * is Yellow (logical state) and LED2 is Green (physical state).
2612  *
2613  * Note:  We try to match the Mellanox HCA LED behavior as best
2614  * we can.  Green indicates physical link state is OK (something is
2615  * plugged in, and we can train).
2616  * Amber indicates the link is logically up (ACTIVE).
2617  * Mellanox further blinks the amber LED to indicate data packet
2618  * activity, but we have no hardware support for that, so it would
2619  * require waking up every 10-20 msecs and checking the counters
2620  * on the chip, and then turning the LED off if appropriate.  That's
2621  * visible overhead, so not something we will do.
2622  */
2623 static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2624 {
2625 	struct qib_devdata *dd = ppd->dd;
2626 	u64 extctl, ledblink = 0, val;
2627 	unsigned long flags;
2628 	int yel, grn;
2629 
2630 	/*
2631 	 * The diags use the LED to indicate diag info, so we leave
2632 	 * the external LED alone when the diags are running.
2633 	 */
2634 	if (dd->diag_client)
2635 		return;
2636 
	/* Allow override of LED display for, e.g., locating a system in a rack */
2638 	if (ppd->led_override) {
2639 		grn = (ppd->led_override & QIB_LED_PHYS);
2640 		yel = (ppd->led_override & QIB_LED_LOG);
2641 	} else if (on) {
2642 		val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2643 		grn = qib_7322_phys_portstate(val) ==
2644 			IB_PHYSPORTSTATE_LINKUP;
2645 		yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2646 	} else {
2647 		grn = 0;
2648 		yel = 0;
2649 	}
2650 
2651 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2652 	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2653 		~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2654 	if (grn) {
2655 		extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2656 		/*
2657 		 * Counts are in chip clock (4ns) periods.
		 * This is roughly 1/16 sec (66.6 ms) on,
2659 		 * 3/16 sec (187.5 ms) off, with packets rcvd.
2660 		 */
2661 		ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2662 			((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
2663 	}
2664 	if (yel)
2665 		extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2666 	dd->cspec->extctrl = extctl;
2667 	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2668 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2669 
2670 	if (ledblink) /* blink the LED on packet receive */
2671 		qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2672 }
2673 
2674 #ifdef CONFIG_INFINIBAND_QIB_DCA
2675 
2676 static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
2677 {
2678 	switch (event) {
2679 	case DCA_PROVIDER_ADD:
2680 		if (dd->flags & QIB_DCA_ENABLED)
2681 			break;
2682 		if (!dca_add_requester(&dd->pcidev->dev)) {
2683 			qib_devinfo(dd->pcidev, "DCA enabled\n");
2684 			dd->flags |= QIB_DCA_ENABLED;
2685 			qib_setup_dca(dd);
2686 		}
2687 		break;
2688 	case DCA_PROVIDER_REMOVE:
2689 		if (dd->flags & QIB_DCA_ENABLED) {
2690 			dca_remove_requester(&dd->pcidev->dev);
2691 			dd->flags &= ~QIB_DCA_ENABLED;
2692 			dd->cspec->dca_ctrl = 0;
2693 			qib_write_kreg(dd, KREG_IDX(DCACtrlA),
2694 				dd->cspec->dca_ctrl);
2695 		}
2696 		break;
2697 	}
2698 	return 0;
2699 }
2700 
2701 static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
2702 {
2703 	struct qib_devdata *dd = rcd->dd;
2704 	struct qib_chip_specific *cspec = dd->cspec;
2705 
2706 	if (!(dd->flags & QIB_DCA_ENABLED))
2707 		return;
2708 	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2709 		const struct dca_reg_map *rmp;
2710 
2711 		cspec->rhdr_cpu[rcd->ctxt] = cpu;
2712 		rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2713 		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2714 		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2715 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2716 		qib_devinfo(dd->pcidev,
2717 			"Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
2718 			(long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2719 		qib_write_kreg(dd, rmp->regno,
2720 			       cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2721 		cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2722 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2723 	}
2724 }
2725 
2726 static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2727 {
2728 	struct qib_devdata *dd = ppd->dd;
2729 	struct qib_chip_specific *cspec = dd->cspec;
2730 	unsigned pidx = ppd->port - 1;
2731 
2732 	if (!(dd->flags & QIB_DCA_ENABLED))
2733 		return;
2734 	if (cspec->sdma_cpu[pidx] != cpu) {
2735 		cspec->sdma_cpu[pidx] = cpu;
2736 		cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2737 			SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2738 			SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2739 		cspec->dca_rcvhdr_ctrl[4] |=
2740 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2741 				(ppd->hw_pidx ?
2742 					SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2743 					SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2744 		qib_devinfo(dd->pcidev,
2745 			"sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2746 			(long long) cspec->dca_rcvhdr_ctrl[4]);
2747 		qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2748 			       cspec->dca_rcvhdr_ctrl[4]);
2749 		cspec->dca_ctrl |= ppd->hw_pidx ?
2750 			SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2751 			SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2752 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2753 	}
2754 }
2755 
2756 static void qib_setup_dca(struct qib_devdata *dd)
2757 {
2758 	struct qib_chip_specific *cspec = dd->cspec;
2759 	int i;
2760 
2761 	for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2762 		cspec->rhdr_cpu[i] = -1;
2763 	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2764 		cspec->sdma_cpu[i] = -1;
2765 	cspec->dca_rcvhdr_ctrl[0] =
2766 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2767 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2768 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2769 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2770 	cspec->dca_rcvhdr_ctrl[1] =
2771 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2772 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2773 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2774 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2775 	cspec->dca_rcvhdr_ctrl[2] =
2776 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2777 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2778 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2779 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2780 	cspec->dca_rcvhdr_ctrl[3] =
2781 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2782 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2783 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2784 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2785 	cspec->dca_rcvhdr_ctrl[4] =
2786 		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2787 		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2788 	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2789 		qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2790 			       cspec->dca_rcvhdr_ctrl[i]);
2791 	for (i = 0; i < cspec->num_msix_entries; i++)
2792 		setup_dca_notifier(dd, &cspec->msix_entries[i]);
2793 }
2794 
2795 static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2796 			     const cpumask_t *mask)
2797 {
2798 	struct qib_irq_notify *n =
2799 		container_of(notify, struct qib_irq_notify, notify);
2800 	int cpu = cpumask_first(mask);
2801 
2802 	if (n->rcv) {
2803 		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2804 		qib_update_rhdrq_dca(rcd, cpu);
2805 	} else {
2806 		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2807 		qib_update_sdma_dca(ppd, cpu);
2808 	}
2809 }
2810 
2811 static void qib_irq_notifier_release(struct kref *ref)
2812 {
2813 	struct qib_irq_notify *n =
2814 		container_of(ref, struct qib_irq_notify, notify.kref);
2815 	struct qib_devdata *dd;
2816 
2817 	if (n->rcv) {
2818 		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2819 		dd = rcd->dd;
2820 	} else {
2821 		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2822 		dd = ppd->dd;
2823 	}
2824 	qib_devinfo(dd->pcidev,
2825 		"release on HCA notify 0x%p n 0x%p\n", ref, n);
2826 	kfree(n);
2827 }
2828 #endif
2829 
2830 /*
2831  * Disable MSIx interrupt if enabled, call generic MSIx code
2832  * to cleanup, and clear pending MSIx interrupts.
2833  * Used for fallback to INTx, after reset, and when MSIx setup fails.
2834  */
2835 static void qib_7322_nomsix(struct qib_devdata *dd)
2836 {
2837 	u64 intgranted;
2838 	int n;
2839 
2840 	dd->cspec->main_int_mask = ~0ULL;
2841 	n = dd->cspec->num_msix_entries;
2842 	if (n) {
2843 		int i;
2844 
2845 		dd->cspec->num_msix_entries = 0;
2846 		for (i = 0; i < n; i++) {
2847 #ifdef CONFIG_INFINIBAND_QIB_DCA
2848 			reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
2849 #endif
2850 			irq_set_affinity_hint(
2851 			  dd->cspec->msix_entries[i].msix.vector, NULL);
2852 			free_cpumask_var(dd->cspec->msix_entries[i].mask);
2853 			free_irq(dd->cspec->msix_entries[i].msix.vector,
2854 			   dd->cspec->msix_entries[i].arg);
2855 		}
2856 		qib_nomsix(dd);
2857 	}
2858 	/* make sure no MSIx interrupts are left pending */
2859 	intgranted = qib_read_kreg64(dd, kr_intgranted);
2860 	if (intgranted)
2861 		qib_write_kreg(dd, kr_intgranted, intgranted);
2862 }
2863 
2864 static void qib_7322_free_irq(struct qib_devdata *dd)
2865 {
2866 	if (dd->cspec->irq) {
2867 		free_irq(dd->cspec->irq, dd);
2868 		dd->cspec->irq = 0;
2869 	}
2870 	qib_7322_nomsix(dd);
2871 }
2872 
2873 static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2874 {
2875 	int i;
2876 
2877 #ifdef CONFIG_INFINIBAND_QIB_DCA
2878 	if (dd->flags & QIB_DCA_ENABLED) {
2879 		dca_remove_requester(&dd->pcidev->dev);
2880 		dd->flags &= ~QIB_DCA_ENABLED;
2881 		dd->cspec->dca_ctrl = 0;
2882 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2883 	}
2884 #endif
2885 
2886 	qib_7322_free_irq(dd);
2887 	kfree(dd->cspec->cntrs);
2888 	kfree(dd->cspec->sendchkenable);
2889 	kfree(dd->cspec->sendgrhchk);
2890 	kfree(dd->cspec->sendibchk);
2891 	kfree(dd->cspec->msix_entries);
2892 	for (i = 0; i < dd->num_pports; i++) {
2893 		unsigned long flags;
2894 		u32 mask = QSFP_GPIO_MOD_PRS_N |
2895 			(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2896 
2897 		kfree(dd->pport[i].cpspec->portcntrs);
2898 		if (dd->flags & QIB_HAS_QSFP) {
2899 			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2900 			dd->cspec->gpio_mask &= ~mask;
2901 			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2902 			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2903 			qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
2904 		}
2905 		if (dd->pport[i].ibport_data.smi_ah)
2906 			ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
2907 	}
2908 }
2909 
2910 /* handle SDMA interrupts */
2911 static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2912 {
2913 	struct qib_pportdata *ppd0 = &dd->pport[0];
2914 	struct qib_pportdata *ppd1 = &dd->pport[1];
2915 	u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2916 		INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2917 	u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2918 		INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2919 
2920 	if (intr0)
2921 		qib_sdma_intr(ppd0);
2922 	if (intr1)
2923 		qib_sdma_intr(ppd1);
2924 
2925 	if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2926 		qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2927 	if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2928 		qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2929 }
2930 
2931 /*
2932  * Set or clear the Send buffer available interrupt enable bit.
2933  */
2934 static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2935 {
2936 	unsigned long flags;
2937 
2938 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
2939 	if (needint)
2940 		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2941 	else
2942 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2943 	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2944 	qib_write_kreg(dd, kr_scratch, 0ULL);
2945 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2946 }
2947 
2948 /*
2949  * Somehow got an interrupt with reserved bits set in interrupt status.
2950  * Print a message so we know it happened, then clear them.
 * Keep the mainline interrupt handler cache-friendly.
2952  */
2953 static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2954 {
	u64 kills;

	kills = istat & ~QIB_I_BITSEXTANT;
	qib_dev_err(dd,
		"Clearing reserved interrupt(s) 0x%016llx\n",
		(unsigned long long) kills);
2962 	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2963 }
2964 
2965 /* keep mainline interrupt handler cache-friendly */
2966 static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2967 {
2968 	u32 gpiostatus;
2969 	int handled = 0;
2970 	int pidx;
2971 
2972 	/*
2973 	 * Boards for this chip currently don't use GPIO interrupts,
2974 	 * so clear by writing GPIOstatus to GPIOclear, and complain
2975 	 * to developer.  To avoid endless repeats, clear
2976 	 * the bits in the mask, since there is some kind of
2977 	 * programming error or chip problem.
2978 	 */
2979 	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2980 	/*
2981 	 * In theory, writing GPIOstatus to GPIOclear could
2982 	 * have a bad side-effect on some diagnostic that wanted
2983 	 * to poll for a status-change, but the various shadows
2984 	 * make that problematic at best. Diags will just suppress
2985 	 * all GPIO interrupts during such tests.
2986 	 */
2987 	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2988 	/*
2989 	 * Check for QSFP MOD_PRS changes
2990 	 * only works for single port if IB1 != pidx1
2991 	 */
2992 	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2993 	     ++pidx) {
2994 		struct qib_pportdata *ppd;
2995 		struct qib_qsfp_data *qd;
2996 		u32 mask;
2997 		if (!dd->pport[pidx].link_speed_supported)
2998 			continue;
2999 		mask = QSFP_GPIO_MOD_PRS_N;
3000 		ppd = dd->pport + pidx;
3001 		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
3002 		if (gpiostatus & dd->cspec->gpio_mask & mask) {
3003 			u64 pins;
3004 			qd = &ppd->cpspec->qsfp_data;
3005 			gpiostatus &= ~mask;
3006 			pins = qib_read_kreg64(dd, kr_extstatus);
3007 			pins >>= SYM_LSB(EXTStatus, GPIOIn);
3008 			if (!(pins & mask)) {
3009 				++handled;
3010 				qd->t_insert = jiffies;
3011 				queue_work(ib_wq, &qd->work);
3012 			}
3013 		}
3014 	}
3015 
3016 	if (gpiostatus && !handled) {
3017 		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
3018 		u32 gpio_irq = mask & gpiostatus;
3019 
3020 		/*
3021 		 * Clear any troublemakers, and update chip from shadow
3022 		 */
3023 		dd->cspec->gpio_mask &= ~gpio_irq;
3024 		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
3025 	}
3026 }
3027 
3028 /*
3029  * Handle errors and unusual events first, separate function
3030  * to improve cache hits for fast path interrupt handling.
3031  */
3032 static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
3033 {
3034 	if (istat & ~QIB_I_BITSEXTANT)
3035 		unknown_7322_ibits(dd, istat);
3036 	if (istat & QIB_I_GPIO)
3037 		unknown_7322_gpio_intr(dd);
3038 	if (istat & QIB_I_C_ERROR) {
3039 		qib_write_kreg(dd, kr_errmask, 0ULL);
3040 		tasklet_schedule(&dd->error_tasklet);
3041 	}
3042 	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
3043 		handle_7322_p_errors(dd->rcd[0]->ppd);
3044 	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
3045 		handle_7322_p_errors(dd->rcd[1]->ppd);
3046 }
3047 
3048 /*
3049  * Dynamically adjust the rcv int timeout for a context based on incoming
3050  * packet rate.
3051  */
3052 static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
3053 {
3054 	struct qib_devdata *dd = rcd->dd;
3055 	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
3056 
3057 	/*
3058 	 * Dynamically adjust idle timeout on chip
3059 	 * based on number of packets processed.
3060 	 */
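	/* Halve when traffic is light; double, capped at rcv_int_timeout, when busy. */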
3061 	if (npkts < rcv_int_count && timeout > 2)
3062 		timeout >>= 1;
3063 	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
3064 		timeout = min(timeout << 1, rcv_int_timeout);
3065 	else
3066 		return;
3067 
3068 	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
3069 	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
3070 }
3071 
3072 /*
3073  * This is the main interrupt handler.
3074  * It will normally only be used for low frequency interrupts but may
3075  * have to handle all interrupts if INTx is enabled or fewer than normal
3076  * MSIx interrupts were allocated.
3077  * This routine should ignore the interrupt bits for any of the
3078  * dedicated MSIx handlers.
3079  */
3080 static irqreturn_t qib_7322intr(int irq, void *data)
3081 {
3082 	struct qib_devdata *dd = data;
3083 	irqreturn_t ret;
3084 	u64 istat;
3085 	u64 ctxtrbits;
3086 	u64 rmask;
3087 	unsigned i;
3088 	u32 npkts;
3089 
3090 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
3091 		/*
3092 		 * This return value is not great, but we do not want the
3093 		 * interrupt core code to remove our interrupt handler
3094 		 * because we don't appear to be handling an interrupt
3095 		 * during a chip reset.
3096 		 */
3097 		ret = IRQ_HANDLED;
3098 		goto bail;
3099 	}
3100 
3101 	istat = qib_read_kreg64(dd, kr_intstatus);
3102 
3103 	if (unlikely(istat == ~0ULL)) {
3104 		qib_bad_intrstatus(dd);
3105 		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
3106 		/* don't know if it was our interrupt or not */
3107 		ret = IRQ_NONE;
3108 		goto bail;
3109 	}
3110 
3111 	istat &= dd->cspec->main_int_mask;
3112 	if (unlikely(!istat)) {
3113 		/* already handled, or shared and not us */
3114 		ret = IRQ_NONE;
3115 		goto bail;
3116 	}
3117 
3118 	qib_stats.sps_ints++;
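	/* saturate rather than wrap, so a nonzero count keeps meaning "interrupts seen" */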
3119 	if (dd->int_counter != (u32) -1)
3120 		dd->int_counter++;
3121 
3122 	/* handle "errors" of various kinds first, device ahead of port */
3123 	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
3124 			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
3125 			      INT_MASK_P(Err, 1))))
3126 		unlikely_7322_intr(dd, istat);
3127 
3128 	/*
3129 	 * Clear the interrupt bits we found set, relatively early, so we
	 * "know" the chip will have seen this by the time we process
3131 	 * the queue, and will re-interrupt if necessary.  The processor
3132 	 * itself won't take the interrupt again until we return.
3133 	 */
3134 	qib_write_kreg(dd, kr_intclear, istat);
3135 
3136 	/*
3137 	 * Handle kernel receive queues before checking for pio buffers
3138 	 * available since receives can overflow; piobuf waiters can afford
3139 	 * a few extra cycles, since they were waiting anyway.
3140 	 */
3141 	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
3142 	if (ctxtrbits) {
3143 		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
3144 			(1ULL << QIB_I_RCVURG_LSB);
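		/*
		 * rmask covers context 0's RcvAvail and RcvUrg bits; shifting
		 * it left once per pass walks both fields a context at a time.
		 */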
3145 		for (i = 0; i < dd->first_user_ctxt; i++) {
3146 			if (ctxtrbits & rmask) {
3147 				ctxtrbits &= ~rmask;
3148 				if (dd->rcd[i])
3149 					qib_kreceive(dd->rcd[i], NULL, &npkts);
3150 			}
3151 			rmask <<= 1;
3152 		}
3153 		if (ctxtrbits) {
3154 			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
3155 				(ctxtrbits >> QIB_I_RCVURG_LSB);
3156 			qib_handle_urcv(dd, ctxtrbits);
3157 		}
3158 	}
3159 
3160 	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
3161 		sdma_7322_intr(dd, istat);
3162 
3163 	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
3164 		qib_ib_piobufavail(dd);
3165 
3166 	ret = IRQ_HANDLED;
3167 bail:
3168 	return ret;
3169 }
3170 
3171 /*
3172  * Dedicated receive packet available interrupt handler.
3173  */
3174 static irqreturn_t qib_7322pintr(int irq, void *data)
3175 {
3176 	struct qib_ctxtdata *rcd = data;
3177 	struct qib_devdata *dd = rcd->dd;
3178 	u32 npkts;
3179 
3180 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3181 		/*
3182 		 * This return value is not great, but we do not want the
3183 		 * interrupt core code to remove our interrupt handler
3184 		 * because we don't appear to be handling an interrupt
3185 		 * during a chip reset.
3186 		 */
3187 		return IRQ_HANDLED;
3188 
3189 	qib_stats.sps_ints++;
3190 	if (dd->int_counter != (u32) -1)
3191 		dd->int_counter++;
3192 
3193 	/* Clear the interrupt bit we expect to be set. */
3194 	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3195 		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3196 
3197 	qib_kreceive(rcd, NULL, &npkts);
3198 
3199 	return IRQ_HANDLED;
3200 }
3201 
3202 /*
3203  * Dedicated Send buffer available interrupt handler.
3204  */
3205 static irqreturn_t qib_7322bufavail(int irq, void *data)
3206 {
3207 	struct qib_devdata *dd = data;
3208 
3209 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3210 		/*
3211 		 * This return value is not great, but we do not want the
3212 		 * interrupt core code to remove our interrupt handler
3213 		 * because we don't appear to be handling an interrupt
3214 		 * during a chip reset.
3215 		 */
3216 		return IRQ_HANDLED;
3217 
3218 	qib_stats.sps_ints++;
3219 	if (dd->int_counter != (u32) -1)
3220 		dd->int_counter++;
3221 
3222 	/* Clear the interrupt bit we expect to be set. */
3223 	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
3224 
3225 	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3226 	if (dd->flags & QIB_INITTED)
3227 		qib_ib_piobufavail(dd);
3228 	else
3229 		qib_wantpiobuf_7322_intr(dd, 0);
3230 
3231 	return IRQ_HANDLED;
3232 }
3233 
3234 /*
3235  * Dedicated Send DMA interrupt handler.
3236  */
3237 static irqreturn_t sdma_intr(int irq, void *data)
3238 {
3239 	struct qib_pportdata *ppd = data;
3240 	struct qib_devdata *dd = ppd->dd;
3241 
3242 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3243 		/*
3244 		 * This return value is not great, but we do not want the
3245 		 * interrupt core code to remove our interrupt handler
3246 		 * because we don't appear to be handling an interrupt
3247 		 * during a chip reset.
3248 		 */
3249 		return IRQ_HANDLED;
3250 
3251 	qib_stats.sps_ints++;
3252 	if (dd->int_counter != (u32) -1)
3253 		dd->int_counter++;
3254 
3255 	/* Clear the interrupt bit we expect to be set. */
3256 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3257 		       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3258 	qib_sdma_intr(ppd);
3259 
3260 	return IRQ_HANDLED;
3261 }
3262 
3263 /*
3264  * Dedicated Send DMA idle interrupt handler.
3265  */
3266 static irqreturn_t sdma_idle_intr(int irq, void *data)
3267 {
3268 	struct qib_pportdata *ppd = data;
3269 	struct qib_devdata *dd = ppd->dd;
3270 
3271 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3272 		/*
3273 		 * This return value is not great, but we do not want the
3274 		 * interrupt core code to remove our interrupt handler
3275 		 * because we don't appear to be handling an interrupt
3276 		 * during a chip reset.
3277 		 */
3278 		return IRQ_HANDLED;
3279 
3280 	qib_stats.sps_ints++;
3281 	if (dd->int_counter != (u32) -1)
3282 		dd->int_counter++;
3283 
3284 	/* Clear the interrupt bit we expect to be set. */
3285 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3286 		       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3287 	qib_sdma_intr(ppd);
3288 
3289 	return IRQ_HANDLED;
3290 }
3291 
3292 /*
3293  * Dedicated Send DMA progress interrupt handler.
3294  */
3295 static irqreturn_t sdma_progress_intr(int irq, void *data)
3296 {
3297 	struct qib_pportdata *ppd = data;
3298 	struct qib_devdata *dd = ppd->dd;
3299 
3300 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3301 		/*
3302 		 * This return value is not great, but we do not want the
3303 		 * interrupt core code to remove our interrupt handler
3304 		 * because we don't appear to be handling an interrupt
3305 		 * during a chip reset.
3306 		 */
3307 		return IRQ_HANDLED;
3308 
3309 	qib_stats.sps_ints++;
3310 	if (dd->int_counter != (u32) -1)
3311 		dd->int_counter++;
3312 
3313 	/* Clear the interrupt bit we expect to be set. */
3314 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3315 		       INT_MASK_P(SDmaProgress, 1) :
3316 		       INT_MASK_P(SDmaProgress, 0));
3317 	qib_sdma_intr(ppd);
3318 
3319 	return IRQ_HANDLED;
3320 }
3321 
3322 /*
3323  * Dedicated Send DMA cleanup interrupt handler.
3324  */
3325 static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3326 {
3327 	struct qib_pportdata *ppd = data;
3328 	struct qib_devdata *dd = ppd->dd;
3329 
3330 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3331 		/*
3332 		 * This return value is not great, but we do not want the
3333 		 * interrupt core code to remove our interrupt handler
3334 		 * because we don't appear to be handling an interrupt
3335 		 * during a chip reset.
3336 		 */
3337 		return IRQ_HANDLED;
3338 
3339 	qib_stats.sps_ints++;
3340 	if (dd->int_counter != (u32) -1)
3341 		dd->int_counter++;
3342 
3343 	/* Clear the interrupt bit we expect to be set. */
3344 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3345 		       INT_MASK_PM(SDmaCleanupDone, 1) :
3346 		       INT_MASK_PM(SDmaCleanupDone, 0));
3347 	qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3348 
3349 	return IRQ_HANDLED;
3350 }
3351 
3352 #ifdef CONFIG_INFINIBAND_QIB_DCA
3353 
3354 static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
3355 {
3356 	if (!m->dca)
3357 		return;
3358 	qib_devinfo(dd->pcidev,
3359 		"Disabling notifier on HCA %d irq %d\n",
3360 		dd->unit,
3361 		m->msix.vector);
3362 	irq_set_affinity_notifier(
3363 		m->msix.vector,
3364 		NULL);
3365 	m->notifier = NULL;
3366 }
3367 
3368 static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
3369 {
3370 	struct qib_irq_notify *n;
3371 
3372 	if (!m->dca)
3373 		return;
3374 	n = kzalloc(sizeof(*n), GFP_KERNEL);
3375 	if (n) {
3376 		int ret;
3377 
3378 		m->notifier = n;
3379 		n->notify.irq = m->msix.vector;
3380 		n->notify.notify = qib_irq_notifier_notify;
3381 		n->notify.release = qib_irq_notifier_release;
3382 		n->arg = m->arg;
3383 		n->rcv = m->rcv;
3384 		qib_devinfo(dd->pcidev,
3385 			"set notifier irq %d rcv %d notify %p\n",
3386 			n->notify.irq, n->rcv, &n->notify);
3387 		ret = irq_set_affinity_notifier(
3388 				n->notify.irq,
3389 				&n->notify);
3390 		if (ret) {
3391 			m->notifier = NULL;
3392 			kfree(n);
3393 		}
3394 	}
3395 }
3396 
3397 #endif
3398 
3399 /*
3400  * Set up our chip-specific interrupt handler.
 * The interrupt type has already been set up, so
3402  * we just need to do the registration and error checking.
3403  * If we are using MSIx interrupts, we may fall back to
3404  * INTx later, if the interrupt handler doesn't get called
3405  * within 1/2 second (see verify_interrupt()).
3406  */
3407 static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3408 {
3409 	int ret, i, msixnum;
3410 	u64 redirect[6];
3411 	u64 mask;
3412 	const struct cpumask *local_mask;
3413 	int firstcpu, secondcpu = 0, currrcvcpu = 0;
3414 
3415 	if (!dd->num_pports)
3416 		return;
3417 
3418 	if (clearpend) {
3419 		/*
3420 		 * if not switching interrupt types, be sure interrupts are
3421 		 * disabled, and then clear anything pending at this point,
3422 		 * because we are starting clean.
3423 		 */
3424 		qib_7322_set_intr_state(dd, 0);
3425 
3426 		/* clear the reset error, init error/hwerror mask */
3427 		qib_7322_init_hwerrors(dd);
3428 
3429 		/* clear any interrupt bits that might be set */
3430 		qib_write_kreg(dd, kr_intclear, ~0ULL);
3431 
3432 		/* make sure no pending MSIx intr, and clear diag reg */
3433 		qib_write_kreg(dd, kr_intgranted, ~0ULL);
3434 		qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3435 	}
3436 
3437 	if (!dd->cspec->num_msix_entries) {
3438 		/* Try to get INTx interrupt */
3439 try_intx:
3440 		if (!dd->pcidev->irq) {
3441 			qib_dev_err(dd,
3442 				"irq is 0, BIOS error?  Interrupts won't work\n");
3443 			goto bail;
3444 		}
3445 		ret = request_irq(dd->pcidev->irq, qib_7322intr,
3446 				  IRQF_SHARED, QIB_DRV_NAME, dd);
3447 		if (ret) {
3448 			qib_dev_err(dd,
3449 				"Couldn't setup INTx interrupt (irq=%d): %d\n",
3450 				dd->pcidev->irq, ret);
3451 			goto bail;
3452 		}
3453 		dd->cspec->irq = dd->pcidev->irq;
3454 		dd->cspec->main_int_mask = ~0ULL;
3455 		goto bail;
3456 	}
3457 
3458 	/* Try to get MSIx interrupts */
	memset(redirect, 0, sizeof(redirect));
3460 	mask = ~0ULL;
3461 	msixnum = 0;
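	/*
	 * Prefer CPUs local to the HCA's PCI bus when setting IRQ affinity;
	 * if that set is empty or spans every online CPU, fall back to the
	 * topology mask of core 0.
	 */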
3462 	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3463 	firstcpu = cpumask_first(local_mask);
3464 	if (firstcpu >= nr_cpu_ids ||
3465 			cpumask_weight(local_mask) == num_online_cpus()) {
3466 		local_mask = topology_core_cpumask(0);
3467 		firstcpu = cpumask_first(local_mask);
3468 	}
3469 	if (firstcpu < nr_cpu_ids) {
3470 		secondcpu = cpumask_next(firstcpu, local_mask);
3471 		if (secondcpu >= nr_cpu_ids)
3472 			secondcpu = firstcpu;
3473 		currrcvcpu = secondcpu;
3474 	}
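	/*
	 * Receive-context vectors get affinity hints spread across the local
	 * CPUs starting at secondcpu; all other vectors are hinted to firstcpu.
	 */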
3475 	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3476 		irq_handler_t handler;
3477 		void *arg;
3478 		u64 val;
3479 		int lsb, reg, sh;
3480 #ifdef CONFIG_INFINIBAND_QIB_DCA
3481 		int dca = 0;
3482 #endif
3483 
3484 		dd->cspec->msix_entries[msixnum].
3485 			name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
3486 			= '\0';
3487 		if (i < ARRAY_SIZE(irq_table)) {
3488 			if (irq_table[i].port) {
3489 				/* skip if for a non-configured port */
3490 				if (irq_table[i].port > dd->num_pports)
3491 					continue;
3492 				arg = dd->pport + irq_table[i].port - 1;
3493 			} else
3494 				arg = dd;
3495 #ifdef CONFIG_INFINIBAND_QIB_DCA
3496 			dca = irq_table[i].dca;
3497 #endif
3498 			lsb = irq_table[i].lsb;
3499 			handler = irq_table[i].handler;
3500 			snprintf(dd->cspec->msix_entries[msixnum].name,
3501 				sizeof(dd->cspec->msix_entries[msixnum].name)
3502 				 - 1,
3503 				QIB_DRV_NAME "%d%s", dd->unit,
3504 				irq_table[i].name);
3505 		} else {
3506 			unsigned ctxt;
3507 
3508 			ctxt = i - ARRAY_SIZE(irq_table);
3509 			/* per krcvq context receive interrupt */
3510 			arg = dd->rcd[ctxt];
3511 			if (!arg)
3512 				continue;
3513 			if (qib_krcvq01_no_msi && ctxt < 2)
3514 				continue;
3515 #ifdef CONFIG_INFINIBAND_QIB_DCA
3516 			dca = 1;
3517 #endif
3518 			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3519 			handler = qib_7322pintr;
3520 			snprintf(dd->cspec->msix_entries[msixnum].name,
3521 				sizeof(dd->cspec->msix_entries[msixnum].name)
3522 				 - 1,
3523 				QIB_DRV_NAME "%d (kctx)", dd->unit);
3524 		}
3525 		ret = request_irq(
3526 			dd->cspec->msix_entries[msixnum].msix.vector,
3527 			handler, 0, dd->cspec->msix_entries[msixnum].name,
3528 			arg);
3529 		if (ret) {
3530 			/*
3531 			 * Shouldn't happen since the enable said we could
			 * have as many as we are trying to set up here.
3533 			 */
3534 			qib_dev_err(dd,
3535 				"Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3536 				msixnum,
3537 				dd->cspec->msix_entries[msixnum].msix.vector,
3538 				ret);
3539 			qib_7322_nomsix(dd);
3540 			goto try_intx;
3541 		}
3542 		dd->cspec->msix_entries[msixnum].arg = arg;
3543 #ifdef CONFIG_INFINIBAND_QIB_DCA
3544 		dd->cspec->msix_entries[msixnum].dca = dca;
3545 		dd->cspec->msix_entries[msixnum].rcv =
3546 			handler == qib_7322pintr;
3547 #endif
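		/*
		 * Steer this source's status bit (lsb) to MSIx vector msixnum
		 * via the redirect registers; bits still set in mask stay with
		 * the general interrupt handler.
		 */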
3548 		if (lsb >= 0) {
3549 			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3550 			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3551 				SYM_LSB(IntRedirect0, vec1);
3552 			mask &= ~(1ULL << lsb);
3553 			redirect[reg] |= ((u64) msixnum) << sh;
3554 		}
3555 		val = qib_read_kreg64(dd, 2 * msixnum + 1 +
3556 			(QIB_7322_MsixTable_OFFS / sizeof(u64)));
3557 		if (firstcpu < nr_cpu_ids &&
3558 			zalloc_cpumask_var(
3559 				&dd->cspec->msix_entries[msixnum].mask,
3560 				GFP_KERNEL)) {
3561 			if (handler == qib_7322pintr) {
3562 				cpumask_set_cpu(currrcvcpu,
3563 					dd->cspec->msix_entries[msixnum].mask);
3564 				currrcvcpu = cpumask_next(currrcvcpu,
3565 					local_mask);
3566 				if (currrcvcpu >= nr_cpu_ids)
3567 					currrcvcpu = secondcpu;
3568 			} else {
3569 				cpumask_set_cpu(firstcpu,
3570 					dd->cspec->msix_entries[msixnum].mask);
3571 			}
3572 			irq_set_affinity_hint(
3573 				dd->cspec->msix_entries[msixnum].msix.vector,
3574 				dd->cspec->msix_entries[msixnum].mask);
3575 		}
3576 		msixnum++;
3577 	}
3578 	/* Initialize the vector mapping */
3579 	for (i = 0; i < ARRAY_SIZE(redirect); i++)
3580 		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3581 	dd->cspec->main_int_mask = mask;
3582 	tasklet_init(&dd->error_tasklet, qib_error_tasklet,
3583 		(unsigned long)dd);
3584 bail:;
3585 }
3586 
3587 /**
3588  * qib_7322_boardname - fill in the board name and note features
3589  * @dd: the qlogic_ib device
3590  *
3591  * info will be based on the board revision register
3592  */
3593 static unsigned qib_7322_boardname(struct qib_devdata *dd)
3594 {
3595 	/* Will need enumeration of board-types here */
3596 	char *n;
3597 	u32 boardid, namelen;
3598 	unsigned features = DUAL_PORT_CAP;
3599 
3600 	boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3601 
3602 	switch (boardid) {
3603 	case 0:
3604 		n = "InfiniPath_QLE7342_Emulation";
3605 		break;
3606 	case 1:
3607 		n = "InfiniPath_QLE7340";
3608 		dd->flags |= QIB_HAS_QSFP;
3609 		features = PORT_SPD_CAP;
3610 		break;
3611 	case 2:
3612 		n = "InfiniPath_QLE7342";
3613 		dd->flags |= QIB_HAS_QSFP;
3614 		break;
3615 	case 3:
3616 		n = "InfiniPath_QMI7342";
3617 		break;
3618 	case 4:
3619 		n = "InfiniPath_Unsupported7342";
3620 		qib_dev_err(dd, "Unsupported version of QMH7342\n");
3621 		features = 0;
3622 		break;
3623 	case BOARD_QMH7342:
3624 		n = "InfiniPath_QMH7342";
3625 		features = 0x24;
3626 		break;
3627 	case BOARD_QME7342:
3628 		n = "InfiniPath_QME7342";
3629 		break;
3630 	case 8:
3631 		n = "InfiniPath_QME7362";
3632 		dd->flags |= QIB_HAS_QSFP;
3633 		break;
3634 	case 15:
3635 		n = "InfiniPath_QLE7342_TEST";
3636 		dd->flags |= QIB_HAS_QSFP;
3637 		break;
3638 	default:
3639 		n = "InfiniPath_QLE73xy_UNKNOWN";
3640 		qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3641 		break;
3642 	}
3643 	dd->board_atten = 1; /* index into txdds_Xdr */
3644 
3645 	namelen = strlen(n) + 1;
3646 	dd->boardname = kmalloc(namelen, GFP_KERNEL);
3647 	if (!dd->boardname)
3648 		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
3649 	else
3650 		snprintf(dd->boardname, namelen, "%s", n);
3651 
3652 	snprintf(dd->boardversion, sizeof(dd->boardversion),
3653 		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3654 		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3655 		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
3656 		 dd->majrev, dd->minrev,
3657 		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
3658 
3659 	if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3660 		qib_devinfo(dd->pcidev,
3661 			"IB%u: Forced to single port mode by module parameter\n",
3662 			dd->unit);
3663 		features &= PORT_SPD_CAP;
3664 	}
3665 
3666 	return features;
3667 }
3668 
3669 /*
3670  * This routine sleeps, so it can only be called from user context, not
3671  * from interrupt context.
3672  */
3673 static int qib_do_7322_reset(struct qib_devdata *dd)
3674 {
3675 	u64 val;
3676 	u64 *msix_vecsave;
3677 	int i, msix_entries, ret = 1;
3678 	u16 cmdval;
3679 	u8 int_line, clinesz;
3680 	unsigned long flags;
3681 
3682 	/* Use dev_err so it shows up in logs, etc. */
3683 	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3684 
3685 	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3686 
3687 	msix_entries = dd->cspec->num_msix_entries;
3688 
3689 	/* no interrupts till re-initted */
3690 	qib_7322_set_intr_state(dd, 0);
3691 
3692 	if (msix_entries) {
3693 		qib_7322_nomsix(dd);
3694 		/* can be up to 512 bytes, too big for stack */
3695 		msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
3696 			sizeof(u64), GFP_KERNEL);
3697 		if (!msix_vecsave)
3698 			qib_dev_err(dd, "No mem to save MSIx data\n");
3699 	} else
3700 		msix_vecsave = NULL;
3701 
3702 	/*
3703 	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3704 	 * info that is set up by the BIOS, so we have to save and restore
3705 	 * it ourselves.   There is some risk something could change it,
3706 	 * after we save it, but since we have disabled the MSIx, it
3707 	 * shouldn't be touched...
3708 	 */
3709 	for (i = 0; i < msix_entries; i++) {
3710 		u64 vecaddr, vecdata;
3711 		vecaddr = qib_read_kreg64(dd, 2 * i +
3712 				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3713 		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3714 				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3715 		if (msix_vecsave) {
3716 			msix_vecsave[2 * i] = vecaddr;
3717 			/* save it without the masked bit set */
3718 			msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3719 		}
3720 	}
3721 
3722 	dd->pport->cpspec->ibdeltainprog = 0;
3723 	dd->pport->cpspec->ibsymdelta = 0;
3724 	dd->pport->cpspec->iblnkerrdelta = 0;
3725 	dd->pport->cpspec->ibmalfdelta = 0;
3726 	dd->int_counter = 0; /* so we check interrupts work again */
3727 
3728 	/*
3729 	 * Keep chip from being accessed until we are ready.  Use
3730 	 * writeq() directly, to allow the write even though QIB_PRESENT
3731 	 * isn't set.
3732 	 */
3733 	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3734 	dd->flags |= QIB_DOING_RESET;
3735 	val = dd->control | QLOGIC_IB_C_RESET;
3736 	writeq(val, &dd->kregbase[kr_control]);
3737 
3738 	for (i = 1; i <= 5; i++) {
3739 		/*
3740 		 * Allow MBIST, etc. to complete; longer on each retry.
3741 		 * We sometimes get machine checks from bus timeout if no
3742 		 * response, so for now, make it *really* long.
3743 		 */
3744 		msleep(1000 + (1 + i) * 3000);
3745 
3746 		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3747 
3748 		/*
3749 		 * Use readq directly, so we don't need to mark it as PRESENT
3750 		 * until we get a successful indication that all is well.
3751 		 */
3752 		val = readq(&dd->kregbase[kr_revision]);
3753 		if (val == dd->revision)
3754 			break;
3755 		if (i == 5) {
3756 			qib_dev_err(dd,
3757 				"Failed to initialize after reset, unusable\n");
3758 			ret = 0;
			goto bail;
3760 		}
3761 	}
3762 
3763 	dd->flags |= QIB_PRESENT; /* it's back */
3764 
3765 	if (msix_entries) {
3766 		/* restore the MSIx vector address and data if saved above */
3767 		for (i = 0; i < msix_entries; i++) {
3768 			dd->cspec->msix_entries[i].msix.entry = i;
3769 			if (!msix_vecsave || !msix_vecsave[2 * i])
3770 				continue;
3771 			qib_write_kreg(dd, 2 * i +
3772 				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3773 				msix_vecsave[2 * i]);
3774 			qib_write_kreg(dd, 1 + 2 * i +
3775 				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3776 				msix_vecsave[1 + 2 * i]);
3777 		}
3778 	}
3779 
3780 	/* initialize the remaining registers.  */
3781 	for (i = 0; i < dd->num_pports; ++i)
3782 		write_7322_init_portregs(&dd->pport[i]);
3783 	write_7322_initregs(dd);
3784 
3785 	if (qib_pcie_params(dd, dd->lbus_width,
3786 			    &dd->cspec->num_msix_entries,
3787 			    dd->cspec->msix_entries))
3788 		qib_dev_err(dd,
3789 			"Reset failed to setup PCIe or interrupts; continuing anyway\n");
3790 
3791 	qib_setup_7322_interrupt(dd, 1);
3792 
3793 	for (i = 0; i < dd->num_pports; ++i) {
3794 		struct qib_pportdata *ppd = &dd->pport[i];
3795 
3796 		spin_lock_irqsave(&ppd->lflags_lock, flags);
3797 		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3798 		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3799 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3800 	}
3801 
3802 bail:
3803 	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3804 	kfree(msix_vecsave);
3805 	return ret;
3806 }
3807 
3808 /**
3809  * qib_7322_put_tid - write a TID to the chip
3810  * @dd: the qlogic_ib device
3811  * @tidptr: pointer to the expected TID (in chip) to update
 * @type: RCVHQ_RCV_TYPE_EAGER (eager) or RCVHQ_RCV_TYPE_EXPECTED (expected)
3813  * @pa: physical address of in memory buffer; tidinvalid if freeing
3814  */
3815 static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3816 			     u32 type, unsigned long pa)
3817 {
3818 	if (!(dd->flags & QIB_PRESENT))
3819 		return;
3820 	if (pa != dd->tidinvalid) {
3821 		u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3822 
3823 		/* paranoia checks */
3824 		if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3825 			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3826 				    pa);
3827 			return;
3828 		}
3829 		if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3830 			qib_dev_err(dd,
3831 				"Physical page address 0x%lx larger than supported\n",
3832 				pa);
3833 			return;
3834 		}
3835 
3836 		if (type == RCVHQ_RCV_TYPE_EAGER)
3837 			chippa |= dd->tidtemplate;
3838 		else /* for now, always full 4KB page */
3839 			chippa |= IBA7322_TID_SZ_4K;
3840 		pa = chippa;
3841 	}
3842 	writeq(pa, tidptr);
3843 	mmiowb();
3844 }
3845 
3846 /**
3847  * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3848  * @dd: the qlogic_ib device
 * @rcd: the ctxt
3850  *
3851  * clear all TID entries for a ctxt, expected and eager.
3852  * Used from qib_close().
3853  */
3854 static void qib_7322_clear_tids(struct qib_devdata *dd,
3855 				struct qib_ctxtdata *rcd)
3856 {
3857 	u64 __iomem *tidbase;
3858 	unsigned long tidinv;
3859 	u32 ctxt;
3860 	int i;
3861 
3862 	if (!dd->kregbase || !rcd)
3863 		return;
3864 
3865 	ctxt = rcd->ctxt;
3866 
3867 	tidinv = dd->tidinvalid;
3868 	tidbase = (u64 __iomem *)
3869 		((char __iomem *) dd->kregbase +
3870 		 dd->rcvtidbase +
3871 		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3872 
3873 	for (i = 0; i < dd->rcvtidcnt; i++)
3874 		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3875 				 tidinv);
3876 
3877 	tidbase = (u64 __iomem *)
3878 		((char __iomem *) dd->kregbase +
3879 		 dd->rcvegrbase +
3880 		 rcd->rcvegr_tid_base * sizeof(*tidbase));
3881 
3882 	for (i = 0; i < rcd->rcvegrcnt; i++)
3883 		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3884 				 tidinv);
3885 }
3886 
3887 /**
3888  * qib_7322_tidtemplate - setup constants for TID updates
3889  * @dd: the qlogic_ib device
3890  *
 * We set up constants that we use a lot, to avoid recalculating each time.
3892  */
3893 static void qib_7322_tidtemplate(struct qib_devdata *dd)
3894 {
3895 	/*
3896 	 * For now, we always allocate 4KB buffers (at init) so we can
3897 	 * receive max size packets.  We may want a module parameter to
3898 	 * specify 2KB or 4KB and/or make it per port instead of per device
3899 	 * for those who want to reduce memory footprint.  Note that the
3900 	 * rcvhdrentsize size must be large enough to hold the largest
3901 	 * IB header (currently 96 bytes) that we expect to handle (plus of
3902 	 * course the 2 dwords of RHF).
3903 	 */
3904 	if (dd->rcvegrbufsize == 2048)
3905 		dd->tidtemplate = IBA7322_TID_SZ_2K;
3906 	else if (dd->rcvegrbufsize == 4096)
3907 		dd->tidtemplate = IBA7322_TID_SZ_4K;
3908 	dd->tidinvalid = 0;
3909 }
3910 
/**
 * qib_7322_get_base_info - set chip-specific flags for user code
 * @rcd: the qlogic_ib ctxt
 * @kinfo: qib_base_info pointer
 *
 * We set the PCIE flag because the lower bandwidth on PCIe vs
 * HyperTransport can affect some user packet algorithms.
 */
3920 static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3921 				  struct qib_base_info *kinfo)
3922 {
3923 	kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3924 		QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3925 		QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3926 	if (rcd->dd->cspec->r1)
3927 		kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3928 	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3929 		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3930 
3931 	return 0;
3932 }
3933 
3934 static struct qib_message_header *
3935 qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3936 {
3937 	u32 offset = qib_hdrget_offset(rhf_addr);
3938 
3939 	return (struct qib_message_header *)
3940 		(rhf_addr - dd->rhf_offset + offset);
3941 }
3942 
3943 /*
3944  * Configure number of contexts.
3945  */
3946 static void qib_7322_config_ctxts(struct qib_devdata *dd)
3947 {
3948 	unsigned long flags;
3949 	u32 nchipctxts;
3950 
3951 	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3952 	dd->cspec->numctxts = nchipctxts;
3953 	if (qib_n_krcv_queues > 1 && dd->num_pports) {
3954 		dd->first_user_ctxt = NUM_IB_PORTS +
3955 			(qib_n_krcv_queues - 1) * dd->num_pports;
3956 		if (dd->first_user_ctxt > nchipctxts)
3957 			dd->first_user_ctxt = nchipctxts;
3958 		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3959 	} else {
3960 		dd->first_user_ctxt = NUM_IB_PORTS;
3961 		dd->n_krcv_queues = 1;
3962 	}
3963 
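	/*
	 * Without a qib_cfgctxts override, size the context count to cover
	 * roughly one user context per online CPU, rounded up to the chip's
	 * supported 6/10/18 context configurations.
	 */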
3964 	if (!qib_cfgctxts) {
3965 		int nctxts = dd->first_user_ctxt + num_online_cpus();
3966 
3967 		if (nctxts <= 6)
3968 			dd->ctxtcnt = 6;
3969 		else if (nctxts <= 10)
3970 			dd->ctxtcnt = 10;
3971 		else if (nctxts <= nchipctxts)
3972 			dd->ctxtcnt = nchipctxts;
3973 	} else if (qib_cfgctxts < dd->num_pports)
3974 		dd->ctxtcnt = dd->num_pports;
3975 	else if (qib_cfgctxts <= nchipctxts)
3976 		dd->ctxtcnt = qib_cfgctxts;
3977 	if (!dd->ctxtcnt) /* none of the above, set to max */
3978 		dd->ctxtcnt = nchipctxts;
3979 
3980 	/*
3981 	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
3982 	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3983 	 * Lock to be paranoid about later motion, etc.
3984 	 */
3985 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3986 	if (dd->ctxtcnt > 10)
3987 		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3988 	else if (dd->ctxtcnt > 6)
3989 		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3990 	/* else configure for default 6 receive ctxts */
3991 
3992 	/* The XRC opcode is 5. */
3993 	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3994 
3995 	/*
3996 	 * RcvCtrl *must* be written here so that the
3997 	 * chip understands how to change rcvegrcnt below.
3998 	 */
3999 	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4000 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4001 
4002 	/* kr_rcvegrcnt changes based on the number of contexts enabled */
4003 	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
4004 	if (qib_rcvhdrcnt)
4005 		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
4006 	else
4007 		dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
4008 				    dd->num_pports > 1 ? 1024U : 2048U);
4009 }
4010 
4011 static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
4012 {
4013 
4014 	int lsb, ret = 0;
4015 	u64 maskr; /* right-justified mask */
4016 
4017 	switch (which) {
4018 
4019 	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
4020 		ret = ppd->link_width_enabled;
4021 		goto done;
4022 
4023 	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
4024 		ret = ppd->link_width_active;
4025 		goto done;
4026 
4027 	case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
4028 		ret = ppd->link_speed_enabled;
4029 		goto done;
4030 
4031 	case QIB_IB_CFG_SPD: /* Get current Link spd */
4032 		ret = ppd->link_speed_active;
4033 		goto done;
4034 
4035 	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
4036 		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4037 		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4038 		break;
4039 
4040 	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
4041 		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4042 		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4043 		break;
4044 
4045 	case QIB_IB_CFG_LINKLATENCY:
4046 		ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
4047 			SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
4048 		goto done;
4049 
4050 	case QIB_IB_CFG_OP_VLS:
4051 		ret = ppd->vls_operational;
4052 		goto done;
4053 
4054 	case QIB_IB_CFG_VL_HIGH_CAP:
4055 		ret = 16;
4056 		goto done;
4057 
4058 	case QIB_IB_CFG_VL_LOW_CAP:
4059 		ret = 16;
4060 		goto done;
4061 
4062 	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4063 		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4064 				OverrunThreshold);
4065 		goto done;
4066 
4067 	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4068 		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4069 				PhyerrThreshold);
4070 		goto done;
4071 
4072 	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4073 		/* will only take effect when the link state changes */
4074 		ret = (ppd->cpspec->ibcctrl_a &
4075 		       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
4076 			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
4077 		goto done;
4078 
4079 	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
4080 		lsb = IBA7322_IBC_HRTBT_LSB;
4081 		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4082 		break;
4083 
4084 	case QIB_IB_CFG_PMA_TICKS:
4085 		/*
4086 		 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
4087 		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
4088 		 */
4089 		if (ppd->link_speed_active == QIB_IB_QDR)
4090 			ret = 3;
4091 		else if (ppd->link_speed_active == QIB_IB_DDR)
4092 			ret = 1;
4093 		else
4094 			ret = 0;
4095 		goto done;
4096 
4097 	default:
4098 		ret = -EINVAL;
4099 		goto done;
4100 	}
4101 	ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
4102 done:
4103 	return ret;
4104 }
4105 
4106 /*
4107  * Below again cribbed liberally from older version. Do not lean
4108  * heavily on it.
4109  */
4110 #define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
4111 #define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
4112 	| (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
4113 
4114 static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4115 {
4116 	struct qib_devdata *dd = ppd->dd;
4117 	u64 maskr; /* right-justified mask */
4118 	int lsb, ret = 0;
4119 	u16 lcmd, licmd;
4120 	unsigned long flags;
4121 
4122 	switch (which) {
4123 	case QIB_IB_CFG_LIDLMC:
4124 		/*
4125 		 * Set LID and LMC. Combined to avoid possible hazard
4126 		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
4127 		 */
4128 		lsb = IBA7322_IBC_DLIDLMC_SHIFT;
4129 		maskr = IBA7322_IBC_DLIDLMC_MASK;
4130 		/*
4131 		 * For header-checking, the SLID in the packet will
4132 		 * be masked with SendIBSLMCMask, and compared
4133 		 * with SendIBSLIDAssignMask. Make sure we do not
4134 		 * set any bits not covered by the mask, or we get
4135 		 * false-positives.
4136 		 */
4137 		qib_write_kreg_port(ppd, krp_sendslid,
4138 				    val & (val >> 16) & SendIBSLIDAssignMask);
4139 		qib_write_kreg_port(ppd, krp_sendslidmask,
4140 				    (val >> 16) & SendIBSLMCMask);
4141 		break;
4142 
4143 	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
4144 		ppd->link_width_enabled = val;
4145 		/* convert IB value to chip register value */
4146 		if (val == IB_WIDTH_1X)
4147 			val = 0;
4148 		else if (val == IB_WIDTH_4X)
4149 			val = 1;
4150 		else
4151 			val = 3;
4152 		maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
4153 		lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
4154 		break;
4155 
4156 	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
4157 		/*
4158 		 * As with width, only write the actual register if the
4159 		 * link is currently down, otherwise takes effect on next
4160 		 * link change.  Since setting is being explicitly requested
4161 		 * (via MAD or sysfs), clear autoneg failure status if speed
4162 		 * autoneg is enabled.
4163 		 */
4164 		ppd->link_speed_enabled = val;
4165 		val <<= IBA7322_IBC_SPEED_LSB;
4166 		maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
4167 			IBA7322_IBC_MAX_SPEED_MASK;
4168 		if (val & (val - 1)) {
			/* Multiple speeds enabled */
4170 			val |= IBA7322_IBC_IBTA_1_2_MASK |
4171 				IBA7322_IBC_MAX_SPEED_MASK;
4172 			spin_lock_irqsave(&ppd->lflags_lock, flags);
4173 			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4174 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4175 		} else if (val & IBA7322_IBC_SPEED_QDR)
4176 			val |= IBA7322_IBC_IBTA_1_2_MASK;
4177 		/* IBTA 1.2 mode + min/max + speed bits are contiguous */
4178 		lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
4179 		break;
4180 
4181 	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
4182 		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4183 		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4184 		break;
4185 
4186 	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
4187 		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4188 		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4189 		break;
4190 
4191 	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4192 		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4193 				  OverrunThreshold);
4194 		if (maskr != val) {
4195 			ppd->cpspec->ibcctrl_a &=
4196 				~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
4197 			ppd->cpspec->ibcctrl_a |= (u64) val <<
4198 				SYM_LSB(IBCCtrlA_0, OverrunThreshold);
4199 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
4200 					    ppd->cpspec->ibcctrl_a);
4201 			qib_write_kreg(dd, kr_scratch, 0ULL);
4202 		}
4203 		goto bail;
4204 
4205 	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4206 		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4207 				  PhyerrThreshold);
4208 		if (maskr != val) {
4209 			ppd->cpspec->ibcctrl_a &=
4210 				~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
4211 			ppd->cpspec->ibcctrl_a |= (u64) val <<
4212 				SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
4213 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
4214 					    ppd->cpspec->ibcctrl_a);
4215 			qib_write_kreg(dd, kr_scratch, 0ULL);
4216 		}
4217 		goto bail;
4218 
4219 	case QIB_IB_CFG_PKEYS: /* update pkeys */
4220 		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4221 			((u64) ppd->pkeys[2] << 32) |
4222 			((u64) ppd->pkeys[3] << 48);
4223 		qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4224 		goto bail;
4225 
4226 	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4227 		/* will only take effect when the link state changes */
4228 		if (val == IB_LINKINITCMD_POLL)
4229 			ppd->cpspec->ibcctrl_a &=
4230 				~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4231 		else /* SLEEP */
4232 			ppd->cpspec->ibcctrl_a |=
4233 				SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4234 		qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4235 		qib_write_kreg(dd, kr_scratch, 0ULL);
4236 		goto bail;
4237 
4238 	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
4239 		/*
4240 		 * Update our housekeeping variables, and set IBC max
4241 		 * size, same as init code; max IBC is max we allow in
4242 		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
4243 		 * Set even if it's unchanged, print debug message only
4244 		 * on changes.
4245 		 */
4246 		val = (ppd->ibmaxlen >> 2) + 1;
4247 		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4248 		ppd->cpspec->ibcctrl_a |= (u64)val <<
4249 			SYM_LSB(IBCCtrlA_0, MaxPktLen);
4250 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
4251 				    ppd->cpspec->ibcctrl_a);
4252 		qib_write_kreg(dd, kr_scratch, 0ULL);
4253 		goto bail;
4254 
4255 	case QIB_IB_CFG_LSTATE: /* set the IB link state */
4256 		switch (val & 0xffff0000) {
4257 		case IB_LINKCMD_DOWN:
4258 			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
4259 			ppd->cpspec->ibmalfusesnap = 1;
4260 			ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4261 				crp_errlink);
4262 			if (!ppd->cpspec->ibdeltainprog &&
4263 			    qib_compat_ddr_negotiate) {
4264 				ppd->cpspec->ibdeltainprog = 1;
4265 				ppd->cpspec->ibsymsnap =
4266 					read_7322_creg32_port(ppd,
4267 							      crp_ibsymbolerr);
4268 				ppd->cpspec->iblnkerrsnap =
4269 					read_7322_creg32_port(ppd,
4270 						      crp_iblinkerrrecov);
4271 			}
4272 			break;
4273 
4274 		case IB_LINKCMD_ARMED:
4275 			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4276 			if (ppd->cpspec->ibmalfusesnap) {
4277 				ppd->cpspec->ibmalfusesnap = 0;
4278 				ppd->cpspec->ibmalfdelta +=
4279 					read_7322_creg32_port(ppd,
4280 							      crp_errlink) -
4281 					ppd->cpspec->ibmalfsnap;
4282 			}
4283 			break;
4284 
4285 		case IB_LINKCMD_ACTIVE:
4286 			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4287 			break;
4288 
4289 		default:
4290 			ret = -EINVAL;
4291 			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4292 			goto bail;
4293 		}
4294 		switch (val & 0xffff) {
4295 		case IB_LINKINITCMD_NOP:
4296 			licmd = 0;
4297 			break;
4298 
4299 		case IB_LINKINITCMD_POLL:
4300 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4301 			break;
4302 
4303 		case IB_LINKINITCMD_SLEEP:
4304 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4305 			break;
4306 
4307 		case IB_LINKINITCMD_DISABLE:
4308 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4309 			ppd->cpspec->chase_end = 0;
4310 			/*
4311 			 * stop state chase counter and timer, if running.
			 * wait for pending timer, but don't clear .data (ppd)!
4313 			 */
4314 			if (ppd->cpspec->chase_timer.expires) {
4315 				del_timer_sync(&ppd->cpspec->chase_timer);
4316 				ppd->cpspec->chase_timer.expires = 0;
4317 			}
4318 			break;
4319 
4320 		default:
4321 			ret = -EINVAL;
4322 			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4323 				    val & 0xffff);
4324 			goto bail;
4325 		}
4326 		qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4327 		goto bail;
4328 
4329 	case QIB_IB_CFG_OP_VLS:
4330 		if (ppd->vls_operational != val) {
4331 			ppd->vls_operational = val;
4332 			set_vls(ppd);
4333 		}
4334 		goto bail;
4335 
4336 	case QIB_IB_CFG_VL_HIGH_LIMIT:
4337 		qib_write_kreg_port(ppd, krp_highprio_limit, val);
4338 		goto bail;
4339 
4340 	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
4341 		if (val > 3) {
4342 			ret = -EINVAL;
4343 			goto bail;
4344 		}
4345 		lsb = IBA7322_IBC_HRTBT_LSB;
4346 		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4347 		break;
4348 
4349 	case QIB_IB_CFG_PORT:
4350 		/* val is the port number of the switch we are connected to. */
4351 		if (ppd->dd->cspec->r1) {
4352 			cancel_delayed_work(&ppd->cpspec->ipg_work);
4353 			ppd->cpspec->ipg_tries = 0;
4354 		}
4355 		goto bail;
4356 
4357 	default:
4358 		ret = -EINVAL;
4359 		goto bail;
4360 	}
4361 	ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4362 	ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4363 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4364 	qib_write_kreg(dd, kr_scratch, 0);
4365 bail:
4366 	return ret;
4367 }
4368 
4369 static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4370 {
4371 	int ret = 0;
4372 	u64 val, ctrlb;
4373 
4374 	/* only IBC loopback, may add serdes and xgxs loopbacks later */
4375 	if (!strncmp(what, "ibc", 3)) {
4376 		ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4377 						       Loopback);
4378 		val = 0; /* disable heart beat, so link will come up */
4379 		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4380 			 ppd->dd->unit, ppd->port);
4381 	} else if (!strncmp(what, "off", 3)) {
4382 		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4383 							Loopback);
4384 		/* enable heart beat again */
4385 		val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4386 		qib_devinfo(ppd->dd->pcidev,
4387 			"Disabling IB%u:%u IBC loopback (normal)\n",
4388 			ppd->dd->unit, ppd->port);
4389 	} else
4390 		ret = -EINVAL;
4391 	if (!ret) {
4392 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
4393 				    ppd->cpspec->ibcctrl_a);
4394 		ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4395 					     << IBA7322_IBC_HRTBT_LSB);
4396 		ppd->cpspec->ibcctrl_b = ctrlb | val;
4397 		qib_write_kreg_port(ppd, krp_ibcctrl_b,
4398 				    ppd->cpspec->ibcctrl_b);
4399 		qib_write_kreg(ppd->dd, kr_scratch, 0);
4400 	}
4401 	return ret;
4402 }
4403 
4404 static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4405 			   struct ib_vl_weight_elem *vl)
4406 {
4407 	unsigned i;
4408 
4409 	for (i = 0; i < 16; i++, regno++, vl++) {
4410 		u32 val = qib_read_kreg_port(ppd, regno);
4411 
4412 		vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4413 			SYM_RMASK(LowPriority0_0, VirtualLane);
4414 		vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4415 			SYM_RMASK(LowPriority0_0, Weight);
4416 	}
4417 }
4418 
4419 static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4420 			   struct ib_vl_weight_elem *vl)
4421 {
4422 	unsigned i;
4423 
4424 	for (i = 0; i < 16; i++, regno++, vl++) {
4425 		u64 val;
4426 
4427 		val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4428 			SYM_LSB(LowPriority0_0, VirtualLane)) |
4429 		      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4430 			SYM_LSB(LowPriority0_0, Weight));
4431 		qib_write_kreg_port(ppd, regno, val);
4432 	}
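	/* make sure the IB VL arbiter is enabled once weights are in place */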
4433 	if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4434 		struct qib_devdata *dd = ppd->dd;
4435 		unsigned long flags;
4436 
4437 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
4438 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4439 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4440 		qib_write_kreg(dd, kr_scratch, 0);
4441 		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4442 	}
4443 }
4444 
4445 static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4446 {
4447 	switch (which) {
4448 	case QIB_IB_TBL_VL_HIGH_ARB:
4449 		get_vl_weights(ppd, krp_highprio_0, t);
4450 		break;
4451 
4452 	case QIB_IB_TBL_VL_LOW_ARB:
4453 		get_vl_weights(ppd, krp_lowprio_0, t);
4454 		break;
4455 
4456 	default:
4457 		return -EINVAL;
4458 	}
4459 	return 0;
4460 }
4461 
4462 static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4463 {
4464 	switch (which) {
4465 	case QIB_IB_TBL_VL_HIGH_ARB:
4466 		set_vl_weights(ppd, krp_highprio_0, t);
4467 		break;
4468 
4469 	case QIB_IB_TBL_VL_LOW_ARB:
4470 		set_vl_weights(ppd, krp_lowprio_0, t);
4471 		break;
4472 
4473 	default:
4474 		return -EINVAL;
4475 	}
4476 	return 0;
4477 }
4478 
4479 static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4480 				    u32 updegr, u32 egrhd, u32 npkts)
4481 {
4482 	/*
4483 	 * Need to write timeout register before updating rcvhdrhead to ensure
4484 	 * that the timer is enabled on reception of a packet.
4485 	 */
4486 	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4487 		adjust_rcv_timeout(rcd, npkts);
4488 	if (updegr)
4489 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4490 	mmiowb();
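	/* the double rcvhdrhead write appears intentional (chip quirk); keep both */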
4491 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4492 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4493 	mmiowb();
4494 }
4495 
4496 static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4497 {
4498 	u32 head, tail;
4499 
4500 	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4501 	if (rcd->rcvhdrtail_kvaddr)
4502 		tail = qib_get_rcvhdrtail(rcd);
4503 	else
4504 		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4505 	return head == tail;
4506 }
4507 
4508 #define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4509 	QIB_RCVCTRL_CTXT_DIS | \
4510 	QIB_RCVCTRL_TIDFLOW_ENB | \
4511 	QIB_RCVCTRL_TIDFLOW_DIS | \
4512 	QIB_RCVCTRL_TAILUPD_ENB | \
4513 	QIB_RCVCTRL_TAILUPD_DIS | \
4514 	QIB_RCVCTRL_INTRAVAIL_ENB | \
4515 	QIB_RCVCTRL_INTRAVAIL_DIS | \
4516 	QIB_RCVCTRL_BP_ENB | \
4517 	QIB_RCVCTRL_BP_DIS)
4518 
4519 #define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4520 	QIB_RCVCTRL_CTXT_DIS | \
4521 	QIB_RCVCTRL_PKEY_DIS | \
4522 	QIB_RCVCTRL_PKEY_ENB)
4523 
4524 /*
4525  * Modify the RCVCTRL register in chip-specific way. This
4526  * is a function because bit positions and (future) register
 * location is chip-specific, but the needed operations are
4528  * generic. <op> is a bit-mask because we often want to
4529  * do multiple modifications.
4530  */
4531 static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4532 			     int ctxt)
4533 {
4534 	struct qib_devdata *dd = ppd->dd;
4535 	struct qib_ctxtdata *rcd;
4536 	u64 mask, val;
4537 	unsigned long flags;
4538 
4539 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4540 
4541 	if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4542 		dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4543 	if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4544 		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4545 	if (op & QIB_RCVCTRL_TAILUPD_ENB)
4546 		dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4547 	if (op & QIB_RCVCTRL_TAILUPD_DIS)
4548 		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4549 	if (op & QIB_RCVCTRL_PKEY_ENB)
4550 		ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4551 	if (op & QIB_RCVCTRL_PKEY_DIS)
4552 		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
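	/* ctxt < 0 means apply the operation to all configured contexts */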
4553 	if (ctxt < 0) {
4554 		mask = (1ULL << dd->ctxtcnt) - 1;
4555 		rcd = NULL;
4556 	} else {
4557 		mask = (1ULL << ctxt);
4558 		rcd = dd->rcd[ctxt];
4559 	}
4560 	if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4561 		ppd->p_rcvctrl |=
4562 			(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4563 		if (!(dd->flags & QIB_NODMA_RTAIL)) {
4564 			op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4565 			dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4566 		}
4567 		/* Write these registers before the context is enabled. */
4568 		qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4569 				    rcd->rcvhdrqtailaddr_phys);
4570 		qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4571 				    rcd->rcvhdrq_phys);
4572 		rcd->seq_cnt = 1;
4573 	}
4574 	if (op & QIB_RCVCTRL_CTXT_DIS)
4575 		ppd->p_rcvctrl &=
4576 			~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4577 	if (op & QIB_RCVCTRL_BP_ENB)
4578 		dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4579 	if (op & QIB_RCVCTRL_BP_DIS)
4580 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4581 	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4582 		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4583 	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4584 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4585 	/*
4586 	 * Decide which registers to write depending on the ops enabled.
4587 	 * Special case is "flush" (no bits set at all)
4588 	 * which needs to write both.
4589 	 */
4590 	if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4591 		qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4592 	if (op == 0 || (op & RCVCTRL_PORT_MODS))
4593 		qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4594 	if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4595 		/*
4596 		 * Init the context registers also; if we were
4597 		 * disabled, tail and head should both be zero
4598 		 * already from the enable, but since we don't
4599 		 * know, we have to do it explicitly.
4600 		 */
4601 		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4602 		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4603 
4604 		/* be sure enabling write seen; hd/tl should be 0 */
4605 		(void) qib_read_kreg32(dd, kr_scratch);
4606 		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4607 		dd->rcd[ctxt]->head = val;
4608 		/* If kctxt, interrupt on next receive. */
4609 		if (ctxt < dd->first_user_ctxt)
4610 			val |= dd->rhdrhead_intr_off;
4611 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4612 	} else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4613 		dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4614 		/* arm rcv interrupt */
4615 		val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4616 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4617 	}
4618 	if (op & QIB_RCVCTRL_CTXT_DIS) {
4619 		unsigned f;
4620 
4621 		/* Now that the context is disabled, clear these registers. */
4622 		if (ctxt >= 0) {
4623 			qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4624 			qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4625 			for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4626 				qib_write_ureg(dd, ur_rcvflowtable + f,
4627 					       TIDFLOW_ERRBITS, ctxt);
4628 		} else {
4629 			unsigned i;
4630 
4631 			for (i = 0; i < dd->cfgctxts; i++) {
4632 				qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4633 						    i, 0);
4634 				qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4635 				for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4636 					qib_write_ureg(dd, ur_rcvflowtable + f,
4637 						       TIDFLOW_ERRBITS, i);
4638 			}
4639 		}
4640 	}
4641 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4642 }
4643 
4644 /*
4645  * Modify the SENDCTRL register in chip-specific way. This
4646  * is a function where there are multiple such registers with
4647  * slightly different layouts.
4648  * The chip doesn't allow back-to-back sendctrl writes, so write
4649  * the scratch register after writing sendctrl.
4650  *
4651  * Which register is written depends on the operation.
4652  * Most operate on the common register, while
4653  * SEND_ENB and SEND_DIS operate on the per-port ones.
4654  * SEND_ENB is included in common because it can change SPCL_TRIG
4655  */
4656 #define SENDCTRL_COMMON_MODS (\
4657 	QIB_SENDCTRL_CLEAR | \
4658 	QIB_SENDCTRL_AVAIL_DIS | \
4659 	QIB_SENDCTRL_AVAIL_ENB | \
4660 	QIB_SENDCTRL_AVAIL_BLIP | \
4661 	QIB_SENDCTRL_DISARM | \
4662 	QIB_SENDCTRL_DISARM_ALL | \
4663 	QIB_SENDCTRL_SEND_ENB)
4664 
4665 #define SENDCTRL_PORT_MODS (\
4666 	QIB_SENDCTRL_CLEAR | \
4667 	QIB_SENDCTRL_SEND_ENB | \
4668 	QIB_SENDCTRL_SEND_DIS | \
4669 	QIB_SENDCTRL_FLUSH)
4670 
4671 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4672 {
4673 	struct qib_devdata *dd = ppd->dd;
4674 	u64 tmp_dd_sendctrl;
4675 	unsigned long flags;
4676 
4677 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
4678 
4679 	/* First the dd ones that are "sticky", saved in shadow */
4680 	if (op & QIB_SENDCTRL_CLEAR)
4681 		dd->sendctrl = 0;
4682 	if (op & QIB_SENDCTRL_AVAIL_DIS)
4683 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4684 	else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4685 		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4686 		if (dd->flags & QIB_USE_SPCL_TRIG)
4687 			dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4688 	}
4689 
4690 	/* Then the ppd ones that are "sticky", saved in shadow */
4691 	if (op & QIB_SENDCTRL_SEND_DIS)
4692 		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4693 	else if (op & QIB_SENDCTRL_SEND_ENB)
4694 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4695 
4696 	if (op & QIB_SENDCTRL_DISARM_ALL) {
4697 		u32 i, last;
4698 
4699 		tmp_dd_sendctrl = dd->sendctrl;
4700 		last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4701 		/*
4702 		 * Disarm any buffers that are not yet launched,
4703 		 * disabling updates until done.
4704 		 */
4705 		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4706 		for (i = 0; i < last; i++) {
4707 			qib_write_kreg(dd, kr_sendctrl,
4708 				       tmp_dd_sendctrl |
4709 				       SYM_MASK(SendCtrl, Disarm) | i);
4710 			qib_write_kreg(dd, kr_scratch, 0);
4711 		}
4712 	}
4713 
4714 	if (op & QIB_SENDCTRL_FLUSH) {
4715 		u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4716 
4717 		/*
4718 		 * Now drain all the fifos.  The Abort bit should never be
4719 		 * needed, so for now, at least, we don't use it.
4720 		 */
4721 		tmp_ppd_sendctrl |=
4722 			SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4723 			SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4724 			SYM_MASK(SendCtrl_0, TxeBypassIbc);
4725 		qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4726 		qib_write_kreg(dd, kr_scratch, 0);
4727 	}
4728 
4729 	tmp_dd_sendctrl = dd->sendctrl;
4730 
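	/* the buffer number to disarm is carried in the low bits of op */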
4731 	if (op & QIB_SENDCTRL_DISARM)
4732 		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4733 			((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4734 			 SYM_LSB(SendCtrl, DisarmSendBuf));
4735 	if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4736 	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4737 		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4738 
4739 	if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4740 		qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4741 		qib_write_kreg(dd, kr_scratch, 0);
4742 	}
4743 
4744 	if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4745 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4746 		qib_write_kreg(dd, kr_scratch, 0);
4747 	}
4748 
4749 	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4750 		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4751 		qib_write_kreg(dd, kr_scratch, 0);
4752 	}
4753 
4754 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4755 
4756 	if (op & QIB_SENDCTRL_FLUSH) {
4757 		u32 v;
4758 		/*
4759 		 * ensure writes have hit chip, then do a few
4760 		 * more reads, to allow DMA of pioavail registers
4761 		 * to occur, so in-memory copy is in sync with
4762 		 * the chip.  Not always safe to sleep.
4763 		 */
4764 		v = qib_read_kreg32(dd, kr_scratch);
4765 		qib_write_kreg(dd, kr_scratch, v);
4766 		v = qib_read_kreg32(dd, kr_scratch);
4767 		qib_write_kreg(dd, kr_scratch, v);
4768 		qib_read_kreg32(dd, kr_scratch);
4769 	}
4770 }
4771 
4772 #define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4773 #define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4774 #define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4775 
4776 /**
4777  * qib_portcntr_7322 - read a per-port chip counter
4778  * @ppd: the qlogic_ib pport
 * @reg: the counter to read (not a chip offset)
4780  */
4781 static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4782 {
4783 	struct qib_devdata *dd = ppd->dd;
4784 	u64 ret = 0ULL;
4785 	u16 creg;
4786 	/* 0xffff for unimplemented or synthesized counters */
4787 	static const u32 xlator[] = {
4788 		[QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4789 		[QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4790 		[QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4791 		[QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4792 		[QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4793 		[QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4794 		[QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4795 		[QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4796 		[QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4797 		[QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4798 		[QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4799 		[QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4800 		[QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed  for 7322 */
4801 		[QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4802 		[QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4803 		[QIBPORTCNTR_ERRICRC] = crp_erricrc,
4804 		[QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4805 		[QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4806 		[QIBPORTCNTR_BADFORMAT] = crp_badformat,
4807 		[QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4808 		[QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4809 		[QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4810 		[QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4811 		[QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4812 		[QIBPORTCNTR_ERRLINK] = crp_errlink,
4813 		[QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4814 		[QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4815 		[QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4816 		[QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4817 		[QIBPORTCNTR_ERRPKEY] = crp_errpkey,
		/*
		 * The next 3 aren't really counters, but were implemented
		 * as counters in older chips, so they are still accessed as
		 * counters by this code.
		 */
4823 		[QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4824 		[QIBPORTCNTR_PSSTART] = krp_psstart,
4825 		[QIBPORTCNTR_PSSTAT] = krp_psstat,
4826 		/* pseudo-counter, summed for all ports */
4827 		[QIBPORTCNTR_KHDROVFL] = 0xffff,
4828 	};
4829 
4830 	if (reg >= ARRAY_SIZE(xlator)) {
4831 		qib_devinfo(ppd->dd->pcidev,
4832 			 "Unimplemented portcounter %u\n", reg);
4833 		goto done;
4834 	}
4835 	creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4836 
4837 	/* handle non-counters and special cases first */
4838 	if (reg == QIBPORTCNTR_KHDROVFL) {
4839 		int i;
4840 
4841 		/* sum over all kernel contexts (skip if mini_init) */
4842 		for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4843 			struct qib_ctxtdata *rcd = dd->rcd[i];
4844 
4845 			if (!rcd || rcd->ppd != ppd)
4846 				continue;
4847 			ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4848 		}
4849 		goto done;
4850 	} else if (reg == QIBPORTCNTR_RXDROPPKT) {
4851 		/*
4852 		 * Used as part of the synthesis of port_rcv_errors
4853 		 * in the verbs code for IBTA counters.  Not needed for 7322,
4854 		 * because all the errors are already counted by other cntrs.
4855 		 */
4856 		goto done;
4857 	} else if (reg == QIBPORTCNTR_PSINTERVAL ||
4858 		   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4859 		/* were counters in older chips, now per-port kernel regs */
4860 		ret = qib_read_kreg_port(ppd, creg);
4861 		goto done;
4862 	}
4863 
4864 	/*
4865 	 * Only fast increment counters are 64 bits; use 32 bit reads to
4866 	 * avoid two independent reads when on Opteron.
4867 	 */
4868 	if (xlator[reg] & _PORT_64BIT_FLAG)
4869 		ret = read_7322_creg_port(ppd, creg);
4870 	else
4871 		ret = read_7322_creg32_port(ppd, creg);
4872 	if (creg == crp_ibsymbolerr) {
4873 		if (ppd->cpspec->ibdeltainprog)
4874 			ret -= ret - ppd->cpspec->ibsymsnap;
4875 		ret -= ppd->cpspec->ibsymdelta;
4876 	} else if (creg == crp_iblinkerrrecov) {
4877 		if (ppd->cpspec->ibdeltainprog)
4878 			ret -= ret - ppd->cpspec->iblnkerrsnap;
4879 		ret -= ppd->cpspec->iblnkerrdelta;
4880 	} else if (creg == crp_errlink)
4881 		ret -= ppd->cpspec->ibmalfdelta;
4882 	else if (creg == crp_iblinkdown)
4883 		ret += ppd->cpspec->iblnkdowndelta;
4884 done:
4885 	return ret;
4886 }
4887 
4888 /*
4889  * Device counter names (not port-specific), one line per stat,
4890  * single string.  Used by utilities like ipathstats to print the stats
4891  * in a way which works for different versions of drivers, without changing
4892  * the utility.  Names need to be 12 chars or less (w/o newline), for proper
4893  * display by utility.
4894  * Non-error counters are first.
 * Start of "error" counters is indicated by a leading "E " on the first
4896  * "error" counter, and doesn't count in label length.
4897  * The EgrOvfl list needs to be last so we truncate them at the configured
4898  * context count for the device.
4899  * cntr7322indices contains the corresponding register indices.
4900  */
4901 static const char cntr7322names[] =
4902 	"Interrupts\n"
4903 	"HostBusStall\n"
4904 	"E RxTIDFull\n"
4905 	"RxTIDInvalid\n"
4906 	"RxTIDFloDrop\n" /* 7322 only */
4907 	"Ctxt0EgrOvfl\n"
4908 	"Ctxt1EgrOvfl\n"
4909 	"Ctxt2EgrOvfl\n"
4910 	"Ctxt3EgrOvfl\n"
4911 	"Ctxt4EgrOvfl\n"
4912 	"Ctxt5EgrOvfl\n"
4913 	"Ctxt6EgrOvfl\n"
4914 	"Ctxt7EgrOvfl\n"
4915 	"Ctxt8EgrOvfl\n"
4916 	"Ctxt9EgrOvfl\n"
4917 	"Ctx10EgrOvfl\n"
4918 	"Ctx11EgrOvfl\n"
4919 	"Ctx12EgrOvfl\n"
4920 	"Ctx13EgrOvfl\n"
4921 	"Ctx14EgrOvfl\n"
4922 	"Ctx15EgrOvfl\n"
4923 	"Ctx16EgrOvfl\n"
4924 	"Ctx17EgrOvfl\n"
4925 	;
4926 
4927 static const u32 cntr7322indices[] = {
4928 	cr_lbint | _PORT_64BIT_FLAG,
4929 	cr_lbstall | _PORT_64BIT_FLAG,
4930 	cr_tidfull,
4931 	cr_tidinvalid,
4932 	cr_rxtidflowdrop,
4933 	cr_base_egrovfl + 0,
4934 	cr_base_egrovfl + 1,
4935 	cr_base_egrovfl + 2,
4936 	cr_base_egrovfl + 3,
4937 	cr_base_egrovfl + 4,
4938 	cr_base_egrovfl + 5,
4939 	cr_base_egrovfl + 6,
4940 	cr_base_egrovfl + 7,
4941 	cr_base_egrovfl + 8,
4942 	cr_base_egrovfl + 9,
4943 	cr_base_egrovfl + 10,
4944 	cr_base_egrovfl + 11,
4945 	cr_base_egrovfl + 12,
4946 	cr_base_egrovfl + 13,
4947 	cr_base_egrovfl + 14,
4948 	cr_base_egrovfl + 15,
4949 	cr_base_egrovfl + 16,
4950 	cr_base_egrovfl + 17,
4951 };
4952 
4953 /*
4954  * same as cntr7322names and cntr7322indices, but for port-specific counters.
4955  * portcntr7322indices is somewhat complicated by some registers needing
4956  * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4957  */
4958 static const char portcntr7322names[] =
4959 	"TxPkt\n"
4960 	"TxFlowPkt\n"
4961 	"TxWords\n"
4962 	"RxPkt\n"
4963 	"RxFlowPkt\n"
4964 	"RxWords\n"
4965 	"TxFlowStall\n"
4966 	"TxDmaDesc\n"  /* 7220 and 7322-only */
4967 	"E RxDlidFltr\n"  /* 7220 and 7322-only */
4968 	"IBStatusChng\n"
4969 	"IBLinkDown\n"
4970 	"IBLnkRecov\n"
4971 	"IBRxLinkErr\n"
4972 	"IBSymbolErr\n"
4973 	"RxLLIErr\n"
4974 	"RxBadFormat\n"
4975 	"RxBadLen\n"
4976 	"RxBufOvrfl\n"
4977 	"RxEBP\n"
4978 	"RxFlowCtlErr\n"
4979 	"RxICRCerr\n"
4980 	"RxLPCRCerr\n"
4981 	"RxVCRCerr\n"
4982 	"RxInvalLen\n"
4983 	"RxInvalPKey\n"
4984 	"RxPktDropped\n"
4985 	"TxBadLength\n"
4986 	"TxDropped\n"
4987 	"TxInvalLen\n"
4988 	"TxUnderrun\n"
4989 	"TxUnsupVL\n"
4990 	"RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4991 	"RxVL15Drop\n"
4992 	"RxVlErr\n"
4993 	"XcessBufOvfl\n"
4994 	"RxQPBadCtxt\n" /* 7322-only from here down */
4995 	"TXBadHeader\n"
4996 	;
4997 
4998 static const u32 portcntr7322indices[] = {
4999 	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
5000 	crp_pktsendflow,
5001 	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
5002 	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
5003 	crp_pktrcvflowctrl,
5004 	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
5005 	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
5006 	crp_txsdmadesc | _PORT_64BIT_FLAG,
5007 	crp_rxdlidfltr,
5008 	crp_ibstatuschange,
5009 	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
5010 	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
5011 	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
5012 	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
5013 	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
5014 	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
5015 	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
5016 	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
5017 	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
5018 	crp_rcvflowctrlviol,
5019 	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
5020 	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
5021 	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
5022 	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
5023 	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
5024 	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
5025 	crp_txminmaxlenerr,
5026 	crp_txdroppedpkt,
5027 	crp_txlenerr,
5028 	crp_txunderrun,
5029 	crp_txunsupvl,
5030 	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
5031 	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
5032 	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
5033 	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
5034 	crp_rxqpinvalidctxt,
5035 	crp_txhdrerr,
5036 };
5037 
5038 /* do all the setup to make the counter reads efficient later */
5039 static void init_7322_cntrnames(struct qib_devdata *dd)
5040 {
5041 	int i, j = 0;
5042 	char *s;
5043 
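	/*
	 * Count names until the list ends or until one EgrOvfl entry per
	 * configured context has been seen, so the per-context EgrOvfl
	 * counters are truncated at dd->cfgctxts.
	 */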
5044 	for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
5045 	     i++) {
5046 		/* we always have at least one counter before the egrovfl */
5047 		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
5048 			j = 1;
5049 		s = strchr(s + 1, '\n');
5050 		if (s && j)
5051 			j++;
5052 	}
5053 	dd->cspec->ncntrs = i;
5054 	if (!s)
5055 		/* full list; size is without terminating null */
5056 		dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
5057 	else
5058 		dd->cspec->cntrnamelen = 1 + s - cntr7322names;
5059 	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
5060 		* sizeof(u64), GFP_KERNEL);
5061 	if (!dd->cspec->cntrs)
5062 		qib_dev_err(dd, "Failed allocation for counters\n");
5063 
5064 	for (i = 0, s = (char *)portcntr7322names; s; i++)
5065 		s = strchr(s + 1, '\n');
5066 	dd->cspec->nportcntrs = i - 1;
5067 	dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
5068 	for (i = 0; i < dd->num_pports; ++i) {
5069 		dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
5070 			* sizeof(u64), GFP_KERNEL);
5071 		if (!dd->pport[i].cpspec->portcntrs)
5072 			qib_dev_err(dd,
5073 				"Failed allocation for portcounters\n");
5074 	}
5075 }
5076 
5077 static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
5078 			      u64 **cntrp)
5079 {
5080 	u32 ret;
5081 
5082 	if (namep) {
5083 		ret = dd->cspec->cntrnamelen;
5084 		if (pos >= ret)
5085 			ret = 0; /* final read after getting everything */
5086 		else
5087 			*namep = (char *) cntr7322names;
5088 	} else {
5089 		u64 *cntr = dd->cspec->cntrs;
5090 		int i;
5091 
5092 		ret = dd->cspec->ncntrs * sizeof(u64);
5093 		if (!cntr || pos >= ret) {
5094 			/* everything read, or couldn't get memory */
5095 			ret = 0;
5096 			goto done;
5097 		}
5098 		*cntrp = cntr;
5099 		for (i = 0; i < dd->cspec->ncntrs; i++)
5100 			if (cntr7322indices[i] & _PORT_64BIT_FLAG)
5101 				*cntr++ = read_7322_creg(dd,
5102 							 cntr7322indices[i] &
5103 							 _PORT_CNTR_IDXMASK);
5104 			else
5105 				*cntr++ = read_7322_creg32(dd,
5106 							   cntr7322indices[i]);
5107 	}
5108 done:
5109 	return ret;
5110 }
5111 
5112 static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
5113 				  char **namep, u64 **cntrp)
5114 {
5115 	u32 ret;
5116 
5117 	if (namep) {
5118 		ret = dd->cspec->portcntrnamelen;
5119 		if (pos >= ret)
5120 			ret = 0; /* final read after getting everything */
5121 		else
5122 			*namep = (char *)portcntr7322names;
5123 	} else {
5124 		struct qib_pportdata *ppd = &dd->pport[port];
5125 		u64 *cntr = ppd->cpspec->portcntrs;
5126 		int i;
5127 
5128 		ret = dd->cspec->nportcntrs * sizeof(u64);
5129 		if (!cntr || pos >= ret) {
5130 			/* everything read, or couldn't get memory */
5131 			ret = 0;
5132 			goto done;
5133 		}
5134 		*cntrp = cntr;
5135 		for (i = 0; i < dd->cspec->nportcntrs; i++) {
5136 			if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
5137 				*cntr++ = qib_portcntr_7322(ppd,
5138 					portcntr7322indices[i] &
5139 					_PORT_CNTR_IDXMASK);
5140 			else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
5141 				*cntr++ = read_7322_creg_port(ppd,
5142 					   portcntr7322indices[i] &
5143 					    _PORT_CNTR_IDXMASK);
5144 			else
5145 				*cntr++ = read_7322_creg32_port(ppd,
5146 					   portcntr7322indices[i]);
5147 		}
5148 	}
5149 done:
5150 	return ret;
5151 }
5152 
5153 /**
5154  * qib_get_7322_faststats - get word counters from chip before they overflow
 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
 *
 * VESTIGIAL: the IBA7322 has no "small fast counters", so the only
 * real purpose of this function is to maintain the notion of
 * "active time", which in turn is only logged into the eeprom,
 * which we don't have yet for 7322-based boards.
5161  *
5162  * called from add_timer
5163  */
5164 static void qib_get_7322_faststats(unsigned long opaque)
5165 {
5166 	struct qib_devdata *dd = (struct qib_devdata *) opaque;
5167 	struct qib_pportdata *ppd;
5168 	unsigned long flags;
5169 	u64 traffic_wds;
5170 	int pidx;
5171 
5172 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5173 		ppd = dd->pport + pidx;
5174 
		/*
		 * If the port isn't enabled or operational, or diags are
		 * running (which can cause memory diags to fail), skip
		 * this port this time.
		 */
5180 		if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
5181 		    || dd->diag_client)
5182 			continue;
5183 
5184 		/*
5185 		 * Maintain an activity timer, based on traffic
5186 		 * exceeding a threshold, so we need to check the word-counts
5187 		 * even if they are 64-bit.
5188 		 */
5189 		traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
5190 			qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
5191 		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5192 		traffic_wds -= ppd->dd->traffic_wds;
5193 		ppd->dd->traffic_wds += traffic_wds;
5194 		if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
5195 			atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
5196 		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
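		/*
		 * Once past qdr_dfe_time with the link up at QDR, clear
		 * qdr_dfe_on and write the initial static adaptation value.
		 */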
5197 		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5198 						QIB_IB_QDR) &&
5199 		    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
5200 				    QIBL_LINKACTIVE)) &&
5201 		    ppd->cpspec->qdr_dfe_time &&
5202 		    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5203 			ppd->cpspec->qdr_dfe_on = 0;
5204 
5205 			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
5206 					    ppd->dd->cspec->r1 ?
5207 					    QDR_STATIC_ADAPT_INIT_R1 :
5208 					    QDR_STATIC_ADAPT_INIT);
5209 			force_h1(ppd);
5210 		}
5211 	}
5212 	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
5213 }
5214 
5215 /*
 * If we were using MSIx, try to fall back to INTx.
5217  */
5218 static int qib_7322_intr_fallback(struct qib_devdata *dd)
5219 {
5220 	if (!dd->cspec->num_msix_entries)
5221 		return 0; /* already using INTx */
5222 
5223 	qib_devinfo(dd->pcidev,
5224 		"MSIx interrupt not detected, trying INTx interrupts\n");
5225 	qib_7322_nomsix(dd);
5226 	qib_enable_intx(dd->pcidev);
5227 	qib_setup_7322_interrupt(dd, 0);
5228 	return 1;
5229 }
5230 
5231 /*
5232  * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
5233  * than resetting the IBC or external link state, and useful in some
5234  * cases to cause some retraining.  To do this right, we reset IBC
 * as well, then return to the previous state (which may still be in reset).
 * NOTE: some callers of this "know" this writes the current value
 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
5238  * check all callers.
5239  */
5240 static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
5241 {
5242 	u64 val;
5243 	struct qib_devdata *dd = ppd->dd;
5244 	const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
5245 		SYM_MASK(IBPCSConfig_0, xcv_treset) |
5246 		SYM_MASK(IBPCSConfig_0, tx_rx_reset);
5247 
5248 	val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
5249 	qib_write_kreg(dd, kr_hwerrmask,
5250 		       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
5251 	qib_write_kreg_port(ppd, krp_ibcctrl_a,
5252 			    ppd->cpspec->ibcctrl_a &
5253 			    ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
5254 
5255 	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
5256 	qib_read_kreg32(dd, kr_scratch);
5257 	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
5258 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
5259 	qib_write_kreg(dd, kr_scratch, 0ULL);
5260 	qib_write_kreg(dd, kr_hwerrclear,
5261 		       SYM_MASK(HwErrClear, statusValidNoEopClear));
5262 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
5263 }
5264 
5265 /*
5266  * This code for non-IBTA-compliant IB speed negotiation is only known to
5267  * work for the SDR to DDR transition, and only between an HCA and a switch
5268  * with recent firmware.  It is based on observed heuristics, rather than
5269  * actual knowledge of the non-compliant speed negotiation.
5270  * It has a number of hard-coded fields, since the hope is to rewrite this
 * when a spec is available on how the negotiation is intended to work.
5272  */
5273 static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5274 				 u32 dcnt, u32 *data)
5275 {
5276 	int i;
5277 	u64 pbc;
5278 	u32 __iomem *piobuf;
5279 	u32 pnum, control, len;
5280 	struct qib_devdata *dd = ppd->dd;
5281 
5282 	i = 0;
	len = 7 + dcnt + 1; /* 7-dword header, dcnt dwords data, 1 icrc */
5284 	control = qib_7322_setpbc_control(ppd, len, 0, 15);
5285 	pbc = ((u64) control << 32) | len;
5286 	while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5287 		if (i++ > 15)
5288 			return;
5289 		udelay(2);
5290 	}
5291 	/* disable header check on this packet, since it can't be valid */
5292 	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
5293 	writeq(pbc, piobuf);
5294 	qib_flush_wc();
5295 	qib_pio_copy(piobuf + 2, hdr, 7);
5296 	qib_pio_copy(piobuf + 9, data, dcnt);
5297 	if (dd->flags & QIB_USE_SPCL_TRIG) {
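		/* pick the special-trigger offset for a 2K vs 4K buffer */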
5298 		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
5299 
5300 		qib_flush_wc();
5301 		__raw_writel(0xaebecede, piobuf + spcl_off);
5302 	}
5303 	qib_flush_wc();
5304 	qib_sendbuf_done(dd, pnum);
5305 	/* and re-enable hdr check */
5306 	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
5307 }
5308 
5309 /*
5310  * _start packet gets sent twice at start, _done gets sent twice at end
5311  */
5312 static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5313 {
5314 	struct qib_devdata *dd = ppd->dd;
5315 	static u32 swapped;
5316 	u32 dw, i, hcnt, dcnt, *data;
5317 	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5318 	static u32 madpayload_start[0x40] = {
5319 		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5320 		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5321 		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
5322 		};
5323 	static u32 madpayload_done[0x40] = {
5324 		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5325 		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5326 		0x40000001, 0x1388, 0x15e, /* rest 0's */
5327 		};
5328 
5329 	dcnt = ARRAY_SIZE(madpayload_start);
5330 	hcnt = ARRAY_SIZE(hdr);
5331 	if (!swapped) {
5332 		/* for maintainability, do it at runtime */
5333 		for (i = 0; i < hcnt; i++) {
5334 			dw = (__force u32) cpu_to_be32(hdr[i]);
5335 			hdr[i] = dw;
5336 		}
5337 		for (i = 0; i < dcnt; i++) {
5338 			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5339 			madpayload_start[i] = dw;
5340 			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5341 			madpayload_done[i] = dw;
5342 		}
5343 		swapped = 1;
5344 	}
5345 
5346 	data = which ? madpayload_done : madpayload_start;
5347 
5348 	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5349 	qib_read_kreg64(dd, kr_scratch);
5350 	udelay(2);
5351 	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5352 	qib_read_kreg64(dd, kr_scratch);
5353 	udelay(2);
5354 }
5355 
5356 /*
5357  * Do the absolute minimum to cause an IB speed change, and make it
 * ready, but don't actually trigger the change.  The caller will
 * do that when ready (if the link is in the Polling training state,
 * it will happen immediately, otherwise when the link next goes down).
 *
 * This routine should only be used as part of the DDR autonegotiation
 * code for devices that are not compliant with IB 1.2 (or code that
 * fixes things up for same).
 *
 * When the link has gone down and autoneg is enabled, or autoneg has
 * failed and we give up until next time, we set both speeds, and then
 * we want IBTA enabled as well as "use max enabled speed".
5369  */
5370 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5371 {
5372 	u64 newctrlb;
5373 	newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5374 				    IBA7322_IBC_IBTA_1_2_MASK |
5375 				    IBA7322_IBC_MAX_SPEED_MASK);
5376 
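	/* speed & (speed - 1) is nonzero iff more than one speed bit is set */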
5377 	if (speed & (speed - 1)) /* multiple speeds */
5378 		newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5379 				    IBA7322_IBC_IBTA_1_2_MASK |
5380 				    IBA7322_IBC_MAX_SPEED_MASK;
5381 	else
5382 		newctrlb |= speed == QIB_IB_QDR ?
5383 			IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5384 			((speed == QIB_IB_DDR ?
5385 			  IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5386 
5387 	if (newctrlb == ppd->cpspec->ibcctrl_b)
5388 		return;
5389 
5390 	ppd->cpspec->ibcctrl_b = newctrlb;
5391 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5392 	qib_write_kreg(ppd->dd, kr_scratch, 0);
5393 }
5394 
5395 /*
5396  * This routine is only used when we are not talking to another
5397  * IB 1.2-compliant device that we think can do DDR.
5398  * (This includes all existing switch chips as of Oct 2007.)
5399  * 1.2-compliant devices go directly to DDR prior to reaching INIT
5400  */
5401 static void try_7322_autoneg(struct qib_pportdata *ppd)
5402 {
5403 	unsigned long flags;
5404 
5405 	spin_lock_irqsave(&ppd->lflags_lock, flags);
5406 	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5407 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5408 	qib_autoneg_7322_send(ppd, 0);
5409 	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5410 	qib_7322_mini_pcs_reset(ppd);
5411 	/* 2 msec is minimum length of a poll cycle */
5412 	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5413 			   msecs_to_jiffies(2));
5414 }
5415 
5416 /*
5417  * Handle the empirically determined mechanism for auto-negotiation
5418  * of DDR speed with switches.
5419  */
5420 static void autoneg_7322_work(struct work_struct *work)
5421 {
5422 	struct qib_pportdata *ppd;
5423 	struct qib_devdata *dd;
5424 	u64 startms;
5425 	u32 i;
5426 	unsigned long flags;
5427 
5428 	ppd = container_of(work, struct qib_chippport_specific,
5429 			    autoneg_work.work)->ppd;
5430 	dd = ppd->dd;
5431 
5432 	startms = jiffies_to_msecs(jiffies);
5433 
5434 	/*
	 * Busy-wait for this first part; it should take at most a
	 * few hundred usec, since we scheduled ourselves for 2 msec.
5437 	 */
5438 	for (i = 0; i < 25; i++) {
5439 		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5440 		     == IB_7322_LT_STATE_POLLQUIET) {
5441 			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5442 			break;
5443 		}
5444 		udelay(100);
5445 	}
5446 
5447 	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5448 		goto done; /* we got there early or told to stop */
5449 
5450 	/* we expect this to timeout */
5451 	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5452 			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5453 			       msecs_to_jiffies(90)))
5454 		goto done;
5455 	qib_7322_mini_pcs_reset(ppd);
5456 
5457 	/* we expect this to timeout */
5458 	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5459 			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5460 			       msecs_to_jiffies(1700)))
5461 		goto done;
5462 	qib_7322_mini_pcs_reset(ppd);
5463 
5464 	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5465 
5466 	/*
5467 	 * Wait up to 250 msec for link to train and get to INIT at DDR;
5468 	 * this should terminate early.
5469 	 */
5470 	wait_event_timeout(ppd->cpspec->autoneg_wait,
5471 		!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5472 		msecs_to_jiffies(250));
5473 done:
5474 	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5475 		spin_lock_irqsave(&ppd->lflags_lock, flags);
5476 		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5477 		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5478 			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5479 			ppd->cpspec->autoneg_tries = 0;
5480 		}
5481 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5482 		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5483 	}
5484 }
5485 
5486 /*
 * This routine is used to request that the IPG be set in the QLogic switch.
5488  * Only called if r1.
5489  */
5490 static void try_7322_ipg(struct qib_pportdata *ppd)
5491 {
5492 	struct qib_ibport *ibp = &ppd->ibport_data;
5493 	struct ib_mad_send_buf *send_buf;
5494 	struct ib_mad_agent *agent;
5495 	struct ib_smp *smp;
5496 	unsigned delay;
5497 	int ret;
5498 
5499 	agent = ibp->send_agent;
5500 	if (!agent)
5501 		goto retry;
5502 
5503 	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5504 				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
5505 	if (IS_ERR(send_buf))
5506 		goto retry;
5507 
5508 	if (!ibp->smi_ah) {
5509 		struct ib_ah *ah;
5510 
5511 		ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
5512 		if (IS_ERR(ah))
5513 			ret = PTR_ERR(ah);
5514 		else {
5515 			send_buf->ah = ah;
5516 			ibp->smi_ah = to_iah(ah);
5517 			ret = 0;
5518 		}
5519 	} else {
5520 		send_buf->ah = &ibp->smi_ah->ibah;
5521 		ret = 0;
5522 	}
5523 
5524 	smp = send_buf->mad;
5525 	smp->base_version = IB_MGMT_BASE_VERSION;
5526 	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5527 	smp->class_version = 1;
5528 	smp->method = IB_MGMT_METHOD_SEND;
5529 	smp->hop_cnt = 1;
5530 	smp->attr_id = QIB_VENDOR_IPG;
5531 	smp->attr_mod = 0;
5532 
5533 	if (!ret)
5534 		ret = ib_post_send_mad(send_buf, NULL);
5535 	if (ret)
5536 		ib_free_send_mad(send_buf);
5537 retry:
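	/* exponential backoff: 2 << ipg_tries msec until the next attempt */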
5538 	delay = 2 << ppd->cpspec->ipg_tries;
5539 	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5540 			   msecs_to_jiffies(delay));
5541 }
5542 
5543 /*
5544  * Timeout handler for setting IPG.
5545  * Only called if r1.
5546  */
5547 static void ipg_7322_work(struct work_struct *work)
5548 {
5549 	struct qib_pportdata *ppd;
5550 
5551 	ppd = container_of(work, struct qib_chippport_specific,
5552 			   ipg_work.work)->ppd;
5553 	if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5554 	    && ++ppd->cpspec->ipg_tries <= 10)
5555 		try_7322_ipg(ppd);
5556 }
5557 
5558 static u32 qib_7322_iblink_state(u64 ibcs)
5559 {
5560 	u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5561 
5562 	switch (state) {
5563 	case IB_7322_L_STATE_INIT:
5564 		state = IB_PORT_INIT;
5565 		break;
5566 	case IB_7322_L_STATE_ARM:
5567 		state = IB_PORT_ARMED;
5568 		break;
5569 	case IB_7322_L_STATE_ACTIVE:
5570 		/* fall through */
5571 	case IB_7322_L_STATE_ACT_DEFER:
5572 		state = IB_PORT_ACTIVE;
5573 		break;
5574 	default: /* fall through */
5575 	case IB_7322_L_STATE_DOWN:
5576 		state = IB_PORT_DOWN;
5577 		break;
5578 	}
5579 	return state;
5580 }
5581 
5582 /* returns the IBTA port state, rather than the IBC link training state */
5583 static u8 qib_7322_phys_portstate(u64 ibcs)
5584 {
5585 	u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5586 	return qib_7322_physportstate[state];
5587 }
5588 
5589 static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5590 {
5591 	int ret = 0, symadj = 0;
5592 	unsigned long flags;
5593 	int mult;
5594 
5595 	spin_lock_irqsave(&ppd->lflags_lock, flags);
5596 	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5597 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5598 
5599 	/* Update our picture of width and speed from chip */
5600 	if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5601 		ppd->link_speed_active = QIB_IB_QDR;
5602 		mult = 4;
5603 	} else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5604 		ppd->link_speed_active = QIB_IB_DDR;
5605 		mult = 2;
5606 	} else {
5607 		ppd->link_speed_active = QIB_IB_SDR;
5608 		mult = 1;
5609 	}
5610 	if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5611 		ppd->link_width_active = IB_WIDTH_4X;
5612 		mult *= 4;
5613 	} else
5614 		ppd->link_width_active = IB_WIDTH_1X;
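	/* mult = speed (1/2/4) * width (1/4); used for rate-based delay */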
5615 	ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5616 
5617 	if (!ibup) {
5618 		u64 clr;
5619 
5620 		/* Link went down. */
5621 		/* do IPG MAD again after linkdown, even if last time failed */
5622 		ppd->cpspec->ipg_tries = 0;
5623 		clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5624 			(SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5625 			 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5626 		if (clr)
5627 			qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5628 		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5629 				     QIBL_IB_AUTONEG_INPROG)))
5630 			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5631 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5632 			struct qib_qsfp_data *qd =
5633 				&ppd->cpspec->qsfp_data;
5634 			/* unlock the Tx settings, speed may change */
5635 			qib_write_kreg_port(ppd, krp_tx_deemph_override,
5636 				SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5637 				reset_tx_deemphasis_override));
5638 			qib_cancel_sends(ppd);
5639 			/* on link down, ensure sane pcs state */
5640 			qib_7322_mini_pcs_reset(ppd);
			/*
			 * Schedule the qsfp refresh which should turn
			 * the link off.
			 */
5643 			if (ppd->dd->flags & QIB_HAS_QSFP) {
5644 				qd->t_insert = jiffies;
5645 				queue_work(ib_wq, &qd->work);
5646 			}
5647 			spin_lock_irqsave(&ppd->sdma_lock, flags);
5648 			if (__qib_sdma_running(ppd))
5649 				__qib_sdma_process_event(ppd,
5650 					qib_sdma_event_e70_go_idle);
5651 			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5652 		}
5653 		clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5654 		if (clr == ppd->cpspec->iblnkdownsnap)
5655 			ppd->cpspec->iblnkdowndelta++;
5656 	} else {
5657 		if (qib_compat_ddr_negotiate &&
5658 		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5659 				     QIBL_IB_AUTONEG_INPROG)) &&
5660 		    ppd->link_speed_active == QIB_IB_SDR &&
5661 		    (ppd->link_speed_enabled & QIB_IB_DDR)
5662 		    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5663 			/* we are SDR, and auto-negotiation enabled */
5664 			++ppd->cpspec->autoneg_tries;
5665 			if (!ppd->cpspec->ibdeltainprog) {
5666 				ppd->cpspec->ibdeltainprog = 1;
5667 				ppd->cpspec->ibsymdelta +=
5668 					read_7322_creg32_port(ppd,
5669 						crp_ibsymbolerr) -
5670 						ppd->cpspec->ibsymsnap;
5671 				ppd->cpspec->iblnkerrdelta +=
5672 					read_7322_creg32_port(ppd,
5673 						crp_iblinkerrrecov) -
5674 						ppd->cpspec->iblnkerrsnap;
5675 			}
5676 			try_7322_autoneg(ppd);
5677 			ret = 1; /* no other IB status change processing */
5678 		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5679 			   ppd->link_speed_active == QIB_IB_SDR) {
5680 			qib_autoneg_7322_send(ppd, 1);
5681 			set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5682 			qib_7322_mini_pcs_reset(ppd);
5683 			udelay(2);
5684 			ret = 1; /* no other IB status change processing */
5685 		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5686 			   (ppd->link_speed_active & QIB_IB_DDR)) {
5687 			spin_lock_irqsave(&ppd->lflags_lock, flags);
5688 			ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5689 					 QIBL_IB_AUTONEG_FAILED);
5690 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5691 			ppd->cpspec->autoneg_tries = 0;
5692 			/* re-enable SDR, for next link down */
5693 			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5694 			wake_up(&ppd->cpspec->autoneg_wait);
5695 			symadj = 1;
5696 		} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5697 			/*
5698 			 * Clear autoneg failure flag, and do setup
5699 			 * so we'll try next time link goes down and
5700 			 * back to INIT (possibly connected to a
5701 			 * different device).
5702 			 */
5703 			spin_lock_irqsave(&ppd->lflags_lock, flags);
5704 			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5705 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5706 			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5707 			symadj = 1;
5708 		}
5709 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5710 			symadj = 1;
5711 			if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5712 				try_7322_ipg(ppd);
5713 			if (!ppd->cpspec->recovery_init)
5714 				setup_7322_link_recovery(ppd, 0);
5715 			ppd->cpspec->qdr_dfe_time = jiffies +
5716 				msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5717 		}
5718 		ppd->cpspec->ibmalfusesnap = 0;
5719 		ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5720 			crp_errlink);
5721 	}
5722 	if (symadj) {
5723 		ppd->cpspec->iblnkdownsnap =
5724 			read_7322_creg32_port(ppd, crp_iblinkdown);
5725 		if (ppd->cpspec->ibdeltainprog) {
5726 			ppd->cpspec->ibdeltainprog = 0;
5727 			ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5728 				crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5729 			ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5730 				crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5731 		}
5732 	} else if (!ibup && qib_compat_ddr_negotiate &&
5733 		   !ppd->cpspec->ibdeltainprog &&
5734 			!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5735 		ppd->cpspec->ibdeltainprog = 1;
5736 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5737 			crp_ibsymbolerr);
5738 		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5739 			crp_iblinkerrrecov);
5740 	}
5741 
5742 	if (!ret)
5743 		qib_setup_7322_setextled(ppd, ibup);
5744 	return ret;
5745 }
5746 
5747 /*
5748  * Does read/modify/write to appropriate registers to
5749  * set output and direction bits selected by mask.
 * These are in their canonical positions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * Returns contents of GP Inputs.
5753  */
5754 static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5755 {
5756 	u64 read_val, new_out;
5757 	unsigned long flags;
5758 
5759 	if (mask) {
5760 		/* some bits being written, lock access to GPIO */
5761 		dir &= mask;
5762 		out &= mask;
5763 		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5764 		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5765 		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5766 		new_out = (dd->cspec->gpio_out & ~mask) | out;
5767 
5768 		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5769 		qib_write_kreg(dd, kr_gpio_out, new_out);
5770 		dd->cspec->gpio_out = new_out;
5771 		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5772 	}
5773 	/*
5774 	 * It is unlikely that a read at this time would get valid
5775 	 * data on a pin whose direction line was set in the same
5776 	 * call to this function. We include the read here because
5777 	 * that allows us to potentially combine a change on one pin with
5778 	 * a read on another, and because the old code did something like
5779 	 * this.
5780 	 */
5781 	read_val = qib_read_kreg64(dd, kr_extstatus);
5782 	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5783 }
5784 
5785 /* Enable writes to config EEPROM, if possible. Returns previous state */
5786 static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5787 {
5788 	int prev_wen;
5789 	u32 mask;
5790 
5791 	mask = 1 << QIB_EEPROM_WEN_NUM;
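	/* WEN pin is driven low to enable writes, hence the inverted read */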
5792 	prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5793 	gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5794 
5795 	return prev_wen & 1;
5796 }
5797 
5798 /*
5799  * Read fundamental info we need to use the chip.  These are
5800  * the registers that describe chip capabilities, and are
5801  * saved in shadow registers.
5802  */
5803 static void get_7322_chip_params(struct qib_devdata *dd)
5804 {
5805 	u64 val;
5806 	u32 piobufs;
5807 	int mtu;
5808 
5809 	dd->palign = qib_read_kreg32(dd, kr_pagealign);
5810 
5811 	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5812 
5813 	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5814 	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5815 	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5816 	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5817 	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5818 
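	/* these regs pack the 2K value in low 32 bits, 4K value in high 32 */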
5819 	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5820 	dd->piobcnt2k = val & ~0U;
5821 	dd->piobcnt4k = val >> 32;
5822 	val = qib_read_kreg64(dd, kr_sendpiosize);
5823 	dd->piosize2k = val & ~0U;
5824 	dd->piosize4k = val >> 32;
5825 
5826 	mtu = ib_mtu_enum_to_int(qib_ibmtu);
5827 	if (mtu == -1)
5828 		mtu = QIB_DEFAULT_MTU;
5829 	dd->pport[0].ibmtu = (u32)mtu;
5830 	dd->pport[1].ibmtu = (u32)mtu;
5831 
5832 	/* these may be adjusted in init_chip_wc_pat() */
5833 	dd->pio2kbase = (u32 __iomem *)
5834 		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5835 	dd->pio4kbase = (u32 __iomem *)
5836 		((char __iomem *) dd->kregbase +
5837 		 (dd->piobufbase >> 32));
5838 	/*
5839 	 * 4K buffers take 2 pages; we use roundup just to be
	 * paranoid; we calculate it once here, rather than on
	 * every buffer allocation.
5842 	 */
5843 	dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5844 
5845 	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5846 
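	/* each 64-bit pioavail reg covers 32 buffers (2 bits per buffer) */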
5847 	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5848 		(sizeof(u64) * BITS_PER_BYTE / 2);
5849 }
5850 
5851 /*
5852  * The chip base addresses in cspec and cpspec have to be set
5853  * after possible init_chip_wc_pat(), rather than in
 * get_7322_chip_params(), so split out as a separate function.
5855  */
5856 static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5857 {
5858 	u32 cregbase;
5859 	cregbase = qib_read_kreg32(dd, kr_counterregbase);
5860 
5861 	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5862 		(char __iomem *)dd->kregbase);
5863 
5864 	dd->egrtidbase = (u64 __iomem *)
5865 		((char __iomem *) dd->kregbase + dd->rcvegrbase);
5866 
5867 	/* port registers are defined as relative to base of chip */
5868 	dd->pport[0].cpspec->kpregbase =
5869 		(u64 __iomem *)((char __iomem *)dd->kregbase);
5870 	dd->pport[1].cpspec->kpregbase =
5871 		(u64 __iomem *)(dd->palign +
5872 		(char __iomem *)dd->kregbase);
5873 	dd->pport[0].cpspec->cpregbase =
5874 		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5875 		kr_counterregbase) + (char __iomem *)dd->kregbase);
5876 	dd->pport[1].cpspec->cpregbase =
5877 		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5878 		kr_counterregbase) + (char __iomem *)dd->kregbase);
5879 }
5880 
5881 /*
5882  * This is a fairly special-purpose observer, so we only support
5883  * the port-specific parts of SendCtrl
5884  */
5885 
5886 #define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |		\
5887 			   SYM_MASK(SendCtrl_0, SDmaEnable) |		\
5888 			   SYM_MASK(SendCtrl_0, SDmaIntEnable) |	\
5889 			   SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5890 			   SYM_MASK(SendCtrl_0, SDmaHalt) |		\
5891 			   SYM_MASK(SendCtrl_0, IBVLArbiterEn) |	\
5892 			   SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5893 
5894 static int sendctrl_hook(struct qib_devdata *dd,
5895 			 const struct diag_observer *op, u32 offs,
5896 			 u64 *data, u64 mask, int only_32)
5897 {
5898 	unsigned long flags;
5899 	unsigned idx;
5900 	unsigned pidx;
5901 	struct qib_pportdata *ppd = NULL;
5902 	u64 local_data, all_bits;
5903 
5904 	/*
5905 	 * The fixed correspondence between Physical ports and pports is
5906 	 * severed. We need to hunt for the ppd that corresponds
5907 	 * to the offset we got. And we have to do that without admitting
5908 	 * we know the stride, apparently.
5909 	 */
5910 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5911 		u64 __iomem *psptr;
5912 		u32 psoffs;
5913 
5914 		ppd = dd->pport + pidx;
5915 		if (!ppd->cpspec->kpregbase)
5916 			continue;
5917 
5918 		psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5919 		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5920 		if (psoffs == offs)
5921 			break;
5922 	}
5923 
5924 	/* If pport is not being managed by driver, just avoid shadows. */
5925 	if (pidx >= dd->num_pports)
5926 		ppd = NULL;
5927 
5928 	/* In any case, "idx" is flat index in kreg space */
5929 	idx = offs / sizeof(u64);
5930 
5931 	all_bits = ~0ULL;
5932 	if (only_32)
5933 		all_bits >>= 32;
5934 
5935 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
5936 	if (!ppd || (mask & all_bits) != all_bits) {
5937 		/*
5938 		 * At least some mask bits are zero, so we need
5939 		 * to read. The judgement call is whether from
5940 		 * reg or shadow. First-cut: read reg, and complain
5941 		 * if any bits which should be shadowed are different
5942 		 * from their shadowed value.
5943 		 */
5944 		if (only_32)
5945 			local_data = (u64)qib_read_kreg32(dd, idx);
5946 		else
5947 			local_data = qib_read_kreg64(dd, idx);
5948 		*data = (local_data & ~mask) | (*data & mask);
5949 	}
5950 	if (mask) {
5951 		/*
5952 		 * At least some mask bits are one, so we need
5953 		 * to write, but only shadow some bits.
5954 		 */
5955 		u64 sval, tval; /* Shadowed, transient */
5956 
5957 		/*
5958 		 * New shadow val is bits we don't want to touch,
5959 		 * ORed with bits we do, that are intended for shadow.
5960 		 */
5961 		if (ppd) {
5962 			sval = ppd->p_sendctrl & ~mask;
5963 			sval |= *data & SENDCTRL_SHADOWED & mask;
5964 			ppd->p_sendctrl = sval;
5965 		} else
5966 			sval = *data & SENDCTRL_SHADOWED & mask;
5967 		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5968 		qib_write_kreg(dd, idx, tval);
		qib_write_kreg(dd, kr_scratch, 0ULL);
5970 	}
5971 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5972 	return only_32 ? 4 : 8;
5973 }
5974 
5975 static const struct diag_observer sendctrl_0_observer = {
5976 	sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5977 	KREG_IDX(SendCtrl_0) * sizeof(u64)
5978 };
5979 
5980 static const struct diag_observer sendctrl_1_observer = {
5981 	sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5982 	KREG_IDX(SendCtrl_1) * sizeof(u64)
5983 };
5984 
5985 static ushort sdma_fetch_prio = 8;
5986 module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5987 MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5988 
5989 /* Besides logging QSFP events, we set appropriate TxDDS values */
5990 static void init_txdds_table(struct qib_pportdata *ppd, int override);
5991 
5992 static void qsfp_7322_event(struct work_struct *work)
5993 {
5994 	struct qib_qsfp_data *qd;
5995 	struct qib_pportdata *ppd;
5996 	unsigned long pwrup;
5997 	unsigned long flags;
5998 	int ret;
5999 	u32 le2;
6000 
6001 	qd = container_of(work, struct qib_qsfp_data, work);
6002 	ppd = qd->ppd;
6003 	pwrup = qd->t_insert +
6004 		msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
6005 
6006 	/* Delay for 20 msecs to allow ModPrs resistor to setup */
6007 	mdelay(QSFP_MODPRS_LAG_MSEC);
6008 
6009 	if (!qib_qsfp_mod_present(ppd)) {
6010 		ppd->cpspec->qsfp_data.modpresent = 0;
6011 		/* Set the physical link to disabled */
6012 		qib_set_ib_7322_lstate(ppd, 0,
6013 				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
6014 		spin_lock_irqsave(&ppd->lflags_lock, flags);
6015 		ppd->lflags &= ~QIBL_LINKV;
6016 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
6017 	} else {
6018 		/*
		 * Some QSFPs not only do not respond until the full power-up
6020 		 * time, but may behave badly if we try. So hold off responding
6021 		 * to insertion.
6022 		 */
6023 		while (1) {
6024 			if (time_is_before_jiffies(pwrup))
6025 				break;
6026 			msleep(20);
6027 		}
6028 
6029 		ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
6030 
6031 		/*
6032 		 * Need to change LE2 back to defaults if we couldn't
6033 		 * read the cable type (to handle cable swaps), so do this
6034 		 * even on failure to read cable information.  We don't
6035 		 * get here for QME, so IS_QME check not needed here.
6036 		 */
6037 		if (!ret && !ppd->dd->cspec->r1) {
6038 			if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
6039 				le2 = LE2_QME;
6040 			else if (qd->cache.atten[1] >= qib_long_atten &&
6041 				 QSFP_IS_CU(qd->cache.tech))
6042 				le2 = LE2_5m;
6043 			else
6044 				le2 = LE2_DEFAULT;
6045 		} else
6046 			le2 = LE2_DEFAULT;
6047 		ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
6048 		/*
		 * We always change parameters, since we can choose
6050 		 * values for cables without eeproms, and the cable may have
6051 		 * changed from a cable with full or partial eeprom content
6052 		 * to one with partial or no content.
6053 		 */
6054 		init_txdds_table(ppd, 0);
		/*
		 * The physical link is being re-enabled only when the
		 * previous state was DISABLED and the VALID bit is not
		 * set. This should only happen when the cable has been
		 * physically pulled.
		 */
6059 		if (!ppd->cpspec->qsfp_data.modpresent &&
6060 		    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
6061 			ppd->cpspec->qsfp_data.modpresent = 1;
6062 			qib_set_ib_7322_lstate(ppd, 0,
6063 				QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6064 			spin_lock_irqsave(&ppd->lflags_lock, flags);
6065 			ppd->lflags |= QIBL_LINKV;
6066 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
6067 		}
6068 	}
6069 }
6070 
6071 /*
6072  * There is little we can do but complain to the user if QSFP
6073  * initialization fails.
6074  */
6075 static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
6076 {
6077 	unsigned long flags;
6078 	struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
6079 	struct qib_devdata *dd = ppd->dd;
6080 	u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
6081 
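	/* shift the module-present GPIO bit to this port's position */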
6082 	mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
6083 	qd->ppd = ppd;
6084 	qib_qsfp_init(qd, qsfp_7322_event);
6085 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
6086 	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
6087 	dd->cspec->gpio_mask |= mod_prs_bit;
6088 	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
6089 	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
6090 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
6091 }
6092 
6093 /*
6094  * called at device initialization time, and also if the txselect
6095  * module parameter is changed.  This is used for cables that don't
6096  * have valid QSFP EEPROMs (not present, or attenuation is zero).
6097  * We initialize to the default, then if there is a specific
6098  * unit,port match, we use that (and set it immediately, for the
6099  * current speed, if the link is at INIT or better).
6100  * String format is "default# unit#,port#=# ... u,p=#", separators must
6101  * be a SPACE character.  A newline terminates.  The u,p=# tuples may
 * optionally have "u,p=#,#", where the final # is the H1 value.
6103  * The last specific match is used (actually, all are used, but last
6104  * one is the one that winds up set); if none at all, fall back on default.
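 * For example, "2 0,1=4 0,2=5,7\n" would set the default index to 2,
 * index 4 for unit 0 port 1, and index 5 with H1 value 7 for unit 0
 * port 2 (illustrative values only).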
6105  */
6106 static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
6107 {
6108 	char *nxt, *str;
6109 	u32 pidx, unit, port, deflt, h1;
6110 	unsigned long val;
6111 	int any = 0, seth1;
6112 	int txdds_size;
6113 
6114 	str = txselect_list;
6115 
6116 	/* default number is validated in setup_txselect() */
6117 	deflt = simple_strtoul(str, &nxt, 0);
6118 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
6119 		dd->pport[pidx].cpspec->no_eep = deflt;
6120 
6121 	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
6122 	if (IS_QME(dd) || IS_QMH(dd))
6123 		txdds_size += TXDDS_MFG_SZ;
6124 
6125 	while (*nxt && nxt[1]) {
6126 		str = ++nxt;
6127 		unit = simple_strtoul(str, &nxt, 0);
6128 		if (nxt == str || !*nxt || *nxt != ',') {
6129 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6130 				;
6131 			continue;
6132 		}
6133 		str = ++nxt;
6134 		port = simple_strtoul(str, &nxt, 0);
6135 		if (nxt == str || *nxt != '=') {
6136 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6137 				;
6138 			continue;
6139 		}
6140 		str = ++nxt;
6141 		val = simple_strtoul(str, &nxt, 0);
6142 		if (nxt == str) {
6143 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6144 				;
6145 			continue;
6146 		}
6147 		if (val >= txdds_size)
6148 			continue;
6149 		seth1 = 0;
		h1 = 0; /* gcc thinks it might be used uninitialized */
6151 		if (*nxt == ',' && nxt[1]) {
6152 			str = ++nxt;
6153 			h1 = (u32)simple_strtoul(str, &nxt, 0);
6154 			if (nxt == str)
6155 				while (*nxt && *nxt++ != ' ') /* skip */
6156 					;
6157 			else
6158 				seth1 = 1;
6159 		}
6160 		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
6161 		     ++pidx) {
6162 			struct qib_pportdata *ppd = &dd->pport[pidx];
6163 
6164 			if (ppd->port != port || !ppd->link_speed_supported)
6165 				continue;
6166 			ppd->cpspec->no_eep = val;
6167 			if (seth1)
6168 				ppd->cpspec->h1_val = h1;
6169 			/* now change the IBC and serdes, overriding generic */
6170 			init_txdds_table(ppd, 1);
			/*
			 * Re-enable the physical state machine on mezz boards
			 * now that the correct settings have been set.
			 * QSFP boards are handled by the QSFP event handler.
			 */
6174 			if (IS_QMH(dd) || IS_QME(dd))
6175 				qib_set_ib_7322_lstate(ppd, 0,
6176 					    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6177 			any++;
6178 		}
6179 		if (*nxt == '\n')
6180 			break; /* done */
6181 	}
6182 	if (change && !any) {
6183 		/* no specific setting, use the default.
6184 		 * Change the IBC and serdes, but since it's
6185 		 * general, don't override specific settings.
6186 		 */
6187 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
6188 			if (dd->pport[pidx].link_speed_supported)
6189 				init_txdds_table(&dd->pport[pidx], 0);
6190 	}
6191 }
6192 
6193 /* handle the txselect parameter changing */
6194 static int setup_txselect(const char *str, struct kernel_param *kp)
6195 {
6196 	struct qib_devdata *dd;
6197 	unsigned long val;
	char *n;

6199 	if (strlen(str) >= MAX_ATTEN_LEN) {
6200 		pr_info("txselect_values string too long\n");
6201 		return -ENOSPC;
6202 	}
6203 	val = simple_strtoul(str, &n, 0);
6204 	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
6205 				TXDDS_MFG_SZ)) {
6206 		pr_info("txselect_values must start with a number < %d\n",
6207 			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
6208 		return -EINVAL;
6209 	}
6210 	strcpy(txselect_list, str);
6211 
6212 	list_for_each_entry(dd, &qib_dev_list, list)
6213 		if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
6214 			set_no_qsfp_atten(dd, 1);
6215 	return 0;
6216 }
6217 
6218 /*
6219  * Write the final few registers that depend on some of the
6220  * init setup.  Done late in init, just before bringing up
6221  * the serdes.
6222  */
6223 static int qib_late_7322_initreg(struct qib_devdata *dd)
6224 {
6225 	int ret = 0, n;
6226 	u64 val;
6227 
6228 	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
6229 	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
6230 	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
6231 	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
6232 	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
6233 	if (val != dd->pioavailregs_phys) {
6234 		qib_dev_err(dd,
6235 			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
6236 			(unsigned long) dd->pioavailregs_phys,
6237 			(unsigned long long) val);
6238 		ret = -EINVAL;
6239 	}
6240 
6241 	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
6242 	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
6243 	/* driver sends get pkey, lid, etc. checking also, to catch bugs */
6244 	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
6245 
6246 	qib_register_observer(dd, &sendctrl_0_observer);
6247 	qib_register_observer(dd, &sendctrl_1_observer);
6248 
6249 	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
6250 	qib_write_kreg(dd, kr_control, dd->control);
6251 	/*
6252 	 * Set SendDmaFetchPriority and init Tx params, including
6253 	 * QSFP handler on boards that have QSFP.
6254 	 * First set our default attenuation entry for cables that
6255 	 * don't have valid attenuation.
6256 	 */
6257 	set_no_qsfp_atten(dd, 0);
6258 	for (n = 0; n < dd->num_pports; ++n) {
6259 		struct qib_pportdata *ppd = dd->pport + n;
6260 
6261 		qib_write_kreg_port(ppd, krp_senddmaprioritythld,
6262 				    sdma_fetch_prio & 0xf);
6263 		/* Initialize qsfp if present on board. */
6264 		if (dd->flags & QIB_HAS_QSFP)
6265 			qib_init_7322_qsfp(ppd);
6266 	}
6267 	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
6268 	qib_write_kreg(dd, kr_control, dd->control);
6269 
6270 	return ret;
6271 }
6272 
6273 /* per IB port errors.  */
6274 #define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
6275 	MASK_ACROSS(8, 15))
6276 #define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
6277 #define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
6278 	MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
6279 	MASK_ACROSS(0, 11))
6280 
6281 /*
6282  * Write the initialization per-port registers that need to be done at
6283  * driver load and after reset completes (i.e., that aren't done as part
6284  * of other init procedures called from qib_init.c).
6285  * Some of these should be redundant on reset, but play safe.
6286  */
6287 static void write_7322_init_portregs(struct qib_pportdata *ppd)
6288 {
6289 	u64 val;
6290 	int i;
6291 
6292 	if (!ppd->link_speed_supported) {
6293 		/* no buffer credits for this port */
6294 		for (i = 1; i < 8; i++)
6295 			qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
6296 		qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
6297 		qib_write_kreg(ppd->dd, kr_scratch, 0);
6298 		return;
6299 	}
6300 
6301 	/*
6302 	 * Set the number of supported virtual lanes in IBC,
6303 	 * for flow control packet handling on unsupported VLs
6304 	 */
6305 	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
6306 	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
6307 	val |= (u64)(ppd->vls_supported - 1) <<
6308 		SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
6309 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
6310 
6311 	qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
6312 
6313 	/* enable tx header checking */
6314 	qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
6315 			    IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
6316 			    IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
6317 
6318 	qib_write_kreg_port(ppd, krp_ncmodectrl,
6319 		SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
6320 
6321 	/*
6322 	 * Unconditionally clear the bufmask bits.  If SDMA is
6323 	 * enabled, we'll set them appropriately later.
6324 	 */
6325 	qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
6326 	qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
6327 	qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
6328 	if (ppd->dd->cspec->r1)
6329 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
6330 }
6331 
6332 /*
6333  * Write the initialization per-device registers that need to be done at
6334  * driver load and after reset completes (i.e., that aren't done as part
6335  * of other init procedures called from qib_init.c).  Also write per-port
 * registers that are affected by overall device config, such as QP mapping.
 * Some of these should be redundant on reset, but play safe.
6338  */
6339 static void write_7322_initregs(struct qib_devdata *dd)
6340 {
6341 	struct qib_pportdata *ppd;
6342 	int i, pidx;
6343 	u64 val;
6344 
6345 	/* Set Multicast QPs received by port 2 to map to context one. */
6346 	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
6347 
6348 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
6349 		unsigned n, regno;
6350 		unsigned long flags;
6351 
6352 		if (dd->n_krcv_queues < 2 ||
6353 			!dd->pport[pidx].link_speed_supported)
6354 			continue;
6355 
6356 		ppd = &dd->pport[pidx];
6357 
6358 		/* be paranoid against later code motion, etc. */
6359 		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6360 		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6361 		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
6362 
6363 		/* Initialize QP to context mapping */
6364 		regno = krp_rcvqpmaptable;
6365 		val = 0;
6366 		if (dd->num_pports > 1)
6367 			n = dd->first_user_ctxt / dd->num_pports;
6368 		else
6369 			n = dd->first_user_ctxt - 1;
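		/*
		 * Each RcvQPMapTable register packs six 5-bit context
		 * numbers, so the 32 QP-to-context map entries span six
		 * registers, with the last one only partially filled.
		 */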
6370 		for (i = 0; i < 32; ) {
6371 			unsigned ctxt;
6372 
6373 			if (dd->num_pports > 1)
6374 				ctxt = (i % n) * dd->num_pports + pidx;
6375 			else if (i % n)
6376 				ctxt = (i % n) + 1;
6377 			else
6378 				ctxt = ppd->hw_pidx;
6379 			val |= ctxt << (5 * (i % 6));
6380 			i++;
6381 			if (i % 6 == 0) {
6382 				qib_write_kreg_port(ppd, regno, val);
6383 				val = 0;
6384 				regno++;
6385 			}
6386 		}
6387 		qib_write_kreg_port(ppd, regno, val);
6388 	}
6389 
6390 	/*
	 * Set up interrupt mitigation for kernel contexts, but
6392 	 * not user contexts (user contexts use interrupts when
6393 	 * stalled waiting for any packet, so want those interrupts
6394 	 * right away).
6395 	 */
6396 	for (i = 0; i < dd->first_user_ctxt; i++) {
6397 		dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6398 		qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6399 	}
6400 
6401 	/*
	 * Initialize the rcvflow tables as disabled.  Application code
	 * will set up each flow as it uses it.
	 * This doesn't clear any of the error bits that might be set.
6405 	 */
6406 	val = TIDFLOW_ERRBITS; /* these are W1C */
6407 	for (i = 0; i < dd->cfgctxts; i++) {
6408 		int flow;
6409 		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6410 			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6411 	}
6412 
6413 	/*
	 * Dual-port cards init to dual-port recovery, single-port cards
	 * to the one port.  Dual-port cards may later adjust to one port,
	 * and then back to dual port if both ports are connected.
	 */
6418 	if (dd->num_pports)
6419 		setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6420 }
6421 
6422 static int qib_init_7322_variables(struct qib_devdata *dd)
6423 {
6424 	struct qib_pportdata *ppd;
6425 	unsigned features, pidx, sbufcnt;
6426 	int ret, mtu;
6427 	u32 sbufs, updthresh;
6428 
6429 	/* pport structs are contiguous, allocated after devdata */
6430 	ppd = (struct qib_pportdata *)(dd + 1);
6431 	dd->pport = ppd;
6432 	ppd[0].dd = dd;
6433 	ppd[1].dd = dd;
6434 
6435 	dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6436 
6437 	ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6438 	ppd[1].cpspec = &ppd[0].cpspec[1];
6439 	ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6440 	ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6441 
6442 	spin_lock_init(&dd->cspec->rcvmod_lock);
6443 	spin_lock_init(&dd->cspec->gpio_lock);
6444 
6445 	/* we haven't yet set QIB_PRESENT, so use read directly */
6446 	dd->revision = readq(&dd->kregbase[kr_revision]);
6447 
6448 	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6449 		qib_dev_err(dd,
6450 			"Revision register read failure, giving up initialization\n");
6451 		ret = -ENODEV;
6452 		goto bail;
6453 	}
6454 	dd->flags |= QIB_PRESENT;  /* now register routines work */
6455 
6456 	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6457 	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6458 	dd->cspec->r1 = dd->minrev == 1;
6459 
6460 	get_7322_chip_params(dd);
6461 	features = qib_7322_boardname(dd);
6462 
6463 	/* now that piobcnt2k and 4k set, we can allocate these */
6464 	sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6465 		NUM_VL15_BUFS + BITS_PER_LONG - 1;
6466 	sbufcnt /= BITS_PER_LONG;
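	/* sbufcnt is now the number of longs needed for one bit per send buffer */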
6467 	dd->cspec->sendchkenable = kmalloc(sbufcnt *
6468 		sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
6469 	dd->cspec->sendgrhchk = kmalloc(sbufcnt *
6470 		sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
6471 	dd->cspec->sendibchk = kmalloc(sbufcnt *
6472 		sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
6473 	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6474 		!dd->cspec->sendibchk) {
6475 		qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
6476 		ret = -ENOMEM;
6477 		goto bail;
6478 	}
6479 
6480 	ppd = dd->pport;
6481 
6482 	/*
6483 	 * GPIO bits for TWSI data and clock,
6484 	 * used for serial EEPROM.
6485 	 */
6486 	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6487 	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6488 	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6489 
6490 	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6491 		QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6492 		QIB_HAS_THRESH_UPDATE |
6493 		(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6494 	dd->flags |= qib_special_trigger ?
6495 		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6496 
6497 	/*
6498 	 * Setup initial values.  These may change when PAT is enabled, but
6499 	 * we need these to do initial chip register accesses.
6500 	 */
6501 	qib_7322_set_baseaddrs(dd);
6502 
6503 	mtu = ib_mtu_enum_to_int(qib_ibmtu);
6504 	if (mtu == -1)
6505 		mtu = QIB_DEFAULT_MTU;
6506 
6507 	dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6508 	/* all hwerrors become interrupts, unless special purposed */
6509 	dd->cspec->hwerrmask = ~0ULL;
6510 	/*  link_recovery setup causes these errors, so ignore them,
6511 	 *  other than clearing them when they occur */
6512 	dd->cspec->hwerrmask &=
6513 		~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6514 		  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6515 		  HWE_MASK(LATriggered));
6516 
6517 	for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6518 		struct qib_chippport_specific *cp = ppd->cpspec;
6519 		ppd->link_speed_supported = features & PORT_SPD_CAP;
6520 		features >>=  PORT_SPD_CAP_SHIFT;
6521 		if (!ppd->link_speed_supported) {
6522 			/* single port mode (7340, or configured) */
6523 			dd->skip_kctxt_mask |= 1 << pidx;
6524 			if (pidx == 0) {
6525 				/* Make sure port is disabled. */
6526 				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6527 				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6528 				ppd[0] = ppd[1];
6529 				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6530 						  IBSerdesPClkNotDetectMask_0)
6531 						  | SYM_MASK(HwErrMask,
6532 						  SDmaMemReadErrMask_0));
6533 				dd->cspec->int_enable_mask &= ~(
6534 				     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6535 				     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6536 				     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6537 				     SYM_MASK(IntMask, SDmaIntMask_0) |
6538 				     SYM_MASK(IntMask, ErrIntMask_0) |
6539 				     SYM_MASK(IntMask, SendDoneIntMask_0));
6540 			} else {
6541 				/* Make sure port is disabled. */
6542 				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6543 				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6544 				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6545 						  IBSerdesPClkNotDetectMask_1)
6546 						  | SYM_MASK(HwErrMask,
6547 						  SDmaMemReadErrMask_1));
6548 				dd->cspec->int_enable_mask &= ~(
6549 				     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6550 				     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6551 				     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6552 				     SYM_MASK(IntMask, SDmaIntMask_1) |
6553 				     SYM_MASK(IntMask, ErrIntMask_1) |
6554 				     SYM_MASK(IntMask, SendDoneIntMask_1));
6555 			}
6556 			continue;
6557 		}
6558 
6559 		dd->num_pports++;
6560 		qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6561 
6562 		ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6563 		ppd->link_width_enabled = IB_WIDTH_4X;
6564 		ppd->link_speed_enabled = ppd->link_speed_supported;
6565 		/*
6566 		 * Set the initial values to reasonable default, will be set
6567 		 * for real when link is up.
6568 		 */
6569 		ppd->link_width_active = IB_WIDTH_4X;
6570 		ppd->link_speed_active = QIB_IB_SDR;
6571 		ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6572 		switch (qib_num_cfg_vls) {
6573 		case 1:
6574 			ppd->vls_supported = IB_VL_VL0;
6575 			break;
6576 		case 2:
6577 			ppd->vls_supported = IB_VL_VL0_1;
6578 			break;
6579 		default:
6580 			qib_devinfo(dd->pcidev,
6581 				    "Invalid num_vls %u, using 4 VLs\n",
6582 				    qib_num_cfg_vls);
6583 			qib_num_cfg_vls = 4;
6584 			/* fall through */
6585 		case 4:
6586 			ppd->vls_supported = IB_VL_VL0_3;
6587 			break;
6588 		case 8:
6589 			if (mtu <= 2048)
6590 				ppd->vls_supported = IB_VL_VL0_7;
6591 			else {
6592 				qib_devinfo(dd->pcidev,
6593 					    "Invalid num_vls %u for MTU %d "
6594 					    ", using 4 VLs\n",
6595 					    qib_num_cfg_vls, mtu);
6596 				ppd->vls_supported = IB_VL_VL0_3;
6597 				qib_num_cfg_vls = 4;
6598 			}
6599 			break;
6600 		}
6601 		ppd->vls_operational = ppd->vls_supported;
6602 
6603 		init_waitqueue_head(&cp->autoneg_wait);
6604 		INIT_DELAYED_WORK(&cp->autoneg_work,
6605 				  autoneg_7322_work);
6606 		if (ppd->dd->cspec->r1)
6607 			INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6608 
6609 		/*
6610 		 * For Mez and similar cards, no qsfp info, so do
6611 		 * the "cable info" setup here.  Can be overridden
6612 		 * in adapter-specific routines.
6613 		 */
6614 		if (!(dd->flags & QIB_HAS_QSFP)) {
6615 			if (!IS_QMH(dd) && !IS_QME(dd))
6616 				qib_devinfo(dd->pcidev,
6617 					"IB%u:%u: Unknown mezzanine card type\n",
6618 					dd->unit, ppd->port);
6619 			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6620 			/*
6621 			 * Choose center value as default tx serdes setting
6622 			 * until changed through module parameter.
6623 			 */
6624 			ppd->cpspec->no_eep = IS_QMH(dd) ?
6625 				TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6626 		} else
6627 			cp->h1_val = H1_FORCE_VAL;
6628 
6629 		/* Avoid writes to chip for mini_init */
6630 		if (!qib_mini_init)
6631 			write_7322_init_portregs(ppd);
6632 
6633 		init_timer(&cp->chase_timer);
6634 		cp->chase_timer.function = reenable_chase;
6635 		cp->chase_timer.data = (unsigned long)ppd;
6636 
6637 		ppd++;
6638 	}
6639 
6640 	dd->rcvhdrentsize = qib_rcvhdrentsize ?
6641 		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6642 	dd->rcvhdrsize = qib_rcvhdrsize ?
6643 		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
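	/*
	 * The receive header flags occupy the final 64 bits (two dwords)
	 * of each rcvhdrq entry, so their dword offset is entsize - 2.
	 */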
6644 	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6645 
6646 	/* we always allocate at least 2048 bytes for eager buffers */
6647 	dd->rcvegrbufsize = max(mtu, 2048);
6648 	BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
6649 	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
6650 
6651 	qib_7322_tidtemplate(dd);
6652 
6653 	/*
6654 	 * We can request a receive interrupt for 1 or
6655 	 * more packets from current offset.
6656 	 */
6657 	dd->rhdrhead_intr_off =
6658 		(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6659 
6660 	/* setup the stats timer; the add_timer is done at end of init */
6661 	init_timer(&dd->stats_timer);
6662 	dd->stats_timer.function = qib_get_7322_faststats;
6663 	dd->stats_timer.data = (unsigned long) dd;
6664 
6665 	dd->ureg_align = 0x10000;  /* 64KB alignment */
6666 
6667 	dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6668 
6669 	qib_7322_config_ctxts(dd);
6670 	qib_set_ctxtcnt(dd);
6671 
6672 	if (qib_wc_pat) {
6673 		resource_size_t vl15off;
6674 		/*
6675 		 * We do not set WC on the VL15 buffers to avoid
6676 		 * a rare problem with unaligned writes from
6677 		 * interrupt-flushed store buffers, so we need
6678 		 * to map those separately here.  We can't solve
6679 		 * this for the rarely used mtrr case.
6680 		 */
6681 		ret = init_chip_wc_pat(dd, 0);
6682 		if (ret)
6683 			goto bail;
6684 
6685 		/* vl15 buffers start just after the 4k buffers */
6686 		vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6687 			dd->piobcnt4k * dd->align4k;
6688 		dd->piovl15base	= ioremap_nocache(vl15off,
6689 						  NUM_VL15_BUFS * dd->align4k);
6690 		if (!dd->piovl15base) {
6691 			ret = -ENOMEM;
6692 			goto bail;
6693 		}
6694 	}
6695 	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6696 
6697 	ret = 0;
6698 	if (qib_mini_init)
6699 		goto bail;
6700 	if (!dd->num_pports) {
6701 		qib_dev_err(dd, "No ports enabled, giving up initialization\n");
		goto bail; /* not treated as an error, so the cause can still be examined */
6703 	}
6704 
6705 	write_7322_initregs(dd);
6706 	ret = qib_create_ctxts(dd);
6707 	init_7322_cntrnames(dd);
6708 
6709 	updthresh = 8U; /* update threshold */
6710 
	/*
	 * Use all of the 4KB buffers for kernel SDMA (none if SDMA is
	 * disabled).  Reserve for other kernel use (sending SMI, MAD,
	 * and ACKs) either the update threshold amount or 3 buffers,
	 * whichever is greater; if SDMA is disabled, the kernel keeps
	 * all the 4k buffers anyway.
	 * If the reservation were less than the update threshold, we
	 * could wait a long time for an update.  Coded this way because
	 * we sometimes change the update threshold for various reasons,
	 * and we want this to remain robust.
	 */
6721 	if (dd->flags & QIB_HAS_SEND_DMA) {
6722 		dd->cspec->sdmabufcnt = dd->piobcnt4k;
6723 		sbufs = updthresh > 3 ? updthresh : 3;
6724 	} else {
6725 		dd->cspec->sdmabufcnt = 0;
6726 		sbufs = dd->piobcnt4k;
6727 	}
6728 	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6729 		dd->cspec->sdmabufcnt;
6730 	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6731 	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6732 	dd->last_pio = dd->cspec->lastbuf_for_pio;
6733 	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6734 		dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6735 
6736 	/*
6737 	 * If we have 16 user contexts, we will have 7 sbufs
6738 	 * per context, so reduce the update threshold to match.  We
	 * want to update before we actually run out, so at low
	 * pbufs/ctxt give ourselves some margin.
6741 	 */
6742 	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6743 		updthresh = dd->pbufsctxt - 2;
6744 	dd->cspec->updthresh_dflt = updthresh;
6745 	dd->cspec->updthresh = updthresh;
6746 
6747 	/* before full enable, no interrupts, no locking needed */
6748 	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6749 			     << SYM_LSB(SendCtrl, AvailUpdThld)) |
6750 			SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6751 
6752 	dd->psxmitwait_supported = 1;
6753 	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6754 bail:
6755 	if (!dd->ctxtcnt)
6756 		dd->ctxtcnt = 1; /* for other initialization code */
6757 
6758 	return ret;
6759 }
6760 
6761 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6762 					u32 *pbufnum)
6763 {
6764 	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6765 	struct qib_devdata *dd = ppd->dd;
6766 
6767 	/* last is same for 2k and 4k, because we use 4k if all 2k busy */
6768 	if (pbc & PBC_7322_VL15_SEND) {
6769 		first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6770 		last = first;
6771 	} else {
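		/* non-VL15: start at the 2K buffers unless the packet needs a 4K buffer */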
6772 		if ((plen + 1) > dd->piosize2kmax_dwords)
6773 			first = dd->piobcnt2k;
6774 		else
6775 			first = 0;
6776 		last = dd->cspec->lastbuf_for_pio;
6777 	}
6778 	return qib_getsendbuf_range(dd, pbufnum, first, last);
6779 }
6780 
6781 static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6782 				     u32 start)
6783 {
6784 	qib_write_kreg_port(ppd, krp_psinterval, intv);
6785 	qib_write_kreg_port(ppd, krp_psstart, start);
6786 }
6787 
6788 /*
6789  * Must be called with sdma_lock held, or before init finished.
6790  */
6791 static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6792 {
6793 	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6794 }
6795 
6796 /*
6797  * sdma_lock should be acquired before calling this routine
6798  */
6799 static void dump_sdma_7322_state(struct qib_pportdata *ppd)
6800 {
6801 	u64 reg, reg1, reg2;
6802 
6803 	reg = qib_read_kreg_port(ppd, krp_senddmastatus);
6804 	qib_dev_porterr(ppd->dd, ppd->port,
6805 		"SDMA senddmastatus: 0x%016llx\n", reg);
6806 
6807 	reg = qib_read_kreg_port(ppd, krp_sendctrl);
6808 	qib_dev_porterr(ppd->dd, ppd->port,
6809 		"SDMA sendctrl: 0x%016llx\n", reg);
6810 
6811 	reg = qib_read_kreg_port(ppd, krp_senddmabase);
6812 	qib_dev_porterr(ppd->dd, ppd->port,
6813 		"SDMA senddmabase: 0x%016llx\n", reg);
6814 
6815 	reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
6816 	reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
6817 	reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
6818 	qib_dev_porterr(ppd->dd, ppd->port,
6819 		"SDMA senddmabufmask 0:%llx  1:%llx  2:%llx\n",
6820 		 reg, reg1, reg2);
6821 
6822 	/* get bufuse bits, clear them, and print them again if non-zero */
6823 	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6824 	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
6825 	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
	qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
	qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
6829 	/* 0 and 1 should always be zero, so print as short form */
6830 	qib_dev_porterr(ppd->dd, ppd->port,
6831 		 "SDMA current senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6832 		 reg, reg1, reg2);
6833 	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6834 	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6835 	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6836 	/* 0 and 1 should always be zero, so print as short form */
6837 	qib_dev_porterr(ppd->dd, ppd->port,
6838 		 "SDMA cleared senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6839 		 reg, reg1, reg2);
6840 
6841 	reg = qib_read_kreg_port(ppd, krp_senddmatail);
6842 	qib_dev_porterr(ppd->dd, ppd->port,
6843 		"SDMA senddmatail: 0x%016llx\n", reg);
6844 
6845 	reg = qib_read_kreg_port(ppd, krp_senddmahead);
6846 	qib_dev_porterr(ppd->dd, ppd->port,
6847 		"SDMA senddmahead: 0x%016llx\n", reg);
6848 
6849 	reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
6850 	qib_dev_porterr(ppd->dd, ppd->port,
6851 		"SDMA senddmaheadaddr: 0x%016llx\n", reg);
6852 
6853 	reg = qib_read_kreg_port(ppd, krp_senddmalengen);
6854 	qib_dev_porterr(ppd->dd, ppd->port,
6855 		"SDMA senddmalengen: 0x%016llx\n", reg);
6856 
6857 	reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
6858 	qib_dev_porterr(ppd->dd, ppd->port,
6859 		"SDMA senddmadesccnt: 0x%016llx\n", reg);
6860 
6861 	reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
6862 	qib_dev_porterr(ppd->dd, ppd->port,
6863 		"SDMA senddmaidlecnt: 0x%016llx\n", reg);
6864 
6865 	reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
6866 	qib_dev_porterr(ppd->dd, ppd->port,
6867 		"SDMA senddmapriorityhld: 0x%016llx\n", reg);
6868 
6869 	reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
6870 	qib_dev_porterr(ppd->dd, ppd->port,
6871 		"SDMA senddmareloadcnt: 0x%016llx\n", reg);
6872 
6873 	dump_sdma_state(ppd);
6874 }
6875 
6876 static struct sdma_set_state_action sdma_7322_action_table[] = {
6877 	[qib_sdma_state_s00_hw_down] = {
6878 		.go_s99_running_tofalse = 1,
6879 		.op_enable = 0,
6880 		.op_intenable = 0,
6881 		.op_halt = 0,
6882 		.op_drain = 0,
6883 	},
6884 	[qib_sdma_state_s10_hw_start_up_wait] = {
6885 		.op_enable = 0,
6886 		.op_intenable = 1,
6887 		.op_halt = 1,
6888 		.op_drain = 0,
6889 	},
6890 	[qib_sdma_state_s20_idle] = {
6891 		.op_enable = 1,
6892 		.op_intenable = 1,
6893 		.op_halt = 1,
6894 		.op_drain = 0,
6895 	},
6896 	[qib_sdma_state_s30_sw_clean_up_wait] = {
6897 		.op_enable = 0,
6898 		.op_intenable = 1,
6899 		.op_halt = 1,
6900 		.op_drain = 0,
6901 	},
6902 	[qib_sdma_state_s40_hw_clean_up_wait] = {
6903 		.op_enable = 1,
6904 		.op_intenable = 1,
6905 		.op_halt = 1,
6906 		.op_drain = 0,
6907 	},
6908 	[qib_sdma_state_s50_hw_halt_wait] = {
6909 		.op_enable = 1,
6910 		.op_intenable = 1,
6911 		.op_halt = 1,
6912 		.op_drain = 1,
6913 	},
6914 	[qib_sdma_state_s99_running] = {
6915 		.op_enable = 1,
6916 		.op_intenable = 1,
6917 		.op_halt = 0,
6918 		.op_drain = 0,
6919 		.go_s99_running_totrue = 1,
6920 	},
6921 };
6922 
6923 static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6924 {
6925 	ppd->sdma_state.set_state_action = sdma_7322_action_table;
6926 }
6927 
6928 static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6929 {
6930 	struct qib_devdata *dd = ppd->dd;
6931 	unsigned lastbuf, erstbuf;
6932 	u64 senddmabufmask[3] = { 0 };
6933 	int n, ret = 0;
6934 
6935 	qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6936 	qib_sdma_7322_setlengen(ppd);
6937 	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6938 	qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6939 	qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6940 	qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6941 
6942 	if (dd->num_pports)
6943 		n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6944 	else
6945 		n = dd->cspec->sdmabufcnt; /* failsafe for init */
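	/*
	 * The SDMA buffers sit at the top of the PIO buffer space; on a
	 * dual-port card port 1 takes the lower half of that region and
	 * port 2 the upper half, while a single-port card takes it all.
	 */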
6946 	erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6947 		((dd->num_pports == 1 || ppd->port == 2) ? n :
6948 		dd->cspec->sdmabufcnt);
6949 	lastbuf = erstbuf + n;
6950 
6951 	ppd->sdma_state.first_sendbuf = erstbuf;
6952 	ppd->sdma_state.last_sendbuf = lastbuf;
6953 	for (; erstbuf < lastbuf; ++erstbuf) {
6954 		unsigned word = erstbuf / BITS_PER_LONG;
6955 		unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6956 
6957 		BUG_ON(word >= 3);
6958 		senddmabufmask[word] |= 1ULL << bit;
6959 	}
6960 	qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6961 	qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6962 	qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6963 	return ret;
6964 }
6965 
6966 /* sdma_lock must be held */
6967 static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6968 {
6969 	struct qib_devdata *dd = ppd->dd;
6970 	int sane;
6971 	int use_dmahead;
6972 	u16 swhead;
6973 	u16 swtail;
6974 	u16 cnt;
6975 	u16 hwhead;
6976 
6977 	use_dmahead = __qib_sdma_running(ppd) &&
6978 		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
6979 retry:
6980 	hwhead = use_dmahead ?
6981 		(u16) le64_to_cpu(*ppd->sdma_head_dma) :
6982 		(u16) qib_read_kreg_port(ppd, krp_senddmahead);
6983 
6984 	swhead = ppd->sdma_descq_head;
6985 	swtail = ppd->sdma_descq_tail;
6986 	cnt = ppd->sdma_descq_cnt;
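	/*
	 * hwhead is sane only if it falls within the range of descriptors
	 * the hardware could currently be processing, i.e. between swhead
	 * and swtail, allowing for ring wrap-around.
	 */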
6987 
6988 	if (swhead < swtail)
6989 		/* not wrapped */
		sane = (hwhead >= swhead) && (hwhead <= swtail);
6991 	else if (swhead > swtail)
6992 		/* wrapped around */
6993 		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6994 			(hwhead <= swtail);
6995 	else
6996 		/* empty */
6997 		sane = (hwhead == swhead);
6998 
6999 	if (unlikely(!sane)) {
7000 		if (use_dmahead) {
7001 			/* try one more time, directly from the register */
7002 			use_dmahead = 0;
7003 			goto retry;
7004 		}
7005 		/* proceed as if no progress */
7006 		hwhead = swhead;
7007 	}
7008 
7009 	return hwhead;
7010 }
7011 
7012 static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
7013 {
7014 	u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
7015 
7016 	return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
7017 	       (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
7018 	       !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
7019 	       !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
7020 }
7021 
7022 /*
7023  * Compute the amount of delay before sending the next packet if the
7024  * port's send rate differs from the static rate set for the QP.
7025  * The delay affects the next packet and the amount of the delay is
 * based on the length of this packet.
7027  */
7028 static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
7029 				   u8 srate, u8 vl)
7030 {
7031 	u8 snd_mult = ppd->delay_mult;
7032 	u8 rcv_mult = ib_rate_to_delay[srate];
7033 	u32 ret;
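	/*
	 * rcv_mult > snd_mult means the QP's static rate is slower than
	 * the port rate; in that case delay by roughly this packet's
	 * transmit time at the port rate (half its dword length times
	 * the port's delay multiplier).
	 */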
7034 
7035 	ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
7036 
7037 	/* Indicate VL15, else set the VL in the control word */
7038 	if (vl == 15)
7039 		ret |= PBC_7322_VL15_SEND_CTRL;
7040 	else
7041 		ret |= vl << PBC_VL_NUM_LSB;
7042 	ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
7043 
7044 	return ret;
7045 }
7046 
7047 /*
7048  * Enable the per-port VL15 send buffers for use.
7049  * They follow the rest of the buffers, without a config parameter.
7050  * This was in initregs, but that is done before the shadow
7051  * is set up, and this has to be done after the shadow is
7052  * set up.
7053  */
7054 static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
7055 {
7056 	unsigned vl15bufs;
7057 
7058 	vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
7059 	qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
7060 			       TXCHK_CHG_TYPE_KERN, NULL);
7061 }
7062 
7063 static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
7064 {
7065 	if (rcd->ctxt < NUM_IB_PORTS) {
7066 		if (rcd->dd->num_pports > 1) {
7067 			rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
7068 			rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
7069 		} else {
7070 			rcd->rcvegrcnt = KCTXT0_EGRCNT;
7071 			rcd->rcvegr_tid_base = 0;
7072 		}
7073 	} else {
7074 		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
7075 		rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
7076 			(rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
7077 	}
7078 }
7079 
7080 #define QTXSLEEPS 5000
7081 static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
7082 				  u32 len, u32 which, struct qib_ctxtdata *rcd)
7083 {
7084 	int i;
7085 	const int last = start + len - 1;
7086 	const int lastr = last / BITS_PER_LONG;
7087 	u32 sleeps = 0;
7088 	int wait = rcd != NULL;
7089 	unsigned long flags;
7090 
7091 	while (wait) {
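		/*
		 * If this change is on behalf of a user context (rcd is
		 * set), poll the DMA'ed pioavail shadow and wait (up to
		 * QTXSLEEPS iterations of 20ms) until no buffer in the
		 * range is still marked busy, blipping the avail update
		 * so the shadow copy gets refreshed.
		 */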
7092 		unsigned long shadow;
7093 		int cstart, previ = -1;
7094 
7095 		/*
		 * When flipping from kernel to user, we can't change
		 * the checking type if the buffer is allocated to the
		 * driver.  It's OK in the other direction, because that
		 * comes from close, and we have just disarmed all the
		 * buffers.  All the kernel-to-kernel changes are also
		 * OK.
7102 		 */
7103 		for (cstart = start; cstart <= last; cstart++) {
7104 			i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7105 				/ BITS_PER_LONG;
7106 			if (i != previ) {
7107 				shadow = (unsigned long)
7108 					le64_to_cpu(dd->pioavailregs_dma[i]);
7109 				previ = i;
7110 			}
7111 			if (test_bit(((2 * cstart) +
7112 				      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7113 				     % BITS_PER_LONG, &shadow))
7114 				break;
7115 		}
7116 
7117 		if (cstart > last)
7118 			break;
7119 
7120 		if (sleeps == QTXSLEEPS)
7121 			break;
7122 		/* make sure we see an updated copy next time around */
7123 		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7124 		sleeps++;
7125 		msleep(20);
7126 	}
7127 
7128 	switch (which) {
7129 	case TXCHK_CHG_TYPE_DIS1:
7130 		/*
7131 		 * disable checking on a range; used by diags; just
7132 		 * one buffer, but still written generically
7133 		 */
7134 		for (i = start; i <= last; i++)
7135 			clear_bit(i, dd->cspec->sendchkenable);
7136 		break;
7137 
7138 	case TXCHK_CHG_TYPE_ENAB1:
7139 		/*
7140 		 * (re)enable checking on a range; used by diags; just
7141 		 * one buffer, but still written generically; read
7142 		 * scratch to be sure buffer actually triggered, not
7143 		 * just flushed from processor.
7144 		 */
7145 		qib_read_kreg32(dd, kr_scratch);
7146 		for (i = start; i <= last; i++)
7147 			set_bit(i, dd->cspec->sendchkenable);
7148 		break;
7149 
7150 	case TXCHK_CHG_TYPE_KERN:
7151 		/* usable by kernel */
7152 		for (i = start; i <= last; i++) {
7153 			set_bit(i, dd->cspec->sendibchk);
7154 			clear_bit(i, dd->cspec->sendgrhchk);
7155 		}
7156 		spin_lock_irqsave(&dd->uctxt_lock, flags);
7157 		/* see if we need to raise avail update threshold */
7158 		for (i = dd->first_user_ctxt;
7159 		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
7160 		     && i < dd->cfgctxts; i++)
7161 			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
7162 			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
7163 			   < dd->cspec->updthresh_dflt)
7164 				break;
7165 		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
7166 		if (i == dd->cfgctxts) {
7167 			spin_lock_irqsave(&dd->sendctrl_lock, flags);
7168 			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
7169 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7170 			dd->sendctrl |= (dd->cspec->updthresh &
7171 					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
7172 					   SYM_LSB(SendCtrl, AvailUpdThld);
7173 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7174 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7175 		}
7176 		break;
7177 
7178 	case TXCHK_CHG_TYPE_USER:
7179 		/* for user process */
7180 		for (i = start; i <= last; i++) {
7181 			clear_bit(i, dd->cspec->sendibchk);
7182 			set_bit(i, dd->cspec->sendgrhchk);
7183 		}
7184 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
7185 		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
7186 			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
7187 			dd->cspec->updthresh = (rcd->piocnt /
7188 						rcd->subctxt_cnt) - 1;
7189 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7190 			dd->sendctrl |= (dd->cspec->updthresh &
7191 					SYM_RMASK(SendCtrl, AvailUpdThld))
7192 					<< SYM_LSB(SendCtrl, AvailUpdThld);
7193 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7194 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7195 		} else
7196 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7197 		break;
7198 
7199 	default:
7200 		break;
7201 	}
7202 
7203 	for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
7204 		qib_write_kreg(dd, kr_sendcheckmask + i,
7205 			       dd->cspec->sendchkenable[i]);
7206 
7207 	for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
7208 		qib_write_kreg(dd, kr_sendgrhcheckmask + i,
7209 			       dd->cspec->sendgrhchk[i]);
7210 		qib_write_kreg(dd, kr_sendibpktmask + i,
7211 			       dd->cspec->sendibchk[i]);
7212 	}
7213 
7214 	/*
7215 	 * Be sure whatever we did was seen by the chip and acted upon,
7216 	 * before we return.  Mostly important for which >= 2.
7217 	 */
7218 	qib_read_kreg32(dd, kr_scratch);
7219 }
7220 
7221 
7222 /* useful for trigger analyzers, etc. */
7223 static void writescratch(struct qib_devdata *dd, u32 val)
7224 {
7225 	qib_write_kreg(dd, kr_scratch, val);
7226 }
7227 
7228 /* Dummy for now, use chip regs soon */
7229 static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
7230 {
7231 	return -ENXIO;
7232 }
7233 
7234 /**
7235  * qib_init_iba7322_funcs - set up the chip-specific function pointers
7236  * @dev: the pci_dev for qlogic_ib device
7237  * @ent: pci_device_id struct for this dev
7238  *
7239  * Also allocates, inits, and returns the devdata struct for this
7240  * device instance
7241  *
7242  * This is global, and is called directly at init to set up the
7243  * chip-specific function pointers for later use.
7244  */
7245 struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
7246 					   const struct pci_device_id *ent)
7247 {
7248 	struct qib_devdata *dd;
7249 	int ret, i;
7250 	u32 tabsize, actual_cnt = 0;
7251 
7252 	dd = qib_alloc_devdata(pdev,
7253 		NUM_IB_PORTS * sizeof(struct qib_pportdata) +
7254 		sizeof(struct qib_chip_specific) +
7255 		NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
7256 	if (IS_ERR(dd))
7257 		goto bail;
7258 
7259 	dd->f_bringup_serdes    = qib_7322_bringup_serdes;
7260 	dd->f_cleanup           = qib_setup_7322_cleanup;
7261 	dd->f_clear_tids        = qib_7322_clear_tids;
7262 	dd->f_free_irq          = qib_7322_free_irq;
7263 	dd->f_get_base_info     = qib_7322_get_base_info;
7264 	dd->f_get_msgheader     = qib_7322_get_msgheader;
7265 	dd->f_getsendbuf        = qib_7322_getsendbuf;
7266 	dd->f_gpio_mod          = gpio_7322_mod;
7267 	dd->f_eeprom_wen        = qib_7322_eeprom_wen;
7268 	dd->f_hdrqempty         = qib_7322_hdrqempty;
7269 	dd->f_ib_updown         = qib_7322_ib_updown;
7270 	dd->f_init_ctxt         = qib_7322_init_ctxt;
7271 	dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
7272 	dd->f_intr_fallback     = qib_7322_intr_fallback;
7273 	dd->f_late_initreg      = qib_late_7322_initreg;
7274 	dd->f_setpbc_control    = qib_7322_setpbc_control;
7275 	dd->f_portcntr          = qib_portcntr_7322;
7276 	dd->f_put_tid           = qib_7322_put_tid;
7277 	dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
7278 	dd->f_rcvctrl           = rcvctrl_7322_mod;
7279 	dd->f_read_cntrs        = qib_read_7322cntrs;
7280 	dd->f_read_portcntrs    = qib_read_7322portcntrs;
7281 	dd->f_reset             = qib_do_7322_reset;
7282 	dd->f_init_sdma_regs    = init_sdma_7322_regs;
7283 	dd->f_sdma_busy         = qib_sdma_7322_busy;
7284 	dd->f_sdma_gethead      = qib_sdma_7322_gethead;
7285 	dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
7286 	dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
7287 	dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
7288 	dd->f_sendctrl          = sendctrl_7322_mod;
7289 	dd->f_set_armlaunch     = qib_set_7322_armlaunch;
7290 	dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
7291 	dd->f_iblink_state      = qib_7322_iblink_state;
7292 	dd->f_ibphys_portstate  = qib_7322_phys_portstate;
7293 	dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
7294 	dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
7295 	dd->f_set_ib_loopback   = qib_7322_set_loopback;
7296 	dd->f_get_ib_table      = qib_7322_get_ib_table;
7297 	dd->f_set_ib_table      = qib_7322_set_ib_table;
7298 	dd->f_set_intr_state    = qib_7322_set_intr_state;
7299 	dd->f_setextled         = qib_setup_7322_setextled;
7300 	dd->f_txchk_change      = qib_7322_txchk_change;
7301 	dd->f_update_usrhead    = qib_update_7322_usrhead;
7302 	dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
7303 	dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
7304 	dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
7305 	dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
7306 	dd->f_sdma_init_early   = qib_7322_sdma_init_early;
7307 	dd->f_writescratch      = writescratch;
7308 	dd->f_tempsense_rd	= qib_7322_tempsense_rd;
7309 #ifdef CONFIG_INFINIBAND_QIB_DCA
7310 	dd->f_notify_dca	= qib_7322_notify_dca;
7311 #endif
7312 	/*
7313 	 * Do remaining PCIe setup and save PCIe values in dd.
7314 	 * Any error printing is already done by the init code.
7315 	 * On return, we have the chip mapped, but chip registers
7316 	 * are not set up until start of qib_init_7322_variables.
7317 	 */
7318 	ret = qib_pcie_ddinit(dd, pdev, ent);
7319 	if (ret < 0)
7320 		goto bail_free;
7321 
7322 	/* initialize chip-specific variables */
7323 	ret = qib_init_7322_variables(dd);
7324 	if (ret)
7325 		goto bail_cleanup;
7326 
7327 	if (qib_mini_init || !dd->num_pports)
7328 		goto bail;
7329 
7330 	/*
7331 	 * Determine number of vectors we want; depends on port count
7332 	 * and number of configured kernel receive queues actually used.
7333 	 * Should also depend on whether sdma is enabled or not, but
7334 	 * that's such a rare testing case it's not worth worrying about.
7335 	 */
7336 	tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
7337 	for (i = 0; i < tabsize; i++)
7338 		if ((i < ARRAY_SIZE(irq_table) &&
7339 		     irq_table[i].port <= dd->num_pports) ||
7340 		    (i >= ARRAY_SIZE(irq_table) &&
7341 		     dd->rcd[i - ARRAY_SIZE(irq_table)]))
7342 			actual_cnt++;
	/* contexts 0 and 1 (one per port) don't get their own vector in this mode */
7344 	if (qib_krcvq01_no_msi)
7345 		actual_cnt -= dd->num_pports;
7346 
7347 	tabsize = actual_cnt;
7348 	dd->cspec->msix_entries = kzalloc(tabsize *
7349 			sizeof(struct qib_msix_entry), GFP_KERNEL);
7350 	if (!dd->cspec->msix_entries) {
7351 		qib_dev_err(dd, "No memory for MSIx table\n");
7352 		tabsize = 0;
7353 	}
7354 	for (i = 0; i < tabsize; i++)
7355 		dd->cspec->msix_entries[i].msix.entry = i;
7356 
7357 	if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
7358 		qib_dev_err(dd,
7359 			"Failed to setup PCIe or interrupts; continuing anyway\n");
7360 	/* may be less than we wanted, if not enough available */
7361 	dd->cspec->num_msix_entries = tabsize;
7362 
7363 	/* setup interrupt handler */
7364 	qib_setup_7322_interrupt(dd, 1);
7365 
7366 	/* clear diagctrl register, in case diags were running and crashed */
7367 	qib_write_kreg(dd, kr_hwdiagctrl, 0);
7368 #ifdef CONFIG_INFINIBAND_QIB_DCA
7369 	if (!dca_add_requester(&pdev->dev)) {
7370 		qib_devinfo(dd->pcidev, "DCA enabled\n");
7371 		dd->flags |= QIB_DCA_ENABLED;
7372 		qib_setup_dca(dd);
7373 	}
7374 #endif
7375 	goto bail;
7376 
7377 bail_cleanup:
7378 	qib_pcie_ddcleanup(dd);
7379 bail_free:
7380 	qib_free_devdata(dd);
7381 	dd = ERR_PTR(ret);
7382 bail:
7383 	return dd;
7384 }
7385 
7386 /*
 * Set the table entry at the specified index from the table specified.
7388  * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
7389  * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
7390  * 'idx' below addresses the correct entry, while its 4 LSBs select the
7391  * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
7392  */
7393 #define DDS_ENT_AMP_LSB 14
7394 #define DDS_ENT_MAIN_LSB 9
7395 #define DDS_ENT_POST_LSB 5
7396 #define DDS_ENT_PRE_XTRA_LSB 3
7397 #define DDS_ENT_PRE_LSB 0
7398 
7399 /*
7400  * Set one entry in the TxDDS table for spec'd port
7401  * ridx picks one of the entries, while tp points
7402  * to the appropriate table entry.
7403  */
7404 static void set_txdds(struct qib_pportdata *ppd, int ridx,
7405 		      const struct txdds_ent *tp)
7406 {
7407 	struct qib_devdata *dd = ppd->dd;
7408 	u32 pack_ent;
7409 	int regidx;
7410 
7411 	/* Get correct offset in chip-space, and in source table */
7412 	regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
7413 	/*
7414 	 * We do not use qib_write_kreg_port() because it was intended
7415 	 * only for registers in the lower "port specific" pages.
	 * So do the index calculation by hand.
7417 	 */
7418 	if (ppd->hw_pidx)
7419 		regidx += (dd->palign / sizeof(u64));
7420 
7421 	pack_ent = tp->amp << DDS_ENT_AMP_LSB;
7422 	pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
7423 	pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
7424 	pack_ent |= tp->post << DDS_ENT_POST_LSB;
7425 	qib_write_kreg(dd, regidx, pack_ent);
7426 	/* Prevent back-to-back writes by hitting scratch */
7427 	qib_write_kreg(ppd->dd, kr_scratch, 0);
7428 }
7429 
7430 static const struct vendor_txdds_ent vendor_txdds[] = {
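	/*
	 * Each entry: vendor OUI, part number (NULL matches any part),
	 * then {amp, pre, main, post} tx settings for SDR, DDR, and QDR.
	 */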
7431 	{ /* Amphenol 1m 30awg NoEq */
7432 		{ 0x41, 0x50, 0x48 }, "584470002       ",
7433 		{ 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
7434 	},
7435 	{ /* Amphenol 3m 28awg NoEq */
7436 		{ 0x41, 0x50, 0x48 }, "584470004       ",
7437 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
7438 	},
7439 	{ /* Finisar 3m OM2 Optical */
7440 		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
7441 		{  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
7442 	},
7443 	{ /* Finisar 30m OM2 Optical */
7444 		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
7445 		{  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
7446 	},
7447 	{ /* Finisar Default OM2 Optical */
7448 		{ 0x00, 0x90, 0x65 }, NULL,
7449 		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
7450 	},
7451 	{ /* Gore 1m 30awg NoEq */
7452 		{ 0x00, 0x21, 0x77 }, "QSN3300-1       ",
7453 		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
7454 	},
7455 	{ /* Gore 2m 30awg NoEq */
7456 		{ 0x00, 0x21, 0x77 }, "QSN3300-2       ",
7457 		{  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
7458 	},
7459 	{ /* Gore 1m 28awg NoEq */
7460 		{ 0x00, 0x21, 0x77 }, "QSN3800-1       ",
7461 		{  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
7462 	},
7463 	{ /* Gore 3m 28awg NoEq */
7464 		{ 0x00, 0x21, 0x77 }, "QSN3800-3       ",
7465 		{  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
7466 	},
7467 	{ /* Gore 5m 24awg Eq */
7468 		{ 0x00, 0x21, 0x77 }, "QSN7000-5       ",
7469 		{  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
7470 	},
7471 	{ /* Gore 7m 24awg Eq */
7472 		{ 0x00, 0x21, 0x77 }, "QSN7000-7       ",
7473 		{  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
7474 	},
7475 	{ /* Gore 5m 26awg Eq */
7476 		{ 0x00, 0x21, 0x77 }, "QSN7600-5       ",
7477 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
7478 	},
7479 	{ /* Gore 7m 26awg Eq */
7480 		{ 0x00, 0x21, 0x77 }, "QSN7600-7       ",
7481 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  10,  1,  8, 15 },
7482 	},
7483 	{ /* Intersil 12m 24awg Active */
7484 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7485 		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
7486 	},
7487 	{ /* Intersil 10m 28awg Active */
7488 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7489 		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
7490 	},
7491 	{ /* Intersil 7m 30awg Active */
7492 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7493 		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
7494 	},
7495 	{ /* Intersil 5m 32awg Active */
7496 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7497 		{  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
7498 	},
7499 	{ /* Intersil Default Active */
7500 		{ 0x00, 0x30, 0xB4 }, NULL,
7501 		{  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
7502 	},
7503 	{ /* Luxtera 20m Active Optical */
7504 		{ 0x00, 0x25, 0x63 }, NULL,
7505 		{  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0,  12 },
7506 	},
7507 	{ /* Molex 1M Cu loopback */
7508 		{ 0x00, 0x09, 0x3A }, "74763-0025      ",
7509 		{  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
7510 	},
7511 	{ /* Molex 2m 28awg NoEq */
7512 		{ 0x00, 0x09, 0x3A }, "74757-2201      ",
7513 		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
7514 	},
7515 };
7516 
7517 static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7518 	/* amp, pre, main, post */
7519 	{  2, 2, 15,  6 },	/* Loopback */
7520 	{  0, 0,  0,  1 },	/*  2 dB */
7521 	{  0, 0,  0,  2 },	/*  3 dB */
7522 	{  0, 0,  0,  3 },	/*  4 dB */
7523 	{  0, 0,  0,  4 },	/*  5 dB */
7524 	{  0, 0,  0,  5 },	/*  6 dB */
7525 	{  0, 0,  0,  6 },	/*  7 dB */
7526 	{  0, 0,  0,  7 },	/*  8 dB */
7527 	{  0, 0,  0,  8 },	/*  9 dB */
7528 	{  0, 0,  0,  9 },	/* 10 dB */
7529 	{  0, 0,  0, 10 },	/* 11 dB */
7530 	{  0, 0,  0, 11 },	/* 12 dB */
7531 	{  0, 0,  0, 12 },	/* 13 dB */
7532 	{  0, 0,  0, 13 },	/* 14 dB */
7533 	{  0, 0,  0, 14 },	/* 15 dB */
7534 	{  0, 0,  0, 15 },	/* 16 dB */
7535 };
7536 
7537 static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7538 	/* amp, pre, main, post */
7539 	{  2, 2, 15,  6 },	/* Loopback */
7540 	{  0, 0,  0,  8 },	/*  2 dB */
7541 	{  0, 0,  0,  8 },	/*  3 dB */
7542 	{  0, 0,  0,  9 },	/*  4 dB */
7543 	{  0, 0,  0,  9 },	/*  5 dB */
7544 	{  0, 0,  0, 10 },	/*  6 dB */
7545 	{  0, 0,  0, 10 },	/*  7 dB */
7546 	{  0, 0,  0, 11 },	/*  8 dB */
7547 	{  0, 0,  0, 11 },	/*  9 dB */
7548 	{  0, 0,  0, 12 },	/* 10 dB */
7549 	{  0, 0,  0, 12 },	/* 11 dB */
7550 	{  0, 0,  0, 13 },	/* 12 dB */
7551 	{  0, 0,  0, 13 },	/* 13 dB */
7552 	{  0, 0,  0, 14 },	/* 14 dB */
7553 	{  0, 0,  0, 14 },	/* 15 dB */
7554 	{  0, 0,  0, 15 },	/* 16 dB */
7555 };
7556 
7557 static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7558 	/* amp, pre, main, post */
7559 	{  2, 2, 15,  6 },	/* Loopback */
7560 	{  0, 1,  0,  7 },	/*  2 dB (also QMH7342) */
7561 	{  0, 1,  0,  9 },	/*  3 dB (also QMH7342) */
7562 	{  0, 1,  0, 11 },	/*  4 dB */
7563 	{  0, 1,  0, 13 },	/*  5 dB */
7564 	{  0, 1,  0, 15 },	/*  6 dB */
7565 	{  0, 1,  3, 15 },	/*  7 dB */
7566 	{  0, 1,  7, 15 },	/*  8 dB */
7567 	{  0, 1,  7, 15 },	/*  9 dB */
7568 	{  0, 1,  8, 15 },	/* 10 dB */
7569 	{  0, 1,  9, 15 },	/* 11 dB */
7570 	{  0, 1, 10, 15 },	/* 12 dB */
7571 	{  0, 2,  6, 15 },	/* 13 dB */
7572 	{  0, 2,  7, 15 },	/* 14 dB */
7573 	{  0, 2,  8, 15 },	/* 15 dB */
7574 	{  0, 2,  9, 15 },	/* 16 dB */
7575 };
7576 
7577 /*
7578  * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7579  * These are mostly used for mez cards going through connectors
7580  * and backplane traces, but can be used to add other "unusual"
7581  * table values as well.
7582  */
7583 static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7584 	/* amp, pre, main, post */
7585 	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
7586 	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
7587 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
7588 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
7589 	{  0, 0, 0,  3 },	/* QMH7342 backplane settings */
7590 	{  0, 0, 0,  4 },	/* QMH7342 backplane settings */
7591 	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
7592 	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
7593 	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
7594 	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
7595 	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
7596 	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
7597 	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
7598 	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7599 	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7600 	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7601 	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7602 	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7603 };
7604 
7605 static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7606 	/* amp, pre, main, post */
7607 	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
7608 	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
7609 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
7610 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
7611 	{  0, 0, 0,  9 },	/* QMH7342 backplane settings */
7612 	{  0, 0, 0, 10 },	/* QMH7342 backplane settings */
7613 	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
7614 	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
7615 	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
7616 	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
7617 	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
7618 	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
7619 	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
7620 	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7621 	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7622 	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7623 	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7624 	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7625 };
7626 
7627 static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7628 	/* amp, pre, main, post */
7629 	{  0, 1,  0,  4 },	/* QMH7342 backplane settings */
7630 	{  0, 1,  0,  5 },	/* QMH7342 backplane settings */
7631 	{  0, 1,  0,  6 },	/* QMH7342 backplane settings */
7632 	{  0, 1,  0,  8 },	/* QMH7342 backplane settings */
7633 	{  0, 1,  0, 10 },	/* QMH7342 backplane settings */
7634 	{  0, 1,  0, 12 },	/* QMH7342 backplane settings */
7635 	{  0, 1,  4, 15 },	/* QME7342 backplane settings 1.0 */
7636 	{  0, 1,  3, 15 },	/* QME7342 backplane settings 1.0 */
7637 	{  0, 1,  0, 12 },	/* QME7342 backplane settings 1.0 */
7638 	{  0, 1,  0, 11 },	/* QME7342 backplane settings 1.0 */
7639 	{  0, 1,  0,  9 },	/* QME7342 backplane settings 1.0 */
7640 	{  0, 1,  0, 14 },	/* QME7342 backplane settings 1.0 */
7641 	{  0, 1,  2, 15 },	/* QME7342 backplane settings 1.0 */
7642 	{  0, 1,  0, 11 },      /* QME7342 backplane settings 1.1 */
7643 	{  0, 1,  0,  7 },      /* QME7342 backplane settings 1.1 */
7644 	{  0, 1,  0,  9 },      /* QME7342 backplane settings 1.1 */
7645 	{  0, 1,  0,  6 },      /* QME7342 backplane settings 1.1 */
7646 	{  0, 1,  0,  8 },      /* QME7342 backplane settings 1.1 */
7647 };
7648 
7649 static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7650 	/* amp, pre, main, post */
7651 	{ 0, 0, 0, 0 },         /* QME7342 mfg settings */
7652 	{ 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
7653 };
7654 
7655 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7656 					       unsigned atten)
7657 {
7658 	/*
7659 	 * The attenuation table starts at 2dB for entry 1,
7660 	 * with entry 0 being the loopback entry.
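	 * For example, atten == 5 selects entry 4, the 5 dB entry.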
7661 	 */
7662 	if (atten <= 2)
7663 		atten = 1;
7664 	else if (atten > TXDDS_TABLE_SZ)
7665 		atten = TXDDS_TABLE_SZ - 1;
7666 	else
7667 		atten--;
7668 	return txdds + atten;
7669 }
7670 
7671 /*
7672  * if override is set, the module parameter txselect has a value
7673  * for this specific port, so use it, rather than our normal mechanism.
7674  */
7675 static void find_best_ent(struct qib_pportdata *ppd,
7676 			  const struct txdds_ent **sdr_dds,
7677 			  const struct txdds_ent **ddr_dds,
7678 			  const struct txdds_ent **qdr_dds, int override)
7679 {
7680 	struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7681 	int idx;
7682 
7683 	/* Search table of known cables */
7684 	for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7685 		const struct vendor_txdds_ent *v = vendor_txdds + idx;
7686 
7687 		if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7688 		    (!v->partnum ||
7689 		     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7690 			*sdr_dds = &v->sdr;
7691 			*ddr_dds = &v->ddr;
7692 			*qdr_dds = &v->qdr;
7693 			return;
7694 		}
7695 	}
7696 
7697 	/* Active cables don't have attenuation so we only set SERDES
7698 	 * settings to account for the attenuation of the board traces. */
7699 	if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7700 		*sdr_dds = txdds_sdr + ppd->dd->board_atten;
7701 		*ddr_dds = txdds_ddr + ppd->dd->board_atten;
7702 		*qdr_dds = txdds_qdr + ppd->dd->board_atten;
7703 		return;
7704 	}
7705 
7706 	if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7707 						      qd->atten[1])) {
7708 		*sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7709 		*ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7710 		*qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7711 		return;
7712 	} else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7713 		/*
7714 		 * If we have no (or incomplete) data from the cable
7715 		 * EEPROM, or no QSFP, or override is set, use the
		 * module parameter value to index into the attenuation
7717 		 * table.
7718 		 */
7719 		idx = ppd->cpspec->no_eep;
7720 		*sdr_dds = &txdds_sdr[idx];
7721 		*ddr_dds = &txdds_ddr[idx];
7722 		*qdr_dds = &txdds_qdr[idx];
7723 	} else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7724 		/* similar to above, but index into the "extra" table. */
7725 		idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7726 		*sdr_dds = &txdds_extra_sdr[idx];
7727 		*ddr_dds = &txdds_extra_ddr[idx];
7728 		*qdr_dds = &txdds_extra_qdr[idx];
7729 	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7730 		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7731 					  TXDDS_MFG_SZ)) {
7732 		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7733 		pr_info("IB%u:%u use idx %u into txdds_mfg\n",
7734 			ppd->dd->unit, ppd->port, idx);
7735 		*sdr_dds = &txdds_extra_mfg[idx];
7736 		*ddr_dds = &txdds_extra_mfg[idx];
7737 		*qdr_dds = &txdds_extra_mfg[idx];
7738 	} else {
7739 		/* this shouldn't happen, it's range checked */
7740 		*sdr_dds = txdds_sdr + qib_long_atten;
7741 		*ddr_dds = txdds_ddr + qib_long_atten;
7742 		*qdr_dds = txdds_qdr + qib_long_atten;
7743 	}
7744 }
7745 
7746 static void init_txdds_table(struct qib_pportdata *ppd, int override)
7747 {
7748 	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7749 	struct txdds_ent *dds;
7750 	int idx;
7751 	int single_ent = 0;
7752 
7753 	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7754 
7755 	/* for mez cards or override, use the selected value for all entries */
7756 	if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7757 		single_ent = 1;
7758 
7759 	/* Fill in the first entry with the best entry found. */
7760 	set_txdds(ppd, 0, sdr_dds);
7761 	set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7762 	set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7763 	if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7764 		QIBL_LINKACTIVE)) {
7765 		dds = (struct txdds_ent *)(ppd->link_speed_active ==
7766 					   QIB_IB_QDR ?  qdr_dds :
7767 					   (ppd->link_speed_active ==
7768 					    QIB_IB_DDR ? ddr_dds : sdr_dds));
7769 		write_tx_serdes_param(ppd, dds);
7770 	}
7771 
7772 	/* Fill in the remaining entries with the default table values. */
7773 	for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7774 		set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7775 		set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7776 			  single_ent ? ddr_dds : txdds_ddr + idx);
7777 		set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7778 			  single_ent ? qdr_dds : txdds_qdr + idx);
7779 	}
7780 }
7781 
7782 #define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7783 #define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7784 #define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7785 #define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7786 #define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7787 #define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7788 #define AHB_TRANS_TRIES 10
7789 
7790 /*
7791  * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan4,
 * 5=subsystem, which is why most calls have "(chan + (chan >> 1))"
7793  * for the channel argument.
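 *
 * ahb_mod() does a read-modify-write of a SerDes register over the AHB
 * bus: it takes AHB access for the given quad, waits for ahb_rdy, reads
 * the current value when the mask is not all ones (to preserve the
 * unmasked bits), writes the merged value if the mask is non-zero, then
 * restores the previous access owner.  Returns the resulting value, or
 * 0xBAD0BAD on a timeout waiting for ahb_rdy.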
7794  */
7795 static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7796 		    u32 data, u32 mask)
7797 {
7798 	u32 rd_data, wr_data, sz_mask;
7799 	u64 trans, acc, prev_acc;
7800 	u32 ret = 0xBAD0BAD;
7801 	int tries;
7802 
7803 	prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7804 	/* From this point on, make sure we return access */
7805 	acc = (quad << 1) | 1;
7806 	qib_write_kreg(dd, KR_AHB_ACC, acc);
7807 
7808 	for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7809 		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7810 		if (trans & AHB_TRANS_RDY)
7811 			break;
7812 	}
7813 	if (tries >= AHB_TRANS_TRIES) {
7814 		qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7815 		goto bail;
7816 	}
7817 
	/*
	 * If mask is not all 1s, we need to read, but different SerDes
	 * entities have different sizes.
	 */
7821 	sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7822 	wr_data = data & mask & sz_mask;
7823 	if ((~mask & sz_mask) != 0) {
7824 		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7825 		qib_write_kreg(dd, KR_AHB_TRANS, trans);
7826 
7827 		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7828 			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7829 			if (trans & AHB_TRANS_RDY)
7830 				break;
7831 		}
7832 		if (tries >= AHB_TRANS_TRIES) {
7833 			qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7834 				    AHB_TRANS_TRIES);
7835 			goto bail;
7836 		}
		/* Re-read in case the host split the read and returned the data half first */
7838 		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7839 		rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7840 		wr_data |= (rd_data & ~mask & sz_mask);
7841 	}
7842 
7843 	/* If mask is not zero, we need to write. */
7844 	if (mask & sz_mask) {
7845 		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7846 		trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7847 		trans |= AHB_WR;
7848 		qib_write_kreg(dd, KR_AHB_TRANS, trans);
7849 
7850 		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7851 			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7852 			if (trans & AHB_TRANS_RDY)
7853 				break;
7854 		}
7855 		if (tries >= AHB_TRANS_TRIES) {
7856 			qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7857 				    AHB_TRANS_TRIES);
7858 			goto bail;
7859 		}
7860 	}
7861 	ret = wr_data;
7862 bail:
7863 	qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7864 	return ret;
7865 }
7866 
7867 static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7868 			     unsigned mask)
7869 {
7870 	struct qib_devdata *dd = ppd->dd;
7871 	int chan;
7872 	u32 rbc;
7873 
7874 	for (chan = 0; chan < SERDES_CHANS; ++chan) {
7875 		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7876 			data, mask);
7877 		rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7878 			      addr, 0, 0);
7879 	}
7880 }
7881 
7882 static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7883 {
7884 	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7885 	u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7886 
7887 	if (enable && !state) {
7888 		pr_info("IB%u:%u Turning LOS on\n",
7889 			ppd->dd->unit, ppd->port);
7890 		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7891 	} else if (!enable && state) {
7892 		pr_info("IB%u:%u Turning LOS off\n",
7893 			ppd->dd->unit, ppd->port);
7894 		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7895 	}
7896 	qib_write_kreg_port(ppd, krp_serdesctrl, data);
7897 }
7898 
7899 static int serdes_7322_init(struct qib_pportdata *ppd)
7900 {
	int ret = 0;

7902 	if (ppd->dd->cspec->r1)
7903 		ret = serdes_7322_init_old(ppd);
7904 	else
7905 		ret = serdes_7322_init_new(ppd);
7906 	return ret;
7907 }
7908 
7909 static int serdes_7322_init_old(struct qib_pportdata *ppd)
7910 {
7911 	u32 le_val;
7912 
7913 	/*
7914 	 * Initialize the Tx DDS tables.  Also done every QSFP event,
7915 	 * for adapters with QSFP
7916 	 */
7917 	init_txdds_table(ppd, 0);
7918 
7919 	/* ensure no tx overrides from earlier driver loads */
7920 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
7921 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7922 		reset_tx_deemphasis_override));
7923 
7924 	/* Patch some SerDes defaults to "Better for IB" */
7925 	/* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7926 	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7927 
7928 	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7929 	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7930 	/* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7931 	ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7932 
7933 	/* May be overridden in qsfp_7322_event */
7934 	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7935 	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7936 
	/* enable LE1 adaptation for all but QME, where it is disabled */
7938 	le_val = IS_QME(ppd->dd) ? 0 : 1;
7939 	ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7940 
	/* Clear cmode-override, which may have been set by an older driver */
7942 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7943 
7944 	/* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7945 	ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7946 
7947 	/* setup LoS params; these are subsystem, so chan == 5 */
7948 	/* LoS filter threshold_count on, ch 0-3, set to 8 */
7949 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7950 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7951 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7952 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7953 
7954 	/* LoS filter threshold_count off, ch 0-3, set to 4 */
7955 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7956 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7957 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7958 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7959 
7960 	/* LoS filter select enabled */
7961 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7962 
7963 	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
7964 	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7965 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7966 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7967 
7968 	serdes_7322_los_enable(ppd, 1);
7969 
	/* rxbistena; set to 0 to avoid side effects if it switches later */
7971 	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7972 
7973 	/* Configure 4 DFE taps, and only they adapt */
7974 	ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7975 
7976 	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7977 	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7978 	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7979 
7980 	/*
7981 	 * Set receive adaptation mode.  SDR and DDR adaptation are
7982 	 * always on, and QDR is initially enabled; later disabled.
7983 	 */
7984 	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7985 	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7986 	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7987 			    ppd->dd->cspec->r1 ?
7988 			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7989 	ppd->cpspec->qdr_dfe_on = 1;
7990 
	/* FLoop LOS gate: PPM filter enabled */
7992 	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7993 
7994 	/* rx offset center enabled */
7995 	ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7996 
7997 	if (!ppd->dd->cspec->r1) {
7998 		ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7999 		ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
8000 	}
8001 
8002 	/* Set the frequency loop bandwidth to 15 */
8003 	ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
8004 
8005 	return 0;
8006 }
8007 
8008 static int serdes_7322_init_new(struct qib_pportdata *ppd)
8009 {
8010 	unsigned long tend;
8011 	u32 le_val, rxcaldone;
8012 	int chan, chan_done = (1 << SERDES_CHANS) - 1;
8013 
	/* Clear cmode-override, which may have been set by an older driver */
8015 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
8016 
8017 	/* ensure no tx overrides from earlier driver loads */
8018 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
8019 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8020 		reset_tx_deemphasis_override));
8021 
8022 	/* START OF LSI SUGGESTED SERDES BRINGUP */
8023 	/* Reset - Calibration Setup */
8024 	/*       Stop DFE adaptaion */
8025 	ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
8026 	/*       Disable LE1 */
8027 	ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
8028 	/*       Disable autoadapt for LE1 */
8029 	ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
8030 	/*       Disable LE2 */
8031 	ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
8032 	/*       Disable VGA */
8033 	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8034 	/*       Disable AFE Offset Cancel */
8035 	ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
8036 	/*       Disable Timing Loop */
8037 	ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
8038 	/*       Disable Frequency Loop */
8039 	ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
8040 	/*       Disable Baseline Wander Correction */
8041 	ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
8042 	/*       Disable RX Calibration */
8043 	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8044 	/*       Disable RX Offset Calibration */
8045 	ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
8046 	/*       Select BB CDR */
8047 	ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
8048 	/*       CDR Step Size */
8049 	ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
8050 	/*       Enable phase Calibration */
8051 	ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
8052 	/*       DFE Bandwidth [2:14-12] */
8053 	ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
8054 	/*       DFE Config (4 taps only) */
8055 	ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
8056 	/*       Gain Loop Bandwidth */
8057 	if (!ppd->dd->cspec->r1) {
8058 		ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
8059 		ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
8060 	} else {
8061 		ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
8062 	}
8063 	/*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
8064 	/*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
8065 	/*       Data Rate Select [5:7-6] (leave as default) */
8066 	/*       RX Parallel Word Width [3:10-8] (leave as default) */
8067 
	/* RX RESET */
8069 	/*       Single- or Multi-channel reset */
8070 	/*       RX Analog reset */
8071 	/*       RX Digital reset */
8072 	ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
8073 	msleep(20);
8074 	/*       RX Analog reset */
8075 	ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
8076 	msleep(20);
8077 	/*       RX Digital reset */
8078 	ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
8079 	msleep(20);
8080 
8081 	/* setup LoS params; these are subsystem, so chan == 5 */
8082 	/* LoS filter threshold_count on, ch 0-3, set to 8 */
8083 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
8084 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
8085 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
8086 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
8087 
8088 	/* LoS filter threshold_count off, ch 0-3, set to 4 */
8089 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
8090 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
8091 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
8092 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
8093 
8094 	/* LoS filter select enabled */
8095 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
8096 
8097 	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
8098 	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
8099 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
8100 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
8101 
8102 	/* Turn on LOS on initial SERDES init */
8103 	serdes_7322_los_enable(ppd, 1);
	/* FLoop LOS gate: PPM filter enabled */
8105 	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
8106 
8107 	/* RX LATCH CALIBRATION */
8108 	/*       Enable Eyefinder Phase Calibration latch */
8109 	ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
8110 	/*       Enable RX Offset Calibration latch */
8111 	ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
8112 	msleep(20);
8113 	/*       Start Calibration */
8114 	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
8115 	tend = jiffies + msecs_to_jiffies(500);
8116 	while (chan_done && !time_is_before_jiffies(tend)) {
8117 		msleep(20);
8118 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
8119 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8120 					    (chan + (chan >> 1)),
8121 					    25, 0, 0);
8122 			if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
8123 			    (~chan_done & (1 << chan)) == 0)
8124 				chan_done &= ~(1 << chan);
8125 		}
8126 	}
8127 	if (chan_done) {
8128 		pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
8129 			 IBSD(ppd->hw_pidx), chan_done);
8130 	} else {
8131 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
8132 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8133 					    (chan + (chan >> 1)),
8134 					    25, 0, 0);
8135 			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
8136 				pr_info("Serdes %d chan %d calibration failed\n",
8137 					IBSD(ppd->hw_pidx), chan);
8138 		}
8139 	}
8140 
8141 	/*       Turn off Calibration */
8142 	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8143 	msleep(20);
8144 
8145 	/* BRING RX UP */
8146 	/*       Set LE2 value (May be overridden in qsfp_7322_event) */
8147 	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
8148 	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
8149 	/*       Set LE2 Loop bandwidth */
8150 	ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
8151 	/*       Enable LE2 */
8152 	ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
8153 	msleep(20);
8154 	/*       Enable H0 only */
8155 	ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
8156 	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
8157 	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
8158 	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
8159 	/*       Enable VGA */
8160 	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8161 	msleep(20);
8162 	/*       Set Frequency Loop Bandwidth */
8163 	ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
8164 	/*       Enable Frequency Loop */
8165 	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
8166 	/*       Set Timing Loop Bandwidth */
8167 	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
8168 	/*       Enable Timing Loop */
8169 	ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
8170 	msleep(50);
8171 	/*       Enable DFE
8172 	 *       Set receive adaptation mode.  SDR and DDR adaptation are
8173 	 *       always on, and QDR is initially enabled; later disabled.
8174 	 */
8175 	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
8176 	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
8177 	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
8178 			    ppd->dd->cspec->r1 ?
8179 			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
8180 	ppd->cpspec->qdr_dfe_on = 1;
8181 	/*       Disable LE1  */
8182 	ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
8183 	/*       Disable auto adapt for LE1 */
8184 	ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
8185 	msleep(20);
8186 	/*       Enable AFE Offset Cancel */
8187 	ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
8188 	/*       Enable Baseline Wander Correction */
8189 	ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
8190 	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
8191 	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
8192 	/* VGA output common mode */
8193 	ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
8194 
8195 	/*
8196 	 * Initialize the Tx DDS tables.  Also done every QSFP event,
8197 	 * for adapters with QSFP
8198 	 */
8199 	init_txdds_table(ppd, 0);
8200 
8201 	return 0;
8202 }
8203 
8204 /* start adjust QMH serdes parameters */
8205 
8206 static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8207 {
8208 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8209 		9, code << 9, 0x3f << 9);
8210 }
8211 
8212 static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8213 	int enable, u32 tapenable)
8214 {
8215 	if (enable)
8216 		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8217 			1, 3 << 10, 0x1f << 10);
8218 	else
8219 		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8220 			1, 0, 0x1f << 10);
8221 }
8222 
8223 /* Set clock to 1, 0, 1, 0 */
8224 static void clock_man(struct qib_pportdata *ppd, int chan)
8225 {
8226 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8227 		4, 0x4000, 0x4000);
8228 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8229 		4, 0, 0x4000);
8230 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8231 		4, 0x4000, 0x4000);
8232 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8233 		4, 0, 0x4000);
8234 }
8235 
8236 /*
 * Write the current Tx serdes pre, post, main, and amp settings into the
 * serdes.  The caller must pass the settings appropriate for the current
 * speed, or not care if they are correct for the current speed.
8240  */
8241 static void write_tx_serdes_param(struct qib_pportdata *ppd,
8242 				  struct txdds_ent *txdds)
8243 {
8244 	u64 deemph;
8245 
8246 	deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
8247 	/* field names for amp, main, post, pre, respectively */
8248 	deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
8249 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
8250 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
8251 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
8252 
8253 	deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8254 			   tx_override_deemphasis_select);
8255 	deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8256 		    txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8257 				       txampcntl_d2a);
8258 	deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8259 		     txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8260 				   txc0_ena);
8261 	deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8262 		     txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8263 				    txcp1_ena);
8264 	deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8265 		     txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8266 				    txcn1_ena);
8267 	qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
8268 }
8269 
8270 /*
8271  * Set the parameters for mez cards on link bounce, so they are
 * always exactly what was requested.  Similar logic to init_txdds_table()
 * but does just the serdes.
8274  */
8275 static void adj_tx_serdes(struct qib_pportdata *ppd)
8276 {
8277 	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
8278 	struct txdds_ent *dds;
8279 
8280 	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8281 	dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8282 		qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8283 				ddr_dds : sdr_dds));
8284 	write_tx_serdes_param(ppd, dds);
8285 }
8286 
8287 /* set QDR forced value for H1, if needed */
8288 static void force_h1(struct qib_pportdata *ppd)
8289 {
8290 	int chan;
8291 
8292 	ppd->cpspec->qdr_reforce = 0;
8293 	if (!ppd->dd->cspec->r1)
8294 		return;
8295 
8296 	for (chan = 0; chan < SERDES_CHANS; chan++) {
8297 		set_man_mode_h1(ppd, chan, 1, 0);
8298 		set_man_code(ppd, chan, ppd->cpspec->h1_val);
8299 		clock_man(ppd, chan);
8300 		set_man_mode_h1(ppd, chan, 0, 0);
8301 	}
8302 }
8303 
8304 #define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
8305 #define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
8306 
8307 #define R_OPCODE_LSB 3
8308 #define R_OP_NOP 0
8309 #define R_OP_SHIFT 2
8310 #define R_OP_UPDATE 3
8311 #define R_TDI_LSB 2
8312 #define R_TDO_LSB 1
8313 #define R_RDY 1
8314 
8315 static int qib_r_grab(struct qib_devdata *dd)
8316 {
	u64 val = SJA_EN;

8319 	qib_write_kreg(dd, kr_r_access, val);
8320 	qib_read_kreg32(dd, kr_scratch);
8321 	return 0;
8322 }
8323 
/*
 * qib_r_wait_for_rdy() not only waits for the ready bit, it
 * returns the current state of R_TDO.
 */
8327 static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8328 {
	u64 val;
	int timeout;

	for (timeout = 0; timeout < 100; ++timeout) {
8332 		val = qib_read_kreg32(dd, kr_r_access);
8333 		if (val & R_RDY)
8334 			return (val >> R_TDO_LSB) & 1;
8335 	}
8336 	return -1;
8337 }
8338 
8339 static int qib_r_shift(struct qib_devdata *dd, int bisten,
8340 		       int len, u8 *inp, u8 *outp)
8341 {
8342 	u64 valbase, val;
8343 	int ret, pos;
8344 
8345 	valbase = SJA_EN | (bisten << BISTEN_LSB) |
8346 		(R_OP_SHIFT << R_OPCODE_LSB);
8347 	ret = qib_r_wait_for_rdy(dd);
8348 	if (ret < 0)
8349 		goto bail;
8350 	for (pos = 0; pos < len; ++pos) {
8351 		val = valbase;
8352 		if (outp) {
8353 			outp[pos >> 3] &= ~(1 << (pos & 7));
8354 			outp[pos >> 3] |= (ret << (pos & 7));
8355 		}
8356 		if (inp) {
8357 			int tdi = inp[pos >> 3] >> (pos & 7);
8358 			val |= ((tdi & 1) << R_TDI_LSB);
8359 		}
8360 		qib_write_kreg(dd, kr_r_access, val);
8361 		qib_read_kreg32(dd, kr_scratch);
8362 		ret = qib_r_wait_for_rdy(dd);
8363 		if (ret < 0)
8364 			break;
8365 	}
8366 	/* Restore to NOP between operations. */
	val = SJA_EN | (bisten << BISTEN_LSB);
8368 	qib_write_kreg(dd, kr_r_access, val);
8369 	qib_read_kreg32(dd, kr_scratch);
8370 	ret = qib_r_wait_for_rdy(dd);
8371 
8372 	if (ret >= 0)
8373 		ret = pos;
8374 bail:
8375 	return ret;
8376 }
8377 
8378 static int qib_r_update(struct qib_devdata *dd, int bisten)
8379 {
8380 	u64 val;
8381 	int ret;
8382 
8383 	val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
8384 	ret = qib_r_wait_for_rdy(dd);
8385 	if (ret >= 0) {
8386 		qib_write_kreg(dd, kr_r_access, val);
8387 		qib_read_kreg32(dd, kr_scratch);
8388 	}
8389 	return ret;
8390 }
8391 
8392 #define BISTEN_PORT_SEL 15
8393 #define LEN_PORT_SEL 625
8394 #define BISTEN_AT 17
8395 #define LEN_AT 156
8396 #define BISTEN_ETM 16
8397 #define LEN_ETM 632
8398 
8399 #define BIT2BYTE(x) (((x) +  BITS_PER_BYTE - 1) / BITS_PER_BYTE)
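
/*
 * BIT2BYTE() rounds a bit count up to whole bytes (BITS_PER_BYTE is 8):
 * e.g. BIT2BYTE(LEN_AT) = (156 + 7) / 8 = 20 and BIT2BYTE(LEN_ETM) =
 * (632 + 7) / 8 = 79, matching the sizes of the byte arrays below.
 */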
8400 
8401 /* these are common for all IB port use cases. */
8402 static u8 reset_at[BIT2BYTE(LEN_AT)] = {
8403 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8404 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8405 };
8406 static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
8407 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8408 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8409 	0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
8410 	0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
8411 	0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
8412 	0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
8413 	0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8414 	0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
8415 };
8416 static u8 at[BIT2BYTE(LEN_AT)] = {
8417 	0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
8418 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8419 };
8420 
8421 /* used for IB1 or IB2, only one in use */
8422 static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
8423 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8424 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8425 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8426 	0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
8427 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8428 	0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
8429 	0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
8430 	0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
8431 };
8432 
8433 /* used when both IB1 and IB2 are in use */
8434 static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
8435 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8436 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
8437 	0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8438 	0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
8439 	0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
8440 	0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
8441 	0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
8442 	0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
8443 };
8444 
8445 /* used when only IB1 is in use */
8446 static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
8447 	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8448 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8449 	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8450 	0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8451 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8452 	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8453 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8454 	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8455 };
8456 
8457 /* used when only IB2 is in use */
8458 static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
8459 	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
8460 	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
8461 	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8462 	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8463 	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
8464 	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8465 	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8466 	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
8467 };
8468 
8469 /* used when both IB1 and IB2 are in use */
8470 static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
8471 	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8472 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8473 	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8474 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8475 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8476 	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
8477 	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8478 	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8479 };
8480 
8481 /*
 * Do setup to properly handle IB link recovery; if "both" is set, we
 * are initializing to cover both ports; otherwise we are initializing
 * to cover a single-port card, or the port has reached INIT and we may
8485  * need to switch coverage types.
8486  */
8487 static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8488 {
8489 	u8 *portsel, *etm;
8490 	struct qib_devdata *dd = ppd->dd;
8491 
8492 	if (!ppd->dd->cspec->r1)
8493 		return;
8494 	if (!both) {
8495 		dd->cspec->recovery_ports_initted++;
8496 		ppd->cpspec->recovery_init = 1;
8497 	}
8498 	if (!both && dd->cspec->recovery_ports_initted == 1) {
8499 		portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8500 		etm = atetm_1port;
8501 	} else {
8502 		portsel = portsel_2port;
8503 		etm = atetm_2port;
8504 	}
8505 
8506 	if (qib_r_grab(dd) < 0 ||
8507 		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8508 		qib_r_update(dd, BISTEN_ETM) < 0 ||
8509 		qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8510 		qib_r_update(dd, BISTEN_AT) < 0 ||
8511 		qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8512 			    portsel, NULL) < 0 ||
8513 		qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8514 		qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8515 		qib_r_update(dd, BISTEN_AT) < 0 ||
8516 		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8517 		qib_r_update(dd, BISTEN_ETM) < 0)
8518 		qib_dev_err(dd, "Failed IB link recovery setup\n");
8519 }
8520 
8521 static void check_7322_rxe_status(struct qib_pportdata *ppd)
8522 {
8523 	struct qib_devdata *dd = ppd->dd;
8524 	u64 fmask;
8525 
8526 	if (dd->cspec->recovery_ports_initted != 1)
8527 		return; /* rest doesn't apply to dualport */
8528 	qib_write_kreg(dd, kr_control, dd->control |
8529 		       SYM_MASK(Control, FreezeMode));
8530 	(void)qib_read_kreg64(dd, kr_scratch);
8531 	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
8532 	fmask = qib_read_kreg64(dd, kr_act_fmask);
8533 	if (!fmask) {
8534 		/*
8535 		 * require a powercycle before we'll work again, and make
8536 		 * sure we get no more interrupts, and don't turn off
8537 		 * freeze.
8538 		 */
8539 		ppd->dd->cspec->stay_in_freeze = 1;
8540 		qib_7322_set_intr_state(ppd->dd, 0);
8541 		qib_write_kreg(dd, kr_fmask, 0ULL);
8542 		qib_dev_err(dd, "HCA unusable until powercycled\n");
8543 		return; /* eventually reset */
8544 	}
8545 
8546 	qib_write_kreg(ppd->dd, kr_hwerrclear,
8547 	    SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8548 
8549 	/* don't do the full clear_freeze(), not needed for this */
8550 	qib_write_kreg(dd, kr_control, dd->control);
8551 	qib_read_kreg32(dd, kr_scratch);
8552 	/* take IBC out of reset */
8553 	if (ppd->link_speed_supported) {
8554 		ppd->cpspec->ibcctrl_a &=
8555 			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8556 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
8557 				    ppd->cpspec->ibcctrl_a);
8558 		qib_read_kreg32(dd, kr_scratch);
8559 		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8560 			qib_set_ib_7322_lstate(ppd, 0,
8561 				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8562 	}
8563 }
8564