xref: /openbmc/linux/drivers/infiniband/hw/qib/qib_iba7322.c (revision baa7eb025ab14f3cba2e35c0a8648f9c9f01d24f)
1 /*
2  * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 /*
34  * This file contains all of the code that is specific to the
35  * InfiniPath 7322 chip
36  */
37 
38 #include <linux/interrupt.h>
39 #include <linux/pci.h>
40 #include <linux/delay.h>
41 #include <linux/io.h>
42 #include <linux/jiffies.h>
43 #include <rdma/ib_verbs.h>
44 #include <rdma/ib_smi.h>
45 
46 #include "qib.h"
47 #include "qib_7322_regs.h"
48 #include "qib_qsfp.h"
49 
50 #include "qib_mad.h"
51 
52 static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
53 static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
54 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
55 static irqreturn_t qib_7322intr(int irq, void *data);
56 static irqreturn_t qib_7322bufavail(int irq, void *data);
57 static irqreturn_t sdma_intr(int irq, void *data);
58 static irqreturn_t sdma_idle_intr(int irq, void *data);
59 static irqreturn_t sdma_progress_intr(int irq, void *data);
60 static irqreturn_t sdma_cleanup_intr(int irq, void *data);
61 static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
62 				  struct qib_ctxtdata *rcd);
63 static u8 qib_7322_phys_portstate(u64);
64 static u32 qib_7322_iblink_state(u64);
65 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
66 				   u16 linitcmd);
67 static void force_h1(struct qib_pportdata *);
68 static void adj_tx_serdes(struct qib_pportdata *);
69 static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
70 static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
71 
72 static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
73 static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
74 
75 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
76 
77 /* LE2 serdes values for different cases */
78 #define LE2_DEFAULT 5
79 #define LE2_5m 4
80 #define LE2_QME 0
81 
82 /* Below is special-purpose, so only really works for the IB SerDes blocks. */
83 #define IBSD(hw_pidx) (hw_pidx + 2)
84 
85 /* these are variables for documentation and experimentation purposes */
86 static const unsigned rcv_int_timeout = 375;
87 static const unsigned rcv_int_count = 16;
88 static const unsigned sdma_idle_cnt = 64;
89 
90 /* Time to stop altering Rx Equalization parameters, after link up. */
91 #define RXEQ_DISABLE_MSECS 2500
92 
93 /*
94  * Number of VLs we are configured to use (to allow for more
95  * credits per vl, etc.)
96  */
97 ushort qib_num_cfg_vls = 2;
98 module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
99 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
100 
101 static ushort qib_chase = 1;
102 module_param_named(chase, qib_chase, ushort, S_IRUGO);
103 MODULE_PARM_DESC(chase, "Enable state chase handling");
104 
105 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
106 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
107 MODULE_PARM_DESC(long_attenuation, \
108 		 "attenuation cutoff (dB) for long copper cable setup");
109 
110 static ushort qib_singleport;
111 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
112 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
113 
114 #define MAX_ATTEN_LEN 64 /* plenty for any real system */
115 /* for read back, default index is ~5m copper cable */
116 static char txselect_list[MAX_ATTEN_LEN] = "10";
117 static struct kparam_string kp_txselect = {
118 	.string = txselect_list,
119 	.maxlen = MAX_ATTEN_LEN
120 };
121 static int  setup_txselect(const char *, struct kernel_param *);
122 module_param_call(txselect, setup_txselect, param_get_string,
123 		  &kp_txselect, S_IWUSR | S_IRUGO);
124 MODULE_PARM_DESC(txselect, \
125 		 "Tx serdes indices (for no QSFP or invalid QSFP data)");
126 
127 #define BOARD_QME7342 5
128 #define BOARD_QMH7342 6
129 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
130 		    BOARD_QMH7342)
131 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
132 		    BOARD_QME7342)
133 
134 #define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))
135 
136 #define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
137 
138 #define MASK_ACROSS(lsb, msb) \
139 	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
140 
141 #define SYM_RMASK(regname, fldname) ((u64)              \
142 	QIB_7322_##regname##_##fldname##_RMASK)
143 
144 #define SYM_MASK(regname, fldname) ((u64)               \
145 	QIB_7322_##regname##_##fldname##_RMASK <<       \
146 	 QIB_7322_##regname##_##fldname##_LSB)
147 
148 #define SYM_FIELD(value, regname, fldname) ((u64)	\
149 	(((value) >> SYM_LSB(regname, fldname)) &	\
150 	 SYM_RMASK(regname, fldname)))
151 
152 /* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
153 #define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
154 	(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
155 
156 #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
157 #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
158 #define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
159 #define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
160 #define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
161 /* Below because most, but not all, fields of IntMask have that full suffix */
162 #define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
163 
164 
165 #define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
166 
167 /*
168  * the size bits give us 2^N, in KB units.  0 marks as invalid,
169  * and 7 is reserved.  We currently use only 2KB and 4KB
170  */
171 #define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
172 #define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
173 #define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
174 #define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
175 
176 #define SendIBSLIDAssignMask \
177 	QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
178 #define SendIBSLMCMask \
179 	QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
180 
181 #define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
182 #define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
183 #define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
184 #define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
185 #define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
186 #define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
187 
188 #define _QIB_GPIO_SDA_NUM 1
189 #define _QIB_GPIO_SCL_NUM 0
190 #define QIB_EEPROM_WEN_NUM 14
191 #define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
192 
193 /* HW counter clock is at 4nsec */
194 #define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
195 
196 /* full speed IB port 1 only */
197 #define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
198 #define PORT_SPD_CAP_SHIFT 3
199 
200 /* full speed featuremask, both ports */
201 #define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
202 
203 /*
204  * This file contains almost all the chip-specific register information and
205  * access functions for the FAKED QLogic InfiniPath 7322 PCI-Express chip.
206  */
207 
208 /* Use defines to tie machine-generated names to lower-case names */
209 #define kr_contextcnt KREG_IDX(ContextCnt)
210 #define kr_control KREG_IDX(Control)
211 #define kr_counterregbase KREG_IDX(CntrRegBase)
212 #define kr_errclear KREG_IDX(ErrClear)
213 #define kr_errmask KREG_IDX(ErrMask)
214 #define kr_errstatus KREG_IDX(ErrStatus)
215 #define kr_extctrl KREG_IDX(EXTCtrl)
216 #define kr_extstatus KREG_IDX(EXTStatus)
217 #define kr_gpio_clear KREG_IDX(GPIOClear)
218 #define kr_gpio_mask KREG_IDX(GPIOMask)
219 #define kr_gpio_out KREG_IDX(GPIOOut)
220 #define kr_gpio_status KREG_IDX(GPIOStatus)
221 #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
222 #define kr_debugportval KREG_IDX(DebugPortValueReg)
223 #define kr_fmask KREG_IDX(feature_mask)
224 #define kr_act_fmask KREG_IDX(active_feature_mask)
225 #define kr_hwerrclear KREG_IDX(HwErrClear)
226 #define kr_hwerrmask KREG_IDX(HwErrMask)
227 #define kr_hwerrstatus KREG_IDX(HwErrStatus)
228 #define kr_intclear KREG_IDX(IntClear)
229 #define kr_intmask KREG_IDX(IntMask)
230 #define kr_intredirect KREG_IDX(IntRedirect0)
231 #define kr_intstatus KREG_IDX(IntStatus)
232 #define kr_pagealign KREG_IDX(PageAlign)
233 #define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
234 #define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
235 #define kr_rcvegrbase KREG_IDX(RcvEgrBase)
236 #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
237 #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
238 #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
239 #define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
240 #define kr_rcvtidbase KREG_IDX(RcvTIDBase)
241 #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
242 #define kr_revision KREG_IDX(Revision)
243 #define kr_scratch KREG_IDX(Scratch)
244 #define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
245 #define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
246 #define kr_sendctrl KREG_IDX(SendCtrl)
247 #define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
248 #define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
249 #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
250 #define kr_sendpiobufbase KREG_IDX(SendBufBase)
251 #define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
252 #define kr_sendpiosize KREG_IDX(SendBufSize)
253 #define kr_sendregbase KREG_IDX(SendRegBase)
254 #define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
255 #define kr_userregbase KREG_IDX(UserRegBase)
256 #define kr_intgranted KREG_IDX(Int_Granted)
257 #define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
258 #define kr_intblocked KREG_IDX(IntBlocked)
259 #define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
260 
261 /*
262  * per-port kernel registers.  Access only with qib_read_kreg_port()
263  * or qib_write_kreg_port()
264  */
265 #define krp_errclear KREG_IBPORT_IDX(ErrClear)
266 #define krp_errmask KREG_IBPORT_IDX(ErrMask)
267 #define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
268 #define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
269 #define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
270 #define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
271 #define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
272 #define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
273 #define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
274 #define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
275 #define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
276 #define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
277 #define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
278 #define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
279 #define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
280 #define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
281 #define krp_psinterval KREG_IBPORT_IDX(PSInterval)
282 #define krp_psstart KREG_IBPORT_IDX(PSStart)
283 #define krp_psstat KREG_IBPORT_IDX(PSStat)
284 #define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
285 #define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
286 #define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
287 #define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
288 #define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
289 #define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
290 #define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
291 #define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
292 #define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
293 #define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
294 #define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
295 #define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
296 #define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
297 #define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
298 #define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
299 #define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
300 #define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
301 #define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
302 #define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
303 #define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
304 #define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
305 #define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
306 #define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
307 #define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
308 #define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
309 #define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
310 #define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
311 #define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
312 #define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
313 #define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
314 #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
315 
316 /*
317  * Per-context kernel registers.  Acess only with qib_read_kreg_ctxt()
318  * or qib_write_kreg_ctxt()
319  */
320 #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
321 #define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
322 
323 /*
324  * TID Flow table, per context.  Reduces
325  * number of hdrq updates to one per flow (or on errors).
326  * context 0 and 1 share same memory, but have distinct
327  * addresses.  Since for now, we never use expected sends
328  * on kernel contexts, we don't worry about that (we initialize
329  * those entries for ctxt 0/1 on driver load twice, for example).
330  */
331 #define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
332 #define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
333 
334 /* these are the error bits in the tid flows, and are W1C */
335 #define TIDFLOW_ERRBITS  ( \
336 	(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
337 	SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
338 	(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
339 	SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
340 
341 /* Most (not all) Counters are per-IBport.
342  * Requires LBIntCnt is at offset 0 in the group
343  */
344 #define CREG_IDX(regname) \
345 ((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
346 
347 #define crp_badformat CREG_IDX(RxVersionErrCnt)
348 #define crp_err_rlen CREG_IDX(RxLenErrCnt)
349 #define crp_erricrc CREG_IDX(RxICRCErrCnt)
350 #define crp_errlink CREG_IDX(RxLinkMalformCnt)
351 #define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
352 #define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
353 #define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
354 #define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
355 #define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
356 #define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
357 #define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
358 #define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
359 #define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
360 #define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
361 #define crp_pktrcv CREG_IDX(RxDataPktCnt)
362 #define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
363 #define crp_pktsend CREG_IDX(TxDataPktCnt)
364 #define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
365 #define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
366 #define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
367 #define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
368 #define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
369 #define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
370 #define crp_rcvebp CREG_IDX(RxEBPCnt)
371 #define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
372 #define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
373 #define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
374 #define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
375 #define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
376 #define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
377 #define crp_rxvlerr CREG_IDX(RxVlErrCnt)
378 #define crp_sendstall CREG_IDX(TxFlowStallCnt)
379 #define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
380 #define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
381 #define crp_txlenerr CREG_IDX(TxLenErrCnt)
382 #define crp_txlenerr CREG_IDX(TxLenErrCnt)
383 #define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
384 #define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
385 #define crp_txunderrun CREG_IDX(TxUnderrunCnt)
386 #define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
387 #define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
388 #define crp_wordrcv CREG_IDX(RxDwordCnt)
389 #define crp_wordsend CREG_IDX(TxDwordCnt)
390 #define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
391 
392 /* these are the (few) counters that are not port-specific */
393 #define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
394 			QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
395 #define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
396 #define cr_lbint CREG_DEVIDX(LBIntCnt)
397 #define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
398 #define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
399 #define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
400 #define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
401 #define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
402 
403 /* no chip register for # of IB ports supported, so define */
404 #define NUM_IB_PORTS 2
405 
406 /* 1 VL15 buffer per hardware IB port, no register for this, so define */
407 #define NUM_VL15_BUFS NUM_IB_PORTS
408 
409 /*
410  * context 0 and 1 are special, and there is no chip register that
411  * defines this value, so we have to define it here.
412  * These are all allocated to either 0 or 1 for single port
413  * hardware configuration, otherwise each gets half
414  */
415 #define KCTXT0_EGRCNT 2048
416 
417 /* values for vl and port fields in PBC, 7322-specific */
418 #define PBC_PORT_SEL_LSB 26
419 #define PBC_PORT_SEL_RMASK 1
420 #define PBC_VL_NUM_LSB 27
421 #define PBC_VL_NUM_RMASK 7
422 #define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
423 #define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
424 
425 static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
426 	[IB_RATE_2_5_GBPS] = 16,
427 	[IB_RATE_5_GBPS] = 8,
428 	[IB_RATE_10_GBPS] = 4,
429 	[IB_RATE_20_GBPS] = 2,
430 	[IB_RATE_30_GBPS] = 2,
431 	[IB_RATE_40_GBPS] = 1
432 };
433 
434 #define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
435 #define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
436 
437 /* link training states, from IBC */
438 #define IB_7322_LT_STATE_DISABLED        0x00
439 #define IB_7322_LT_STATE_LINKUP          0x01
440 #define IB_7322_LT_STATE_POLLACTIVE      0x02
441 #define IB_7322_LT_STATE_POLLQUIET       0x03
442 #define IB_7322_LT_STATE_SLEEPDELAY      0x04
443 #define IB_7322_LT_STATE_SLEEPQUIET      0x05
444 #define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
445 #define IB_7322_LT_STATE_CFGRCVFCFG      0x09
446 #define IB_7322_LT_STATE_CFGWAITRMT      0x0a
447 #define IB_7322_LT_STATE_CFGIDLE         0x0b
448 #define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
449 #define IB_7322_LT_STATE_TXREVLANES      0x0d
450 #define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
451 #define IB_7322_LT_STATE_RECOVERIDLE     0x0f
452 #define IB_7322_LT_STATE_CFGENH          0x10
453 #define IB_7322_LT_STATE_CFGTEST         0x11
454 
455 /* link state machine states from IBC */
456 #define IB_7322_L_STATE_DOWN             0x0
457 #define IB_7322_L_STATE_INIT             0x1
458 #define IB_7322_L_STATE_ARM              0x2
459 #define IB_7322_L_STATE_ACTIVE           0x3
460 #define IB_7322_L_STATE_ACT_DEFER        0x4
461 
462 static const u8 qib_7322_physportstate[0x20] = {
463 	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
464 	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
465 	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
466 	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
467 	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
468 	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
469 	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
470 	[IB_7322_LT_STATE_CFGRCVFCFG] =
471 		IB_PHYSPORTSTATE_CFG_TRAIN,
472 	[IB_7322_LT_STATE_CFGWAITRMT] =
473 		IB_PHYSPORTSTATE_CFG_TRAIN,
474 	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
475 	[IB_7322_LT_STATE_RECOVERRETRAIN] =
476 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
477 	[IB_7322_LT_STATE_RECOVERWAITRMT] =
478 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
479 	[IB_7322_LT_STATE_RECOVERIDLE] =
480 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
481 	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
482 	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
483 	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
484 	[0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
485 	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
486 	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
487 	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
488 	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
489 };
490 
491 struct qib_chip_specific {
492 	u64 __iomem *cregbase;
493 	u64 *cntrs;
494 	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
495 	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
496 	u64 main_int_mask;      /* clear bits which have dedicated handlers */
497 	u64 int_enable_mask;  /* for per port interrupts in single port mode */
498 	u64 errormask;
499 	u64 hwerrmask;
500 	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
501 	u64 gpio_mask; /* shadow the gpio mask register */
502 	u64 extctrl; /* shadow the gpio output enable, etc... */
503 	u32 ncntrs;
504 	u32 nportcntrs;
505 	u32 cntrnamelen;
506 	u32 portcntrnamelen;
507 	u32 numctxts;
508 	u32 rcvegrcnt;
509 	u32 updthresh; /* current AvailUpdThld */
510 	u32 updthresh_dflt; /* default AvailUpdThld */
511 	u32 r1;
512 	int irq;
513 	u32 num_msix_entries;
514 	u32 sdmabufcnt;
515 	u32 lastbuf_for_pio;
516 	u32 stay_in_freeze;
517 	u32 recovery_ports_initted;
518 	struct msix_entry *msix_entries;
519 	void  **msix_arg;
520 	unsigned long *sendchkenable;
521 	unsigned long *sendgrhchk;
522 	unsigned long *sendibchk;
523 	u32 rcvavail_timeout[18];
524 	char emsgbuf[128]; /* for device error interrupt msg buffer */
525 };
526 
527 /* Table of entries in "human readable" form Tx Emphasis. */
528 struct txdds_ent {
529 	u8 amp;
530 	u8 pre;
531 	u8 main;
532 	u8 post;
533 };
534 
535 struct vendor_txdds_ent {
536 	u8 oui[QSFP_VOUI_LEN];
537 	u8 *partnum;
538 	struct txdds_ent sdr;
539 	struct txdds_ent ddr;
540 	struct txdds_ent qdr;
541 };
542 
543 static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
544 
545 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
546 #define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
547 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
548 
549 #define H1_FORCE_VAL 8
550 #define H1_FORCE_QME 1 /*  may be overridden via setup_txselect() */
551 #define H1_FORCE_QMH 7 /*  may be overridden via setup_txselect() */
552 
553 /* The static and dynamic registers are paired, and the pairs indexed by spd */
554 #define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
555 	+ ((spd) * 2))
556 
557 #define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
558 #define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
559 #define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
560 #define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
561 #define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
562 
563 struct qib_chippport_specific {
564 	u64 __iomem *kpregbase;
565 	u64 __iomem *cpregbase;
566 	u64 *portcntrs;
567 	struct qib_pportdata *ppd;
568 	wait_queue_head_t autoneg_wait;
569 	struct delayed_work autoneg_work;
570 	struct delayed_work ipg_work;
571 	struct timer_list chase_timer;
572 	/*
573 	 * these 5 fields are used to establish deltas for IB symbol
574 	 * errors and linkrecovery errors.  They can be reported on
575 	 * some chips during link negotiation prior to INIT, and with
576 	 * DDR when faking DDR negotiations with non-IBTA switches.
577 	 * The chip counters are adjusted at driver unload if there is
578 	 * a non-zero delta.
579 	 */
580 	u64 ibdeltainprog;
581 	u64 ibsymdelta;
582 	u64 ibsymsnap;
583 	u64 iblnkerrdelta;
584 	u64 iblnkerrsnap;
585 	u64 iblnkdownsnap;
586 	u64 iblnkdowndelta;
587 	u64 ibmalfdelta;
588 	u64 ibmalfsnap;
589 	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
590 	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
591 	u64 qdr_dfe_time;
592 	u64 chase_end;
593 	u32 autoneg_tries;
594 	u32 recovery_init;
595 	u32 qdr_dfe_on;
596 	u32 qdr_reforce;
597 	/*
598 	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
599 	 * entry zero is unused, to simplify indexing
600 	 */
601 	u8 h1_val;
602 	u8 no_eep;  /* txselect table index to use if no qsfp info */
603 	u8 ipg_tries;
604 	u8 ibmalfusesnap;
605 	struct qib_qsfp_data qsfp_data;
606 	char epmsgbuf[192]; /* for port error interrupt msg buffer */
607 };
608 
609 static struct {
610 	const char *name;
611 	irq_handler_t handler;
612 	int lsb;
613 	int port; /* 0 if not port-specific, else port # */
614 } irq_table[] = {
615 	{ QIB_DRV_NAME, qib_7322intr, -1, 0 },
616 	{ QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
617 		SYM_LSB(IntStatus, SendBufAvail), 0 },
618 	{ QIB_DRV_NAME " (sdma 0)", sdma_intr,
619 		SYM_LSB(IntStatus, SDmaInt_0), 1 },
620 	{ QIB_DRV_NAME " (sdma 1)", sdma_intr,
621 		SYM_LSB(IntStatus, SDmaInt_1), 2 },
622 	{ QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
623 		SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
624 	{ QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
625 		SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
626 	{ QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
627 		SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
628 	{ QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
629 		SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
630 	{ QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
631 		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
632 	{ QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
633 		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
634 };
635 
636 /* ibcctrl bits */
637 #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
638 /* cycle through TS1/TS2 till OK */
639 #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
640 /* wait for TS1, then go on */
641 #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
642 #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
643 
644 #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
645 #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
646 #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
647 
648 #define BLOB_7322_IBCHG 0x101
649 
650 static inline void qib_write_kreg(const struct qib_devdata *dd,
651 				  const u32 regno, u64 value);
652 static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
653 static void write_7322_initregs(struct qib_devdata *);
654 static void write_7322_init_portregs(struct qib_pportdata *);
655 static void setup_7322_link_recovery(struct qib_pportdata *, u32);
656 static void check_7322_rxe_status(struct qib_pportdata *);
657 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
658 
659 /**
660  * qib_read_ureg32 - read 32-bit virtualized per-context register
661  * @dd: device
662  * @regno: register number
663  * @ctxt: context number
664  *
665  * Return the contents of a register that is virtualized to be per context.
666  * Returns -1 on errors (not distinguishable from valid contents at
667  * runtime; we may add a separate error variable at some point).
668  */
669 static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
670 				  enum qib_ureg regno, int ctxt)
671 {
672 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
673 		return 0;
674 	return readl(regno + (u64 __iomem *)(
675 		(dd->ureg_align * ctxt) + (dd->userbase ?
676 		 (char __iomem *)dd->userbase :
677 		 (char __iomem *)dd->kregbase + dd->uregbase)));
678 }
679 
680 /**
681  * qib_read_ureg - read virtualized per-context register
682  * @dd: device
683  * @regno: register number
684  * @ctxt: context number
685  *
686  * Return the contents of a register that is virtualized to be per context.
687  * Returns -1 on errors (not distinguishable from valid contents at
688  * runtime; we may add a separate error variable at some point).
689  */
690 static inline u64 qib_read_ureg(const struct qib_devdata *dd,
691 				enum qib_ureg regno, int ctxt)
692 {
693 
694 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
695 		return 0;
696 	return readq(regno + (u64 __iomem *)(
697 		(dd->ureg_align * ctxt) + (dd->userbase ?
698 		 (char __iomem *)dd->userbase :
699 		 (char __iomem *)dd->kregbase + dd->uregbase)));
700 }
701 
702 /**
703  * qib_write_ureg - write virtualized per-context register
704  * @dd: device
705  * @regno: register number
706  * @value: value
707  * @ctxt: context
708  *
709  * Write the contents of a register that is virtualized to be per context.
710  */
711 static inline void qib_write_ureg(const struct qib_devdata *dd,
712 				  enum qib_ureg regno, u64 value, int ctxt)
713 {
714 	u64 __iomem *ubase;
715 	if (dd->userbase)
716 		ubase = (u64 __iomem *)
717 			((char __iomem *) dd->userbase +
718 			 dd->ureg_align * ctxt);
719 	else
720 		ubase = (u64 __iomem *)
721 			(dd->uregbase +
722 			 (char __iomem *) dd->kregbase +
723 			 dd->ureg_align * ctxt);
724 
725 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
726 		writeq(value, &ubase[regno]);
727 }
728 
729 static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
730 				  const u32 regno)
731 {
732 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
733 		return -1;
734 	return readl((u32 __iomem *) &dd->kregbase[regno]);
735 }
736 
737 static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
738 				  const u32 regno)
739 {
740 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
741 		return -1;
742 	return readq(&dd->kregbase[regno]);
743 }
744 
745 static inline void qib_write_kreg(const struct qib_devdata *dd,
746 				  const u32 regno, u64 value)
747 {
748 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
749 		writeq(value, &dd->kregbase[regno]);
750 }
751 
752 /*
753  * not many sanity checks for the port-specific kernel register routines,
754  * since they are only used when it's known to be safe.
755 */
756 static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
757 				     const u16 regno)
758 {
759 	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
760 		return 0ULL;
761 	return readq(&ppd->cpspec->kpregbase[regno]);
762 }
763 
764 static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
765 				       const u16 regno, u64 value)
766 {
767 	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
768 	    (ppd->dd->flags & QIB_PRESENT))
769 		writeq(value, &ppd->cpspec->kpregbase[regno]);
770 }
771 
772 /**
773  * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
774  * @dd: the qlogic_ib device
775  * @regno: the register number to write
776  * @ctxt: the context containing the register
777  * @value: the value to write
778  */
779 static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
780 				       const u16 regno, unsigned ctxt,
781 				       u64 value)
782 {
783 	qib_write_kreg(dd, regno + ctxt, value);
784 }
785 
786 static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
787 {
788 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
789 		return 0;
790 	return readq(&dd->cspec->cregbase[regno]);
791 
792 
793 }
794 
795 static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
796 {
797 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
798 		return 0;
799 	return readl(&dd->cspec->cregbase[regno]);
800 
801 
802 }
803 
804 static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
805 					u16 regno, u64 value)
806 {
807 	if (ppd->cpspec && ppd->cpspec->cpregbase &&
808 	    (ppd->dd->flags & QIB_PRESENT))
809 		writeq(value, &ppd->cpspec->cpregbase[regno]);
810 }
811 
812 static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
813 				      u16 regno)
814 {
815 	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
816 	    !(ppd->dd->flags & QIB_PRESENT))
817 		return 0;
818 	return readq(&ppd->cpspec->cpregbase[regno]);
819 }
820 
821 static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
822 					u16 regno)
823 {
824 	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
825 	    !(ppd->dd->flags & QIB_PRESENT))
826 		return 0;
827 	return readl(&ppd->cpspec->cpregbase[regno]);
828 }
829 
830 /* bits in Control register */
831 #define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
832 #define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
833 
834 /* bits in general interrupt regs */
835 #define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
836 #define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
837 #define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
838 #define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
839 #define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
840 #define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
841 #define QIB_I_C_ERROR INT_MASK(Err)
842 
843 #define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
844 #define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
845 #define QIB_I_GPIO INT_MASK(AssertGPIO)
846 #define QIB_I_P_SDMAINT(pidx) \
847 	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
848 	 INT_MASK_P(SDmaProgress, pidx) | \
849 	 INT_MASK_PM(SDmaCleanupDone, pidx))
850 
851 /* Interrupt bits that are "per port" */
852 #define QIB_I_P_BITSEXTANT(pidx) \
853 	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
854 	INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
855 	INT_MASK_P(SDmaProgress, pidx) | \
856 	INT_MASK_PM(SDmaCleanupDone, pidx))
857 
858 /* Interrupt bits that are common to a device */
859 /* currently unused: QIB_I_SPIOSENT */
860 #define QIB_I_C_BITSEXTANT \
861 	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
862 	QIB_I_SPIOSENT | \
863 	QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
864 
865 #define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
866 	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
867 
868 /*
869  * Error bits that are "per port".
870  */
871 #define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
872 #define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
873 #define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
874 #define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
875 #define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
876 #define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
877 #define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
878 #define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
879 #define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
880 #define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
881 #define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
882 #define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
883 #define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
884 #define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
885 #define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
886 #define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
887 #define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
888 #define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
889 #define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
890 #define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
891 #define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
892 #define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
893 #define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
894 #define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
895 #define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
896 #define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
897 #define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
898 #define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
899 
900 #define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
901 #define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
902 #define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
903 #define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
904 #define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
905 #define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
906 #define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
907 #define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
908 #define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
909 #define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
910 #define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
911 
912 /* Error bits that are common to a device */
913 #define QIB_E_RESET ERR_MASK(ResetNegated)
914 #define QIB_E_HARDWARE ERR_MASK(HardwareErr)
915 #define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
916 
917 
918 /*
919  * Per chip (rather than per-port) errors.  Most either do
920  * nothing but trigger a print (because they self-recover, or
921  * always occur in tandem with other errors that handle the
922  * issue), or because they indicate errors with no recovery,
923  * but we want to know that they happened.
924  */
925 #define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
926 #define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
927 #define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
928 #define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
929 #define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
930 #define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
931 #define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
932 #define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
933 
934 /* SDMA chip errors (not per port)
935  * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
936  * the SDMAHALT error immediately, so we just print the dup error via the
937  * E_AUTO mechanism.  This is true of most of the per-port fatal errors
938  * as well, but since this is port-independent, by definition, it's
939  * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
940  * packet send errors, and so are handled in the same manner as other
941  * per-packet errors.
942  */
943 #define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
944 #define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
945 #define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
946 
947 /*
948  * Below functionally equivalent to legacy QLOGIC_IB_E_PKTERRS
949  * it is used to print "common" packet errors.
950  */
951 #define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
952 	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
953 	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
954 	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
955 	QIB_E_P_REBP)
956 
957 /* Error Bits that Packet-related (Receive, per-port) */
958 #define QIB_E_P_RPKTERRS (\
959 	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
960 	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
961 	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
962 	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
963 	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
964 	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
965 
966 /*
967  * Error bits that are Send-related (per port)
968  * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
969  * All of these potentially need to have a buffer disarmed
970  */
971 #define QIB_E_P_SPKTERRS (\
972 	QIB_E_P_SUNEXP_PKTNUM |\
973 	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
974 	QIB_E_P_SMAXPKTLEN |\
975 	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
976 	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
977 	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
978 
979 #define QIB_E_SPKTERRS ( \
980 		QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
981 		ERR_MASK_N(SendUnsupportedVLErr) |			\
982 		QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
983 
984 #define QIB_E_P_SDMAERRS ( \
985 	QIB_E_P_SDMAHALT | \
986 	QIB_E_P_SDMADESCADDRMISALIGN | \
987 	QIB_E_P_SDMAUNEXPDATA | \
988 	QIB_E_P_SDMAMISSINGDW | \
989 	QIB_E_P_SDMADWEN | \
990 	QIB_E_P_SDMARPYTAG | \
991 	QIB_E_P_SDMA1STDESC | \
992 	QIB_E_P_SDMABASE | \
993 	QIB_E_P_SDMATAILOUTOFBOUND | \
994 	QIB_E_P_SDMAOUTOFBOUND | \
995 	QIB_E_P_SDMAGENMISMATCH)
996 
997 /*
998  * This sets some bits more than once, but makes it more obvious which
999  * bits are not handled under other categories, and the repeat definition
1000  * is not a problem.
1001  */
1002 #define QIB_E_P_BITSEXTANT ( \
1003 	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1004 	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1005 	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1006 	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1007 	)
1008 
1009 /*
1010  * These are errors that can occur when the link
1011  * changes state while a packet is being sent or received.  This doesn't
1012  * cover things like EBP or VCRC that can be the result of a sending
1013  * having the link change state, so we receive a "known bad" packet.
1014  * All of these are "per port", so renamed:
1015  */
1016 #define QIB_E_P_LINK_PKTERRS (\
1017 	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1018 	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1019 	QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1020 	QIB_E_P_RUNEXPCHAR)
1021 
1022 /*
1023  * This sets some bits more than once, but makes it more obvious which
1024  * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1025  * and the repeat definition is not a problem.
1026  */
1027 #define QIB_E_C_BITSEXTANT (\
1028 	QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1029 	QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1030 	QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1031 
1032 /* Likewise Neuter E_SPKT_ERRS_IGNORE */
1033 #define E_SPKT_ERRS_IGNORE 0
1034 
1035 #define QIB_EXTS_MEMBIST_DISABLED \
1036 	SYM_MASK(EXTStatus, MemBISTDisabled)
1037 #define QIB_EXTS_MEMBIST_ENDTEST \
1038 	SYM_MASK(EXTStatus, MemBISTEndTest)
1039 
1040 #define QIB_E_SPIOARMLAUNCH \
1041 	ERR_MASK(SendArmLaunchErr)
1042 
1043 #define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1044 #define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1045 
1046 /*
1047  * IBTA_1_2 is set when multiple speeds are enabled (normal),
1048  * and also if forced QDR (only QDR enabled).  It's enabled for the
1049  * forced QDR case so that scrambling will be enabled by the TS3
1050  * exchange, when supported by both sides of the link.
1051  */
1052 #define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1053 #define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1054 #define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1055 #define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1056 #define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1057 #define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1058 	SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1059 #define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1060 
1061 #define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1062 #define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1063 
1064 #define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1065 #define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1066 #define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1067 
1068 #define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1069 #define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1070 #define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1071 	SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1072 #define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1073 	SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1074 #define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1075 
1076 #define IBA7322_REDIRECT_VEC_PER_REG 12
1077 
1078 #define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1079 #define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1080 #define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1081 #define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1082 #define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1083 
1084 #define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1085 
1086 #define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1087 	.msg = #fldname }
1088 #define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1089 	fldname##Mask##_##port), .msg = #fldname }
1090 static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1091 	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1092 	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1093 	HWE_AUTO(PCIESerdesPClkNotDetect),
1094 	HWE_AUTO(PowerOnBISTFailed),
1095 	HWE_AUTO(TempsenseTholdReached),
1096 	HWE_AUTO(MemoryErr),
1097 	HWE_AUTO(PCIeBusParityErr),
1098 	HWE_AUTO(PcieCplTimeout),
1099 	HWE_AUTO(PciePoisonedTLP),
1100 	HWE_AUTO_P(SDmaMemReadErr, 1),
1101 	HWE_AUTO_P(SDmaMemReadErr, 0),
1102 	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1103 	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
1104 	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1105 	HWE_AUTO(statusValidNoEop),
1106 	HWE_AUTO(LATriggered),
1107 	{ .mask = 0 }
1108 };
1109 
1110 #define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1111 	.msg = #fldname }
1112 #define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1113 	.msg = #fldname }
1114 static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1115 	E_AUTO(ResetNegated),
1116 	E_AUTO(HardwareErr),
1117 	E_AUTO(InvalidAddrErr),
1118 	E_AUTO(SDmaVL15Err),
1119 	E_AUTO(SBufVL15MisUseErr),
1120 	E_AUTO(InvalidEEPCmd),
1121 	E_AUTO(RcvContextShareErr),
1122 	E_AUTO(SendVLMismatchErr),
1123 	E_AUTO(SendArmLaunchErr),
1124 	E_AUTO(SendSpecialTriggerErr),
1125 	E_AUTO(SDmaWrongPortErr),
1126 	E_AUTO(SDmaBufMaskDuplicateErr),
1127 	E_AUTO(RcvHdrFullErr),
1128 	E_AUTO(RcvEgrFullErr),
1129 	{ .mask = 0 }
1130 };
1131 
1132 static const struct  qib_hwerror_msgs qib_7322p_error_msgs[] = {
1133 	E_P_AUTO(IBStatusChanged),
1134 	E_P_AUTO(SHeadersErr),
1135 	E_P_AUTO(VL15BufMisuseErr),
1136 	/*
1137 	 * SDmaHaltErr is not really an error, make it clearer;
1138 	 */
1139 	{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"},
1140 	E_P_AUTO(SDmaDescAddrMisalignErr),
1141 	E_P_AUTO(SDmaUnexpDataErr),
1142 	E_P_AUTO(SDmaMissingDwErr),
1143 	E_P_AUTO(SDmaDwEnErr),
1144 	E_P_AUTO(SDmaRpyTagErr),
1145 	E_P_AUTO(SDma1stDescErr),
1146 	E_P_AUTO(SDmaBaseErr),
1147 	E_P_AUTO(SDmaTailOutOfBoundErr),
1148 	E_P_AUTO(SDmaOutOfBoundErr),
1149 	E_P_AUTO(SDmaGenMismatchErr),
1150 	E_P_AUTO(SendBufMisuseErr),
1151 	E_P_AUTO(SendUnsupportedVLErr),
1152 	E_P_AUTO(SendUnexpectedPktNumErr),
1153 	E_P_AUTO(SendDroppedDataPktErr),
1154 	E_P_AUTO(SendDroppedSmpPktErr),
1155 	E_P_AUTO(SendPktLenErr),
1156 	E_P_AUTO(SendUnderRunErr),
1157 	E_P_AUTO(SendMaxPktLenErr),
1158 	E_P_AUTO(SendMinPktLenErr),
1159 	E_P_AUTO(RcvIBLostLinkErr),
1160 	E_P_AUTO(RcvHdrErr),
1161 	E_P_AUTO(RcvHdrLenErr),
1162 	E_P_AUTO(RcvBadTidErr),
1163 	E_P_AUTO(RcvBadVersionErr),
1164 	E_P_AUTO(RcvIBFlowErr),
1165 	E_P_AUTO(RcvEBPErr),
1166 	E_P_AUTO(RcvUnsupportedVLErr),
1167 	E_P_AUTO(RcvUnexpectedCharErr),
1168 	E_P_AUTO(RcvShortPktLenErr),
1169 	E_P_AUTO(RcvLongPktLenErr),
1170 	E_P_AUTO(RcvMaxPktLenErr),
1171 	E_P_AUTO(RcvMinPktLenErr),
1172 	E_P_AUTO(RcvICRCErr),
1173 	E_P_AUTO(RcvVCRCErr),
1174 	E_P_AUTO(RcvFormatErr),
1175 	{ .mask = 0 }
1176 };
1177 
1178 /*
1179  * Below generates "auto-message" for interrupts not specific to any port or
1180  * context
1181  */
1182 #define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1183 	.msg = #fldname }
1184 /* Below generates "auto-message" for interrupts specific to a port */
1185 #define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1186 	SYM_LSB(IntMask, fldname##Mask##_0), \
1187 	SYM_LSB(IntMask, fldname##Mask##_1)), \
1188 	.msg = #fldname "_P" }
1189 /* For some reason, the SerDesTrimDone bits are reversed */
1190 #define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1191 	SYM_LSB(IntMask, fldname##Mask##_1), \
1192 	SYM_LSB(IntMask, fldname##Mask##_0)), \
1193 	.msg = #fldname "_P" }
1194 /*
1195  * Below generates "auto-message" for interrupts specific to a context,
1196  * with ctxt-number appended
1197  */
1198 #define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1199 	SYM_LSB(IntMask, fldname##0IntMask), \
1200 	SYM_LSB(IntMask, fldname##17IntMask)), \
1201 	.msg = #fldname "_C"}
1202 
1203 static const struct  qib_hwerror_msgs qib_7322_intr_msgs[] = {
1204 	INTR_AUTO_P(SDmaInt),
1205 	INTR_AUTO_P(SDmaProgressInt),
1206 	INTR_AUTO_P(SDmaIdleInt),
1207 	INTR_AUTO_P(SDmaCleanupDone),
1208 	INTR_AUTO_C(RcvUrg),
1209 	INTR_AUTO_P(ErrInt),
1210 	INTR_AUTO(ErrInt),      /* non-port-specific errs */
1211 	INTR_AUTO(AssertGPIOInt),
1212 	INTR_AUTO_P(SendDoneInt),
1213 	INTR_AUTO(SendBufAvailInt),
1214 	INTR_AUTO_C(RcvAvail),
1215 	{ .mask = 0 }
1216 };
1217 
1218 #define TXSYMPTOM_AUTO_P(fldname) \
1219 	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname }
1220 static const struct  qib_hwerror_msgs hdrchk_msgs[] = {
1221 	TXSYMPTOM_AUTO_P(NonKeyPacket),
1222 	TXSYMPTOM_AUTO_P(GRHFail),
1223 	TXSYMPTOM_AUTO_P(PkeyFail),
1224 	TXSYMPTOM_AUTO_P(QPFail),
1225 	TXSYMPTOM_AUTO_P(SLIDFail),
1226 	TXSYMPTOM_AUTO_P(RawIPV6),
1227 	TXSYMPTOM_AUTO_P(PacketTooSmall),
1228 	{ .mask = 0 }
1229 };
1230 
1231 #define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1232 
1233 /*
1234  * Called when we might have an error that is specific to a particular
1235  * PIO buffer, and may need to cancel that buffer, so it can be re-used,
1236  * because we don't need to force the update of pioavail
1237  */
1238 static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1239 {
1240 	struct qib_devdata *dd = ppd->dd;
1241 	u32 i;
1242 	int any;
1243 	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1244 	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1245 	unsigned long sbuf[4];
1246 
1247 	/*
1248 	 * It's possible that sendbuffererror could have bits set; might
1249 	 * have already done this as a result of hardware error handling.
1250 	 */
1251 	any = 0;
1252 	for (i = 0; i < regcnt; ++i) {
1253 		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1254 		if (sbuf[i]) {
1255 			any = 1;
1256 			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1257 		}
1258 	}
1259 
1260 	if (any)
1261 		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1262 }
1263 
1264 /* No txe_recover yet, if ever */
1265 
1266 /* No decode__errors yet */
1267 static void err_decode(char *msg, size_t len, u64 errs,
1268 		       const struct qib_hwerror_msgs *msp)
1269 {
1270 	u64 these, lmask;
1271 	int took, multi, n = 0;
1272 
1273 	while (msp && msp->mask) {
1274 		multi = (msp->mask & (msp->mask - 1));
1275 		while (errs & msp->mask) {
1276 			these = (errs & msp->mask);
1277 			lmask = (these & (these - 1)) ^ these;
1278 			if (len) {
1279 				if (n++) {
1280 					/* separate the strings */
1281 					*msg++ = ',';
1282 					len--;
1283 				}
1284 				took = scnprintf(msg, len, "%s", msp->msg);
1285 				len -= took;
1286 				msg += took;
1287 			}
1288 			errs &= ~lmask;
1289 			if (len && multi) {
1290 				/* More than one bit this mask */
1291 				int idx = -1;
1292 
1293 				while (lmask & msp->mask) {
1294 					++idx;
1295 					lmask >>= 1;
1296 				}
1297 				took = scnprintf(msg, len, "_%d", idx);
1298 				len -= took;
1299 				msg += took;
1300 			}
1301 		}
1302 		++msp;
1303 	}
1304 	/* If some bits are left, show in hex. */
1305 	if (len && errs)
1306 		snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1307 			(unsigned long long) errs);
1308 }
1309 
1310 /* only called if r1 set */
1311 static void flush_fifo(struct qib_pportdata *ppd)
1312 {
1313 	struct qib_devdata *dd = ppd->dd;
1314 	u32 __iomem *piobuf;
1315 	u32 bufn;
1316 	u32 *hdr;
1317 	u64 pbc;
1318 	const unsigned hdrwords = 7;
1319 	static struct qib_ib_header ibhdr = {
1320 		.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1321 		.lrh[1] = IB_LID_PERMISSIVE,
1322 		.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1323 		.lrh[3] = IB_LID_PERMISSIVE,
1324 		.u.oth.bth[0] = cpu_to_be32(
1325 			(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1326 		.u.oth.bth[1] = cpu_to_be32(0),
1327 		.u.oth.bth[2] = cpu_to_be32(0),
1328 		.u.oth.u.ud.deth[0] = cpu_to_be32(0),
1329 		.u.oth.u.ud.deth[1] = cpu_to_be32(0),
1330 	};
1331 
1332 	/*
1333 	 * Send a dummy VL15 packet to flush the launch FIFO.
1334 	 * This will not actually be sent since the TxeBypassIbc bit is set.
1335 	 */
1336 	pbc = PBC_7322_VL15_SEND |
1337 		(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1338 		(hdrwords + SIZE_OF_CRC);
1339 	piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1340 	if (!piobuf)
1341 		return;
1342 	writeq(pbc, piobuf);
1343 	hdr = (u32 *) &ibhdr;
1344 	if (dd->flags & QIB_PIO_FLUSH_WC) {
1345 		qib_flush_wc();
1346 		qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1347 		qib_flush_wc();
1348 		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1349 		qib_flush_wc();
1350 	} else
1351 		qib_pio_copy(piobuf + 2, hdr, hdrwords);
1352 	qib_sendbuf_done(dd, bufn);
1353 }
1354 
1355 /*
1356  * This is called with interrupts disabled and sdma_lock held.
1357  */
1358 static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1359 {
1360 	struct qib_devdata *dd = ppd->dd;
1361 	u64 set_sendctrl = 0;
1362 	u64 clr_sendctrl = 0;
1363 
1364 	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1365 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1366 	else
1367 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1368 
1369 	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1370 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1371 	else
1372 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1373 
1374 	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1375 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1376 	else
1377 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1378 
1379 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1380 		set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1381 				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1382 				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1383 	else
1384 		clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1385 				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1386 				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1387 
1388 	spin_lock(&dd->sendctrl_lock);
1389 
1390 	/* If we are draining everything, block sends first */
1391 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1392 		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1393 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1394 		qib_write_kreg(dd, kr_scratch, 0);
1395 	}
1396 
1397 	ppd->p_sendctrl |= set_sendctrl;
1398 	ppd->p_sendctrl &= ~clr_sendctrl;
1399 
1400 	if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1401 		qib_write_kreg_port(ppd, krp_sendctrl,
1402 				    ppd->p_sendctrl |
1403 				    SYM_MASK(SendCtrl_0, SDmaCleanup));
1404 	else
1405 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1406 	qib_write_kreg(dd, kr_scratch, 0);
1407 
1408 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1409 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1410 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1411 		qib_write_kreg(dd, kr_scratch, 0);
1412 	}
1413 
1414 	spin_unlock(&dd->sendctrl_lock);
1415 
1416 	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1417 		flush_fifo(ppd);
1418 }
1419 
1420 static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1421 {
1422 	__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1423 }
1424 
1425 static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1426 {
1427 	/*
1428 	 * Set SendDmaLenGen and clear and set
1429 	 * the MSB of the generation count to enable generation checking
1430 	 * and load the internal generation counter.
1431 	 */
1432 	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1433 	qib_write_kreg_port(ppd, krp_senddmalengen,
1434 			    ppd->sdma_descq_cnt |
1435 			    (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1436 }
1437 
1438 /*
1439  * Must be called with sdma_lock held, or before init finished.
1440  */
1441 static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1442 {
1443 	/* Commit writes to memory and advance the tail on the chip */
1444 	wmb();
1445 	ppd->sdma_descq_tail = tail;
1446 	qib_write_kreg_port(ppd, krp_senddmatail, tail);
1447 }
1448 
1449 /*
1450  * This is called with interrupts disabled and sdma_lock held.
1451  */
1452 static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1453 {
1454 	/*
1455 	 * Drain all FIFOs.
1456 	 * The hardware doesn't require this but we do it so that verbs
1457 	 * and user applications don't wait for link active to send stale
1458 	 * data.
1459 	 */
1460 	sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1461 
1462 	qib_sdma_7322_setlengen(ppd);
1463 	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1464 	ppd->sdma_head_dma[0] = 0;
1465 	qib_7322_sdma_sendctrl(ppd,
1466 		ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1467 }
1468 
1469 #define DISABLES_SDMA ( \
1470 	QIB_E_P_SDMAHALT | \
1471 	QIB_E_P_SDMADESCADDRMISALIGN | \
1472 	QIB_E_P_SDMAMISSINGDW | \
1473 	QIB_E_P_SDMADWEN | \
1474 	QIB_E_P_SDMARPYTAG | \
1475 	QIB_E_P_SDMA1STDESC | \
1476 	QIB_E_P_SDMABASE | \
1477 	QIB_E_P_SDMATAILOUTOFBOUND | \
1478 	QIB_E_P_SDMAOUTOFBOUND | \
1479 	QIB_E_P_SDMAGENMISMATCH)
1480 
1481 static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1482 {
1483 	unsigned long flags;
1484 	struct qib_devdata *dd = ppd->dd;
1485 
1486 	errs &= QIB_E_P_SDMAERRS;
1487 
1488 	if (errs & QIB_E_P_SDMAUNEXPDATA)
1489 		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1490 			    ppd->port);
1491 
1492 	spin_lock_irqsave(&ppd->sdma_lock, flags);
1493 
1494 	switch (ppd->sdma_state.current_state) {
1495 	case qib_sdma_state_s00_hw_down:
1496 		break;
1497 
1498 	case qib_sdma_state_s10_hw_start_up_wait:
1499 		if (errs & QIB_E_P_SDMAHALT)
1500 			__qib_sdma_process_event(ppd,
1501 				qib_sdma_event_e20_hw_started);
1502 		break;
1503 
1504 	case qib_sdma_state_s20_idle:
1505 		break;
1506 
1507 	case qib_sdma_state_s30_sw_clean_up_wait:
1508 		break;
1509 
1510 	case qib_sdma_state_s40_hw_clean_up_wait:
1511 		if (errs & QIB_E_P_SDMAHALT)
1512 			__qib_sdma_process_event(ppd,
1513 				qib_sdma_event_e50_hw_cleaned);
1514 		break;
1515 
1516 	case qib_sdma_state_s50_hw_halt_wait:
1517 		if (errs & QIB_E_P_SDMAHALT)
1518 			__qib_sdma_process_event(ppd,
1519 				qib_sdma_event_e60_hw_halted);
1520 		break;
1521 
1522 	case qib_sdma_state_s99_running:
1523 		__qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1524 		__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1525 		break;
1526 	}
1527 
1528 	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1529 }
1530 
1531 /*
1532  * handle per-device errors (not per-port errors)
1533  */
1534 static noinline void handle_7322_errors(struct qib_devdata *dd)
1535 {
1536 	char *msg;
1537 	u64 iserr = 0;
1538 	u64 errs;
1539 	u64 mask;
1540 	int log_idx;
1541 
1542 	qib_stats.sps_errints++;
1543 	errs = qib_read_kreg64(dd, kr_errstatus);
1544 	if (!errs) {
1545 		qib_devinfo(dd->pcidev, "device error interrupt, "
1546 			 "but no error bits set!\n");
1547 		goto done;
1548 	}
1549 
1550 	/* don't report errors that are masked */
1551 	errs &= dd->cspec->errormask;
1552 	msg = dd->cspec->emsgbuf;
1553 
1554 	/* do these first, they are most important */
1555 	if (errs & QIB_E_HARDWARE) {
1556 		*msg = '\0';
1557 		qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
1558 	} else
1559 		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1560 			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1561 				qib_inc_eeprom_err(dd, log_idx, 1);
1562 
1563 	if (errs & QIB_E_SPKTERRS) {
1564 		qib_disarm_7322_senderrbufs(dd->pport);
1565 		qib_stats.sps_txerrs++;
1566 	} else if (errs & QIB_E_INVALIDADDR)
1567 		qib_stats.sps_txerrs++;
1568 	else if (errs & QIB_E_ARMLAUNCH) {
1569 		qib_stats.sps_txerrs++;
1570 		qib_disarm_7322_senderrbufs(dd->pport);
1571 	}
1572 	qib_write_kreg(dd, kr_errclear, errs);
1573 
1574 	/*
1575 	 * The ones we mask off are handled specially below
1576 	 * or above.  Also mask SDMADISABLED by default as it
1577 	 * is too chatty.
1578 	 */
1579 	mask = QIB_E_HARDWARE;
1580 	*msg = '\0';
1581 
1582 	err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
1583 		   qib_7322error_msgs);
1584 
1585 	/*
1586 	 * Getting reset is a tragedy for all ports. Mark the device
1587 	 * _and_ the ports as "offline" in way meaningful to each.
1588 	 */
1589 	if (errs & QIB_E_RESET) {
1590 		int pidx;
1591 
1592 		qib_dev_err(dd, "Got reset, requires re-init "
1593 			    "(unload and reload driver)\n");
1594 		dd->flags &= ~QIB_INITTED;  /* needs re-init */
1595 		/* mark as having had error */
1596 		*dd->devstatusp |= QIB_STATUS_HWERROR;
1597 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
1598 			if (dd->pport[pidx].link_speed_supported)
1599 				*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1600 	}
1601 
1602 	if (*msg && iserr)
1603 		qib_dev_err(dd, "%s error\n", msg);
1604 
1605 	/*
1606 	 * If there were hdrq or egrfull errors, wake up any processes
1607 	 * waiting in poll.  We used to try to check which contexts had
1608 	 * the overflow, but given the cost of that and the chip reads
1609 	 * to support it, it's better to just wake everybody up if we
1610 	 * get an overflow; waiters can poll again if it's not them.
1611 	 */
1612 	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1613 		qib_handle_urcv(dd, ~0U);
1614 		if (errs & ERR_MASK(RcvEgrFullErr))
1615 			qib_stats.sps_buffull++;
1616 		else
1617 			qib_stats.sps_hdrfull++;
1618 	}
1619 
1620 done:
1621 	return;
1622 }
1623 
1624 static void reenable_chase(unsigned long opaque)
1625 {
1626 	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1627 
1628 	ppd->cpspec->chase_timer.expires = 0;
1629 	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1630 		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1631 }
1632 
1633 static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
1634 {
1635 	ppd->cpspec->chase_end = 0;
1636 
1637 	if (!qib_chase)
1638 		return;
1639 
1640 	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1641 		QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1642 	ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1643 	add_timer(&ppd->cpspec->chase_timer);
1644 }
1645 
1646 static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1647 {
1648 	u8 ibclt;
1649 	u64 tnow;
1650 
1651 	ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1652 
1653 	/*
1654 	 * Detect and handle the state chase issue, where we can
1655 	 * get stuck if we are unlucky on timing on both sides of
1656 	 * the link.   If we are, we disable, set a timer, and
1657 	 * then re-enable.
1658 	 */
1659 	switch (ibclt) {
1660 	case IB_7322_LT_STATE_CFGRCVFCFG:
1661 	case IB_7322_LT_STATE_CFGWAITRMT:
1662 	case IB_7322_LT_STATE_TXREVLANES:
1663 	case IB_7322_LT_STATE_CFGENH:
1664 		tnow = get_jiffies_64();
1665 		if (ppd->cpspec->chase_end &&
1666 		     time_after64(tnow, ppd->cpspec->chase_end))
1667 			disable_chase(ppd, tnow, ibclt);
1668 		else if (!ppd->cpspec->chase_end)
1669 			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1670 		break;
1671 	default:
1672 		ppd->cpspec->chase_end = 0;
1673 		break;
1674 	}
1675 
1676 	if (ibclt == IB_7322_LT_STATE_CFGTEST &&
1677 	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1678 		force_h1(ppd);
1679 		ppd->cpspec->qdr_reforce = 1;
1680 	} else if (ppd->cpspec->qdr_reforce &&
1681 		(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1682 		 (ibclt == IB_7322_LT_STATE_CFGENH ||
1683 		ibclt == IB_7322_LT_STATE_CFGIDLE ||
1684 		ibclt == IB_7322_LT_STATE_LINKUP))
1685 		force_h1(ppd);
1686 
1687 	if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1688 	    ppd->link_speed_enabled == QIB_IB_QDR &&
1689 	    (ibclt == IB_7322_LT_STATE_CFGTEST ||
1690 	     ibclt == IB_7322_LT_STATE_CFGENH ||
1691 	     (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1692 	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1693 		adj_tx_serdes(ppd);
1694 
1695 	if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
1696 	    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1697 		ppd->cpspec->qdr_dfe_on = 1;
1698 		ppd->cpspec->qdr_dfe_time = 0;
1699 		/* On link down, reenable QDR adaptation */
1700 		qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1701 			ppd->dd->cspec->r1 ?
1702 				    QDR_STATIC_ADAPT_DOWN_R1 :
1703 				    QDR_STATIC_ADAPT_DOWN);
1704 	}
1705 }
1706 
1707 /*
1708  * This is per-pport error handling.
1709  * will likely get it's own MSIx interrupt (one for each port,
1710  * although just a single handler).
1711  */
1712 static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1713 {
1714 	char *msg;
1715 	u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1716 	struct qib_devdata *dd = ppd->dd;
1717 
1718 	/* do this as soon as possible */
1719 	fmask = qib_read_kreg64(dd, kr_act_fmask);
1720 	if (!fmask)
1721 		check_7322_rxe_status(ppd);
1722 
1723 	errs = qib_read_kreg_port(ppd, krp_errstatus);
1724 	if (!errs)
1725 		qib_devinfo(dd->pcidev,
1726 			 "Port%d error interrupt, but no error bits set!\n",
1727 			 ppd->port);
1728 	if (!fmask)
1729 		errs &= ~QIB_E_P_IBSTATUSCHANGED;
1730 	if (!errs)
1731 		goto done;
1732 
1733 	msg = ppd->cpspec->epmsgbuf;
1734 	*msg = '\0';
1735 
1736 	if (errs & ~QIB_E_P_BITSEXTANT) {
1737 		err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1738 			   errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1739 		if (!*msg)
1740 			snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
1741 				 "no others");
1742 		qib_dev_porterr(dd, ppd->port, "error interrupt with unknown"
1743 				" errors 0x%016Lx set (and %s)\n",
1744 				(errs & ~QIB_E_P_BITSEXTANT), msg);
1745 		*msg = '\0';
1746 	}
1747 
1748 	if (errs & QIB_E_P_SHDR) {
1749 		u64 symptom;
1750 
1751 		/* determine cause, then write to clear */
1752 		symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1753 		qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1754 		err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
1755 			   hdrchk_msgs);
1756 		*msg = '\0';
1757 		/* senderrbuf cleared in SPKTERRS below */
1758 	}
1759 
1760 	if (errs & QIB_E_P_SPKTERRS) {
1761 		if ((errs & QIB_E_P_LINK_PKTERRS) &&
1762 		    !(ppd->lflags & QIBL_LINKACTIVE)) {
1763 			/*
1764 			 * This can happen when trying to bring the link
1765 			 * up, but the IB link changes state at the "wrong"
1766 			 * time. The IB logic then complains that the packet
1767 			 * isn't valid.  We don't want to confuse people, so
1768 			 * we just don't print them, except at debug
1769 			 */
1770 			err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1771 				   (errs & QIB_E_P_LINK_PKTERRS),
1772 				   qib_7322p_error_msgs);
1773 			*msg = '\0';
1774 			ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1775 		}
1776 		qib_disarm_7322_senderrbufs(ppd);
1777 	} else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1778 		   !(ppd->lflags & QIBL_LINKACTIVE)) {
1779 		/*
1780 		 * This can happen when SMA is trying to bring the link
1781 		 * up, but the IB link changes state at the "wrong" time.
1782 		 * The IB logic then complains that the packet isn't
1783 		 * valid.  We don't want to confuse people, so we just
1784 		 * don't print them, except at debug
1785 		 */
1786 		err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
1787 			   qib_7322p_error_msgs);
1788 		ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1789 		*msg = '\0';
1790 	}
1791 
1792 	qib_write_kreg_port(ppd, krp_errclear, errs);
1793 
1794 	errs &= ~ignore_this_time;
1795 	if (!errs)
1796 		goto done;
1797 
1798 	if (errs & QIB_E_P_RPKTERRS)
1799 		qib_stats.sps_rcverrs++;
1800 	if (errs & QIB_E_P_SPKTERRS)
1801 		qib_stats.sps_txerrs++;
1802 
1803 	iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1804 
1805 	if (errs & QIB_E_P_SDMAERRS)
1806 		sdma_7322_p_errors(ppd, errs);
1807 
1808 	if (errs & QIB_E_P_IBSTATUSCHANGED) {
1809 		u64 ibcs;
1810 		u8 ltstate;
1811 
1812 		ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1813 		ltstate = qib_7322_phys_portstate(ibcs);
1814 
1815 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1816 			handle_serdes_issues(ppd, ibcs);
1817 		if (!(ppd->cpspec->ibcctrl_a &
1818 		      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1819 			/*
1820 			 * We got our interrupt, so init code should be
1821 			 * happy and not try alternatives. Now squelch
1822 			 * other "chatter" from link-negotiation (pre Init)
1823 			 */
1824 			ppd->cpspec->ibcctrl_a |=
1825 				SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1826 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
1827 					    ppd->cpspec->ibcctrl_a);
1828 		}
1829 
1830 		/* Update our picture of width and speed from chip */
1831 		ppd->link_width_active =
1832 			(ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1833 			    IB_WIDTH_4X : IB_WIDTH_1X;
1834 		ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1835 			LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1836 			  SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1837 				   QIB_IB_DDR : QIB_IB_SDR;
1838 
1839 		if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1840 		    IB_PHYSPORTSTATE_DISABLED)
1841 			qib_set_ib_7322_lstate(ppd, 0,
1842 			       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1843 		else
1844 			/*
1845 			 * Since going into a recovery state causes the link
1846 			 * state to go down and since recovery is transitory,
1847 			 * it is better if we "miss" ever seeing the link
1848 			 * training state go into recovery (i.e., ignore this
1849 			 * transition for link state special handling purposes)
1850 			 * without updating lastibcstat.
1851 			 */
1852 			if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1853 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1854 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1855 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1856 				qib_handle_e_ibstatuschanged(ppd, ibcs);
1857 	}
1858 	if (*msg && iserr)
1859 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1860 
1861 	if (ppd->state_wanted & ppd->lflags)
1862 		wake_up_interruptible(&ppd->state_wait);
1863 done:
1864 	return;
1865 }
1866 
1867 /* enable/disable chip from delivering interrupts */
1868 static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
1869 {
1870 	if (enable) {
1871 		if (dd->flags & QIB_BADINTR)
1872 			return;
1873 		qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
1874 		/* cause any pending enabled interrupts to be re-delivered */
1875 		qib_write_kreg(dd, kr_intclear, 0ULL);
1876 		if (dd->cspec->num_msix_entries) {
1877 			/* and same for MSIx */
1878 			u64 val = qib_read_kreg64(dd, kr_intgranted);
1879 			if (val)
1880 				qib_write_kreg(dd, kr_intgranted, val);
1881 		}
1882 	} else
1883 		qib_write_kreg(dd, kr_intmask, 0ULL);
1884 }
1885 
1886 /*
1887  * Try to cleanup as much as possible for anything that might have gone
1888  * wrong while in freeze mode, such as pio buffers being written by user
1889  * processes (causing armlaunch), send errors due to going into freeze mode,
1890  * etc., and try to avoid causing extra interrupts while doing so.
1891  * Forcibly update the in-memory pioavail register copies after cleanup
1892  * because the chip won't do it while in freeze mode (the register values
1893  * themselves are kept correct).
1894  * Make sure that we don't lose any important interrupts by using the chip
1895  * feature that says that writing 0 to a bit in *clear that is set in
1896  * *status will cause an interrupt to be generated again (if allowed by
1897  * the *mask value).
1898  * This is in chip-specific code because of all of the register accesses,
1899  * even though the details are similar on most chips.
1900  */
1901 static void qib_7322_clear_freeze(struct qib_devdata *dd)
1902 {
1903 	int pidx;
1904 
1905 	/* disable error interrupts, to avoid confusion */
1906 	qib_write_kreg(dd, kr_errmask, 0ULL);
1907 
1908 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
1909 		if (dd->pport[pidx].link_speed_supported)
1910 			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
1911 					    0ULL);
1912 
1913 	/* also disable interrupts; errormask is sometimes overwriten */
1914 	qib_7322_set_intr_state(dd, 0);
1915 
1916 	/* clear the freeze, and be sure chip saw it */
1917 	qib_write_kreg(dd, kr_control, dd->control);
1918 	qib_read_kreg32(dd, kr_scratch);
1919 
1920 	/*
1921 	 * Force new interrupt if any hwerr, error or interrupt bits are
1922 	 * still set, and clear "safe" send packet errors related to freeze
1923 	 * and cancelling sends.  Re-enable error interrupts before possible
1924 	 * force of re-interrupt on pending interrupts.
1925 	 */
1926 	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
1927 	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
1928 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1929 	/* We need to purge per-port errs and reset mask, too */
1930 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1931 		if (!dd->pport[pidx].link_speed_supported)
1932 			continue;
1933 		qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
1934 		qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
1935 	}
1936 	qib_7322_set_intr_state(dd, 1);
1937 }
1938 
1939 /* no error handling to speak of */
1940 /**
1941  * qib_7322_handle_hwerrors - display hardware errors.
1942  * @dd: the qlogic_ib device
1943  * @msg: the output buffer
1944  * @msgl: the size of the output buffer
1945  *
1946  * Use same msg buffer as regular errors to avoid excessive stack
1947  * use.  Most hardware errors are catastrophic, but for right now,
1948  * we'll print them and continue.  We reuse the same message buffer as
1949  * qib_handle_errors() to avoid excessive stack usage.
1950  */
1951 static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
1952 				     size_t msgl)
1953 {
1954 	u64 hwerrs;
1955 	u32 ctrl;
1956 	int isfatal = 0;
1957 
1958 	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
1959 	if (!hwerrs)
1960 		goto bail;
1961 	if (hwerrs == ~0ULL) {
1962 		qib_dev_err(dd, "Read of hardware error status failed "
1963 			    "(all bits set); ignoring\n");
1964 		goto bail;
1965 	}
1966 	qib_stats.sps_hwerrs++;
1967 
1968 	/* Always clear the error status register, except BIST fail */
1969 	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
1970 		       ~HWE_MASK(PowerOnBISTFailed));
1971 
1972 	hwerrs &= dd->cspec->hwerrmask;
1973 
1974 	/* no EEPROM logging, yet */
1975 
1976 	if (hwerrs)
1977 		qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
1978 			    "(cleared)\n", (unsigned long long) hwerrs);
1979 
1980 	ctrl = qib_read_kreg32(dd, kr_control);
1981 	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
1982 		/*
1983 		 * No recovery yet...
1984 		 */
1985 		if ((hwerrs & ~HWE_MASK(LATriggered)) ||
1986 		    dd->cspec->stay_in_freeze) {
1987 			/*
1988 			 * If any set that we aren't ignoring only make the
1989 			 * complaint once, in case it's stuck or recurring,
1990 			 * and we get here multiple times
1991 			 * Force link down, so switch knows, and
1992 			 * LEDs are turned off.
1993 			 */
1994 			if (dd->flags & QIB_INITTED)
1995 				isfatal = 1;
1996 		} else
1997 			qib_7322_clear_freeze(dd);
1998 	}
1999 
2000 	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2001 		isfatal = 1;
2002 		strlcpy(msg, "[Memory BIST test failed, "
2003 			"InfiniPath hardware unusable]", msgl);
2004 		/* ignore from now on, so disable until driver reloaded */
2005 		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2006 		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2007 	}
2008 
2009 	err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2010 
2011 	/* Ignore esoteric PLL failures et al. */
2012 
2013 	qib_dev_err(dd, "%s hardware error\n", msg);
2014 
2015 	if (isfatal && !dd->diag_client) {
2016 		qib_dev_err(dd, "Fatal Hardware Error, no longer"
2017 			    " usable, SN %.16s\n", dd->serial);
2018 		/*
2019 		 * for /sys status file and user programs to print; if no
2020 		 * trailing brace is copied, we'll know it was truncated.
2021 		 */
2022 		if (dd->freezemsg)
2023 			snprintf(dd->freezemsg, dd->freezelen,
2024 				 "{%s}", msg);
2025 		qib_disable_after_error(dd);
2026 	}
2027 bail:;
2028 }
2029 
2030 /**
2031  * qib_7322_init_hwerrors - enable hardware errors
2032  * @dd: the qlogic_ib device
2033  *
2034  * now that we have finished initializing everything that might reasonably
2035  * cause a hardware error, and cleared those errors bits as they occur,
2036  * we can enable hardware errors in the mask (potentially enabling
2037  * freeze mode), and enable hardware errors as errors (along with
2038  * everything else) in errormask
2039  */
2040 static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2041 {
2042 	int pidx;
2043 	u64 extsval;
2044 
2045 	extsval = qib_read_kreg64(dd, kr_extstatus);
2046 	if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2047 			 QIB_EXTS_MEMBIST_ENDTEST)))
2048 		qib_dev_err(dd, "MemBIST did not complete!\n");
2049 
2050 	/* never clear BIST failure, so reported on each driver load */
2051 	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2052 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2053 
2054 	/* clear all */
2055 	qib_write_kreg(dd, kr_errclear, ~0ULL);
2056 	/* enable errors that are masked, at least this first time. */
2057 	qib_write_kreg(dd, kr_errmask, ~0ULL);
2058 	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2059 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
2060 		if (dd->pport[pidx].link_speed_supported)
2061 			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2062 					    ~0ULL);
2063 }
2064 
2065 /*
2066  * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
2067  * on chips that are count-based, rather than trigger-based.  There is no
2068  * reference counting, but that's also fine, given the intended use.
2069  * Only chip-specific because it's all register accesses
2070  */
2071 static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2072 {
2073 	if (enable) {
2074 		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2075 		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2076 	} else
2077 		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2078 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2079 }
2080 
2081 /*
2082  * Formerly took parameter <which> in pre-shifted,
2083  * pre-merged form with LinkCmd and LinkInitCmd
2084  * together, and assuming the zero was NOP.
2085  */
2086 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2087 				   u16 linitcmd)
2088 {
2089 	u64 mod_wd;
2090 	struct qib_devdata *dd = ppd->dd;
2091 	unsigned long flags;
2092 
2093 	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2094 		/*
2095 		 * If we are told to disable, note that so link-recovery
2096 		 * code does not attempt to bring us back up.
2097 		 * Also reset everything that we can, so we start
2098 		 * completely clean when re-enabled (before we
2099 		 * actually issue the disable to the IBC)
2100 		 */
2101 		qib_7322_mini_pcs_reset(ppd);
2102 		spin_lock_irqsave(&ppd->lflags_lock, flags);
2103 		ppd->lflags |= QIBL_IB_LINK_DISABLED;
2104 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2105 	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2106 		/*
2107 		 * Any other linkinitcmd will lead to LINKDOWN and then
2108 		 * to INIT (if all is well), so clear flag to let
2109 		 * link-recovery code attempt to bring us back up.
2110 		 */
2111 		spin_lock_irqsave(&ppd->lflags_lock, flags);
2112 		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2113 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2114 		/*
2115 		 * Clear status change interrupt reduction so the
2116 		 * new state is seen.
2117 		 */
2118 		ppd->cpspec->ibcctrl_a &=
2119 			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2120 	}
2121 
2122 	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2123 		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2124 
2125 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2126 			    mod_wd);
2127 	/* write to chip to prevent back-to-back writes of ibc reg */
2128 	qib_write_kreg(dd, kr_scratch, 0);
2129 
2130 }
2131 
2132 /*
2133  * The total RCV buffer memory is 64KB, used for both ports, and is
2134  * in units of 64 bytes (same as IB flow control credit unit).
2135  * The consumedVL unit in the same registers are in 32 byte units!
2136  * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2137  * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2138  * in krp_rxcreditvl15, rather than 10.
2139  */
2140 #define RCV_BUF_UNITSZ 64
2141 #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
2142 
2143 static void set_vls(struct qib_pportdata *ppd)
2144 {
2145 	int i, numvls, totcred, cred_vl, vl0extra;
2146 	struct qib_devdata *dd = ppd->dd;
2147 	u64 val;
2148 
2149 	numvls = qib_num_vls(ppd->vls_operational);
2150 
2151 	/*
2152 	 * Set up per-VL credits. Below is kluge based on these assumptions:
2153 	 * 1) port is disabled at the time early_init is called.
2154 	 * 2) give VL15 17 credits, for two max-plausible packets.
2155 	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
2156 	 */
2157 	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
2158 	totcred = NUM_RCV_BUF_UNITS(dd);
2159 	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2160 	totcred -= cred_vl;
2161 	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2162 	cred_vl = totcred / numvls;
2163 	vl0extra = totcred - cred_vl * numvls;
2164 	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2165 	for (i = 1; i < numvls; i++)
2166 		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2167 	for (; i < 8; i++) /* no buffer space for other VLs */
2168 		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2169 
2170 	/* Notify IBC that credits need to be recalculated */
2171 	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2172 	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2173 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2174 	qib_write_kreg(dd, kr_scratch, 0ULL);
2175 	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2176 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2177 
2178 	for (i = 0; i < numvls; i++)
2179 		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2180 	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2181 
2182 	/* Change the number of operational VLs */
2183 	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2184 				~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2185 		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2186 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2187 	qib_write_kreg(dd, kr_scratch, 0ULL);
2188 }
2189 
2190 /*
2191  * The code that deals with actual SerDes is in serdes_7322_init().
2192  * Compared to the code for iba7220, it is minimal.
2193  */
2194 static int serdes_7322_init(struct qib_pportdata *ppd);
2195 
2196 /**
2197  * qib_7322_bringup_serdes - bring up the serdes
2198  * @ppd: physical port on the qlogic_ib device
2199  */
2200 static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2201 {
2202 	struct qib_devdata *dd = ppd->dd;
2203 	u64 val, guid, ibc;
2204 	unsigned long flags;
2205 	int ret = 0;
2206 
2207 	/*
2208 	 * SerDes model not in Pd, but still need to
2209 	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2210 	 * eventually.
2211 	 */
2212 	/* Put IBC in reset, sends disabled (should be in reset already) */
2213 	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2214 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2215 	qib_write_kreg(dd, kr_scratch, 0ULL);
2216 
2217 	if (qib_compat_ddr_negotiate) {
2218 		ppd->cpspec->ibdeltainprog = 1;
2219 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2220 						crp_ibsymbolerr);
2221 		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2222 						crp_iblinkerrrecov);
2223 	}
2224 
2225 	/* flowcontrolwatermark is in units of KBytes */
2226 	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2227 	/*
2228 	 * Flow control is sent this often, even if no changes in
2229 	 * buffer space occur.  Units are 128ns for this chip.
2230 	 * Set to 3usec.
2231 	 */
2232 	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2233 	/* max error tolerance */
2234 	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2235 	/* IB credit flow control. */
2236 	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2237 	/*
2238 	 * set initial max size pkt IBC will send, including ICRC; it's the
2239 	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2240 	 */
2241 	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2242 		SYM_LSB(IBCCtrlA_0, MaxPktLen);
2243 	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2244 
2245 	/* initially come up waiting for TS1, without sending anything. */
2246 	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2247 		QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2248 
2249 	/*
2250 	 * Reset the PCS interface to the serdes (and also ibc, which is still
2251 	 * in reset from above).  Writes new value of ibcctrl_a as last step.
2252 	 */
2253 	qib_7322_mini_pcs_reset(ppd);
2254 	qib_write_kreg(dd, kr_scratch, 0ULL);
2255 
2256 	if (!ppd->cpspec->ibcctrl_b) {
2257 		unsigned lse = ppd->link_speed_enabled;
2258 
2259 		/*
2260 		 * Not on re-init after reset, establish shadow
2261 		 * and force initial config.
2262 		 */
2263 		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2264 							     krp_ibcctrl_b);
2265 		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2266 				IBA7322_IBC_SPEED_DDR |
2267 				IBA7322_IBC_SPEED_SDR |
2268 				IBA7322_IBC_WIDTH_AUTONEG |
2269 				SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2270 		if (lse & (lse - 1)) /* Muliple speeds enabled */
2271 			ppd->cpspec->ibcctrl_b |=
2272 				(lse << IBA7322_IBC_SPEED_LSB) |
2273 				IBA7322_IBC_IBTA_1_2_MASK |
2274 				IBA7322_IBC_MAX_SPEED_MASK;
2275 		else
2276 			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2277 				IBA7322_IBC_SPEED_QDR |
2278 				 IBA7322_IBC_IBTA_1_2_MASK :
2279 				(lse == QIB_IB_DDR) ?
2280 					IBA7322_IBC_SPEED_DDR :
2281 					IBA7322_IBC_SPEED_SDR;
2282 		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2283 		    (IB_WIDTH_1X | IB_WIDTH_4X))
2284 			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2285 		else
2286 			ppd->cpspec->ibcctrl_b |=
2287 				ppd->link_width_enabled == IB_WIDTH_4X ?
2288 				IBA7322_IBC_WIDTH_4X_ONLY :
2289 				IBA7322_IBC_WIDTH_1X_ONLY;
2290 
2291 		/* always enable these on driver reload, not sticky */
2292 		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2293 			IBA7322_IBC_HRTBT_MASK);
2294 	}
2295 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2296 
2297 	/* setup so we have more time at CFGTEST to change H1 */
2298 	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2299 	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2300 	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2301 	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2302 
2303 	serdes_7322_init(ppd);
2304 
2305 	guid = be64_to_cpu(ppd->guid);
2306 	if (!guid) {
2307 		if (dd->base_guid)
2308 			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2309 		ppd->guid = cpu_to_be64(guid);
2310 	}
2311 
2312 	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2313 	/* write to chip to prevent back-to-back writes of ibc reg */
2314 	qib_write_kreg(dd, kr_scratch, 0);
2315 
2316 	/* Enable port */
2317 	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2318 	set_vls(ppd);
2319 
2320 	/* be paranoid against later code motion, etc. */
2321 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2322 	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2323 	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2324 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2325 
2326 	/* Also enable IBSTATUSCHG interrupt.  */
2327 	val = qib_read_kreg_port(ppd, krp_errmask);
2328 	qib_write_kreg_port(ppd, krp_errmask,
2329 		val | ERR_MASK_N(IBStatusChanged));
2330 
2331 	/* Always zero until we start messing with SerDes for real */
2332 	return ret;
2333 }
2334 
2335 /**
2336  * qib_7322_quiet_serdes - set serdes to txidle
2337  * @dd: the qlogic_ib device
2338  * Called when driver is being unloaded
2339  */
2340 static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2341 {
2342 	u64 val;
2343 	unsigned long flags;
2344 
2345 	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2346 
2347 	spin_lock_irqsave(&ppd->lflags_lock, flags);
2348 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2349 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2350 	wake_up(&ppd->cpspec->autoneg_wait);
2351 	cancel_delayed_work(&ppd->cpspec->autoneg_work);
2352 	if (ppd->dd->cspec->r1)
2353 		cancel_delayed_work(&ppd->cpspec->ipg_work);
2354 	flush_scheduled_work();
2355 
2356 	ppd->cpspec->chase_end = 0;
2357 	if (ppd->cpspec->chase_timer.data) /* if initted */
2358 		del_timer_sync(&ppd->cpspec->chase_timer);
2359 
2360 	/*
2361 	 * Despite the name, actually disables IBC as well. Do it when
2362 	 * we are as sure as possible that no more packets can be
2363 	 * received, following the down and the PCS reset.
2364 	 * The actual disabling happens in qib_7322_mini_pci_reset(),
2365 	 * along with the PCS being reset.
2366 	 */
2367 	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2368 	qib_7322_mini_pcs_reset(ppd);
2369 
2370 	/*
2371 	 * Update the adjusted counters so the adjustment persists
2372 	 * across driver reload.
2373 	 */
2374 	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2375 	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2376 		struct qib_devdata *dd = ppd->dd;
2377 		u64 diagc;
2378 
2379 		/* enable counter writes */
2380 		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2381 		qib_write_kreg(dd, kr_hwdiagctrl,
2382 			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2383 
2384 		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2385 			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2386 			if (ppd->cpspec->ibdeltainprog)
2387 				val -= val - ppd->cpspec->ibsymsnap;
2388 			val -= ppd->cpspec->ibsymdelta;
2389 			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2390 		}
2391 		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2392 			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2393 			if (ppd->cpspec->ibdeltainprog)
2394 				val -= val - ppd->cpspec->iblnkerrsnap;
2395 			val -= ppd->cpspec->iblnkerrdelta;
2396 			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2397 		}
2398 		if (ppd->cpspec->iblnkdowndelta) {
2399 			val = read_7322_creg32_port(ppd, crp_iblinkdown);
2400 			val += ppd->cpspec->iblnkdowndelta;
2401 			write_7322_creg_port(ppd, crp_iblinkdown, val);
2402 		}
2403 		/*
2404 		 * No need to save ibmalfdelta since IB perfcounters
2405 		 * are cleared on driver reload.
2406 		 */
2407 
2408 		/* and disable counter writes */
2409 		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2410 	}
2411 }
2412 
2413 /**
2414  * qib_setup_7322_setextled - set the state of the two external LEDs
2415  * @ppd: physical port on the qlogic_ib device
2416  * @on: whether the link is up or not
2417  *
2418  * The exact combo of LEDs if on is true is determined by looking
2419  * at the ibcstatus.
2420  *
2421  * These LEDs indicate the physical and logical state of IB link.
2422  * For this chip (at least with recommended board pinouts), LED1
2423  * is Yellow (logical state) and LED2 is Green (physical state),
2424  *
2425  * Note:  We try to match the Mellanox HCA LED behavior as best
2426  * we can.  Green indicates physical link state is OK (something is
2427  * plugged in, and we can train).
2428  * Amber indicates the link is logically up (ACTIVE).
2429  * Mellanox further blinks the amber LED to indicate data packet
2430  * activity, but we have no hardware support for that, so it would
2431  * require waking up every 10-20 msecs and checking the counters
2432  * on the chip, and then turning the LED off if appropriate.  That's
2433  * visible overhead, so not something we will do.
2434  */
2435 static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2436 {
2437 	struct qib_devdata *dd = ppd->dd;
2438 	u64 extctl, ledblink = 0, val;
2439 	unsigned long flags;
2440 	int yel, grn;
2441 
2442 	/*
2443 	 * The diags use the LED to indicate diag info, so we leave
2444 	 * the external LED alone when the diags are running.
2445 	 */
2446 	if (dd->diag_client)
2447 		return;
2448 
2449 	/* Allow override of LED display for, e.g. Locating system in rack */
2450 	if (ppd->led_override) {
2451 		grn = (ppd->led_override & QIB_LED_PHYS);
2452 		yel = (ppd->led_override & QIB_LED_LOG);
2453 	} else if (on) {
2454 		val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2455 		grn = qib_7322_phys_portstate(val) ==
2456 			IB_PHYSPORTSTATE_LINKUP;
2457 		yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2458 	} else {
2459 		grn = 0;
2460 		yel = 0;
2461 	}
2462 
2463 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2464 	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2465 		~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2466 	if (grn) {
2467 		extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2468 		/*
2469 		 * Counts are in chip clock (4ns) periods.
2470 		 * This is 1/16 sec (66.6ms) on,
2471 		 * 3/16 sec (187.5 ms) off, with packets rcvd.
2472 		 */
2473 		ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2474 			((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
2475 	}
2476 	if (yel)
2477 		extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2478 	dd->cspec->extctrl = extctl;
2479 	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2480 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2481 
2482 	if (ledblink) /* blink the LED on packet receive */
2483 		qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2484 }
2485 
2486 /*
2487  * Disable MSIx interrupt if enabled, call generic MSIx code
2488  * to cleanup, and clear pending MSIx interrupts.
2489  * Used for fallback to INTx, after reset, and when MSIx setup fails.
2490  */
2491 static void qib_7322_nomsix(struct qib_devdata *dd)
2492 {
2493 	u64 intgranted;
2494 	int n;
2495 
2496 	dd->cspec->main_int_mask = ~0ULL;
2497 	n = dd->cspec->num_msix_entries;
2498 	if (n) {
2499 		int i;
2500 
2501 		dd->cspec->num_msix_entries = 0;
2502 		for (i = 0; i < n; i++)
2503 			free_irq(dd->cspec->msix_entries[i].vector,
2504 				 dd->cspec->msix_arg[i]);
2505 		qib_nomsix(dd);
2506 	}
2507 	/* make sure no MSIx interrupts are left pending */
2508 	intgranted = qib_read_kreg64(dd, kr_intgranted);
2509 	if (intgranted)
2510 		qib_write_kreg(dd, kr_intgranted, intgranted);
2511 }
2512 
2513 static void qib_7322_free_irq(struct qib_devdata *dd)
2514 {
2515 	if (dd->cspec->irq) {
2516 		free_irq(dd->cspec->irq, dd);
2517 		dd->cspec->irq = 0;
2518 	}
2519 	qib_7322_nomsix(dd);
2520 }
2521 
2522 static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2523 {
2524 	int i;
2525 
2526 	qib_7322_free_irq(dd);
2527 	kfree(dd->cspec->cntrs);
2528 	kfree(dd->cspec->sendchkenable);
2529 	kfree(dd->cspec->sendgrhchk);
2530 	kfree(dd->cspec->sendibchk);
2531 	kfree(dd->cspec->msix_entries);
2532 	kfree(dd->cspec->msix_arg);
2533 	for (i = 0; i < dd->num_pports; i++) {
2534 		unsigned long flags;
2535 		u32 mask = QSFP_GPIO_MOD_PRS_N |
2536 			(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2537 
2538 		kfree(dd->pport[i].cpspec->portcntrs);
2539 		if (dd->flags & QIB_HAS_QSFP) {
2540 			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2541 			dd->cspec->gpio_mask &= ~mask;
2542 			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2543 			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2544 			qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
2545 		}
2546 		if (dd->pport[i].ibport_data.smi_ah)
2547 			ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
2548 	}
2549 }
2550 
2551 /* handle SDMA interrupts */
2552 static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2553 {
2554 	struct qib_pportdata *ppd0 = &dd->pport[0];
2555 	struct qib_pportdata *ppd1 = &dd->pport[1];
2556 	u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2557 		INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2558 	u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2559 		INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2560 
2561 	if (intr0)
2562 		qib_sdma_intr(ppd0);
2563 	if (intr1)
2564 		qib_sdma_intr(ppd1);
2565 
2566 	if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2567 		qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2568 	if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2569 		qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2570 }
2571 
2572 /*
2573  * Set or clear the Send buffer available interrupt enable bit.
2574  */
2575 static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2576 {
2577 	unsigned long flags;
2578 
2579 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
2580 	if (needint)
2581 		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2582 	else
2583 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2584 	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2585 	qib_write_kreg(dd, kr_scratch, 0ULL);
2586 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2587 }
2588 
2589 /*
2590  * Somehow got an interrupt with reserved bits set in interrupt status.
2591  * Print a message so we know it happened, then clear them.
2592  * keep mainline interrupt handler cache-friendly
2593  */
2594 static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2595 {
2596 	u64 kills;
2597 	char msg[128];
2598 
2599 	kills = istat & ~QIB_I_BITSEXTANT;
2600 	qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx:"
2601 		    " %s\n", (unsigned long long) kills, msg);
2602 	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2603 }
2604 
2605 /* keep mainline interrupt handler cache-friendly */
2606 static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2607 {
2608 	u32 gpiostatus;
2609 	int handled = 0;
2610 	int pidx;
2611 
2612 	/*
2613 	 * Boards for this chip currently don't use GPIO interrupts,
2614 	 * so clear by writing GPIOstatus to GPIOclear, and complain
2615 	 * to developer.  To avoid endless repeats, clear
2616 	 * the bits in the mask, since there is some kind of
2617 	 * programming error or chip problem.
2618 	 */
2619 	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2620 	/*
2621 	 * In theory, writing GPIOstatus to GPIOclear could
2622 	 * have a bad side-effect on some diagnostic that wanted
2623 	 * to poll for a status-change, but the various shadows
2624 	 * make that problematic at best. Diags will just suppress
2625 	 * all GPIO interrupts during such tests.
2626 	 */
2627 	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2628 	/*
2629 	 * Check for QSFP MOD_PRS changes
2630 	 * only works for single port if IB1 != pidx1
2631 	 */
2632 	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2633 	     ++pidx) {
2634 		struct qib_pportdata *ppd;
2635 		struct qib_qsfp_data *qd;
2636 		u32 mask;
2637 		if (!dd->pport[pidx].link_speed_supported)
2638 			continue;
2639 		mask = QSFP_GPIO_MOD_PRS_N;
2640 		ppd = dd->pport + pidx;
2641 		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2642 		if (gpiostatus & dd->cspec->gpio_mask & mask) {
2643 			u64 pins;
2644 			qd = &ppd->cpspec->qsfp_data;
2645 			gpiostatus &= ~mask;
2646 			pins = qib_read_kreg64(dd, kr_extstatus);
2647 			pins >>= SYM_LSB(EXTStatus, GPIOIn);
2648 			if (!(pins & mask)) {
2649 				++handled;
2650 				qd->t_insert = get_jiffies_64();
2651 				schedule_work(&qd->work);
2652 			}
2653 		}
2654 	}
2655 
2656 	if (gpiostatus && !handled) {
2657 		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
2658 		u32 gpio_irq = mask & gpiostatus;
2659 
2660 		/*
2661 		 * Clear any troublemakers, and update chip from shadow
2662 		 */
2663 		dd->cspec->gpio_mask &= ~gpio_irq;
2664 		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2665 	}
2666 }
2667 
2668 /*
2669  * Handle errors and unusual events first, separate function
2670  * to improve cache hits for fast path interrupt handling.
2671  */
2672 static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
2673 {
2674 	if (istat & ~QIB_I_BITSEXTANT)
2675 		unknown_7322_ibits(dd, istat);
2676 	if (istat & QIB_I_GPIO)
2677 		unknown_7322_gpio_intr(dd);
2678 	if (istat & QIB_I_C_ERROR)
2679 		handle_7322_errors(dd);
2680 	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
2681 		handle_7322_p_errors(dd->rcd[0]->ppd);
2682 	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
2683 		handle_7322_p_errors(dd->rcd[1]->ppd);
2684 }
2685 
2686 /*
2687  * Dynamically adjust the rcv int timeout for a context based on incoming
2688  * packet rate.
2689  */
2690 static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
2691 {
2692 	struct qib_devdata *dd = rcd->dd;
2693 	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
2694 
2695 	/*
2696 	 * Dynamically adjust idle timeout on chip
2697 	 * based on number of packets processed.
2698 	 */
2699 	if (npkts < rcv_int_count && timeout > 2)
2700 		timeout >>= 1;
2701 	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
2702 		timeout = min(timeout << 1, rcv_int_timeout);
2703 	else
2704 		return;
2705 
2706 	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
2707 	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
2708 }
2709 
2710 /*
2711  * This is the main interrupt handler.
2712  * It will normally only be used for low frequency interrupts but may
2713  * have to handle all interrupts if INTx is enabled or fewer than normal
2714  * MSIx interrupts were allocated.
2715  * This routine should ignore the interrupt bits for any of the
2716  * dedicated MSIx handlers.
2717  */
2718 static irqreturn_t qib_7322intr(int irq, void *data)
2719 {
2720 	struct qib_devdata *dd = data;
2721 	irqreturn_t ret;
2722 	u64 istat;
2723 	u64 ctxtrbits;
2724 	u64 rmask;
2725 	unsigned i;
2726 	u32 npkts;
2727 
2728 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
2729 		/*
2730 		 * This return value is not great, but we do not want the
2731 		 * interrupt core code to remove our interrupt handler
2732 		 * because we don't appear to be handling an interrupt
2733 		 * during a chip reset.
2734 		 */
2735 		ret = IRQ_HANDLED;
2736 		goto bail;
2737 	}
2738 
2739 	istat = qib_read_kreg64(dd, kr_intstatus);
2740 
2741 	if (unlikely(istat == ~0ULL)) {
2742 		qib_bad_intrstatus(dd);
2743 		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
2744 		/* don't know if it was our interrupt or not */
2745 		ret = IRQ_NONE;
2746 		goto bail;
2747 	}
2748 
2749 	istat &= dd->cspec->main_int_mask;
2750 	if (unlikely(!istat)) {
2751 		/* already handled, or shared and not us */
2752 		ret = IRQ_NONE;
2753 		goto bail;
2754 	}
2755 
2756 	qib_stats.sps_ints++;
2757 	if (dd->int_counter != (u32) -1)
2758 		dd->int_counter++;
2759 
2760 	/* handle "errors" of various kinds first, device ahead of port */
2761 	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
2762 			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
2763 			      INT_MASK_P(Err, 1))))
2764 		unlikely_7322_intr(dd, istat);
2765 
2766 	/*
2767 	 * Clear the interrupt bits we found set, relatively early, so we
2768 	 * "know" know the chip will have seen this by the time we process
2769 	 * the queue, and will re-interrupt if necessary.  The processor
2770 	 * itself won't take the interrupt again until we return.
2771 	 */
2772 	qib_write_kreg(dd, kr_intclear, istat);
2773 
2774 	/*
2775 	 * Handle kernel receive queues before checking for pio buffers
2776 	 * available since receives can overflow; piobuf waiters can afford
2777 	 * a few extra cycles, since they were waiting anyway.
2778 	 */
2779 	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
2780 	if (ctxtrbits) {
2781 		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
2782 			(1ULL << QIB_I_RCVURG_LSB);
2783 		for (i = 0; i < dd->first_user_ctxt; i++) {
2784 			if (ctxtrbits & rmask) {
2785 				ctxtrbits &= ~rmask;
2786 				if (dd->rcd[i]) {
2787 					qib_kreceive(dd->rcd[i], NULL, &npkts);
2788 					adjust_rcv_timeout(dd->rcd[i], npkts);
2789 				}
2790 			}
2791 			rmask <<= 1;
2792 		}
2793 		if (ctxtrbits) {
2794 			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
2795 				(ctxtrbits >> QIB_I_RCVURG_LSB);
2796 			qib_handle_urcv(dd, ctxtrbits);
2797 		}
2798 	}
2799 
2800 	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
2801 		sdma_7322_intr(dd, istat);
2802 
2803 	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
2804 		qib_ib_piobufavail(dd);
2805 
2806 	ret = IRQ_HANDLED;
2807 bail:
2808 	return ret;
2809 }
2810 
2811 /*
2812  * Dedicated receive packet available interrupt handler.
2813  */
2814 static irqreturn_t qib_7322pintr(int irq, void *data)
2815 {
2816 	struct qib_ctxtdata *rcd = data;
2817 	struct qib_devdata *dd = rcd->dd;
2818 	u32 npkts;
2819 
2820 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2821 		/*
2822 		 * This return value is not great, but we do not want the
2823 		 * interrupt core code to remove our interrupt handler
2824 		 * because we don't appear to be handling an interrupt
2825 		 * during a chip reset.
2826 		 */
2827 		return IRQ_HANDLED;
2828 
2829 	qib_stats.sps_ints++;
2830 	if (dd->int_counter != (u32) -1)
2831 		dd->int_counter++;
2832 
2833 	/* Clear the interrupt bit we expect to be set. */
2834 	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
2835 		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
2836 
2837 	qib_kreceive(rcd, NULL, &npkts);
2838 	adjust_rcv_timeout(rcd, npkts);
2839 
2840 	return IRQ_HANDLED;
2841 }
2842 
2843 /*
2844  * Dedicated Send buffer available interrupt handler.
2845  */
2846 static irqreturn_t qib_7322bufavail(int irq, void *data)
2847 {
2848 	struct qib_devdata *dd = data;
2849 
2850 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2851 		/*
2852 		 * This return value is not great, but we do not want the
2853 		 * interrupt core code to remove our interrupt handler
2854 		 * because we don't appear to be handling an interrupt
2855 		 * during a chip reset.
2856 		 */
2857 		return IRQ_HANDLED;
2858 
2859 	qib_stats.sps_ints++;
2860 	if (dd->int_counter != (u32) -1)
2861 		dd->int_counter++;
2862 
2863 	/* Clear the interrupt bit we expect to be set. */
2864 	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
2865 
2866 	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
2867 	if (dd->flags & QIB_INITTED)
2868 		qib_ib_piobufavail(dd);
2869 	else
2870 		qib_wantpiobuf_7322_intr(dd, 0);
2871 
2872 	return IRQ_HANDLED;
2873 }
2874 
2875 /*
2876  * Dedicated Send DMA interrupt handler.
2877  */
2878 static irqreturn_t sdma_intr(int irq, void *data)
2879 {
2880 	struct qib_pportdata *ppd = data;
2881 	struct qib_devdata *dd = ppd->dd;
2882 
2883 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2884 		/*
2885 		 * This return value is not great, but we do not want the
2886 		 * interrupt core code to remove our interrupt handler
2887 		 * because we don't appear to be handling an interrupt
2888 		 * during a chip reset.
2889 		 */
2890 		return IRQ_HANDLED;
2891 
2892 	qib_stats.sps_ints++;
2893 	if (dd->int_counter != (u32) -1)
2894 		dd->int_counter++;
2895 
2896 	/* Clear the interrupt bit we expect to be set. */
2897 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
2898 		       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
2899 	qib_sdma_intr(ppd);
2900 
2901 	return IRQ_HANDLED;
2902 }
2903 
2904 /*
2905  * Dedicated Send DMA idle interrupt handler.
2906  */
2907 static irqreturn_t sdma_idle_intr(int irq, void *data)
2908 {
2909 	struct qib_pportdata *ppd = data;
2910 	struct qib_devdata *dd = ppd->dd;
2911 
2912 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2913 		/*
2914 		 * This return value is not great, but we do not want the
2915 		 * interrupt core code to remove our interrupt handler
2916 		 * because we don't appear to be handling an interrupt
2917 		 * during a chip reset.
2918 		 */
2919 		return IRQ_HANDLED;
2920 
2921 	qib_stats.sps_ints++;
2922 	if (dd->int_counter != (u32) -1)
2923 		dd->int_counter++;
2924 
2925 	/* Clear the interrupt bit we expect to be set. */
2926 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
2927 		       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
2928 	qib_sdma_intr(ppd);
2929 
2930 	return IRQ_HANDLED;
2931 }
2932 
2933 /*
2934  * Dedicated Send DMA progress interrupt handler.
2935  */
2936 static irqreturn_t sdma_progress_intr(int irq, void *data)
2937 {
2938 	struct qib_pportdata *ppd = data;
2939 	struct qib_devdata *dd = ppd->dd;
2940 
2941 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2942 		/*
2943 		 * This return value is not great, but we do not want the
2944 		 * interrupt core code to remove our interrupt handler
2945 		 * because we don't appear to be handling an interrupt
2946 		 * during a chip reset.
2947 		 */
2948 		return IRQ_HANDLED;
2949 
2950 	qib_stats.sps_ints++;
2951 	if (dd->int_counter != (u32) -1)
2952 		dd->int_counter++;
2953 
2954 	/* Clear the interrupt bit we expect to be set. */
2955 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
2956 		       INT_MASK_P(SDmaProgress, 1) :
2957 		       INT_MASK_P(SDmaProgress, 0));
2958 	qib_sdma_intr(ppd);
2959 
2960 	return IRQ_HANDLED;
2961 }
2962 
2963 /*
2964  * Dedicated Send DMA cleanup interrupt handler.
2965  */
2966 static irqreturn_t sdma_cleanup_intr(int irq, void *data)
2967 {
2968 	struct qib_pportdata *ppd = data;
2969 	struct qib_devdata *dd = ppd->dd;
2970 
2971 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2972 		/*
2973 		 * This return value is not great, but we do not want the
2974 		 * interrupt core code to remove our interrupt handler
2975 		 * because we don't appear to be handling an interrupt
2976 		 * during a chip reset.
2977 		 */
2978 		return IRQ_HANDLED;
2979 
2980 	qib_stats.sps_ints++;
2981 	if (dd->int_counter != (u32) -1)
2982 		dd->int_counter++;
2983 
2984 	/* Clear the interrupt bit we expect to be set. */
2985 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
2986 		       INT_MASK_PM(SDmaCleanupDone, 1) :
2987 		       INT_MASK_PM(SDmaCleanupDone, 0));
2988 	qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
2989 
2990 	return IRQ_HANDLED;
2991 }
2992 
2993 /*
2994  * Set up our chip-specific interrupt handler.
2995  * The interrupt type has already been setup, so
2996  * we just need to do the registration and error checking.
2997  * If we are using MSIx interrupts, we may fall back to
2998  * INTx later, if the interrupt handler doesn't get called
2999  * within 1/2 second (see verify_interrupt()).
3000  */
3001 static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3002 {
3003 	int ret, i, msixnum;
3004 	u64 redirect[6];
3005 	u64 mask;
3006 
3007 	if (!dd->num_pports)
3008 		return;
3009 
3010 	if (clearpend) {
3011 		/*
3012 		 * if not switching interrupt types, be sure interrupts are
3013 		 * disabled, and then clear anything pending at this point,
3014 		 * because we are starting clean.
3015 		 */
3016 		qib_7322_set_intr_state(dd, 0);
3017 
3018 		/* clear the reset error, init error/hwerror mask */
3019 		qib_7322_init_hwerrors(dd);
3020 
3021 		/* clear any interrupt bits that might be set */
3022 		qib_write_kreg(dd, kr_intclear, ~0ULL);
3023 
3024 		/* make sure no pending MSIx intr, and clear diag reg */
3025 		qib_write_kreg(dd, kr_intgranted, ~0ULL);
3026 		qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3027 	}
3028 
3029 	if (!dd->cspec->num_msix_entries) {
3030 		/* Try to get INTx interrupt */
3031 try_intx:
3032 		if (!dd->pcidev->irq) {
3033 			qib_dev_err(dd, "irq is 0, BIOS error?  "
3034 				    "Interrupts won't work\n");
3035 			goto bail;
3036 		}
3037 		ret = request_irq(dd->pcidev->irq, qib_7322intr,
3038 				  IRQF_SHARED, QIB_DRV_NAME, dd);
3039 		if (ret) {
3040 			qib_dev_err(dd, "Couldn't setup INTx "
3041 				    "interrupt (irq=%d): %d\n",
3042 				    dd->pcidev->irq, ret);
3043 			goto bail;
3044 		}
3045 		dd->cspec->irq = dd->pcidev->irq;
3046 		dd->cspec->main_int_mask = ~0ULL;
3047 		goto bail;
3048 	}
3049 
3050 	/* Try to get MSIx interrupts */
3051 	memset(redirect, 0, sizeof redirect);
3052 	mask = ~0ULL;
3053 	msixnum = 0;
3054 	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3055 		irq_handler_t handler;
3056 		const char *name;
3057 		void *arg;
3058 		u64 val;
3059 		int lsb, reg, sh;
3060 
3061 		if (i < ARRAY_SIZE(irq_table)) {
3062 			if (irq_table[i].port) {
3063 				/* skip if for a non-configured port */
3064 				if (irq_table[i].port > dd->num_pports)
3065 					continue;
3066 				arg = dd->pport + irq_table[i].port - 1;
3067 			} else
3068 				arg = dd;
3069 			lsb = irq_table[i].lsb;
3070 			handler = irq_table[i].handler;
3071 			name = irq_table[i].name;
3072 		} else {
3073 			unsigned ctxt;
3074 
3075 			ctxt = i - ARRAY_SIZE(irq_table);
3076 			/* per krcvq context receive interrupt */
3077 			arg = dd->rcd[ctxt];
3078 			if (!arg)
3079 				continue;
3080 			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3081 			handler = qib_7322pintr;
3082 			name = QIB_DRV_NAME " (kctx)";
3083 		}
3084 		ret = request_irq(dd->cspec->msix_entries[msixnum].vector,
3085 				  handler, 0, name, arg);
3086 		if (ret) {
3087 			/*
3088 			 * Shouldn't happen since the enable said we could
3089 			 * have as many as we are trying to setup here.
3090 			 */
3091 			qib_dev_err(dd, "Couldn't setup MSIx "
3092 				    "interrupt (vec=%d, irq=%d): %d\n", msixnum,
3093 				    dd->cspec->msix_entries[msixnum].vector,
3094 				    ret);
3095 			qib_7322_nomsix(dd);
3096 			goto try_intx;
3097 		}
3098 		dd->cspec->msix_arg[msixnum] = arg;
3099 		if (lsb >= 0) {
3100 			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3101 			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3102 				SYM_LSB(IntRedirect0, vec1);
3103 			mask &= ~(1ULL << lsb);
3104 			redirect[reg] |= ((u64) msixnum) << sh;
3105 		}
3106 		val = qib_read_kreg64(dd, 2 * msixnum + 1 +
3107 			(QIB_7322_MsixTable_OFFS / sizeof(u64)));
3108 		msixnum++;
3109 	}
3110 	/* Initialize the vector mapping */
3111 	for (i = 0; i < ARRAY_SIZE(redirect); i++)
3112 		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3113 	dd->cspec->main_int_mask = mask;
3114 bail:;
3115 }
3116 
3117 /**
3118  * qib_7322_boardname - fill in the board name and note features
3119  * @dd: the qlogic_ib device
3120  *
3121  * info will be based on the board revision register
3122  */
3123 static unsigned qib_7322_boardname(struct qib_devdata *dd)
3124 {
3125 	/* Will need enumeration of board-types here */
3126 	char *n;
3127 	u32 boardid, namelen;
3128 	unsigned features = DUAL_PORT_CAP;
3129 
3130 	boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3131 
3132 	switch (boardid) {
3133 	case 0:
3134 		n = "InfiniPath_QLE7342_Emulation";
3135 		break;
3136 	case 1:
3137 		n = "InfiniPath_QLE7340";
3138 		dd->flags |= QIB_HAS_QSFP;
3139 		features = PORT_SPD_CAP;
3140 		break;
3141 	case 2:
3142 		n = "InfiniPath_QLE7342";
3143 		dd->flags |= QIB_HAS_QSFP;
3144 		break;
3145 	case 3:
3146 		n = "InfiniPath_QMI7342";
3147 		break;
3148 	case 4:
3149 		n = "InfiniPath_Unsupported7342";
3150 		qib_dev_err(dd, "Unsupported version of QMH7342\n");
3151 		features = 0;
3152 		break;
3153 	case BOARD_QMH7342:
3154 		n = "InfiniPath_QMH7342";
3155 		features = 0x24;
3156 		break;
3157 	case BOARD_QME7342:
3158 		n = "InfiniPath_QME7342";
3159 		break;
3160 	case 15:
3161 		n = "InfiniPath_QLE7342_TEST";
3162 		dd->flags |= QIB_HAS_QSFP;
3163 		break;
3164 	default:
3165 		n = "InfiniPath_QLE73xy_UNKNOWN";
3166 		qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3167 		break;
3168 	}
3169 	dd->board_atten = 1; /* index into txdds_Xdr */
3170 
3171 	namelen = strlen(n) + 1;
3172 	dd->boardname = kmalloc(namelen, GFP_KERNEL);
3173 	if (!dd->boardname)
3174 		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
3175 	else
3176 		snprintf(dd->boardname, namelen, "%s", n);
3177 
3178 	snprintf(dd->boardversion, sizeof(dd->boardversion),
3179 		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3180 		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3181 		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
3182 		 dd->majrev, dd->minrev,
3183 		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
3184 
3185 	if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3186 		qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode"
3187 			    " by module parameter\n", dd->unit);
3188 		features &= PORT_SPD_CAP;
3189 	}
3190 
3191 	return features;
3192 }
3193 
3194 /*
3195  * This routine sleeps, so it can only be called from user context, not
3196  * from interrupt context.
3197  */
3198 static int qib_do_7322_reset(struct qib_devdata *dd)
3199 {
3200 	u64 val;
3201 	u64 *msix_vecsave;
3202 	int i, msix_entries, ret = 1;
3203 	u16 cmdval;
3204 	u8 int_line, clinesz;
3205 	unsigned long flags;
3206 
3207 	/* Use dev_err so it shows up in logs, etc. */
3208 	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3209 
3210 	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3211 
3212 	msix_entries = dd->cspec->num_msix_entries;
3213 
3214 	/* no interrupts till re-initted */
3215 	qib_7322_set_intr_state(dd, 0);
3216 
3217 	if (msix_entries) {
3218 		qib_7322_nomsix(dd);
3219 		/* can be up to 512 bytes, too big for stack */
3220 		msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
3221 			sizeof(u64), GFP_KERNEL);
3222 		if (!msix_vecsave)
3223 			qib_dev_err(dd, "No mem to save MSIx data\n");
3224 	} else
3225 		msix_vecsave = NULL;
3226 
3227 	/*
3228 	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3229 	 * info that is set up by the BIOS, so we have to save and restore
3230 	 * it ourselves.   There is some risk something could change it,
3231 	 * after we save it, but since we have disabled the MSIx, it
3232 	 * shouldn't be touched...
3233 	 */
3234 	for (i = 0; i < msix_entries; i++) {
3235 		u64 vecaddr, vecdata;
3236 		vecaddr = qib_read_kreg64(dd, 2 * i +
3237 				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3238 		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3239 				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3240 		if (msix_vecsave) {
3241 			msix_vecsave[2 * i] = vecaddr;
3242 			/* save it without the masked bit set */
3243 			msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3244 		}
3245 	}
3246 
3247 	dd->pport->cpspec->ibdeltainprog = 0;
3248 	dd->pport->cpspec->ibsymdelta = 0;
3249 	dd->pport->cpspec->iblnkerrdelta = 0;
3250 	dd->pport->cpspec->ibmalfdelta = 0;
3251 	dd->int_counter = 0; /* so we check interrupts work again */
3252 
3253 	/*
3254 	 * Keep chip from being accessed until we are ready.  Use
3255 	 * writeq() directly, to allow the write even though QIB_PRESENT
3256 	 * isnt' set.
3257 	 */
3258 	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3259 	dd->flags |= QIB_DOING_RESET;
3260 	val = dd->control | QLOGIC_IB_C_RESET;
3261 	writeq(val, &dd->kregbase[kr_control]);
3262 
3263 	for (i = 1; i <= 5; i++) {
3264 		/*
3265 		 * Allow MBIST, etc. to complete; longer on each retry.
3266 		 * We sometimes get machine checks from bus timeout if no
3267 		 * response, so for now, make it *really* long.
3268 		 */
3269 		msleep(1000 + (1 + i) * 3000);
3270 
3271 		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3272 
3273 		/*
3274 		 * Use readq directly, so we don't need to mark it as PRESENT
3275 		 * until we get a successful indication that all is well.
3276 		 */
3277 		val = readq(&dd->kregbase[kr_revision]);
3278 		if (val == dd->revision)
3279 			break;
3280 		if (i == 5) {
3281 			qib_dev_err(dd, "Failed to initialize after reset, "
3282 				    "unusable\n");
3283 			ret = 0;
3284 			goto  bail;
3285 		}
3286 	}
3287 
3288 	dd->flags |= QIB_PRESENT; /* it's back */
3289 
3290 	if (msix_entries) {
3291 		/* restore the MSIx vector address and data if saved above */
3292 		for (i = 0; i < msix_entries; i++) {
3293 			dd->cspec->msix_entries[i].entry = i;
3294 			if (!msix_vecsave || !msix_vecsave[2 * i])
3295 				continue;
3296 			qib_write_kreg(dd, 2 * i +
3297 				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3298 				msix_vecsave[2 * i]);
3299 			qib_write_kreg(dd, 1 + 2 * i +
3300 				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3301 				msix_vecsave[1 + 2 * i]);
3302 		}
3303 	}
3304 
3305 	/* initialize the remaining registers.  */
3306 	for (i = 0; i < dd->num_pports; ++i)
3307 		write_7322_init_portregs(&dd->pport[i]);
3308 	write_7322_initregs(dd);
3309 
3310 	if (qib_pcie_params(dd, dd->lbus_width,
3311 			    &dd->cspec->num_msix_entries,
3312 			    dd->cspec->msix_entries))
3313 		qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; "
3314 				"continuing anyway\n");
3315 
3316 	qib_setup_7322_interrupt(dd, 1);
3317 
3318 	for (i = 0; i < dd->num_pports; ++i) {
3319 		struct qib_pportdata *ppd = &dd->pport[i];
3320 
3321 		spin_lock_irqsave(&ppd->lflags_lock, flags);
3322 		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3323 		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3324 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3325 	}
3326 
3327 bail:
3328 	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3329 	kfree(msix_vecsave);
3330 	return ret;
3331 }
3332 
3333 /**
3334  * qib_7322_put_tid - write a TID to the chip
3335  * @dd: the qlogic_ib device
3336  * @tidptr: pointer to the expected TID (in chip) to update
3337  * @tidtype: 0 for eager, 1 for expected
3338  * @pa: physical address of in memory buffer; tidinvalid if freeing
3339  */
3340 static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3341 			     u32 type, unsigned long pa)
3342 {
3343 	if (!(dd->flags & QIB_PRESENT))
3344 		return;
3345 	if (pa != dd->tidinvalid) {
3346 		u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3347 
3348 		/* paranoia checks */
3349 		if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3350 			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3351 				    pa);
3352 			return;
3353 		}
3354 		if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3355 			qib_dev_err(dd, "Physical page address 0x%lx "
3356 				"larger than supported\n", pa);
3357 			return;
3358 		}
3359 
3360 		if (type == RCVHQ_RCV_TYPE_EAGER)
3361 			chippa |= dd->tidtemplate;
3362 		else /* for now, always full 4KB page */
3363 			chippa |= IBA7322_TID_SZ_4K;
3364 		pa = chippa;
3365 	}
3366 	writeq(pa, tidptr);
3367 	mmiowb();
3368 }
3369 
3370 /**
3371  * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3372  * @dd: the qlogic_ib device
3373  * @ctxt: the ctxt
3374  *
3375  * clear all TID entries for a ctxt, expected and eager.
3376  * Used from qib_close().
3377  */
3378 static void qib_7322_clear_tids(struct qib_devdata *dd,
3379 				struct qib_ctxtdata *rcd)
3380 {
3381 	u64 __iomem *tidbase;
3382 	unsigned long tidinv;
3383 	u32 ctxt;
3384 	int i;
3385 
3386 	if (!dd->kregbase || !rcd)
3387 		return;
3388 
3389 	ctxt = rcd->ctxt;
3390 
3391 	tidinv = dd->tidinvalid;
3392 	tidbase = (u64 __iomem *)
3393 		((char __iomem *) dd->kregbase +
3394 		 dd->rcvtidbase +
3395 		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3396 
3397 	for (i = 0; i < dd->rcvtidcnt; i++)
3398 		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3399 				 tidinv);
3400 
3401 	tidbase = (u64 __iomem *)
3402 		((char __iomem *) dd->kregbase +
3403 		 dd->rcvegrbase +
3404 		 rcd->rcvegr_tid_base * sizeof(*tidbase));
3405 
3406 	for (i = 0; i < rcd->rcvegrcnt; i++)
3407 		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3408 				 tidinv);
3409 }
3410 
3411 /**
3412  * qib_7322_tidtemplate - setup constants for TID updates
3413  * @dd: the qlogic_ib device
3414  *
3415  * We setup stuff that we use a lot, to avoid calculating each time
3416  */
3417 static void qib_7322_tidtemplate(struct qib_devdata *dd)
3418 {
3419 	/*
3420 	 * For now, we always allocate 4KB buffers (at init) so we can
3421 	 * receive max size packets.  We may want a module parameter to
3422 	 * specify 2KB or 4KB and/or make it per port instead of per device
3423 	 * for those who want to reduce memory footprint.  Note that the
3424 	 * rcvhdrentsize size must be large enough to hold the largest
3425 	 * IB header (currently 96 bytes) that we expect to handle (plus of
3426 	 * course the 2 dwords of RHF).
3427 	 */
3428 	if (dd->rcvegrbufsize == 2048)
3429 		dd->tidtemplate = IBA7322_TID_SZ_2K;
3430 	else if (dd->rcvegrbufsize == 4096)
3431 		dd->tidtemplate = IBA7322_TID_SZ_4K;
3432 	dd->tidinvalid = 0;
3433 }
3434 
3435 /**
3436  * qib_init_7322_get_base_info - set chip-specific flags for user code
3437  * @rcd: the qlogic_ib ctxt
3438  * @kbase: qib_base_info pointer
3439  *
3440  * We set the PCIE flag because the lower bandwidth on PCIe vs
3441  * HyperTransport can affect some user packet algorithims.
3442  */
3443 
3444 static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3445 				  struct qib_base_info *kinfo)
3446 {
3447 	kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3448 		QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3449 		QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3450 	if (rcd->dd->cspec->r1)
3451 		kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3452 	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3453 		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3454 
3455 	return 0;
3456 }
3457 
3458 static struct qib_message_header *
3459 qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3460 {
3461 	u32 offset = qib_hdrget_offset(rhf_addr);
3462 
3463 	return (struct qib_message_header *)
3464 		(rhf_addr - dd->rhf_offset + offset);
3465 }
3466 
3467 /*
3468  * Configure number of contexts.
3469  */
3470 static void qib_7322_config_ctxts(struct qib_devdata *dd)
3471 {
3472 	unsigned long flags;
3473 	u32 nchipctxts;
3474 
3475 	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3476 	dd->cspec->numctxts = nchipctxts;
3477 	if (qib_n_krcv_queues > 1 && dd->num_pports) {
3478 		/*
3479 		 * Set the mask for which bits from the QPN are used
3480 		 * to select a context number.
3481 		 */
3482 		dd->qpn_mask = 0x3f;
3483 		dd->first_user_ctxt = NUM_IB_PORTS +
3484 			(qib_n_krcv_queues - 1) * dd->num_pports;
3485 		if (dd->first_user_ctxt > nchipctxts)
3486 			dd->first_user_ctxt = nchipctxts;
3487 		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3488 	} else {
3489 		dd->first_user_ctxt = NUM_IB_PORTS;
3490 		dd->n_krcv_queues = 1;
3491 	}
3492 
3493 	if (!qib_cfgctxts) {
3494 		int nctxts = dd->first_user_ctxt + num_online_cpus();
3495 
3496 		if (nctxts <= 6)
3497 			dd->ctxtcnt = 6;
3498 		else if (nctxts <= 10)
3499 			dd->ctxtcnt = 10;
3500 		else if (nctxts <= nchipctxts)
3501 			dd->ctxtcnt = nchipctxts;
3502 	} else if (qib_cfgctxts < dd->num_pports)
3503 		dd->ctxtcnt = dd->num_pports;
3504 	else if (qib_cfgctxts <= nchipctxts)
3505 		dd->ctxtcnt = qib_cfgctxts;
3506 	if (!dd->ctxtcnt) /* none of the above, set to max */
3507 		dd->ctxtcnt = nchipctxts;
3508 
3509 	/*
3510 	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
3511 	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3512 	 * Lock to be paranoid about later motion, etc.
3513 	 */
3514 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3515 	if (dd->ctxtcnt > 10)
3516 		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3517 	else if (dd->ctxtcnt > 6)
3518 		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3519 	/* else configure for default 6 receive ctxts */
3520 
3521 	/* The XRC opcode is 5. */
3522 	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3523 
3524 	/*
3525 	 * RcvCtrl *must* be written here so that the
3526 	 * chip understands how to change rcvegrcnt below.
3527 	 */
3528 	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3529 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3530 
3531 	/* kr_rcvegrcnt changes based on the number of contexts enabled */
3532 	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3533 	dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
3534 				dd->num_pports > 1 ? 1024U : 2048U);
3535 }
3536 
3537 static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3538 {
3539 
3540 	int lsb, ret = 0;
3541 	u64 maskr; /* right-justified mask */
3542 
3543 	switch (which) {
3544 
3545 	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
3546 		ret = ppd->link_width_enabled;
3547 		goto done;
3548 
3549 	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
3550 		ret = ppd->link_width_active;
3551 		goto done;
3552 
3553 	case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
3554 		ret = ppd->link_speed_enabled;
3555 		goto done;
3556 
3557 	case QIB_IB_CFG_SPD: /* Get current Link spd */
3558 		ret = ppd->link_speed_active;
3559 		goto done;
3560 
3561 	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
3562 		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3563 		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3564 		break;
3565 
3566 	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
3567 		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3568 		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3569 		break;
3570 
3571 	case QIB_IB_CFG_LINKLATENCY:
3572 		ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
3573 			SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
3574 		goto done;
3575 
3576 	case QIB_IB_CFG_OP_VLS:
3577 		ret = ppd->vls_operational;
3578 		goto done;
3579 
3580 	case QIB_IB_CFG_VL_HIGH_CAP:
3581 		ret = 16;
3582 		goto done;
3583 
3584 	case QIB_IB_CFG_VL_LOW_CAP:
3585 		ret = 16;
3586 		goto done;
3587 
3588 	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
3589 		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3590 				OverrunThreshold);
3591 		goto done;
3592 
3593 	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
3594 		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3595 				PhyerrThreshold);
3596 		goto done;
3597 
3598 	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
3599 		/* will only take effect when the link state changes */
3600 		ret = (ppd->cpspec->ibcctrl_a &
3601 		       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
3602 			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
3603 		goto done;
3604 
3605 	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
3606 		lsb = IBA7322_IBC_HRTBT_LSB;
3607 		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
3608 		break;
3609 
3610 	case QIB_IB_CFG_PMA_TICKS:
3611 		/*
3612 		 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
3613 		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
3614 		 */
3615 		if (ppd->link_speed_active == QIB_IB_QDR)
3616 			ret = 3;
3617 		else if (ppd->link_speed_active == QIB_IB_DDR)
3618 			ret = 1;
3619 		else
3620 			ret = 0;
3621 		goto done;
3622 
3623 	default:
3624 		ret = -EINVAL;
3625 		goto done;
3626 	}
3627 	ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
3628 done:
3629 	return ret;
3630 }
3631 
3632 /*
3633  * Below again cribbed liberally from older version. Do not lean
3634  * heavily on it.
3635  */
3636 #define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
3637 #define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
3638 	| (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
3639 
3640 static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
3641 {
3642 	struct qib_devdata *dd = ppd->dd;
3643 	u64 maskr; /* right-justified mask */
3644 	int lsb, ret = 0;
3645 	u16 lcmd, licmd;
3646 	unsigned long flags;
3647 
3648 	switch (which) {
3649 	case QIB_IB_CFG_LIDLMC:
3650 		/*
3651 		 * Set LID and LMC. Combined to avoid possible hazard
3652 		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
3653 		 */
3654 		lsb = IBA7322_IBC_DLIDLMC_SHIFT;
3655 		maskr = IBA7322_IBC_DLIDLMC_MASK;
3656 		/*
3657 		 * For header-checking, the SLID in the packet will
3658 		 * be masked with SendIBSLMCMask, and compared
3659 		 * with SendIBSLIDAssignMask. Make sure we do not
3660 		 * set any bits not covered by the mask, or we get
3661 		 * false-positives.
3662 		 */
3663 		qib_write_kreg_port(ppd, krp_sendslid,
3664 				    val & (val >> 16) & SendIBSLIDAssignMask);
3665 		qib_write_kreg_port(ppd, krp_sendslidmask,
3666 				    (val >> 16) & SendIBSLMCMask);
3667 		break;
3668 
3669 	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
3670 		ppd->link_width_enabled = val;
3671 		/* convert IB value to chip register value */
3672 		if (val == IB_WIDTH_1X)
3673 			val = 0;
3674 		else if (val == IB_WIDTH_4X)
3675 			val = 1;
3676 		else
3677 			val = 3;
3678 		maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
3679 		lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
3680 		break;
3681 
3682 	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
3683 		/*
3684 		 * As with width, only write the actual register if the
3685 		 * link is currently down, otherwise takes effect on next
3686 		 * link change.  Since setting is being explictly requested
3687 		 * (via MAD or sysfs), clear autoneg failure status if speed
3688 		 * autoneg is enabled.
3689 		 */
3690 		ppd->link_speed_enabled = val;
3691 		val <<= IBA7322_IBC_SPEED_LSB;
3692 		maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
3693 			IBA7322_IBC_MAX_SPEED_MASK;
3694 		if (val & (val - 1)) {
3695 			/* Muliple speeds enabled */
3696 			val |= IBA7322_IBC_IBTA_1_2_MASK |
3697 				IBA7322_IBC_MAX_SPEED_MASK;
3698 			spin_lock_irqsave(&ppd->lflags_lock, flags);
3699 			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3700 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3701 		} else if (val & IBA7322_IBC_SPEED_QDR)
3702 			val |= IBA7322_IBC_IBTA_1_2_MASK;
3703 		/* IBTA 1.2 mode + min/max + speed bits are contiguous */
3704 		lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
3705 		break;
3706 
3707 	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
3708 		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3709 		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3710 		break;
3711 
3712 	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
3713 		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3714 		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3715 		break;
3716 
3717 	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
3718 		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3719 				  OverrunThreshold);
3720 		if (maskr != val) {
3721 			ppd->cpspec->ibcctrl_a &=
3722 				~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
3723 			ppd->cpspec->ibcctrl_a |= (u64) val <<
3724 				SYM_LSB(IBCCtrlA_0, OverrunThreshold);
3725 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
3726 					    ppd->cpspec->ibcctrl_a);
3727 			qib_write_kreg(dd, kr_scratch, 0ULL);
3728 		}
3729 		goto bail;
3730 
3731 	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
3732 		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3733 				  PhyerrThreshold);
3734 		if (maskr != val) {
3735 			ppd->cpspec->ibcctrl_a &=
3736 				~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
3737 			ppd->cpspec->ibcctrl_a |= (u64) val <<
3738 				SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
3739 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
3740 					    ppd->cpspec->ibcctrl_a);
3741 			qib_write_kreg(dd, kr_scratch, 0ULL);
3742 		}
3743 		goto bail;
3744 
3745 	case QIB_IB_CFG_PKEYS: /* update pkeys */
3746 		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
3747 			((u64) ppd->pkeys[2] << 32) |
3748 			((u64) ppd->pkeys[3] << 48);
3749 		qib_write_kreg_port(ppd, krp_partitionkey, maskr);
3750 		goto bail;
3751 
3752 	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
3753 		/* will only take effect when the link state changes */
3754 		if (val == IB_LINKINITCMD_POLL)
3755 			ppd->cpspec->ibcctrl_a &=
3756 				~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
3757 		else /* SLEEP */
3758 			ppd->cpspec->ibcctrl_a |=
3759 				SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
3760 		qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
3761 		qib_write_kreg(dd, kr_scratch, 0ULL);
3762 		goto bail;
3763 
3764 	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
3765 		/*
3766 		 * Update our housekeeping variables, and set IBC max
3767 		 * size, same as init code; max IBC is max we allow in
3768 		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
3769 		 * Set even if it's unchanged, print debug message only
3770 		 * on changes.
3771 		 */
3772 		val = (ppd->ibmaxlen >> 2) + 1;
3773 		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
3774 		ppd->cpspec->ibcctrl_a |= (u64)val <<
3775 			SYM_LSB(IBCCtrlA_0, MaxPktLen);
3776 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
3777 				    ppd->cpspec->ibcctrl_a);
3778 		qib_write_kreg(dd, kr_scratch, 0ULL);
3779 		goto bail;
3780 
3781 	case QIB_IB_CFG_LSTATE: /* set the IB link state */
3782 		switch (val & 0xffff0000) {
3783 		case IB_LINKCMD_DOWN:
3784 			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
3785 			ppd->cpspec->ibmalfusesnap = 1;
3786 			ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
3787 				crp_errlink);
3788 			if (!ppd->cpspec->ibdeltainprog &&
3789 			    qib_compat_ddr_negotiate) {
3790 				ppd->cpspec->ibdeltainprog = 1;
3791 				ppd->cpspec->ibsymsnap =
3792 					read_7322_creg32_port(ppd,
3793 							      crp_ibsymbolerr);
3794 				ppd->cpspec->iblnkerrsnap =
3795 					read_7322_creg32_port(ppd,
3796 						      crp_iblinkerrrecov);
3797 			}
3798 			break;
3799 
3800 		case IB_LINKCMD_ARMED:
3801 			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
3802 			if (ppd->cpspec->ibmalfusesnap) {
3803 				ppd->cpspec->ibmalfusesnap = 0;
3804 				ppd->cpspec->ibmalfdelta +=
3805 					read_7322_creg32_port(ppd,
3806 							      crp_errlink) -
3807 					ppd->cpspec->ibmalfsnap;
3808 			}
3809 			break;
3810 
3811 		case IB_LINKCMD_ACTIVE:
3812 			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
3813 			break;
3814 
3815 		default:
3816 			ret = -EINVAL;
3817 			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
3818 			goto bail;
3819 		}
3820 		switch (val & 0xffff) {
3821 		case IB_LINKINITCMD_NOP:
3822 			licmd = 0;
3823 			break;
3824 
3825 		case IB_LINKINITCMD_POLL:
3826 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
3827 			break;
3828 
3829 		case IB_LINKINITCMD_SLEEP:
3830 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
3831 			break;
3832 
3833 		case IB_LINKINITCMD_DISABLE:
3834 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
3835 			ppd->cpspec->chase_end = 0;
3836 			/*
3837 			 * stop state chase counter and timer, if running.
3838 			 * wait forpending timer, but don't clear .data (ppd)!
3839 			 */
3840 			if (ppd->cpspec->chase_timer.expires) {
3841 				del_timer_sync(&ppd->cpspec->chase_timer);
3842 				ppd->cpspec->chase_timer.expires = 0;
3843 			}
3844 			break;
3845 
3846 		default:
3847 			ret = -EINVAL;
3848 			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
3849 				    val & 0xffff);
3850 			goto bail;
3851 		}
3852 		qib_set_ib_7322_lstate(ppd, lcmd, licmd);
3853 		goto bail;
3854 
3855 	case QIB_IB_CFG_OP_VLS:
3856 		if (ppd->vls_operational != val) {
3857 			ppd->vls_operational = val;
3858 			set_vls(ppd);
3859 		}
3860 		goto bail;
3861 
3862 	case QIB_IB_CFG_VL_HIGH_LIMIT:
3863 		qib_write_kreg_port(ppd, krp_highprio_limit, val);
3864 		goto bail;
3865 
3866 	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
3867 		if (val > 3) {
3868 			ret = -EINVAL;
3869 			goto bail;
3870 		}
3871 		lsb = IBA7322_IBC_HRTBT_LSB;
3872 		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
3873 		break;
3874 
3875 	case QIB_IB_CFG_PORT:
3876 		/* val is the port number of the switch we are connected to. */
3877 		if (ppd->dd->cspec->r1) {
3878 			cancel_delayed_work(&ppd->cpspec->ipg_work);
3879 			ppd->cpspec->ipg_tries = 0;
3880 		}
3881 		goto bail;
3882 
3883 	default:
3884 		ret = -EINVAL;
3885 		goto bail;
3886 	}
3887 	ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
3888 	ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
3889 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
3890 	qib_write_kreg(dd, kr_scratch, 0);
3891 bail:
3892 	return ret;
3893 }
3894 
3895 static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
3896 {
3897 	int ret = 0;
3898 	u64 val, ctrlb;
3899 
3900 	/* only IBC loopback, may add serdes and xgxs loopbacks later */
3901 	if (!strncmp(what, "ibc", 3)) {
3902 		ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
3903 						       Loopback);
3904 		val = 0; /* disable heart beat, so link will come up */
3905 		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
3906 			 ppd->dd->unit, ppd->port);
3907 	} else if (!strncmp(what, "off", 3)) {
3908 		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
3909 							Loopback);
3910 		/* enable heart beat again */
3911 		val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
3912 		qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
3913 			    "(normal)\n", ppd->dd->unit, ppd->port);
3914 	} else
3915 		ret = -EINVAL;
3916 	if (!ret) {
3917 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
3918 				    ppd->cpspec->ibcctrl_a);
3919 		ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
3920 					     << IBA7322_IBC_HRTBT_LSB);
3921 		ppd->cpspec->ibcctrl_b = ctrlb | val;
3922 		qib_write_kreg_port(ppd, krp_ibcctrl_b,
3923 				    ppd->cpspec->ibcctrl_b);
3924 		qib_write_kreg(ppd->dd, kr_scratch, 0);
3925 	}
3926 	return ret;
3927 }
3928 
3929 static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
3930 			   struct ib_vl_weight_elem *vl)
3931 {
3932 	unsigned i;
3933 
3934 	for (i = 0; i < 16; i++, regno++, vl++) {
3935 		u32 val = qib_read_kreg_port(ppd, regno);
3936 
3937 		vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
3938 			SYM_RMASK(LowPriority0_0, VirtualLane);
3939 		vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
3940 			SYM_RMASK(LowPriority0_0, Weight);
3941 	}
3942 }
3943 
3944 static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
3945 			   struct ib_vl_weight_elem *vl)
3946 {
3947 	unsigned i;
3948 
3949 	for (i = 0; i < 16; i++, regno++, vl++) {
3950 		u64 val;
3951 
3952 		val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
3953 			SYM_LSB(LowPriority0_0, VirtualLane)) |
3954 		      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
3955 			SYM_LSB(LowPriority0_0, Weight));
3956 		qib_write_kreg_port(ppd, regno, val);
3957 	}
3958 	if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
3959 		struct qib_devdata *dd = ppd->dd;
3960 		unsigned long flags;
3961 
3962 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
3963 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
3964 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
3965 		qib_write_kreg(dd, kr_scratch, 0);
3966 		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
3967 	}
3968 }
3969 
3970 static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
3971 {
3972 	switch (which) {
3973 	case QIB_IB_TBL_VL_HIGH_ARB:
3974 		get_vl_weights(ppd, krp_highprio_0, t);
3975 		break;
3976 
3977 	case QIB_IB_TBL_VL_LOW_ARB:
3978 		get_vl_weights(ppd, krp_lowprio_0, t);
3979 		break;
3980 
3981 	default:
3982 		return -EINVAL;
3983 	}
3984 	return 0;
3985 }
3986 
3987 static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
3988 {
3989 	switch (which) {
3990 	case QIB_IB_TBL_VL_HIGH_ARB:
3991 		set_vl_weights(ppd, krp_highprio_0, t);
3992 		break;
3993 
3994 	case QIB_IB_TBL_VL_LOW_ARB:
3995 		set_vl_weights(ppd, krp_lowprio_0, t);
3996 		break;
3997 
3998 	default:
3999 		return -EINVAL;
4000 	}
4001 	return 0;
4002 }
4003 
4004 static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4005 				    u32 updegr, u32 egrhd)
4006 {
4007 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4008 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4009 	if (updegr)
4010 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4011 }
4012 
4013 static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4014 {
4015 	u32 head, tail;
4016 
4017 	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4018 	if (rcd->rcvhdrtail_kvaddr)
4019 		tail = qib_get_rcvhdrtail(rcd);
4020 	else
4021 		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4022 	return head == tail;
4023 }
4024 
4025 #define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4026 	QIB_RCVCTRL_CTXT_DIS | \
4027 	QIB_RCVCTRL_TIDFLOW_ENB | \
4028 	QIB_RCVCTRL_TIDFLOW_DIS | \
4029 	QIB_RCVCTRL_TAILUPD_ENB | \
4030 	QIB_RCVCTRL_TAILUPD_DIS | \
4031 	QIB_RCVCTRL_INTRAVAIL_ENB | \
4032 	QIB_RCVCTRL_INTRAVAIL_DIS | \
4033 	QIB_RCVCTRL_BP_ENB | \
4034 	QIB_RCVCTRL_BP_DIS)
4035 
4036 #define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4037 	QIB_RCVCTRL_CTXT_DIS | \
4038 	QIB_RCVCTRL_PKEY_DIS | \
4039 	QIB_RCVCTRL_PKEY_ENB)
4040 
4041 /*
4042  * Modify the RCVCTRL register in chip-specific way. This
4043  * is a function because bit positions and (future) register
4044  * location is chip-specifc, but the needed operations are
4045  * generic. <op> is a bit-mask because we often want to
4046  * do multiple modifications.
4047  */
4048 static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4049 			     int ctxt)
4050 {
4051 	struct qib_devdata *dd = ppd->dd;
4052 	struct qib_ctxtdata *rcd;
4053 	u64 mask, val;
4054 	unsigned long flags;
4055 
4056 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4057 
4058 	if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4059 		dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4060 	if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4061 		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4062 	if (op & QIB_RCVCTRL_TAILUPD_ENB)
4063 		dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4064 	if (op & QIB_RCVCTRL_TAILUPD_DIS)
4065 		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4066 	if (op & QIB_RCVCTRL_PKEY_ENB)
4067 		ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4068 	if (op & QIB_RCVCTRL_PKEY_DIS)
4069 		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4070 	if (ctxt < 0) {
4071 		mask = (1ULL << dd->ctxtcnt) - 1;
4072 		rcd = NULL;
4073 	} else {
4074 		mask = (1ULL << ctxt);
4075 		rcd = dd->rcd[ctxt];
4076 	}
4077 	if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4078 		ppd->p_rcvctrl |=
4079 			(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4080 		if (!(dd->flags & QIB_NODMA_RTAIL)) {
4081 			op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4082 			dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4083 		}
4084 		/* Write these registers before the context is enabled. */
4085 		qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4086 				    rcd->rcvhdrqtailaddr_phys);
4087 		qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4088 				    rcd->rcvhdrq_phys);
4089 		rcd->seq_cnt = 1;
4090 	}
4091 	if (op & QIB_RCVCTRL_CTXT_DIS)
4092 		ppd->p_rcvctrl &=
4093 			~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4094 	if (op & QIB_RCVCTRL_BP_ENB)
4095 		dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4096 	if (op & QIB_RCVCTRL_BP_DIS)
4097 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4098 	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4099 		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4100 	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4101 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4102 	/*
4103 	 * Decide which registers to write depending on the ops enabled.
4104 	 * Special case is "flush" (no bits set at all)
4105 	 * which needs to write both.
4106 	 */
4107 	if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4108 		qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4109 	if (op == 0 || (op & RCVCTRL_PORT_MODS))
4110 		qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4111 	if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4112 		/*
4113 		 * Init the context registers also; if we were
4114 		 * disabled, tail and head should both be zero
4115 		 * already from the enable, but since we don't
4116 		 * know, we have to do it explictly.
4117 		 */
4118 		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4119 		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4120 
4121 		/* be sure enabling write seen; hd/tl should be 0 */
4122 		(void) qib_read_kreg32(dd, kr_scratch);
4123 		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4124 		dd->rcd[ctxt]->head = val;
4125 		/* If kctxt, interrupt on next receive. */
4126 		if (ctxt < dd->first_user_ctxt)
4127 			val |= dd->rhdrhead_intr_off;
4128 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4129 	} else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4130 		dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4131 		/* arm rcv interrupt */
4132 		val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4133 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4134 	}
4135 	if (op & QIB_RCVCTRL_CTXT_DIS) {
4136 		unsigned f;
4137 
4138 		/* Now that the context is disabled, clear these registers. */
4139 		if (ctxt >= 0) {
4140 			qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4141 			qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4142 			for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4143 				qib_write_ureg(dd, ur_rcvflowtable + f,
4144 					       TIDFLOW_ERRBITS, ctxt);
4145 		} else {
4146 			unsigned i;
4147 
4148 			for (i = 0; i < dd->cfgctxts; i++) {
4149 				qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4150 						    i, 0);
4151 				qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4152 				for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4153 					qib_write_ureg(dd, ur_rcvflowtable + f,
4154 						       TIDFLOW_ERRBITS, i);
4155 			}
4156 		}
4157 	}
4158 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4159 }
4160 
4161 /*
4162  * Modify the SENDCTRL register in chip-specific way. This
4163  * is a function where there are multiple such registers with
4164  * slightly different layouts.
4165  * The chip doesn't allow back-to-back sendctrl writes, so write
4166  * the scratch register after writing sendctrl.
4167  *
4168  * Which register is written depends on the operation.
4169  * Most operate on the common register, while
4170  * SEND_ENB and SEND_DIS operate on the per-port ones.
4171  * SEND_ENB is included in common because it can change SPCL_TRIG
4172  */
4173 #define SENDCTRL_COMMON_MODS (\
4174 	QIB_SENDCTRL_CLEAR | \
4175 	QIB_SENDCTRL_AVAIL_DIS | \
4176 	QIB_SENDCTRL_AVAIL_ENB | \
4177 	QIB_SENDCTRL_AVAIL_BLIP | \
4178 	QIB_SENDCTRL_DISARM | \
4179 	QIB_SENDCTRL_DISARM_ALL | \
4180 	QIB_SENDCTRL_SEND_ENB)
4181 
4182 #define SENDCTRL_PORT_MODS (\
4183 	QIB_SENDCTRL_CLEAR | \
4184 	QIB_SENDCTRL_SEND_ENB | \
4185 	QIB_SENDCTRL_SEND_DIS | \
4186 	QIB_SENDCTRL_FLUSH)
4187 
4188 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4189 {
4190 	struct qib_devdata *dd = ppd->dd;
4191 	u64 tmp_dd_sendctrl;
4192 	unsigned long flags;
4193 
4194 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
4195 
4196 	/* First the dd ones that are "sticky", saved in shadow */
4197 	if (op & QIB_SENDCTRL_CLEAR)
4198 		dd->sendctrl = 0;
4199 	if (op & QIB_SENDCTRL_AVAIL_DIS)
4200 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4201 	else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4202 		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4203 		if (dd->flags & QIB_USE_SPCL_TRIG)
4204 			dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4205 	}
4206 
4207 	/* Then the ppd ones that are "sticky", saved in shadow */
4208 	if (op & QIB_SENDCTRL_SEND_DIS)
4209 		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4210 	else if (op & QIB_SENDCTRL_SEND_ENB)
4211 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4212 
4213 	if (op & QIB_SENDCTRL_DISARM_ALL) {
4214 		u32 i, last;
4215 
4216 		tmp_dd_sendctrl = dd->sendctrl;
4217 		last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4218 		/*
4219 		 * Disarm any buffers that are not yet launched,
4220 		 * disabling updates until done.
4221 		 */
4222 		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4223 		for (i = 0; i < last; i++) {
4224 			qib_write_kreg(dd, kr_sendctrl,
4225 				       tmp_dd_sendctrl |
4226 				       SYM_MASK(SendCtrl, Disarm) | i);
4227 			qib_write_kreg(dd, kr_scratch, 0);
4228 		}
4229 	}
4230 
4231 	if (op & QIB_SENDCTRL_FLUSH) {
4232 		u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4233 
4234 		/*
4235 		 * Now drain all the fifos.  The Abort bit should never be
4236 		 * needed, so for now, at least, we don't use it.
4237 		 */
4238 		tmp_ppd_sendctrl |=
4239 			SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4240 			SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4241 			SYM_MASK(SendCtrl_0, TxeBypassIbc);
4242 		qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4243 		qib_write_kreg(dd, kr_scratch, 0);
4244 	}
4245 
4246 	tmp_dd_sendctrl = dd->sendctrl;
4247 
4248 	if (op & QIB_SENDCTRL_DISARM)
4249 		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4250 			((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4251 			 SYM_LSB(SendCtrl, DisarmSendBuf));
4252 	if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4253 	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4254 		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4255 
4256 	if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4257 		qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4258 		qib_write_kreg(dd, kr_scratch, 0);
4259 	}
4260 
4261 	if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4262 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4263 		qib_write_kreg(dd, kr_scratch, 0);
4264 	}
4265 
4266 	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4267 		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4268 		qib_write_kreg(dd, kr_scratch, 0);
4269 	}
4270 
4271 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4272 
4273 	if (op & QIB_SENDCTRL_FLUSH) {
4274 		u32 v;
4275 		/*
4276 		 * ensure writes have hit chip, then do a few
4277 		 * more reads, to allow DMA of pioavail registers
4278 		 * to occur, so in-memory copy is in sync with
4279 		 * the chip.  Not always safe to sleep.
4280 		 */
4281 		v = qib_read_kreg32(dd, kr_scratch);
4282 		qib_write_kreg(dd, kr_scratch, v);
4283 		v = qib_read_kreg32(dd, kr_scratch);
4284 		qib_write_kreg(dd, kr_scratch, v);
4285 		qib_read_kreg32(dd, kr_scratch);
4286 	}
4287 }
4288 
4289 #define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4290 #define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4291 #define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4292 
4293 /**
4294  * qib_portcntr_7322 - read a per-port chip counter
4295  * @ppd: the qlogic_ib pport
4296  * @creg: the counter to read (not a chip offset)
4297  */
4298 static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4299 {
4300 	struct qib_devdata *dd = ppd->dd;
4301 	u64 ret = 0ULL;
4302 	u16 creg;
4303 	/* 0xffff for unimplemented or synthesized counters */
4304 	static const u32 xlator[] = {
4305 		[QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4306 		[QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4307 		[QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4308 		[QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4309 		[QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4310 		[QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4311 		[QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4312 		[QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4313 		[QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4314 		[QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4315 		[QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4316 		[QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4317 		[QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed  for 7322 */
4318 		[QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4319 		[QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4320 		[QIBPORTCNTR_ERRICRC] = crp_erricrc,
4321 		[QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4322 		[QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4323 		[QIBPORTCNTR_BADFORMAT] = crp_badformat,
4324 		[QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4325 		[QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4326 		[QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4327 		[QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4328 		[QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4329 		[QIBPORTCNTR_ERRLINK] = crp_errlink,
4330 		[QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4331 		[QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4332 		[QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4333 		[QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4334 		[QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4335 		/*
4336 		 * the next 3 aren't really counters, but were implemented
4337 		 * as counters in older chips, so still get accessed as
4338 		 * though they were counters from this code.
4339 		 */
4340 		[QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4341 		[QIBPORTCNTR_PSSTART] = krp_psstart,
4342 		[QIBPORTCNTR_PSSTAT] = krp_psstat,
4343 		/* pseudo-counter, summed for all ports */
4344 		[QIBPORTCNTR_KHDROVFL] = 0xffff,
4345 	};
4346 
4347 	if (reg >= ARRAY_SIZE(xlator)) {
4348 		qib_devinfo(ppd->dd->pcidev,
4349 			 "Unimplemented portcounter %u\n", reg);
4350 		goto done;
4351 	}
4352 	creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4353 
4354 	/* handle non-counters and special cases first */
4355 	if (reg == QIBPORTCNTR_KHDROVFL) {
4356 		int i;
4357 
4358 		/* sum over all kernel contexts (skip if mini_init) */
4359 		for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4360 			struct qib_ctxtdata *rcd = dd->rcd[i];
4361 
4362 			if (!rcd || rcd->ppd != ppd)
4363 				continue;
4364 			ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4365 		}
4366 		goto done;
4367 	} else if (reg == QIBPORTCNTR_RXDROPPKT) {
4368 		/*
4369 		 * Used as part of the synthesis of port_rcv_errors
4370 		 * in the verbs code for IBTA counters.  Not needed for 7322,
4371 		 * because all the errors are already counted by other cntrs.
4372 		 */
4373 		goto done;
4374 	} else if (reg == QIBPORTCNTR_PSINTERVAL ||
4375 		   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4376 		/* were counters in older chips, now per-port kernel regs */
4377 		ret = qib_read_kreg_port(ppd, creg);
4378 		goto done;
4379 	}
4380 
4381 	/*
4382 	 * Only fast increment counters are 64 bits; use 32 bit reads to
4383 	 * avoid two independent reads when on Opteron.
4384 	 */
4385 	if (xlator[reg] & _PORT_64BIT_FLAG)
4386 		ret = read_7322_creg_port(ppd, creg);
4387 	else
4388 		ret = read_7322_creg32_port(ppd, creg);
4389 	if (creg == crp_ibsymbolerr) {
4390 		if (ppd->cpspec->ibdeltainprog)
4391 			ret -= ret - ppd->cpspec->ibsymsnap;
4392 		ret -= ppd->cpspec->ibsymdelta;
4393 	} else if (creg == crp_iblinkerrrecov) {
4394 		if (ppd->cpspec->ibdeltainprog)
4395 			ret -= ret - ppd->cpspec->iblnkerrsnap;
4396 		ret -= ppd->cpspec->iblnkerrdelta;
4397 	} else if (creg == crp_errlink)
4398 		ret -= ppd->cpspec->ibmalfdelta;
4399 	else if (creg == crp_iblinkdown)
4400 		ret += ppd->cpspec->iblnkdowndelta;
4401 done:
4402 	return ret;
4403 }
4404 
4405 /*
4406  * Device counter names (not port-specific), one line per stat,
4407  * single string.  Used by utilities like ipathstats to print the stats
4408  * in a way which works for different versions of drivers, without changing
4409  * the utility.  Names need to be 12 chars or less (w/o newline), for proper
4410  * display by utility.
4411  * Non-error counters are first.
4412  * Start of "error" conters is indicated by a leading "E " on the first
4413  * "error" counter, and doesn't count in label length.
4414  * The EgrOvfl list needs to be last so we truncate them at the configured
4415  * context count for the device.
4416  * cntr7322indices contains the corresponding register indices.
4417  */
4418 static const char cntr7322names[] =
4419 	"Interrupts\n"
4420 	"HostBusStall\n"
4421 	"E RxTIDFull\n"
4422 	"RxTIDInvalid\n"
4423 	"RxTIDFloDrop\n" /* 7322 only */
4424 	"Ctxt0EgrOvfl\n"
4425 	"Ctxt1EgrOvfl\n"
4426 	"Ctxt2EgrOvfl\n"
4427 	"Ctxt3EgrOvfl\n"
4428 	"Ctxt4EgrOvfl\n"
4429 	"Ctxt5EgrOvfl\n"
4430 	"Ctxt6EgrOvfl\n"
4431 	"Ctxt7EgrOvfl\n"
4432 	"Ctxt8EgrOvfl\n"
4433 	"Ctxt9EgrOvfl\n"
4434 	"Ctx10EgrOvfl\n"
4435 	"Ctx11EgrOvfl\n"
4436 	"Ctx12EgrOvfl\n"
4437 	"Ctx13EgrOvfl\n"
4438 	"Ctx14EgrOvfl\n"
4439 	"Ctx15EgrOvfl\n"
4440 	"Ctx16EgrOvfl\n"
4441 	"Ctx17EgrOvfl\n"
4442 	;
4443 
4444 static const u32 cntr7322indices[] = {
4445 	cr_lbint | _PORT_64BIT_FLAG,
4446 	cr_lbstall | _PORT_64BIT_FLAG,
4447 	cr_tidfull,
4448 	cr_tidinvalid,
4449 	cr_rxtidflowdrop,
4450 	cr_base_egrovfl + 0,
4451 	cr_base_egrovfl + 1,
4452 	cr_base_egrovfl + 2,
4453 	cr_base_egrovfl + 3,
4454 	cr_base_egrovfl + 4,
4455 	cr_base_egrovfl + 5,
4456 	cr_base_egrovfl + 6,
4457 	cr_base_egrovfl + 7,
4458 	cr_base_egrovfl + 8,
4459 	cr_base_egrovfl + 9,
4460 	cr_base_egrovfl + 10,
4461 	cr_base_egrovfl + 11,
4462 	cr_base_egrovfl + 12,
4463 	cr_base_egrovfl + 13,
4464 	cr_base_egrovfl + 14,
4465 	cr_base_egrovfl + 15,
4466 	cr_base_egrovfl + 16,
4467 	cr_base_egrovfl + 17,
4468 };
4469 
4470 /*
4471  * same as cntr7322names and cntr7322indices, but for port-specific counters.
4472  * portcntr7322indices is somewhat complicated by some registers needing
4473  * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4474  */
4475 static const char portcntr7322names[] =
4476 	"TxPkt\n"
4477 	"TxFlowPkt\n"
4478 	"TxWords\n"
4479 	"RxPkt\n"
4480 	"RxFlowPkt\n"
4481 	"RxWords\n"
4482 	"TxFlowStall\n"
4483 	"TxDmaDesc\n"  /* 7220 and 7322-only */
4484 	"E RxDlidFltr\n"  /* 7220 and 7322-only */
4485 	"IBStatusChng\n"
4486 	"IBLinkDown\n"
4487 	"IBLnkRecov\n"
4488 	"IBRxLinkErr\n"
4489 	"IBSymbolErr\n"
4490 	"RxLLIErr\n"
4491 	"RxBadFormat\n"
4492 	"RxBadLen\n"
4493 	"RxBufOvrfl\n"
4494 	"RxEBP\n"
4495 	"RxFlowCtlErr\n"
4496 	"RxICRCerr\n"
4497 	"RxLPCRCerr\n"
4498 	"RxVCRCerr\n"
4499 	"RxInvalLen\n"
4500 	"RxInvalPKey\n"
4501 	"RxPktDropped\n"
4502 	"TxBadLength\n"
4503 	"TxDropped\n"
4504 	"TxInvalLen\n"
4505 	"TxUnderrun\n"
4506 	"TxUnsupVL\n"
4507 	"RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4508 	"RxVL15Drop\n"
4509 	"RxVlErr\n"
4510 	"XcessBufOvfl\n"
4511 	"RxQPBadCtxt\n" /* 7322-only from here down */
4512 	"TXBadHeader\n"
4513 	;
4514 
4515 static const u32 portcntr7322indices[] = {
4516 	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4517 	crp_pktsendflow,
4518 	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4519 	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4520 	crp_pktrcvflowctrl,
4521 	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4522 	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4523 	crp_txsdmadesc | _PORT_64BIT_FLAG,
4524 	crp_rxdlidfltr,
4525 	crp_ibstatuschange,
4526 	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4527 	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4528 	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4529 	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4530 	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4531 	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4532 	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4533 	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4534 	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4535 	crp_rcvflowctrlviol,
4536 	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
4537 	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
4538 	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
4539 	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
4540 	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
4541 	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
4542 	crp_txminmaxlenerr,
4543 	crp_txdroppedpkt,
4544 	crp_txlenerr,
4545 	crp_txunderrun,
4546 	crp_txunsupvl,
4547 	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
4548 	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
4549 	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
4550 	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
4551 	crp_rxqpinvalidctxt,
4552 	crp_txhdrerr,
4553 };
4554 
4555 /* do all the setup to make the counter reads efficient later */
4556 static void init_7322_cntrnames(struct qib_devdata *dd)
4557 {
4558 	int i, j = 0;
4559 	char *s;
4560 
4561 	for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
4562 	     i++) {
4563 		/* we always have at least one counter before the egrovfl */
4564 		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
4565 			j = 1;
4566 		s = strchr(s + 1, '\n');
4567 		if (s && j)
4568 			j++;
4569 	}
4570 	dd->cspec->ncntrs = i;
4571 	if (!s)
4572 		/* full list; size is without terminating null */
4573 		dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
4574 	else
4575 		dd->cspec->cntrnamelen = 1 + s - cntr7322names;
4576 	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
4577 		* sizeof(u64), GFP_KERNEL);
4578 	if (!dd->cspec->cntrs)
4579 		qib_dev_err(dd, "Failed allocation for counters\n");
4580 
4581 	for (i = 0, s = (char *)portcntr7322names; s; i++)
4582 		s = strchr(s + 1, '\n');
4583 	dd->cspec->nportcntrs = i - 1;
4584 	dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
4585 	for (i = 0; i < dd->num_pports; ++i) {
4586 		dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
4587 			* sizeof(u64), GFP_KERNEL);
4588 		if (!dd->pport[i].cpspec->portcntrs)
4589 			qib_dev_err(dd, "Failed allocation for"
4590 				    " portcounters\n");
4591 	}
4592 }
4593 
4594 static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
4595 			      u64 **cntrp)
4596 {
4597 	u32 ret;
4598 
4599 	if (namep) {
4600 		ret = dd->cspec->cntrnamelen;
4601 		if (pos >= ret)
4602 			ret = 0; /* final read after getting everything */
4603 		else
4604 			*namep = (char *) cntr7322names;
4605 	} else {
4606 		u64 *cntr = dd->cspec->cntrs;
4607 		int i;
4608 
4609 		ret = dd->cspec->ncntrs * sizeof(u64);
4610 		if (!cntr || pos >= ret) {
4611 			/* everything read, or couldn't get memory */
4612 			ret = 0;
4613 			goto done;
4614 		}
4615 		*cntrp = cntr;
4616 		for (i = 0; i < dd->cspec->ncntrs; i++)
4617 			if (cntr7322indices[i] & _PORT_64BIT_FLAG)
4618 				*cntr++ = read_7322_creg(dd,
4619 							 cntr7322indices[i] &
4620 							 _PORT_CNTR_IDXMASK);
4621 			else
4622 				*cntr++ = read_7322_creg32(dd,
4623 							   cntr7322indices[i]);
4624 	}
4625 done:
4626 	return ret;
4627 }
4628 
4629 static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
4630 				  char **namep, u64 **cntrp)
4631 {
4632 	u32 ret;
4633 
4634 	if (namep) {
4635 		ret = dd->cspec->portcntrnamelen;
4636 		if (pos >= ret)
4637 			ret = 0; /* final read after getting everything */
4638 		else
4639 			*namep = (char *)portcntr7322names;
4640 	} else {
4641 		struct qib_pportdata *ppd = &dd->pport[port];
4642 		u64 *cntr = ppd->cpspec->portcntrs;
4643 		int i;
4644 
4645 		ret = dd->cspec->nportcntrs * sizeof(u64);
4646 		if (!cntr || pos >= ret) {
4647 			/* everything read, or couldn't get memory */
4648 			ret = 0;
4649 			goto done;
4650 		}
4651 		*cntrp = cntr;
4652 		for (i = 0; i < dd->cspec->nportcntrs; i++) {
4653 			if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
4654 				*cntr++ = qib_portcntr_7322(ppd,
4655 					portcntr7322indices[i] &
4656 					_PORT_CNTR_IDXMASK);
4657 			else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
4658 				*cntr++ = read_7322_creg_port(ppd,
4659 					   portcntr7322indices[i] &
4660 					    _PORT_CNTR_IDXMASK);
4661 			else
4662 				*cntr++ = read_7322_creg32_port(ppd,
4663 					   portcntr7322indices[i]);
4664 		}
4665 	}
4666 done:
4667 	return ret;
4668 }
4669 
4670 /**
4671  * qib_get_7322_faststats - get word counters from chip before they overflow
4672  * @opaque - contains a pointer to the qlogic_ib device qib_devdata
4673  *
4674  * VESTIGIAL IBA7322 has no "small fast counters", so the only
4675  * real purpose of this function is to maintain the notion of
4676  * "active time", which in turn is only logged into the eeprom,
4677  * which we don;t have, yet, for 7322-based boards.
4678  *
4679  * called from add_timer
4680  */
4681 static void qib_get_7322_faststats(unsigned long opaque)
4682 {
4683 	struct qib_devdata *dd = (struct qib_devdata *) opaque;
4684 	struct qib_pportdata *ppd;
4685 	unsigned long flags;
4686 	u64 traffic_wds;
4687 	int pidx;
4688 
4689 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
4690 		ppd = dd->pport + pidx;
4691 
4692 		/*
4693 		 * If port isn't enabled or not operational ports, or
4694 		 * diags is running (can cause memory diags to fail)
4695 		 * skip this port this time.
4696 		 */
4697 		if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
4698 		    || dd->diag_client)
4699 			continue;
4700 
4701 		/*
4702 		 * Maintain an activity timer, based on traffic
4703 		 * exceeding a threshold, so we need to check the word-counts
4704 		 * even if they are 64-bit.
4705 		 */
4706 		traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
4707 			qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
4708 		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
4709 		traffic_wds -= ppd->dd->traffic_wds;
4710 		ppd->dd->traffic_wds += traffic_wds;
4711 		if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
4712 			atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
4713 		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
4714 		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
4715 						QIB_IB_QDR) &&
4716 		    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
4717 				    QIBL_LINKACTIVE)) &&
4718 		    ppd->cpspec->qdr_dfe_time &&
4719 		    time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) {
4720 			ppd->cpspec->qdr_dfe_on = 0;
4721 
4722 			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
4723 					    ppd->dd->cspec->r1 ?
4724 					    QDR_STATIC_ADAPT_INIT_R1 :
4725 					    QDR_STATIC_ADAPT_INIT);
4726 			force_h1(ppd);
4727 		}
4728 	}
4729 	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
4730 }
4731 
4732 /*
4733  * If we were using MSIx, try to fallback to INTx.
4734  */
4735 static int qib_7322_intr_fallback(struct qib_devdata *dd)
4736 {
4737 	if (!dd->cspec->num_msix_entries)
4738 		return 0; /* already using INTx */
4739 
4740 	qib_devinfo(dd->pcidev, "MSIx interrupt not detected,"
4741 		 " trying INTx interrupts\n");
4742 	qib_7322_nomsix(dd);
4743 	qib_enable_intx(dd->pcidev);
4744 	qib_setup_7322_interrupt(dd, 0);
4745 	return 1;
4746 }
4747 
4748 /*
4749  * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
4750  * than resetting the IBC or external link state, and useful in some
4751  * cases to cause some retraining.  To do this right, we reset IBC
4752  * as well, then return to previous state (which may be still in reset)
4753  * NOTE: some callers of this "know" this writes the current value
4754  * of cpspec->ibcctrl_a as part of it's operation, so if that changes,
4755  * check all callers.
4756  */
4757 static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
4758 {
4759 	u64 val;
4760 	struct qib_devdata *dd = ppd->dd;
4761 	const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
4762 		SYM_MASK(IBPCSConfig_0, xcv_treset) |
4763 		SYM_MASK(IBPCSConfig_0, tx_rx_reset);
4764 
4765 	val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
4766 	qib_write_kreg(dd, kr_hwerrmask,
4767 		       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
4768 	qib_write_kreg_port(ppd, krp_ibcctrl_a,
4769 			    ppd->cpspec->ibcctrl_a &
4770 			    ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
4771 
4772 	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
4773 	qib_read_kreg32(dd, kr_scratch);
4774 	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
4775 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4776 	qib_write_kreg(dd, kr_scratch, 0ULL);
4777 	qib_write_kreg(dd, kr_hwerrclear,
4778 		       SYM_MASK(HwErrClear, statusValidNoEopClear));
4779 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
4780 }
4781 
4782 /*
4783  * This code for non-IBTA-compliant IB speed negotiation is only known to
4784  * work for the SDR to DDR transition, and only between an HCA and a switch
4785  * with recent firmware.  It is based on observed heuristics, rather than
4786  * actual knowledge of the non-compliant speed negotiation.
4787  * It has a number of hard-coded fields, since the hope is to rewrite this
4788  * when a spec is available on how the negoation is intended to work.
4789  */
4790 static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
4791 				 u32 dcnt, u32 *data)
4792 {
4793 	int i;
4794 	u64 pbc;
4795 	u32 __iomem *piobuf;
4796 	u32 pnum, control, len;
4797 	struct qib_devdata *dd = ppd->dd;
4798 
4799 	i = 0;
4800 	len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
4801 	control = qib_7322_setpbc_control(ppd, len, 0, 15);
4802 	pbc = ((u64) control << 32) | len;
4803 	while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
4804 		if (i++ > 15)
4805 			return;
4806 		udelay(2);
4807 	}
4808 	/* disable header check on this packet, since it can't be valid */
4809 	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
4810 	writeq(pbc, piobuf);
4811 	qib_flush_wc();
4812 	qib_pio_copy(piobuf + 2, hdr, 7);
4813 	qib_pio_copy(piobuf + 9, data, dcnt);
4814 	if (dd->flags & QIB_USE_SPCL_TRIG) {
4815 		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
4816 
4817 		qib_flush_wc();
4818 		__raw_writel(0xaebecede, piobuf + spcl_off);
4819 	}
4820 	qib_flush_wc();
4821 	qib_sendbuf_done(dd, pnum);
4822 	/* and re-enable hdr check */
4823 	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
4824 }
4825 
4826 /*
4827  * _start packet gets sent twice at start, _done gets sent twice at end
4828  */
4829 static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
4830 {
4831 	struct qib_devdata *dd = ppd->dd;
4832 	static u32 swapped;
4833 	u32 dw, i, hcnt, dcnt, *data;
4834 	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
4835 	static u32 madpayload_start[0x40] = {
4836 		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
4837 		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
4838 		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
4839 		};
4840 	static u32 madpayload_done[0x40] = {
4841 		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
4842 		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
4843 		0x40000001, 0x1388, 0x15e, /* rest 0's */
4844 		};
4845 
4846 	dcnt = ARRAY_SIZE(madpayload_start);
4847 	hcnt = ARRAY_SIZE(hdr);
4848 	if (!swapped) {
4849 		/* for maintainability, do it at runtime */
4850 		for (i = 0; i < hcnt; i++) {
4851 			dw = (__force u32) cpu_to_be32(hdr[i]);
4852 			hdr[i] = dw;
4853 		}
4854 		for (i = 0; i < dcnt; i++) {
4855 			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
4856 			madpayload_start[i] = dw;
4857 			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
4858 			madpayload_done[i] = dw;
4859 		}
4860 		swapped = 1;
4861 	}
4862 
4863 	data = which ? madpayload_done : madpayload_start;
4864 
4865 	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
4866 	qib_read_kreg64(dd, kr_scratch);
4867 	udelay(2);
4868 	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
4869 	qib_read_kreg64(dd, kr_scratch);
4870 	udelay(2);
4871 }
4872 
4873 /*
4874  * Do the absolute minimum to cause an IB speed change, and make it
4875  * ready, but don't actually trigger the change.   The caller will
4876  * do that when ready (if link is in Polling training state, it will
4877  * happen immediately, otherwise when link next goes down)
4878  *
4879  * This routine should only be used as part of the DDR autonegotation
4880  * code for devices that are not compliant with IB 1.2 (or code that
4881  * fixes things up for same).
4882  *
4883  * When link has gone down, and autoneg enabled, or autoneg has
4884  * failed and we give up until next time we set both speeds, and
4885  * then we want IBTA enabled as well as "use max enabled speed.
4886  */
4887 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
4888 {
4889 	u64 newctrlb;
4890 	newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
4891 				    IBA7322_IBC_IBTA_1_2_MASK |
4892 				    IBA7322_IBC_MAX_SPEED_MASK);
4893 
4894 	if (speed & (speed - 1)) /* multiple speeds */
4895 		newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
4896 				    IBA7322_IBC_IBTA_1_2_MASK |
4897 				    IBA7322_IBC_MAX_SPEED_MASK;
4898 	else
4899 		newctrlb |= speed == QIB_IB_QDR ?
4900 			IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
4901 			((speed == QIB_IB_DDR ?
4902 			  IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
4903 
4904 	if (newctrlb == ppd->cpspec->ibcctrl_b)
4905 		return;
4906 
4907 	ppd->cpspec->ibcctrl_b = newctrlb;
4908 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4909 	qib_write_kreg(ppd->dd, kr_scratch, 0);
4910 }
4911 
4912 /*
4913  * This routine is only used when we are not talking to another
4914  * IB 1.2-compliant device that we think can do DDR.
4915  * (This includes all existing switch chips as of Oct 2007.)
4916  * 1.2-compliant devices go directly to DDR prior to reaching INIT
4917  */
4918 static void try_7322_autoneg(struct qib_pportdata *ppd)
4919 {
4920 	unsigned long flags;
4921 
4922 	spin_lock_irqsave(&ppd->lflags_lock, flags);
4923 	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
4924 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4925 	qib_autoneg_7322_send(ppd, 0);
4926 	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
4927 	qib_7322_mini_pcs_reset(ppd);
4928 	/* 2 msec is minimum length of a poll cycle */
4929 	schedule_delayed_work(&ppd->cpspec->autoneg_work,
4930 			      msecs_to_jiffies(2));
4931 }
4932 
4933 /*
4934  * Handle the empirically determined mechanism for auto-negotiation
4935  * of DDR speed with switches.
4936  */
4937 static void autoneg_7322_work(struct work_struct *work)
4938 {
4939 	struct qib_pportdata *ppd;
4940 	struct qib_devdata *dd;
4941 	u64 startms;
4942 	u32 i;
4943 	unsigned long flags;
4944 
4945 	ppd = container_of(work, struct qib_chippport_specific,
4946 			    autoneg_work.work)->ppd;
4947 	dd = ppd->dd;
4948 
4949 	startms = jiffies_to_msecs(jiffies);
4950 
4951 	/*
4952 	 * Busy wait for this first part, it should be at most a
4953 	 * few hundred usec, since we scheduled ourselves for 2msec.
4954 	 */
4955 	for (i = 0; i < 25; i++) {
4956 		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
4957 		     == IB_7322_LT_STATE_POLLQUIET) {
4958 			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
4959 			break;
4960 		}
4961 		udelay(100);
4962 	}
4963 
4964 	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
4965 		goto done; /* we got there early or told to stop */
4966 
4967 	/* we expect this to timeout */
4968 	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
4969 			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
4970 			       msecs_to_jiffies(90)))
4971 		goto done;
4972 	qib_7322_mini_pcs_reset(ppd);
4973 
4974 	/* we expect this to timeout */
4975 	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
4976 			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
4977 			       msecs_to_jiffies(1700)))
4978 		goto done;
4979 	qib_7322_mini_pcs_reset(ppd);
4980 
4981 	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
4982 
4983 	/*
4984 	 * Wait up to 250 msec for link to train and get to INIT at DDR;
4985 	 * this should terminate early.
4986 	 */
4987 	wait_event_timeout(ppd->cpspec->autoneg_wait,
4988 		!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
4989 		msecs_to_jiffies(250));
4990 done:
4991 	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
4992 		spin_lock_irqsave(&ppd->lflags_lock, flags);
4993 		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
4994 		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
4995 			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
4996 			ppd->cpspec->autoneg_tries = 0;
4997 		}
4998 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4999 		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5000 	}
5001 }
5002 
5003 /*
5004  * This routine is used to request IPG set in the QLogic switch.
5005  * Only called if r1.
5006  */
5007 static void try_7322_ipg(struct qib_pportdata *ppd)
5008 {
5009 	struct qib_ibport *ibp = &ppd->ibport_data;
5010 	struct ib_mad_send_buf *send_buf;
5011 	struct ib_mad_agent *agent;
5012 	struct ib_smp *smp;
5013 	unsigned delay;
5014 	int ret;
5015 
5016 	agent = ibp->send_agent;
5017 	if (!agent)
5018 		goto retry;
5019 
5020 	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5021 				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
5022 	if (IS_ERR(send_buf))
5023 		goto retry;
5024 
5025 	if (!ibp->smi_ah) {
5026 		struct ib_ah_attr attr;
5027 		struct ib_ah *ah;
5028 
5029 		memset(&attr, 0, sizeof attr);
5030 		attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE);
5031 		attr.port_num = ppd->port;
5032 		ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
5033 		if (IS_ERR(ah))
5034 			ret = -EINVAL;
5035 		else {
5036 			send_buf->ah = ah;
5037 			ibp->smi_ah = to_iah(ah);
5038 			ret = 0;
5039 		}
5040 	} else {
5041 		send_buf->ah = &ibp->smi_ah->ibah;
5042 		ret = 0;
5043 	}
5044 
5045 	smp = send_buf->mad;
5046 	smp->base_version = IB_MGMT_BASE_VERSION;
5047 	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5048 	smp->class_version = 1;
5049 	smp->method = IB_MGMT_METHOD_SEND;
5050 	smp->hop_cnt = 1;
5051 	smp->attr_id = QIB_VENDOR_IPG;
5052 	smp->attr_mod = 0;
5053 
5054 	if (!ret)
5055 		ret = ib_post_send_mad(send_buf, NULL);
5056 	if (ret)
5057 		ib_free_send_mad(send_buf);
5058 retry:
5059 	delay = 2 << ppd->cpspec->ipg_tries;
5060 	schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
5061 }
5062 
5063 /*
5064  * Timeout handler for setting IPG.
5065  * Only called if r1.
5066  */
5067 static void ipg_7322_work(struct work_struct *work)
5068 {
5069 	struct qib_pportdata *ppd;
5070 
5071 	ppd = container_of(work, struct qib_chippport_specific,
5072 			   ipg_work.work)->ppd;
5073 	if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5074 	    && ++ppd->cpspec->ipg_tries <= 10)
5075 		try_7322_ipg(ppd);
5076 }
5077 
5078 static u32 qib_7322_iblink_state(u64 ibcs)
5079 {
5080 	u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5081 
5082 	switch (state) {
5083 	case IB_7322_L_STATE_INIT:
5084 		state = IB_PORT_INIT;
5085 		break;
5086 	case IB_7322_L_STATE_ARM:
5087 		state = IB_PORT_ARMED;
5088 		break;
5089 	case IB_7322_L_STATE_ACTIVE:
5090 		/* fall through */
5091 	case IB_7322_L_STATE_ACT_DEFER:
5092 		state = IB_PORT_ACTIVE;
5093 		break;
5094 	default: /* fall through */
5095 	case IB_7322_L_STATE_DOWN:
5096 		state = IB_PORT_DOWN;
5097 		break;
5098 	}
5099 	return state;
5100 }
5101 
5102 /* returns the IBTA port state, rather than the IBC link training state */
5103 static u8 qib_7322_phys_portstate(u64 ibcs)
5104 {
5105 	u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5106 	return qib_7322_physportstate[state];
5107 }
5108 
5109 static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5110 {
5111 	int ret = 0, symadj = 0;
5112 	unsigned long flags;
5113 	int mult;
5114 
5115 	spin_lock_irqsave(&ppd->lflags_lock, flags);
5116 	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5117 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5118 
5119 	/* Update our picture of width and speed from chip */
5120 	if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5121 		ppd->link_speed_active = QIB_IB_QDR;
5122 		mult = 4;
5123 	} else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5124 		ppd->link_speed_active = QIB_IB_DDR;
5125 		mult = 2;
5126 	} else {
5127 		ppd->link_speed_active = QIB_IB_SDR;
5128 		mult = 1;
5129 	}
5130 	if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5131 		ppd->link_width_active = IB_WIDTH_4X;
5132 		mult *= 4;
5133 	} else
5134 		ppd->link_width_active = IB_WIDTH_1X;
5135 	ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5136 
5137 	if (!ibup) {
5138 		u64 clr;
5139 
5140 		/* Link went down. */
5141 		/* do IPG MAD again after linkdown, even if last time failed */
5142 		ppd->cpspec->ipg_tries = 0;
5143 		clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5144 			(SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5145 			 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5146 		if (clr)
5147 			qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5148 		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5149 				     QIBL_IB_AUTONEG_INPROG)))
5150 			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5151 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5152 			/* unlock the Tx settings, speed may change */
5153 			qib_write_kreg_port(ppd, krp_tx_deemph_override,
5154 				SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5155 				reset_tx_deemphasis_override));
5156 			qib_cancel_sends(ppd);
5157 			/* on link down, ensure sane pcs state */
5158 			qib_7322_mini_pcs_reset(ppd);
5159 			spin_lock_irqsave(&ppd->sdma_lock, flags);
5160 			if (__qib_sdma_running(ppd))
5161 				__qib_sdma_process_event(ppd,
5162 					qib_sdma_event_e70_go_idle);
5163 			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5164 		}
5165 		clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5166 		if (clr == ppd->cpspec->iblnkdownsnap)
5167 			ppd->cpspec->iblnkdowndelta++;
5168 	} else {
5169 		if (qib_compat_ddr_negotiate &&
5170 		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5171 				     QIBL_IB_AUTONEG_INPROG)) &&
5172 		    ppd->link_speed_active == QIB_IB_SDR &&
5173 		    (ppd->link_speed_enabled & QIB_IB_DDR)
5174 		    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5175 			/* we are SDR, and auto-negotiation enabled */
5176 			++ppd->cpspec->autoneg_tries;
5177 			if (!ppd->cpspec->ibdeltainprog) {
5178 				ppd->cpspec->ibdeltainprog = 1;
5179 				ppd->cpspec->ibsymdelta +=
5180 					read_7322_creg32_port(ppd,
5181 						crp_ibsymbolerr) -
5182 						ppd->cpspec->ibsymsnap;
5183 				ppd->cpspec->iblnkerrdelta +=
5184 					read_7322_creg32_port(ppd,
5185 						crp_iblinkerrrecov) -
5186 						ppd->cpspec->iblnkerrsnap;
5187 			}
5188 			try_7322_autoneg(ppd);
5189 			ret = 1; /* no other IB status change processing */
5190 		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5191 			   ppd->link_speed_active == QIB_IB_SDR) {
5192 			qib_autoneg_7322_send(ppd, 1);
5193 			set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5194 			qib_7322_mini_pcs_reset(ppd);
5195 			udelay(2);
5196 			ret = 1; /* no other IB status change processing */
5197 		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5198 			   (ppd->link_speed_active & QIB_IB_DDR)) {
5199 			spin_lock_irqsave(&ppd->lflags_lock, flags);
5200 			ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5201 					 QIBL_IB_AUTONEG_FAILED);
5202 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5203 			ppd->cpspec->autoneg_tries = 0;
5204 			/* re-enable SDR, for next link down */
5205 			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5206 			wake_up(&ppd->cpspec->autoneg_wait);
5207 			symadj = 1;
5208 		} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5209 			/*
5210 			 * Clear autoneg failure flag, and do setup
5211 			 * so we'll try next time link goes down and
5212 			 * back to INIT (possibly connected to a
5213 			 * different device).
5214 			 */
5215 			spin_lock_irqsave(&ppd->lflags_lock, flags);
5216 			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5217 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5218 			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5219 			symadj = 1;
5220 		}
5221 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5222 			symadj = 1;
5223 			if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5224 				try_7322_ipg(ppd);
5225 			if (!ppd->cpspec->recovery_init)
5226 				setup_7322_link_recovery(ppd, 0);
5227 			ppd->cpspec->qdr_dfe_time = jiffies +
5228 				msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5229 		}
5230 		ppd->cpspec->ibmalfusesnap = 0;
5231 		ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5232 			crp_errlink);
5233 	}
5234 	if (symadj) {
5235 		ppd->cpspec->iblnkdownsnap =
5236 			read_7322_creg32_port(ppd, crp_iblinkdown);
5237 		if (ppd->cpspec->ibdeltainprog) {
5238 			ppd->cpspec->ibdeltainprog = 0;
5239 			ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5240 				crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5241 			ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5242 				crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5243 		}
5244 	} else if (!ibup && qib_compat_ddr_negotiate &&
5245 		   !ppd->cpspec->ibdeltainprog &&
5246 			!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5247 		ppd->cpspec->ibdeltainprog = 1;
5248 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5249 			crp_ibsymbolerr);
5250 		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5251 			crp_iblinkerrrecov);
5252 	}
5253 
5254 	if (!ret)
5255 		qib_setup_7322_setextled(ppd, ibup);
5256 	return ret;
5257 }
5258 
5259 /*
5260  * Does read/modify/write to appropriate registers to
5261  * set output and direction bits selected by mask.
5262  * these are in their canonical postions (e.g. lsb of
5263  * dir will end up in D48 of extctrl on existing chips).
5264  * returns contents of GP Inputs.
5265  */
5266 static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5267 {
5268 	u64 read_val, new_out;
5269 	unsigned long flags;
5270 
5271 	if (mask) {
5272 		/* some bits being written, lock access to GPIO */
5273 		dir &= mask;
5274 		out &= mask;
5275 		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5276 		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5277 		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5278 		new_out = (dd->cspec->gpio_out & ~mask) | out;
5279 
5280 		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5281 		qib_write_kreg(dd, kr_gpio_out, new_out);
5282 		dd->cspec->gpio_out = new_out;
5283 		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5284 	}
5285 	/*
5286 	 * It is unlikely that a read at this time would get valid
5287 	 * data on a pin whose direction line was set in the same
5288 	 * call to this function. We include the read here because
5289 	 * that allows us to potentially combine a change on one pin with
5290 	 * a read on another, and because the old code did something like
5291 	 * this.
5292 	 */
5293 	read_val = qib_read_kreg64(dd, kr_extstatus);
5294 	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5295 }
5296 
5297 /* Enable writes to config EEPROM, if possible. Returns previous state */
5298 static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5299 {
5300 	int prev_wen;
5301 	u32 mask;
5302 
5303 	mask = 1 << QIB_EEPROM_WEN_NUM;
5304 	prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5305 	gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5306 
5307 	return prev_wen & 1;
5308 }
5309 
5310 /*
5311  * Read fundamental info we need to use the chip.  These are
5312  * the registers that describe chip capabilities, and are
5313  * saved in shadow registers.
5314  */
5315 static void get_7322_chip_params(struct qib_devdata *dd)
5316 {
5317 	u64 val;
5318 	u32 piobufs;
5319 	int mtu;
5320 
5321 	dd->palign = qib_read_kreg32(dd, kr_pagealign);
5322 
5323 	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5324 
5325 	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5326 	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5327 	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5328 	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5329 	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5330 
5331 	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5332 	dd->piobcnt2k = val & ~0U;
5333 	dd->piobcnt4k = val >> 32;
5334 	val = qib_read_kreg64(dd, kr_sendpiosize);
5335 	dd->piosize2k = val & ~0U;
5336 	dd->piosize4k = val >> 32;
5337 
5338 	mtu = ib_mtu_enum_to_int(qib_ibmtu);
5339 	if (mtu == -1)
5340 		mtu = QIB_DEFAULT_MTU;
5341 	dd->pport[0].ibmtu = (u32)mtu;
5342 	dd->pport[1].ibmtu = (u32)mtu;
5343 
5344 	/* these may be adjusted in init_chip_wc_pat() */
5345 	dd->pio2kbase = (u32 __iomem *)
5346 		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5347 	dd->pio4kbase = (u32 __iomem *)
5348 		((char __iomem *) dd->kregbase +
5349 		 (dd->piobufbase >> 32));
5350 	/*
5351 	 * 4K buffers take 2 pages; we use roundup just to be
5352 	 * paranoid; we calculate it once here, rather than on
5353 	 * ever buf allocate
5354 	 */
5355 	dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5356 
5357 	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5358 
5359 	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5360 		(sizeof(u64) * BITS_PER_BYTE / 2);
5361 }
5362 
5363 /*
5364  * The chip base addresses in cspec and cpspec have to be set
5365  * after possible init_chip_wc_pat(), rather than in
5366  * get_7322_chip_params(), so split out as separate function
5367  */
5368 static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5369 {
5370 	u32 cregbase;
5371 	cregbase = qib_read_kreg32(dd, kr_counterregbase);
5372 
5373 	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5374 		(char __iomem *)dd->kregbase);
5375 
5376 	dd->egrtidbase = (u64 __iomem *)
5377 		((char __iomem *) dd->kregbase + dd->rcvegrbase);
5378 
5379 	/* port registers are defined as relative to base of chip */
5380 	dd->pport[0].cpspec->kpregbase =
5381 		(u64 __iomem *)((char __iomem *)dd->kregbase);
5382 	dd->pport[1].cpspec->kpregbase =
5383 		(u64 __iomem *)(dd->palign +
5384 		(char __iomem *)dd->kregbase);
5385 	dd->pport[0].cpspec->cpregbase =
5386 		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5387 		kr_counterregbase) + (char __iomem *)dd->kregbase);
5388 	dd->pport[1].cpspec->cpregbase =
5389 		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5390 		kr_counterregbase) + (char __iomem *)dd->kregbase);
5391 }
5392 
5393 /*
5394  * This is a fairly special-purpose observer, so we only support
5395  * the port-specific parts of SendCtrl
5396  */
5397 
5398 #define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |		\
5399 			   SYM_MASK(SendCtrl_0, SDmaEnable) |		\
5400 			   SYM_MASK(SendCtrl_0, SDmaIntEnable) |	\
5401 			   SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5402 			   SYM_MASK(SendCtrl_0, SDmaHalt) |		\
5403 			   SYM_MASK(SendCtrl_0, IBVLArbiterEn) |	\
5404 			   SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5405 
5406 static int sendctrl_hook(struct qib_devdata *dd,
5407 			 const struct diag_observer *op, u32 offs,
5408 			 u64 *data, u64 mask, int only_32)
5409 {
5410 	unsigned long flags;
5411 	unsigned idx;
5412 	unsigned pidx;
5413 	struct qib_pportdata *ppd = NULL;
5414 	u64 local_data, all_bits;
5415 
5416 	/*
5417 	 * The fixed correspondence between Physical ports and pports is
5418 	 * severed. We need to hunt for the ppd that corresponds
5419 	 * to the offset we got. And we have to do that without admitting
5420 	 * we know the stride, apparently.
5421 	 */
5422 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5423 		u64 __iomem *psptr;
5424 		u32 psoffs;
5425 
5426 		ppd = dd->pport + pidx;
5427 		if (!ppd->cpspec->kpregbase)
5428 			continue;
5429 
5430 		psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5431 		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5432 		if (psoffs == offs)
5433 			break;
5434 	}
5435 
5436 	/* If pport is not being managed by driver, just avoid shadows. */
5437 	if (pidx >= dd->num_pports)
5438 		ppd = NULL;
5439 
5440 	/* In any case, "idx" is flat index in kreg space */
5441 	idx = offs / sizeof(u64);
5442 
5443 	all_bits = ~0ULL;
5444 	if (only_32)
5445 		all_bits >>= 32;
5446 
5447 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
5448 	if (!ppd || (mask & all_bits) != all_bits) {
5449 		/*
5450 		 * At least some mask bits are zero, so we need
5451 		 * to read. The judgement call is whether from
5452 		 * reg or shadow. First-cut: read reg, and complain
5453 		 * if any bits which should be shadowed are different
5454 		 * from their shadowed value.
5455 		 */
5456 		if (only_32)
5457 			local_data = (u64)qib_read_kreg32(dd, idx);
5458 		else
5459 			local_data = qib_read_kreg64(dd, idx);
5460 		*data = (local_data & ~mask) | (*data & mask);
5461 	}
5462 	if (mask) {
5463 		/*
5464 		 * At least some mask bits are one, so we need
5465 		 * to write, but only shadow some bits.
5466 		 */
5467 		u64 sval, tval; /* Shadowed, transient */
5468 
5469 		/*
5470 		 * New shadow val is bits we don't want to touch,
5471 		 * ORed with bits we do, that are intended for shadow.
5472 		 */
5473 		if (ppd) {
5474 			sval = ppd->p_sendctrl & ~mask;
5475 			sval |= *data & SENDCTRL_SHADOWED & mask;
5476 			ppd->p_sendctrl = sval;
5477 		} else
5478 			sval = *data & SENDCTRL_SHADOWED & mask;
5479 		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5480 		qib_write_kreg(dd, idx, tval);
5481 		qib_write_kreg(dd, kr_scratch, 0Ull);
5482 	}
5483 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5484 	return only_32 ? 4 : 8;
5485 }
5486 
5487 static const struct diag_observer sendctrl_0_observer = {
5488 	sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5489 	KREG_IDX(SendCtrl_0) * sizeof(u64)
5490 };
5491 
5492 static const struct diag_observer sendctrl_1_observer = {
5493 	sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5494 	KREG_IDX(SendCtrl_1) * sizeof(u64)
5495 };
5496 
5497 static ushort sdma_fetch_prio = 8;
5498 module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5499 MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5500 
5501 /* Besides logging QSFP events, we set appropriate TxDDS values */
5502 static void init_txdds_table(struct qib_pportdata *ppd, int override);
5503 
5504 static void qsfp_7322_event(struct work_struct *work)
5505 {
5506 	struct qib_qsfp_data *qd;
5507 	struct qib_pportdata *ppd;
5508 	u64 pwrup;
5509 	int ret;
5510 	u32 le2;
5511 
5512 	qd = container_of(work, struct qib_qsfp_data, work);
5513 	ppd = qd->ppd;
5514 	pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC);
5515 
5516 	/*
5517 	 * Some QSFP's not only do not respond until the full power-up
5518 	 * time, but may behave badly if we try. So hold off responding
5519 	 * to insertion.
5520 	 */
5521 	while (1) {
5522 		u64 now = get_jiffies_64();
5523 		if (time_after64(now, pwrup))
5524 			break;
5525 		msleep(1);
5526 	}
5527 	ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5528 	/*
5529 	 * Need to change LE2 back to defaults if we couldn't
5530 	 * read the cable type (to handle cable swaps), so do this
5531 	 * even on failure to read cable information.  We don't
5532 	 * get here for QME, so IS_QME check not needed here.
5533 	 */
5534 	le2 = (!ret && qd->cache.atten[1] >= qib_long_atten &&
5535 	       !ppd->dd->cspec->r1 && QSFP_IS_CU(qd->cache.tech)) ?
5536 		LE2_5m : LE2_DEFAULT;
5537 	ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
5538 	init_txdds_table(ppd, 0);
5539 }
5540 
5541 /*
5542  * There is little we can do but complain to the user if QSFP
5543  * initialization fails.
5544  */
5545 static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
5546 {
5547 	unsigned long flags;
5548 	struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
5549 	struct qib_devdata *dd = ppd->dd;
5550 	u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
5551 
5552 	mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
5553 	qd->ppd = ppd;
5554 	qib_qsfp_init(qd, qsfp_7322_event);
5555 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5556 	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
5557 	dd->cspec->gpio_mask |= mod_prs_bit;
5558 	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5559 	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
5560 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5561 }
5562 
5563 /*
5564  * called at device initialization time, and also if the txselect
5565  * module parameter is changed.  This is used for cables that don't
5566  * have valid QSFP EEPROMs (not present, or attenuation is zero).
5567  * We initialize to the default, then if there is a specific
5568  * unit,port match, we use that (and set it immediately, for the
5569  * current speed, if the link is at INIT or better).
5570  * String format is "default# unit#,port#=# ... u,p=#", separators must
5571  * be a SPACE character.  A newline terminates.  The u,p=# tuples may
5572  * optionally have "u,p=#,#", where the final # is the H1 value
5573  * The last specific match is used (actually, all are used, but last
5574  * one is the one that winds up set); if none at all, fall back on default.
5575  */
5576 static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5577 {
5578 	char *nxt, *str;
5579 	u32 pidx, unit, port, deflt, h1;
5580 	unsigned long val;
5581 	int any = 0, seth1;
5582 
5583 	str = txselect_list;
5584 
5585 	/* default number is validated in setup_txselect() */
5586 	deflt = simple_strtoul(str, &nxt, 0);
5587 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
5588 		dd->pport[pidx].cpspec->no_eep = deflt;
5589 
5590 	while (*nxt && nxt[1]) {
5591 		str = ++nxt;
5592 		unit = simple_strtoul(str, &nxt, 0);
5593 		if (nxt == str || !*nxt || *nxt != ',') {
5594 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5595 				;
5596 			continue;
5597 		}
5598 		str = ++nxt;
5599 		port = simple_strtoul(str, &nxt, 0);
5600 		if (nxt == str || *nxt != '=') {
5601 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5602 				;
5603 			continue;
5604 		}
5605 		str = ++nxt;
5606 		val = simple_strtoul(str, &nxt, 0);
5607 		if (nxt == str) {
5608 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5609 				;
5610 			continue;
5611 		}
5612 		if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)
5613 			continue;
5614 		seth1 = 0;
5615 		h1 = 0; /* gcc thinks it might be used uninitted */
5616 		if (*nxt == ',' && nxt[1]) {
5617 			str = ++nxt;
5618 			h1 = (u32)simple_strtoul(str, &nxt, 0);
5619 			if (nxt == str)
5620 				while (*nxt && *nxt++ != ' ') /* skip */
5621 					;
5622 			else
5623 				seth1 = 1;
5624 		}
5625 		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
5626 		     ++pidx) {
5627 			struct qib_pportdata *ppd = &dd->pport[pidx];
5628 
5629 			if (ppd->port != port || !ppd->link_speed_supported)
5630 				continue;
5631 			ppd->cpspec->no_eep = val;
5632 			if (seth1)
5633 				ppd->cpspec->h1_val = h1;
5634 			/* now change the IBC and serdes, overriding generic */
5635 			init_txdds_table(ppd, 1);
5636 			any++;
5637 		}
5638 		if (*nxt == '\n')
5639 			break; /* done */
5640 	}
5641 	if (change && !any) {
5642 		/* no specific setting, use the default.
5643 		 * Change the IBC and serdes, but since it's
5644 		 * general, don't override specific settings.
5645 		 */
5646 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
5647 			if (dd->pport[pidx].link_speed_supported)
5648 				init_txdds_table(&dd->pport[pidx], 0);
5649 	}
5650 }
5651 
5652 /* handle the txselect parameter changing */
5653 static int setup_txselect(const char *str, struct kernel_param *kp)
5654 {
5655 	struct qib_devdata *dd;
5656 	unsigned long val;
5657 	char *n;
5658 	if (strlen(str) >= MAX_ATTEN_LEN) {
5659 		printk(KERN_INFO QIB_DRV_NAME " txselect_values string "
5660 		       "too long\n");
5661 		return -ENOSPC;
5662 	}
5663 	val = simple_strtoul(str, &n, 0);
5664 	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
5665 		printk(KERN_INFO QIB_DRV_NAME
5666 		       "txselect_values must start with a number < %d\n",
5667 			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
5668 		return -EINVAL;
5669 	}
5670 	strcpy(txselect_list, str);
5671 
5672 	list_for_each_entry(dd, &qib_dev_list, list)
5673 		if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
5674 			set_no_qsfp_atten(dd, 1);
5675 	return 0;
5676 }
5677 
5678 /*
5679  * Write the final few registers that depend on some of the
5680  * init setup.  Done late in init, just before bringing up
5681  * the serdes.
5682  */
5683 static int qib_late_7322_initreg(struct qib_devdata *dd)
5684 {
5685 	int ret = 0, n;
5686 	u64 val;
5687 
5688 	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
5689 	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
5690 	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
5691 	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
5692 	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
5693 	if (val != dd->pioavailregs_phys) {
5694 		qib_dev_err(dd, "Catastrophic software error, "
5695 			    "SendPIOAvailAddr written as %lx, "
5696 			    "read back as %llx\n",
5697 			    (unsigned long) dd->pioavailregs_phys,
5698 			    (unsigned long long) val);
5699 		ret = -EINVAL;
5700 	}
5701 
5702 	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
5703 	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
5704 	/* driver sends get pkey, lid, etc. checking also, to catch bugs */
5705 	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
5706 
5707 	qib_register_observer(dd, &sendctrl_0_observer);
5708 	qib_register_observer(dd, &sendctrl_1_observer);
5709 
5710 	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
5711 	qib_write_kreg(dd, kr_control, dd->control);
5712 	/*
5713 	 * Set SendDmaFetchPriority and init Tx params, including
5714 	 * QSFP handler on boards that have QSFP.
5715 	 * First set our default attenuation entry for cables that
5716 	 * don't have valid attenuation.
5717 	 */
5718 	set_no_qsfp_atten(dd, 0);
5719 	for (n = 0; n < dd->num_pports; ++n) {
5720 		struct qib_pportdata *ppd = dd->pport + n;
5721 
5722 		qib_write_kreg_port(ppd, krp_senddmaprioritythld,
5723 				    sdma_fetch_prio & 0xf);
5724 		/* Initialize qsfp if present on board. */
5725 		if (dd->flags & QIB_HAS_QSFP)
5726 			qib_init_7322_qsfp(ppd);
5727 	}
5728 	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
5729 	qib_write_kreg(dd, kr_control, dd->control);
5730 
5731 	return ret;
5732 }
5733 
5734 /* per IB port errors.  */
5735 #define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
5736 	MASK_ACROSS(8, 15))
5737 #define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
5738 #define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
5739 	MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
5740 	MASK_ACROSS(0, 11))
5741 
5742 /*
5743  * Write the initialization per-port registers that need to be done at
5744  * driver load and after reset completes (i.e., that aren't done as part
5745  * of other init procedures called from qib_init.c).
5746  * Some of these should be redundant on reset, but play safe.
5747  */
5748 static void write_7322_init_portregs(struct qib_pportdata *ppd)
5749 {
5750 	u64 val;
5751 	int i;
5752 
5753 	if (!ppd->link_speed_supported) {
5754 		/* no buffer credits for this port */
5755 		for (i = 1; i < 8; i++)
5756 			qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
5757 		qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
5758 		qib_write_kreg(ppd->dd, kr_scratch, 0);
5759 		return;
5760 	}
5761 
5762 	/*
5763 	 * Set the number of supported virtual lanes in IBC,
5764 	 * for flow control packet handling on unsupported VLs
5765 	 */
5766 	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
5767 	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
5768 	val |= (u64)(ppd->vls_supported - 1) <<
5769 		SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
5770 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
5771 
5772 	qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
5773 
5774 	/* enable tx header checking */
5775 	qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
5776 			    IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
5777 			    IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
5778 
5779 	qib_write_kreg_port(ppd, krp_ncmodectrl,
5780 		SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
5781 
5782 	/*
5783 	 * Unconditionally clear the bufmask bits.  If SDMA is
5784 	 * enabled, we'll set them appropriately later.
5785 	 */
5786 	qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
5787 	qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
5788 	qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
5789 	if (ppd->dd->cspec->r1)
5790 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
5791 }
5792 
5793 /*
5794  * Write the initialization per-device registers that need to be done at
5795  * driver load and after reset completes (i.e., that aren't done as part
5796  * of other init procedures called from qib_init.c).  Also write per-port
5797  * registers that are affected by overall device config, such as QP mapping
5798  * Some of these should be redundant on reset, but play safe.
5799  */
5800 static void write_7322_initregs(struct qib_devdata *dd)
5801 {
5802 	struct qib_pportdata *ppd;
5803 	int i, pidx;
5804 	u64 val;
5805 
5806 	/* Set Multicast QPs received by port 2 to map to context one. */
5807 	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
5808 
5809 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5810 		unsigned n, regno;
5811 		unsigned long flags;
5812 
5813 		if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
5814 			continue;
5815 
5816 		ppd = &dd->pport[pidx];
5817 
5818 		/* be paranoid against later code motion, etc. */
5819 		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
5820 		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
5821 		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
5822 
5823 		/* Initialize QP to context mapping */
5824 		regno = krp_rcvqpmaptable;
5825 		val = 0;
5826 		if (dd->num_pports > 1)
5827 			n = dd->first_user_ctxt / dd->num_pports;
5828 		else
5829 			n = dd->first_user_ctxt - 1;
5830 		for (i = 0; i < 32; ) {
5831 			unsigned ctxt;
5832 
5833 			if (dd->num_pports > 1)
5834 				ctxt = (i % n) * dd->num_pports + pidx;
5835 			else if (i % n)
5836 				ctxt = (i % n) + 1;
5837 			else
5838 				ctxt = ppd->hw_pidx;
5839 			val |= ctxt << (5 * (i % 6));
5840 			i++;
5841 			if (i % 6 == 0) {
5842 				qib_write_kreg_port(ppd, regno, val);
5843 				val = 0;
5844 				regno++;
5845 			}
5846 		}
5847 		qib_write_kreg_port(ppd, regno, val);
5848 	}
5849 
5850 	/*
5851 	 * Setup up interrupt mitigation for kernel contexts, but
5852 	 * not user contexts (user contexts use interrupts when
5853 	 * stalled waiting for any packet, so want those interrupts
5854 	 * right away).
5855 	 */
5856 	for (i = 0; i < dd->first_user_ctxt; i++) {
5857 		dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
5858 		qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
5859 	}
5860 
5861 	/*
5862 	 * Initialize  as (disabled) rcvflow tables.  Application code
5863 	 * will setup each flow as it uses the flow.
5864 	 * Doesn't clear any of the error bits that might be set.
5865 	 */
5866 	val = TIDFLOW_ERRBITS; /* these are W1C */
5867 	for (i = 0; i < dd->cfgctxts; i++) {
5868 		int flow;
5869 		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
5870 			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
5871 	}
5872 
5873 	/*
5874 	 * dual cards init to dual port recovery, single port cards to
5875 	 * the one port.  Dual port cards may later adjust to 1 port,
5876 	 * and then back to dual port if both ports are connected
5877 	 * */
5878 	if (dd->num_pports)
5879 		setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
5880 }
5881 
5882 static int qib_init_7322_variables(struct qib_devdata *dd)
5883 {
5884 	struct qib_pportdata *ppd;
5885 	unsigned features, pidx, sbufcnt;
5886 	int ret, mtu;
5887 	u32 sbufs, updthresh;
5888 
5889 	/* pport structs are contiguous, allocated after devdata */
5890 	ppd = (struct qib_pportdata *)(dd + 1);
5891 	dd->pport = ppd;
5892 	ppd[0].dd = dd;
5893 	ppd[1].dd = dd;
5894 
5895 	dd->cspec = (struct qib_chip_specific *)(ppd + 2);
5896 
5897 	ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
5898 	ppd[1].cpspec = &ppd[0].cpspec[1];
5899 	ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
5900 	ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
5901 
5902 	spin_lock_init(&dd->cspec->rcvmod_lock);
5903 	spin_lock_init(&dd->cspec->gpio_lock);
5904 
5905 	/* we haven't yet set QIB_PRESENT, so use read directly */
5906 	dd->revision = readq(&dd->kregbase[kr_revision]);
5907 
5908 	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
5909 		qib_dev_err(dd, "Revision register read failure, "
5910 			    "giving up initialization\n");
5911 		ret = -ENODEV;
5912 		goto bail;
5913 	}
5914 	dd->flags |= QIB_PRESENT;  /* now register routines work */
5915 
5916 	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
5917 	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
5918 	dd->cspec->r1 = dd->minrev == 1;
5919 
5920 	get_7322_chip_params(dd);
5921 	features = qib_7322_boardname(dd);
5922 
5923 	/* now that piobcnt2k and 4k set, we can allocate these */
5924 	sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
5925 		NUM_VL15_BUFS + BITS_PER_LONG - 1;
5926 	sbufcnt /= BITS_PER_LONG;
5927 	dd->cspec->sendchkenable = kmalloc(sbufcnt *
5928 		sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
5929 	dd->cspec->sendgrhchk = kmalloc(sbufcnt *
5930 		sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
5931 	dd->cspec->sendibchk = kmalloc(sbufcnt *
5932 		sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
5933 	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
5934 		!dd->cspec->sendibchk) {
5935 		qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
5936 		ret = -ENOMEM;
5937 		goto bail;
5938 	}
5939 
5940 	ppd = dd->pport;
5941 
5942 	/*
5943 	 * GPIO bits for TWSI data and clock,
5944 	 * used for serial EEPROM.
5945 	 */
5946 	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
5947 	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
5948 	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
5949 
5950 	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
5951 		QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
5952 		QIB_HAS_THRESH_UPDATE |
5953 		(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
5954 	dd->flags |= qib_special_trigger ?
5955 		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
5956 
5957 	/*
5958 	 * Setup initial values.  These may change when PAT is enabled, but
5959 	 * we need these to do initial chip register accesses.
5960 	 */
5961 	qib_7322_set_baseaddrs(dd);
5962 
5963 	mtu = ib_mtu_enum_to_int(qib_ibmtu);
5964 	if (mtu == -1)
5965 		mtu = QIB_DEFAULT_MTU;
5966 
5967 	dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
5968 	/* all hwerrors become interrupts, unless special purposed */
5969 	dd->cspec->hwerrmask = ~0ULL;
5970 	/*  link_recovery setup causes these errors, so ignore them,
5971 	 *  other than clearing them when they occur */
5972 	dd->cspec->hwerrmask &=
5973 		~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
5974 		  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
5975 		  HWE_MASK(LATriggered));
5976 
5977 	for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
5978 		struct qib_chippport_specific *cp = ppd->cpspec;
5979 		ppd->link_speed_supported = features & PORT_SPD_CAP;
5980 		features >>=  PORT_SPD_CAP_SHIFT;
5981 		if (!ppd->link_speed_supported) {
5982 			/* single port mode (7340, or configured) */
5983 			dd->skip_kctxt_mask |= 1 << pidx;
5984 			if (pidx == 0) {
5985 				/* Make sure port is disabled. */
5986 				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
5987 				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
5988 				ppd[0] = ppd[1];
5989 				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
5990 						  IBSerdesPClkNotDetectMask_0)
5991 						  | SYM_MASK(HwErrMask,
5992 						  SDmaMemReadErrMask_0));
5993 				dd->cspec->int_enable_mask &= ~(
5994 				     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
5995 				     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
5996 				     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
5997 				     SYM_MASK(IntMask, SDmaIntMask_0) |
5998 				     SYM_MASK(IntMask, ErrIntMask_0) |
5999 				     SYM_MASK(IntMask, SendDoneIntMask_0));
6000 			} else {
6001 				/* Make sure port is disabled. */
6002 				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6003 				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6004 				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6005 						  IBSerdesPClkNotDetectMask_1)
6006 						  | SYM_MASK(HwErrMask,
6007 						  SDmaMemReadErrMask_1));
6008 				dd->cspec->int_enable_mask &= ~(
6009 				     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6010 				     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6011 				     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6012 				     SYM_MASK(IntMask, SDmaIntMask_1) |
6013 				     SYM_MASK(IntMask, ErrIntMask_1) |
6014 				     SYM_MASK(IntMask, SendDoneIntMask_1));
6015 			}
6016 			continue;
6017 		}
6018 
6019 		dd->num_pports++;
6020 		qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6021 
6022 		ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6023 		ppd->link_width_enabled = IB_WIDTH_4X;
6024 		ppd->link_speed_enabled = ppd->link_speed_supported;
6025 		/*
6026 		 * Set the initial values to reasonable default, will be set
6027 		 * for real when link is up.
6028 		 */
6029 		ppd->link_width_active = IB_WIDTH_4X;
6030 		ppd->link_speed_active = QIB_IB_SDR;
6031 		ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6032 		switch (qib_num_cfg_vls) {
6033 		case 1:
6034 			ppd->vls_supported = IB_VL_VL0;
6035 			break;
6036 		case 2:
6037 			ppd->vls_supported = IB_VL_VL0_1;
6038 			break;
6039 		default:
6040 			qib_devinfo(dd->pcidev,
6041 				    "Invalid num_vls %u, using 4 VLs\n",
6042 				    qib_num_cfg_vls);
6043 			qib_num_cfg_vls = 4;
6044 			/* fall through */
6045 		case 4:
6046 			ppd->vls_supported = IB_VL_VL0_3;
6047 			break;
6048 		case 8:
6049 			if (mtu <= 2048)
6050 				ppd->vls_supported = IB_VL_VL0_7;
6051 			else {
6052 				qib_devinfo(dd->pcidev,
6053 					    "Invalid num_vls %u for MTU %d "
6054 					    ", using 4 VLs\n",
6055 					    qib_num_cfg_vls, mtu);
6056 				ppd->vls_supported = IB_VL_VL0_3;
6057 				qib_num_cfg_vls = 4;
6058 			}
6059 			break;
6060 		}
6061 		ppd->vls_operational = ppd->vls_supported;
6062 
6063 		init_waitqueue_head(&cp->autoneg_wait);
6064 		INIT_DELAYED_WORK(&cp->autoneg_work,
6065 				  autoneg_7322_work);
6066 		if (ppd->dd->cspec->r1)
6067 			INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6068 
6069 		/*
6070 		 * For Mez and similar cards, no qsfp info, so do
6071 		 * the "cable info" setup here.  Can be overridden
6072 		 * in adapter-specific routines.
6073 		 */
6074 		if (!(dd->flags & QIB_HAS_QSFP)) {
6075 			if (!IS_QMH(dd) && !IS_QME(dd))
6076 				qib_devinfo(dd->pcidev, "IB%u:%u: "
6077 					    "Unknown mezzanine card type\n",
6078 					    dd->unit, ppd->port);
6079 			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6080 			/*
6081 			 * Choose center value as default tx serdes setting
6082 			 * until changed through module parameter.
6083 			 */
6084 			ppd->cpspec->no_eep = IS_QMH(dd) ?
6085 				TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6086 		} else
6087 			cp->h1_val = H1_FORCE_VAL;
6088 
6089 		/* Avoid writes to chip for mini_init */
6090 		if (!qib_mini_init)
6091 			write_7322_init_portregs(ppd);
6092 
6093 		init_timer(&cp->chase_timer);
6094 		cp->chase_timer.function = reenable_chase;
6095 		cp->chase_timer.data = (unsigned long)ppd;
6096 
6097 		ppd++;
6098 	}
6099 
6100 	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
6101 	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
6102 	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6103 
6104 	/* we always allocate at least 2048 bytes for eager buffers */
6105 	dd->rcvegrbufsize = max(mtu, 2048);
6106 
6107 	qib_7322_tidtemplate(dd);
6108 
6109 	/*
6110 	 * We can request a receive interrupt for 1 or
6111 	 * more packets from current offset.
6112 	 */
6113 	dd->rhdrhead_intr_off =
6114 		(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6115 
6116 	/* setup the stats timer; the add_timer is done at end of init */
6117 	init_timer(&dd->stats_timer);
6118 	dd->stats_timer.function = qib_get_7322_faststats;
6119 	dd->stats_timer.data = (unsigned long) dd;
6120 
6121 	dd->ureg_align = 0x10000;  /* 64KB alignment */
6122 
6123 	dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6124 
6125 	qib_7322_config_ctxts(dd);
6126 	qib_set_ctxtcnt(dd);
6127 
6128 	if (qib_wc_pat) {
6129 		resource_size_t vl15off;
6130 		/*
6131 		 * We do not set WC on the VL15 buffers to avoid
6132 		 * a rare problem with unaligned writes from
6133 		 * interrupt-flushed store buffers, so we need
6134 		 * to map those separately here.  We can't solve
6135 		 * this for the rarely used mtrr case.
6136 		 */
6137 		ret = init_chip_wc_pat(dd, 0);
6138 		if (ret)
6139 			goto bail;
6140 
6141 		/* vl15 buffers start just after the 4k buffers */
6142 		vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6143 			dd->piobcnt4k * dd->align4k;
6144 		dd->piovl15base	= ioremap_nocache(vl15off,
6145 						  NUM_VL15_BUFS * dd->align4k);
6146 		if (!dd->piovl15base)
6147 			goto bail;
6148 	}
6149 	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6150 
6151 	ret = 0;
6152 	if (qib_mini_init)
6153 		goto bail;
6154 	if (!dd->num_pports) {
6155 		qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6156 		goto bail; /* no error, so can still figure out why err */
6157 	}
6158 
6159 	write_7322_initregs(dd);
6160 	ret = qib_create_ctxts(dd);
6161 	init_7322_cntrnames(dd);
6162 
6163 	updthresh = 8U; /* update threshold */
6164 
6165 	/* use all of 4KB buffers for the kernel SDMA, zero if !SDMA.
6166 	 * reserve the update threshold amount for other kernel use, such
6167 	 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
6168 	 * unless we aren't enabling SDMA, in which case we want to use
6169 	 * all the 4k bufs for the kernel.
6170 	 * if this was less than the update threshold, we could wait
6171 	 * a long time for an update.  Coded this way because we
6172 	 * sometimes change the update threshold for various reasons,
6173 	 * and we want this to remain robust.
6174 	 */
6175 	if (dd->flags & QIB_HAS_SEND_DMA) {
6176 		dd->cspec->sdmabufcnt = dd->piobcnt4k;
6177 		sbufs = updthresh > 3 ? updthresh : 3;
6178 	} else {
6179 		dd->cspec->sdmabufcnt = 0;
6180 		sbufs = dd->piobcnt4k;
6181 	}
6182 	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6183 		dd->cspec->sdmabufcnt;
6184 	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6185 	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6186 	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6187 		dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6188 
6189 	/*
6190 	 * If we have 16 user contexts, we will have 7 sbufs
6191 	 * per context, so reduce the update threshold to match.  We
6192 	 * want to update before we actually run out, at low pbufs/ctxt
6193 	 * so give ourselves some margin.
6194 	 */
6195 	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6196 		updthresh = dd->pbufsctxt - 2;
6197 	dd->cspec->updthresh_dflt = updthresh;
6198 	dd->cspec->updthresh = updthresh;
6199 
6200 	/* before full enable, no interrupts, no locking needed */
6201 	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6202 			     << SYM_LSB(SendCtrl, AvailUpdThld)) |
6203 			SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6204 
6205 	dd->psxmitwait_supported = 1;
6206 	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6207 bail:
6208 	if (!dd->ctxtcnt)
6209 		dd->ctxtcnt = 1; /* for other initialization code */
6210 
6211 	return ret;
6212 }
6213 
6214 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6215 					u32 *pbufnum)
6216 {
6217 	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6218 	struct qib_devdata *dd = ppd->dd;
6219 
6220 	/* last is same for 2k and 4k, because we use 4k if all 2k busy */
6221 	if (pbc & PBC_7322_VL15_SEND) {
6222 		first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6223 		last = first;
6224 	} else {
6225 		if ((plen + 1) > dd->piosize2kmax_dwords)
6226 			first = dd->piobcnt2k;
6227 		else
6228 			first = 0;
6229 		last = dd->cspec->lastbuf_for_pio;
6230 	}
6231 	return qib_getsendbuf_range(dd, pbufnum, first, last);
6232 }
6233 
6234 static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6235 				     u32 start)
6236 {
6237 	qib_write_kreg_port(ppd, krp_psinterval, intv);
6238 	qib_write_kreg_port(ppd, krp_psstart, start);
6239 }
6240 
6241 /*
6242  * Must be called with sdma_lock held, or before init finished.
6243  */
6244 static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6245 {
6246 	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6247 }
6248 
6249 static struct sdma_set_state_action sdma_7322_action_table[] = {
6250 	[qib_sdma_state_s00_hw_down] = {
6251 		.go_s99_running_tofalse = 1,
6252 		.op_enable = 0,
6253 		.op_intenable = 0,
6254 		.op_halt = 0,
6255 		.op_drain = 0,
6256 	},
6257 	[qib_sdma_state_s10_hw_start_up_wait] = {
6258 		.op_enable = 0,
6259 		.op_intenable = 1,
6260 		.op_halt = 1,
6261 		.op_drain = 0,
6262 	},
6263 	[qib_sdma_state_s20_idle] = {
6264 		.op_enable = 1,
6265 		.op_intenable = 1,
6266 		.op_halt = 1,
6267 		.op_drain = 0,
6268 	},
6269 	[qib_sdma_state_s30_sw_clean_up_wait] = {
6270 		.op_enable = 0,
6271 		.op_intenable = 1,
6272 		.op_halt = 1,
6273 		.op_drain = 0,
6274 	},
6275 	[qib_sdma_state_s40_hw_clean_up_wait] = {
6276 		.op_enable = 1,
6277 		.op_intenable = 1,
6278 		.op_halt = 1,
6279 		.op_drain = 0,
6280 	},
6281 	[qib_sdma_state_s50_hw_halt_wait] = {
6282 		.op_enable = 1,
6283 		.op_intenable = 1,
6284 		.op_halt = 1,
6285 		.op_drain = 1,
6286 	},
6287 	[qib_sdma_state_s99_running] = {
6288 		.op_enable = 1,
6289 		.op_intenable = 1,
6290 		.op_halt = 0,
6291 		.op_drain = 0,
6292 		.go_s99_running_totrue = 1,
6293 	},
6294 };
6295 
6296 static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6297 {
6298 	ppd->sdma_state.set_state_action = sdma_7322_action_table;
6299 }
6300 
6301 static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6302 {
6303 	struct qib_devdata *dd = ppd->dd;
6304 	unsigned lastbuf, erstbuf;
6305 	u64 senddmabufmask[3] = { 0 };
6306 	int n, ret = 0;
6307 
6308 	qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6309 	qib_sdma_7322_setlengen(ppd);
6310 	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6311 	qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6312 	qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6313 	qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6314 
6315 	if (dd->num_pports)
6316 		n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6317 	else
6318 		n = dd->cspec->sdmabufcnt; /* failsafe for init */
6319 	erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6320 		((dd->num_pports == 1 || ppd->port == 2) ? n :
6321 		dd->cspec->sdmabufcnt);
6322 	lastbuf = erstbuf + n;
6323 
6324 	ppd->sdma_state.first_sendbuf = erstbuf;
6325 	ppd->sdma_state.last_sendbuf = lastbuf;
6326 	for (; erstbuf < lastbuf; ++erstbuf) {
6327 		unsigned word = erstbuf / BITS_PER_LONG;
6328 		unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6329 
6330 		BUG_ON(word >= 3);
6331 		senddmabufmask[word] |= 1ULL << bit;
6332 	}
6333 	qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6334 	qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6335 	qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6336 	return ret;
6337 }
6338 
6339 /* sdma_lock must be held */
6340 static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6341 {
6342 	struct qib_devdata *dd = ppd->dd;
6343 	int sane;
6344 	int use_dmahead;
6345 	u16 swhead;
6346 	u16 swtail;
6347 	u16 cnt;
6348 	u16 hwhead;
6349 
6350 	use_dmahead = __qib_sdma_running(ppd) &&
6351 		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
6352 retry:
6353 	hwhead = use_dmahead ?
6354 		(u16) le64_to_cpu(*ppd->sdma_head_dma) :
6355 		(u16) qib_read_kreg_port(ppd, krp_senddmahead);
6356 
6357 	swhead = ppd->sdma_descq_head;
6358 	swtail = ppd->sdma_descq_tail;
6359 	cnt = ppd->sdma_descq_cnt;
6360 
6361 	if (swhead < swtail)
6362 		/* not wrapped */
6363 		sane = (hwhead >= swhead) & (hwhead <= swtail);
6364 	else if (swhead > swtail)
6365 		/* wrapped around */
6366 		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6367 			(hwhead <= swtail);
6368 	else
6369 		/* empty */
6370 		sane = (hwhead == swhead);
6371 
6372 	if (unlikely(!sane)) {
6373 		if (use_dmahead) {
6374 			/* try one more time, directly from the register */
6375 			use_dmahead = 0;
6376 			goto retry;
6377 		}
6378 		/* proceed as if no progress */
6379 		hwhead = swhead;
6380 	}
6381 
6382 	return hwhead;
6383 }
6384 
6385 static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6386 {
6387 	u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6388 
6389 	return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6390 	       (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6391 	       !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6392 	       !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6393 }
6394 
6395 /*
6396  * Compute the amount of delay before sending the next packet if the
6397  * port's send rate differs from the static rate set for the QP.
6398  * The delay affects the next packet and the amount of the delay is
6399  * based on the length of the this packet.
6400  */
6401 static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6402 				   u8 srate, u8 vl)
6403 {
6404 	u8 snd_mult = ppd->delay_mult;
6405 	u8 rcv_mult = ib_rate_to_delay[srate];
6406 	u32 ret;
6407 
6408 	ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6409 
6410 	/* Indicate VL15, else set the VL in the control word */
6411 	if (vl == 15)
6412 		ret |= PBC_7322_VL15_SEND_CTRL;
6413 	else
6414 		ret |= vl << PBC_VL_NUM_LSB;
6415 	ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
6416 
6417 	return ret;
6418 }
6419 
6420 /*
6421  * Enable the per-port VL15 send buffers for use.
6422  * They follow the rest of the buffers, without a config parameter.
6423  * This was in initregs, but that is done before the shadow
6424  * is set up, and this has to be done after the shadow is
6425  * set up.
6426  */
6427 static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
6428 {
6429 	unsigned vl15bufs;
6430 
6431 	vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
6432 	qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
6433 			       TXCHK_CHG_TYPE_KERN, NULL);
6434 }
6435 
6436 static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
6437 {
6438 	if (rcd->ctxt < NUM_IB_PORTS) {
6439 		if (rcd->dd->num_pports > 1) {
6440 			rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
6441 			rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
6442 		} else {
6443 			rcd->rcvegrcnt = KCTXT0_EGRCNT;
6444 			rcd->rcvegr_tid_base = 0;
6445 		}
6446 	} else {
6447 		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
6448 		rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
6449 			(rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
6450 	}
6451 }
6452 
6453 #define QTXSLEEPS 5000
6454 static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
6455 				  u32 len, u32 which, struct qib_ctxtdata *rcd)
6456 {
6457 	int i;
6458 	const int last = start + len - 1;
6459 	const int lastr = last / BITS_PER_LONG;
6460 	u32 sleeps = 0;
6461 	int wait = rcd != NULL;
6462 	unsigned long flags;
6463 
6464 	while (wait) {
6465 		unsigned long shadow;
6466 		int cstart, previ = -1;
6467 
6468 		/*
6469 		 * when flipping from kernel to user, we can't change
6470 		 * the checking type if the buffer is allocated to the
6471 		 * driver.   It's OK the other direction, because it's
6472 		 * from close, and we have just disarm'ed all the
6473 		 * buffers.  All the kernel to kernel changes are also
6474 		 * OK.
6475 		 */
6476 		for (cstart = start; cstart <= last; cstart++) {
6477 			i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
6478 				/ BITS_PER_LONG;
6479 			if (i != previ) {
6480 				shadow = (unsigned long)
6481 					le64_to_cpu(dd->pioavailregs_dma[i]);
6482 				previ = i;
6483 			}
6484 			if (test_bit(((2 * cstart) +
6485 				      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
6486 				     % BITS_PER_LONG, &shadow))
6487 				break;
6488 		}
6489 
6490 		if (cstart > last)
6491 			break;
6492 
6493 		if (sleeps == QTXSLEEPS)
6494 			break;
6495 		/* make sure we see an updated copy next time around */
6496 		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6497 		sleeps++;
6498 		msleep(1);
6499 	}
6500 
6501 	switch (which) {
6502 	case TXCHK_CHG_TYPE_DIS1:
6503 		/*
6504 		 * disable checking on a range; used by diags; just
6505 		 * one buffer, but still written generically
6506 		 */
6507 		for (i = start; i <= last; i++)
6508 			clear_bit(i, dd->cspec->sendchkenable);
6509 		break;
6510 
6511 	case TXCHK_CHG_TYPE_ENAB1:
6512 		/*
6513 		 * (re)enable checking on a range; used by diags; just
6514 		 * one buffer, but still written generically; read
6515 		 * scratch to be sure buffer actually triggered, not
6516 		 * just flushed from processor.
6517 		 */
6518 		qib_read_kreg32(dd, kr_scratch);
6519 		for (i = start; i <= last; i++)
6520 			set_bit(i, dd->cspec->sendchkenable);
6521 		break;
6522 
6523 	case TXCHK_CHG_TYPE_KERN:
6524 		/* usable by kernel */
6525 		for (i = start; i <= last; i++) {
6526 			set_bit(i, dd->cspec->sendibchk);
6527 			clear_bit(i, dd->cspec->sendgrhchk);
6528 		}
6529 		spin_lock_irqsave(&dd->uctxt_lock, flags);
6530 		/* see if we need to raise avail update threshold */
6531 		for (i = dd->first_user_ctxt;
6532 		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
6533 		     && i < dd->cfgctxts; i++)
6534 			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
6535 			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
6536 			   < dd->cspec->updthresh_dflt)
6537 				break;
6538 		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
6539 		if (i == dd->cfgctxts) {
6540 			spin_lock_irqsave(&dd->sendctrl_lock, flags);
6541 			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
6542 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
6543 			dd->sendctrl |= (dd->cspec->updthresh &
6544 					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
6545 					   SYM_LSB(SendCtrl, AvailUpdThld);
6546 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6547 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6548 		}
6549 		break;
6550 
6551 	case TXCHK_CHG_TYPE_USER:
6552 		/* for user process */
6553 		for (i = start; i <= last; i++) {
6554 			clear_bit(i, dd->cspec->sendibchk);
6555 			set_bit(i, dd->cspec->sendgrhchk);
6556 		}
6557 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
6558 		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
6559 			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
6560 			dd->cspec->updthresh = (rcd->piocnt /
6561 						rcd->subctxt_cnt) - 1;
6562 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
6563 			dd->sendctrl |= (dd->cspec->updthresh &
6564 					SYM_RMASK(SendCtrl, AvailUpdThld))
6565 					<< SYM_LSB(SendCtrl, AvailUpdThld);
6566 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6567 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6568 		} else
6569 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6570 		break;
6571 
6572 	default:
6573 		break;
6574 	}
6575 
6576 	for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
6577 		qib_write_kreg(dd, kr_sendcheckmask + i,
6578 			       dd->cspec->sendchkenable[i]);
6579 
6580 	for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
6581 		qib_write_kreg(dd, kr_sendgrhcheckmask + i,
6582 			       dd->cspec->sendgrhchk[i]);
6583 		qib_write_kreg(dd, kr_sendibpktmask + i,
6584 			       dd->cspec->sendibchk[i]);
6585 	}
6586 
6587 	/*
6588 	 * Be sure whatever we did was seen by the chip and acted upon,
6589 	 * before we return.  Mostly important for which >= 2.
6590 	 */
6591 	qib_read_kreg32(dd, kr_scratch);
6592 }
6593 
6594 
6595 /* useful for trigger analyzers, etc. */
6596 static void writescratch(struct qib_devdata *dd, u32 val)
6597 {
6598 	qib_write_kreg(dd, kr_scratch, val);
6599 }
6600 
6601 /* Dummy for now, use chip regs soon */
6602 static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
6603 {
6604 	return -ENXIO;
6605 }
6606 
6607 /**
6608  * qib_init_iba7322_funcs - set up the chip-specific function pointers
6609  * @dev: the pci_dev for qlogic_ib device
6610  * @ent: pci_device_id struct for this dev
6611  *
6612  * Also allocates, inits, and returns the devdata struct for this
6613  * device instance
6614  *
6615  * This is global, and is called directly at init to set up the
6616  * chip-specific function pointers for later use.
6617  */
6618 struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
6619 					   const struct pci_device_id *ent)
6620 {
6621 	struct qib_devdata *dd;
6622 	int ret, i;
6623 	u32 tabsize, actual_cnt = 0;
6624 
6625 	dd = qib_alloc_devdata(pdev,
6626 		NUM_IB_PORTS * sizeof(struct qib_pportdata) +
6627 		sizeof(struct qib_chip_specific) +
6628 		NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
6629 	if (IS_ERR(dd))
6630 		goto bail;
6631 
6632 	dd->f_bringup_serdes    = qib_7322_bringup_serdes;
6633 	dd->f_cleanup           = qib_setup_7322_cleanup;
6634 	dd->f_clear_tids        = qib_7322_clear_tids;
6635 	dd->f_free_irq          = qib_7322_free_irq;
6636 	dd->f_get_base_info     = qib_7322_get_base_info;
6637 	dd->f_get_msgheader     = qib_7322_get_msgheader;
6638 	dd->f_getsendbuf        = qib_7322_getsendbuf;
6639 	dd->f_gpio_mod          = gpio_7322_mod;
6640 	dd->f_eeprom_wen        = qib_7322_eeprom_wen;
6641 	dd->f_hdrqempty         = qib_7322_hdrqempty;
6642 	dd->f_ib_updown         = qib_7322_ib_updown;
6643 	dd->f_init_ctxt         = qib_7322_init_ctxt;
6644 	dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
6645 	dd->f_intr_fallback     = qib_7322_intr_fallback;
6646 	dd->f_late_initreg      = qib_late_7322_initreg;
6647 	dd->f_setpbc_control    = qib_7322_setpbc_control;
6648 	dd->f_portcntr          = qib_portcntr_7322;
6649 	dd->f_put_tid           = qib_7322_put_tid;
6650 	dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
6651 	dd->f_rcvctrl           = rcvctrl_7322_mod;
6652 	dd->f_read_cntrs        = qib_read_7322cntrs;
6653 	dd->f_read_portcntrs    = qib_read_7322portcntrs;
6654 	dd->f_reset             = qib_do_7322_reset;
6655 	dd->f_init_sdma_regs    = init_sdma_7322_regs;
6656 	dd->f_sdma_busy         = qib_sdma_7322_busy;
6657 	dd->f_sdma_gethead      = qib_sdma_7322_gethead;
6658 	dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
6659 	dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
6660 	dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
6661 	dd->f_sendctrl          = sendctrl_7322_mod;
6662 	dd->f_set_armlaunch     = qib_set_7322_armlaunch;
6663 	dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
6664 	dd->f_iblink_state      = qib_7322_iblink_state;
6665 	dd->f_ibphys_portstate  = qib_7322_phys_portstate;
6666 	dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
6667 	dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
6668 	dd->f_set_ib_loopback   = qib_7322_set_loopback;
6669 	dd->f_get_ib_table      = qib_7322_get_ib_table;
6670 	dd->f_set_ib_table      = qib_7322_set_ib_table;
6671 	dd->f_set_intr_state    = qib_7322_set_intr_state;
6672 	dd->f_setextled         = qib_setup_7322_setextled;
6673 	dd->f_txchk_change      = qib_7322_txchk_change;
6674 	dd->f_update_usrhead    = qib_update_7322_usrhead;
6675 	dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
6676 	dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
6677 	dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
6678 	dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
6679 	dd->f_sdma_init_early   = qib_7322_sdma_init_early;
6680 	dd->f_writescratch      = writescratch;
6681 	dd->f_tempsense_rd	= qib_7322_tempsense_rd;
6682 	/*
6683 	 * Do remaining PCIe setup and save PCIe values in dd.
6684 	 * Any error printing is already done by the init code.
6685 	 * On return, we have the chip mapped, but chip registers
6686 	 * are not set up until start of qib_init_7322_variables.
6687 	 */
6688 	ret = qib_pcie_ddinit(dd, pdev, ent);
6689 	if (ret < 0)
6690 		goto bail_free;
6691 
6692 	/* initialize chip-specific variables */
6693 	ret = qib_init_7322_variables(dd);
6694 	if (ret)
6695 		goto bail_cleanup;
6696 
6697 	if (qib_mini_init || !dd->num_pports)
6698 		goto bail;
6699 
6700 	/*
6701 	 * Determine number of vectors we want; depends on port count
6702 	 * and number of configured kernel receive queues actually used.
6703 	 * Should also depend on whether sdma is enabled or not, but
6704 	 * that's such a rare testing case it's not worth worrying about.
6705 	 */
6706 	tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
6707 	for (i = 0; i < tabsize; i++)
6708 		if ((i < ARRAY_SIZE(irq_table) &&
6709 		     irq_table[i].port <= dd->num_pports) ||
6710 		    (i >= ARRAY_SIZE(irq_table) &&
6711 		     dd->rcd[i - ARRAY_SIZE(irq_table)]))
6712 			actual_cnt++;
6713 	tabsize = actual_cnt;
6714 	dd->cspec->msix_entries = kmalloc(tabsize *
6715 			sizeof(struct msix_entry), GFP_KERNEL);
6716 	dd->cspec->msix_arg = kmalloc(tabsize *
6717 			sizeof(void *), GFP_KERNEL);
6718 	if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
6719 		qib_dev_err(dd, "No memory for MSIx table\n");
6720 		tabsize = 0;
6721 	}
6722 	for (i = 0; i < tabsize; i++)
6723 		dd->cspec->msix_entries[i].entry = i;
6724 
6725 	if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
6726 		qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
6727 			    "continuing anyway\n");
6728 	/* may be less than we wanted, if not enough available */
6729 	dd->cspec->num_msix_entries = tabsize;
6730 
6731 	/* setup interrupt handler */
6732 	qib_setup_7322_interrupt(dd, 1);
6733 
6734 	/* clear diagctrl register, in case diags were running and crashed */
6735 	qib_write_kreg(dd, kr_hwdiagctrl, 0);
6736 
6737 	goto bail;
6738 
6739 bail_cleanup:
6740 	qib_pcie_ddcleanup(dd);
6741 bail_free:
6742 	qib_free_devdata(dd);
6743 	dd = ERR_PTR(ret);
6744 bail:
6745 	return dd;
6746 }
6747 
6748 /*
6749  * Set the table entry at the specified index from the table specifed.
6750  * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
6751  * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
6752  * 'idx' below addresses the correct entry, while its 4 LSBs select the
6753  * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
6754  */
6755 #define DDS_ENT_AMP_LSB 14
6756 #define DDS_ENT_MAIN_LSB 9
6757 #define DDS_ENT_POST_LSB 5
6758 #define DDS_ENT_PRE_XTRA_LSB 3
6759 #define DDS_ENT_PRE_LSB 0
6760 
6761 /*
6762  * Set one entry in the TxDDS table for spec'd port
6763  * ridx picks one of the entries, while tp points
6764  * to the appropriate table entry.
6765  */
6766 static void set_txdds(struct qib_pportdata *ppd, int ridx,
6767 		      const struct txdds_ent *tp)
6768 {
6769 	struct qib_devdata *dd = ppd->dd;
6770 	u32 pack_ent;
6771 	int regidx;
6772 
6773 	/* Get correct offset in chip-space, and in source table */
6774 	regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
6775 	/*
6776 	 * We do not use qib_write_kreg_port() because it was intended
6777 	 * only for registers in the lower "port specific" pages.
6778 	 * So do index calculation  by hand.
6779 	 */
6780 	if (ppd->hw_pidx)
6781 		regidx += (dd->palign / sizeof(u64));
6782 
6783 	pack_ent = tp->amp << DDS_ENT_AMP_LSB;
6784 	pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
6785 	pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
6786 	pack_ent |= tp->post << DDS_ENT_POST_LSB;
6787 	qib_write_kreg(dd, regidx, pack_ent);
6788 	/* Prevent back-to-back writes by hitting scratch */
6789 	qib_write_kreg(ppd->dd, kr_scratch, 0);
6790 }
6791 
6792 static const struct vendor_txdds_ent vendor_txdds[] = {
6793 	{ /* Amphenol 1m 30awg NoEq */
6794 		{ 0x41, 0x50, 0x48 }, "584470002       ",
6795 		{ 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
6796 	},
6797 	{ /* Amphenol 3m 28awg NoEq */
6798 		{ 0x41, 0x50, 0x48 }, "584470004       ",
6799 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
6800 	},
6801 	{ /* Finisar 3m OM2 Optical */
6802 		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
6803 		{  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
6804 	},
6805 	{ /* Finisar 30m OM2 Optical */
6806 		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
6807 		{  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
6808 	},
6809 	{ /* Finisar Default OM2 Optical */
6810 		{ 0x00, 0x90, 0x65 }, NULL,
6811 		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
6812 	},
6813 	{ /* Gore 1m 30awg NoEq */
6814 		{ 0x00, 0x21, 0x77 }, "QSN3300-1       ",
6815 		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
6816 	},
6817 	{ /* Gore 2m 30awg NoEq */
6818 		{ 0x00, 0x21, 0x77 }, "QSN3300-2       ",
6819 		{  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
6820 	},
6821 	{ /* Gore 1m 28awg NoEq */
6822 		{ 0x00, 0x21, 0x77 }, "QSN3800-1       ",
6823 		{  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
6824 	},
6825 	{ /* Gore 3m 28awg NoEq */
6826 		{ 0x00, 0x21, 0x77 }, "QSN3800-3       ",
6827 		{  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
6828 	},
6829 	{ /* Gore 5m 24awg Eq */
6830 		{ 0x00, 0x21, 0x77 }, "QSN7000-5       ",
6831 		{  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
6832 	},
6833 	{ /* Gore 7m 24awg Eq */
6834 		{ 0x00, 0x21, 0x77 }, "QSN7000-7       ",
6835 		{  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
6836 	},
6837 	{ /* Gore 5m 26awg Eq */
6838 		{ 0x00, 0x21, 0x77 }, "QSN7600-5       ",
6839 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
6840 	},
6841 	{ /* Gore 7m 26awg Eq */
6842 		{ 0x00, 0x21, 0x77 }, "QSN7600-7       ",
6843 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  10,  1,  8, 15 },
6844 	},
6845 	{ /* Intersil 12m 24awg Active */
6846 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
6847 		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
6848 	},
6849 	{ /* Intersil 10m 28awg Active */
6850 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
6851 		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
6852 	},
6853 	{ /* Intersil 7m 30awg Active */
6854 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
6855 		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
6856 	},
6857 	{ /* Intersil 5m 32awg Active */
6858 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
6859 		{  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
6860 	},
6861 	{ /* Intersil Default Active */
6862 		{ 0x00, 0x30, 0xB4 }, NULL,
6863 		{  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
6864 	},
6865 	{ /* Luxtera 20m Active Optical */
6866 		{ 0x00, 0x25, 0x63 }, NULL,
6867 		{  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0,  12 },
6868 	},
6869 	{ /* Molex 1M Cu loopback */
6870 		{ 0x00, 0x09, 0x3A }, "74763-0025      ",
6871 		{  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
6872 	},
6873 	{ /* Molex 2m 28awg NoEq */
6874 		{ 0x00, 0x09, 0x3A }, "74757-2201      ",
6875 		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
6876 	},
6877 };
6878 
6879 static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
6880 	/* amp, pre, main, post */
6881 	{  2, 2, 15,  6 },	/* Loopback */
6882 	{  0, 0,  0,  1 },	/*  2 dB */
6883 	{  0, 0,  0,  2 },	/*  3 dB */
6884 	{  0, 0,  0,  3 },	/*  4 dB */
6885 	{  0, 0,  0,  4 },	/*  5 dB */
6886 	{  0, 0,  0,  5 },	/*  6 dB */
6887 	{  0, 0,  0,  6 },	/*  7 dB */
6888 	{  0, 0,  0,  7 },	/*  8 dB */
6889 	{  0, 0,  0,  8 },	/*  9 dB */
6890 	{  0, 0,  0,  9 },	/* 10 dB */
6891 	{  0, 0,  0, 10 },	/* 11 dB */
6892 	{  0, 0,  0, 11 },	/* 12 dB */
6893 	{  0, 0,  0, 12 },	/* 13 dB */
6894 	{  0, 0,  0, 13 },	/* 14 dB */
6895 	{  0, 0,  0, 14 },	/* 15 dB */
6896 	{  0, 0,  0, 15 },	/* 16 dB */
6897 };
6898 
6899 static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
6900 	/* amp, pre, main, post */
6901 	{  2, 2, 15,  6 },	/* Loopback */
6902 	{  0, 0,  0,  8 },	/*  2 dB */
6903 	{  0, 0,  0,  8 },	/*  3 dB */
6904 	{  0, 0,  0,  9 },	/*  4 dB */
6905 	{  0, 0,  0,  9 },	/*  5 dB */
6906 	{  0, 0,  0, 10 },	/*  6 dB */
6907 	{  0, 0,  0, 10 },	/*  7 dB */
6908 	{  0, 0,  0, 11 },	/*  8 dB */
6909 	{  0, 0,  0, 11 },	/*  9 dB */
6910 	{  0, 0,  0, 12 },	/* 10 dB */
6911 	{  0, 0,  0, 12 },	/* 11 dB */
6912 	{  0, 0,  0, 13 },	/* 12 dB */
6913 	{  0, 0,  0, 13 },	/* 13 dB */
6914 	{  0, 0,  0, 14 },	/* 14 dB */
6915 	{  0, 0,  0, 14 },	/* 15 dB */
6916 	{  0, 0,  0, 15 },	/* 16 dB */
6917 };
6918 
6919 static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
6920 	/* amp, pre, main, post */
6921 	{  2, 2, 15,  6 },	/* Loopback */
6922 	{  0, 1,  0,  7 },	/*  2 dB (also QMH7342) */
6923 	{  0, 1,  0,  9 },	/*  3 dB (also QMH7342) */
6924 	{  0, 1,  0, 11 },	/*  4 dB */
6925 	{  0, 1,  0, 13 },	/*  5 dB */
6926 	{  0, 1,  0, 15 },	/*  6 dB */
6927 	{  0, 1,  3, 15 },	/*  7 dB */
6928 	{  0, 1,  7, 15 },	/*  8 dB */
6929 	{  0, 1,  7, 15 },	/*  9 dB */
6930 	{  0, 1,  8, 15 },	/* 10 dB */
6931 	{  0, 1,  9, 15 },	/* 11 dB */
6932 	{  0, 1, 10, 15 },	/* 12 dB */
6933 	{  0, 2,  6, 15 },	/* 13 dB */
6934 	{  0, 2,  7, 15 },	/* 14 dB */
6935 	{  0, 2,  8, 15 },	/* 15 dB */
6936 	{  0, 2,  9, 15 },	/* 16 dB */
6937 };
6938 
6939 /*
6940  * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
6941  * These are mostly used for mez cards going through connectors
6942  * and backplane traces, but can be used to add other "unusual"
6943  * table values as well.
6944  */
6945 static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
6946 	/* amp, pre, main, post */
6947 	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
6948 	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
6949 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
6950 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
6951 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
6952 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
6953 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
6954 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
6955 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
6956 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
6957 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */
6958 	{  0, 0, 0,  3 },	/* QMH7342 backplane settings */
6959 	{  0, 0, 0,  4 },	/* QMH7342 backplane settings */
6960 };
6961 
6962 static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
6963 	/* amp, pre, main, post */
6964 	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
6965 	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
6966 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
6967 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
6968 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
6969 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
6970 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
6971 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
6972 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
6973 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
6974 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */
6975 	{  0, 0, 0,  9 },	/* QMH7342 backplane settings */
6976 	{  0, 0, 0, 10 },	/* QMH7342 backplane settings */
6977 };
6978 
6979 static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
6980 	/* amp, pre, main, post */
6981 	{  0, 1,  0,  4 },	/* QMH7342 backplane settings */
6982 	{  0, 1,  0,  5 },	/* QMH7342 backplane settings */
6983 	{  0, 1,  0,  6 },	/* QMH7342 backplane settings */
6984 	{  0, 1,  0,  8 },	/* QMH7342 backplane settings */
6985 	{  0, 1, 12, 10 },	/* QME7342 backplane setting */
6986 	{  0, 1, 12, 11 },	/* QME7342 backplane setting */
6987 	{  0, 1, 12, 12 },	/* QME7342 backplane setting */
6988 	{  0, 1, 12, 14 },	/* QME7342 backplane setting */
6989 	{  0, 1, 12,  6 },	/* QME7342 backplane setting */
6990 	{  0, 1, 12,  7 },	/* QME7342 backplane setting */
6991 	{  0, 1, 12,  8 },	/* QME7342 backplane setting */
6992 	{  0, 1,  0, 10 },	/* QMH7342 backplane settings */
6993 	{  0, 1,  0, 12 },	/* QMH7342 backplane settings */
6994 };
6995 
6996 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
6997 					       unsigned atten)
6998 {
6999 	/*
7000 	 * The attenuation table starts at 2dB for entry 1,
7001 	 * with entry 0 being the loopback entry.
7002 	 */
7003 	if (atten <= 2)
7004 		atten = 1;
7005 	else if (atten > TXDDS_TABLE_SZ)
7006 		atten = TXDDS_TABLE_SZ - 1;
7007 	else
7008 		atten--;
7009 	return txdds + atten;
7010 }
7011 
7012 /*
7013  * if override is set, the module parameter txselect has a value
7014  * for this specific port, so use it, rather than our normal mechanism.
7015  */
7016 static void find_best_ent(struct qib_pportdata *ppd,
7017 			  const struct txdds_ent **sdr_dds,
7018 			  const struct txdds_ent **ddr_dds,
7019 			  const struct txdds_ent **qdr_dds, int override)
7020 {
7021 	struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7022 	int idx;
7023 
7024 	/* Search table of known cables */
7025 	for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7026 		const struct vendor_txdds_ent *v = vendor_txdds + idx;
7027 
7028 		if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7029 		    (!v->partnum ||
7030 		     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7031 			*sdr_dds = &v->sdr;
7032 			*ddr_dds = &v->ddr;
7033 			*qdr_dds = &v->qdr;
7034 			return;
7035 		}
7036 	}
7037 
7038 	/* Lookup serdes setting by cable type and attenuation */
7039 	if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7040 		*sdr_dds = txdds_sdr + ppd->dd->board_atten;
7041 		*ddr_dds = txdds_ddr + ppd->dd->board_atten;
7042 		*qdr_dds = txdds_qdr + ppd->dd->board_atten;
7043 		return;
7044 	}
7045 
7046 	if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7047 						      qd->atten[1])) {
7048 		*sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7049 		*ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7050 		*qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7051 		return;
7052 	} else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7053 		/*
7054 		 * If we have no (or incomplete) data from the cable
7055 		 * EEPROM, or no QSFP, or override is set, use the
7056 		 * module parameter value to index into the attentuation
7057 		 * table.
7058 		 */
7059 		idx = ppd->cpspec->no_eep;
7060 		*sdr_dds = &txdds_sdr[idx];
7061 		*ddr_dds = &txdds_ddr[idx];
7062 		*qdr_dds = &txdds_qdr[idx];
7063 	} else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7064 		/* similar to above, but index into the "extra" table. */
7065 		idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7066 		*sdr_dds = &txdds_extra_sdr[idx];
7067 		*ddr_dds = &txdds_extra_ddr[idx];
7068 		*qdr_dds = &txdds_extra_qdr[idx];
7069 	} else {
7070 		/* this shouldn't happen, it's range checked */
7071 		*sdr_dds = txdds_sdr + qib_long_atten;
7072 		*ddr_dds = txdds_ddr + qib_long_atten;
7073 		*qdr_dds = txdds_qdr + qib_long_atten;
7074 	}
7075 }
7076 
7077 static void init_txdds_table(struct qib_pportdata *ppd, int override)
7078 {
7079 	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7080 	struct txdds_ent *dds;
7081 	int idx;
7082 	int single_ent = 0;
7083 
7084 	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7085 
7086 	/* for mez cards or override, use the selected value for all entries */
7087 	if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7088 		single_ent = 1;
7089 
7090 	/* Fill in the first entry with the best entry found. */
7091 	set_txdds(ppd, 0, sdr_dds);
7092 	set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7093 	set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7094 	if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7095 		QIBL_LINKACTIVE)) {
7096 		dds = (struct txdds_ent *)(ppd->link_speed_active ==
7097 					   QIB_IB_QDR ?  qdr_dds :
7098 					   (ppd->link_speed_active ==
7099 					    QIB_IB_DDR ? ddr_dds : sdr_dds));
7100 		write_tx_serdes_param(ppd, dds);
7101 	}
7102 
7103 	/* Fill in the remaining entries with the default table values. */
7104 	for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7105 		set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7106 		set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7107 			  single_ent ? ddr_dds : txdds_ddr + idx);
7108 		set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7109 			  single_ent ? qdr_dds : txdds_qdr + idx);
7110 	}
7111 }
7112 
7113 #define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7114 #define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7115 #define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7116 #define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7117 #define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7118 #define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7119 #define AHB_TRANS_TRIES 10
7120 
7121 /*
7122  * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan4,
7123  * 5=subsystem which is why most calls have "chan + chan >> 1"
7124  * for the channel argument.
7125  */
7126 static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7127 		    u32 data, u32 mask)
7128 {
7129 	u32 rd_data, wr_data, sz_mask;
7130 	u64 trans, acc, prev_acc;
7131 	u32 ret = 0xBAD0BAD;
7132 	int tries;
7133 
7134 	prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7135 	/* From this point on, make sure we return access */
7136 	acc = (quad << 1) | 1;
7137 	qib_write_kreg(dd, KR_AHB_ACC, acc);
7138 
7139 	for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7140 		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7141 		if (trans & AHB_TRANS_RDY)
7142 			break;
7143 	}
7144 	if (tries >= AHB_TRANS_TRIES) {
7145 		qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7146 		goto bail;
7147 	}
7148 
7149 	/* If mask is not all 1s, we need to read, but different SerDes
7150 	 * entities have different sizes
7151 	 */
7152 	sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7153 	wr_data = data & mask & sz_mask;
7154 	if ((~mask & sz_mask) != 0) {
7155 		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7156 		qib_write_kreg(dd, KR_AHB_TRANS, trans);
7157 
7158 		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7159 			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7160 			if (trans & AHB_TRANS_RDY)
7161 				break;
7162 		}
7163 		if (tries >= AHB_TRANS_TRIES) {
7164 			qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7165 				    AHB_TRANS_TRIES);
7166 			goto bail;
7167 		}
7168 		/* Re-read in case host split reads and read data first */
7169 		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7170 		rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7171 		wr_data |= (rd_data & ~mask & sz_mask);
7172 	}
7173 
7174 	/* If mask is not zero, we need to write. */
7175 	if (mask & sz_mask) {
7176 		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7177 		trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7178 		trans |= AHB_WR;
7179 		qib_write_kreg(dd, KR_AHB_TRANS, trans);
7180 
7181 		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7182 			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7183 			if (trans & AHB_TRANS_RDY)
7184 				break;
7185 		}
7186 		if (tries >= AHB_TRANS_TRIES) {
7187 			qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7188 				    AHB_TRANS_TRIES);
7189 			goto bail;
7190 		}
7191 	}
7192 	ret = wr_data;
7193 bail:
7194 	qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7195 	return ret;
7196 }
7197 
7198 static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7199 			     unsigned mask)
7200 {
7201 	struct qib_devdata *dd = ppd->dd;
7202 	int chan;
7203 	u32 rbc;
7204 
7205 	for (chan = 0; chan < SERDES_CHANS; ++chan) {
7206 		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7207 			data, mask);
7208 		rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7209 			      addr, 0, 0);
7210 	}
7211 }
7212 
7213 static int serdes_7322_init(struct qib_pportdata *ppd)
7214 {
7215 	u64 data;
7216 	u32 le_val;
7217 
7218 	/*
7219 	 * Initialize the Tx DDS tables.  Also done every QSFP event,
7220 	 * for adapters with QSFP
7221 	 */
7222 	init_txdds_table(ppd, 0);
7223 
7224 	/* ensure no tx overrides from earlier driver loads */
7225 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
7226 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7227 		reset_tx_deemphasis_override));
7228 
7229 	/* Patch some SerDes defaults to "Better for IB" */
7230 	/* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7231 	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7232 
7233 	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7234 	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7235 	/* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7236 	ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7237 
7238 	/* May be overridden in qsfp_7322_event */
7239 	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7240 	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7241 
7242 	/* enable LE1 adaptation for all but QME, which is disabled */
7243 	le_val = IS_QME(ppd->dd) ? 0 : 1;
7244 	ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7245 
7246 	/* Clear cmode-override, may be set from older driver */
7247 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7248 
7249 	/* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7250 	ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7251 
7252 	/* setup LoS params; these are subsystem, so chan == 5 */
7253 	/* LoS filter threshold_count on, ch 0-3, set to 8 */
7254 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7255 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7256 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7257 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7258 
7259 	/* LoS filter threshold_count off, ch 0-3, set to 4 */
7260 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7261 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7262 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7263 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7264 
7265 	/* LoS filter select enabled */
7266 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7267 
7268 	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
7269 	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7270 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7271 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7272 
7273 	data = qib_read_kreg_port(ppd, krp_serdesctrl);
7274 	/* Turn off IB latency mode */
7275 	data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);
7276 	qib_write_kreg_port(ppd, krp_serdesctrl, data |
7277 		SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
7278 
7279 	/* rxbistena; set 0 to avoid effects of it switch later */
7280 	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7281 
7282 	/* Configure 4 DFE taps, and only they adapt */
7283 	ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7284 
7285 	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7286 	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7287 	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7288 
7289 	/*
7290 	 * Set receive adaptation mode.  SDR and DDR adaptation are
7291 	 * always on, and QDR is initially enabled; later disabled.
7292 	 */
7293 	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7294 	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7295 	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7296 			    ppd->dd->cspec->r1 ?
7297 			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7298 	ppd->cpspec->qdr_dfe_on = 1;
7299 
7300 	/* FLoop LOS gate: PPM filter  enabled */
7301 	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7302 
7303 	/* rx offset center enabled */
7304 	ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7305 
7306 	if (!ppd->dd->cspec->r1) {
7307 		ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7308 		ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7309 	}
7310 
7311 	/* Set the frequency loop bandwidth to 15 */
7312 	ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7313 
7314 	return 0;
7315 }
7316 
7317 /* start adjust QMH serdes parameters */
7318 
7319 static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
7320 {
7321 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7322 		9, code << 9, 0x3f << 9);
7323 }
7324 
7325 static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
7326 	int enable, u32 tapenable)
7327 {
7328 	if (enable)
7329 		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7330 			1, 3 << 10, 0x1f << 10);
7331 	else
7332 		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7333 			1, 0, 0x1f << 10);
7334 }
7335 
7336 /* Set clock to 1, 0, 1, 0 */
7337 static void clock_man(struct qib_pportdata *ppd, int chan)
7338 {
7339 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7340 		4, 0x4000, 0x4000);
7341 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7342 		4, 0, 0x4000);
7343 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7344 		4, 0x4000, 0x4000);
7345 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7346 		4, 0, 0x4000);
7347 }
7348 
7349 /*
7350  * write the current Tx serdes pre,post,main,amp settings into the serdes.
7351  * The caller must pass the settings appropriate for the current speed,
7352  * or not care if they are correct for the current speed.
7353  */
7354 static void write_tx_serdes_param(struct qib_pportdata *ppd,
7355 				  struct txdds_ent *txdds)
7356 {
7357 	u64 deemph;
7358 
7359 	deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
7360 	/* field names for amp, main, post, pre, respectively */
7361 	deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
7362 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
7363 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
7364 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
7365 
7366 	deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7367 			   tx_override_deemphasis_select);
7368 	deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7369 		    txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7370 				       txampcntl_d2a);
7371 	deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7372 		     txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7373 				   txc0_ena);
7374 	deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7375 		     txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7376 				    txcp1_ena);
7377 	deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7378 		     txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7379 				    txcn1_ena);
7380 	qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
7381 }
7382 
7383 /*
7384  * Set the parameters for mez cards on link bounce, so they are
7385  * always exactly what was requested.  Similar logic to init_txdds
7386  * but does just the serdes.
7387  */
7388 static void adj_tx_serdes(struct qib_pportdata *ppd)
7389 {
7390 	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7391 	struct txdds_ent *dds;
7392 
7393 	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
7394 	dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
7395 		qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
7396 				ddr_dds : sdr_dds));
7397 	write_tx_serdes_param(ppd, dds);
7398 }
7399 
7400 /* set QDR forced value for H1, if needed */
7401 static void force_h1(struct qib_pportdata *ppd)
7402 {
7403 	int chan;
7404 
7405 	ppd->cpspec->qdr_reforce = 0;
7406 	if (!ppd->dd->cspec->r1)
7407 		return;
7408 
7409 	for (chan = 0; chan < SERDES_CHANS; chan++) {
7410 		set_man_mode_h1(ppd, chan, 1, 0);
7411 		set_man_code(ppd, chan, ppd->cpspec->h1_val);
7412 		clock_man(ppd, chan);
7413 		set_man_mode_h1(ppd, chan, 0, 0);
7414 	}
7415 }
7416 
7417 #define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
7418 #define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
7419 
7420 #define R_OPCODE_LSB 3
7421 #define R_OP_NOP 0
7422 #define R_OP_SHIFT 2
7423 #define R_OP_UPDATE 3
7424 #define R_TDI_LSB 2
7425 #define R_TDO_LSB 1
7426 #define R_RDY 1
7427 
7428 static int qib_r_grab(struct qib_devdata *dd)
7429 {
7430 	u64 val;
7431 	val = SJA_EN;
7432 	qib_write_kreg(dd, kr_r_access, val);
7433 	qib_read_kreg32(dd, kr_scratch);
7434 	return 0;
7435 }
7436 
7437 /* qib_r_wait_for_rdy() not only waits for the ready bit, it
7438  * returns the current state of R_TDO
7439  */
7440 static int qib_r_wait_for_rdy(struct qib_devdata *dd)
7441 {
7442 	u64 val;
7443 	int timeout;
7444 	for (timeout = 0; timeout < 100 ; ++timeout) {
7445 		val = qib_read_kreg32(dd, kr_r_access);
7446 		if (val & R_RDY)
7447 			return (val >> R_TDO_LSB) & 1;
7448 	}
7449 	return -1;
7450 }
7451 
7452 static int qib_r_shift(struct qib_devdata *dd, int bisten,
7453 		       int len, u8 *inp, u8 *outp)
7454 {
7455 	u64 valbase, val;
7456 	int ret, pos;
7457 
7458 	valbase = SJA_EN | (bisten << BISTEN_LSB) |
7459 		(R_OP_SHIFT << R_OPCODE_LSB);
7460 	ret = qib_r_wait_for_rdy(dd);
7461 	if (ret < 0)
7462 		goto bail;
7463 	for (pos = 0; pos < len; ++pos) {
7464 		val = valbase;
7465 		if (outp) {
7466 			outp[pos >> 3] &= ~(1 << (pos & 7));
7467 			outp[pos >> 3] |= (ret << (pos & 7));
7468 		}
7469 		if (inp) {
7470 			int tdi = inp[pos >> 3] >> (pos & 7);
7471 			val |= ((tdi & 1) << R_TDI_LSB);
7472 		}
7473 		qib_write_kreg(dd, kr_r_access, val);
7474 		qib_read_kreg32(dd, kr_scratch);
7475 		ret = qib_r_wait_for_rdy(dd);
7476 		if (ret < 0)
7477 			break;
7478 	}
7479 	/* Restore to NOP between operations. */
7480 	val =  SJA_EN | (bisten << BISTEN_LSB);
7481 	qib_write_kreg(dd, kr_r_access, val);
7482 	qib_read_kreg32(dd, kr_scratch);
7483 	ret = qib_r_wait_for_rdy(dd);
7484 
7485 	if (ret >= 0)
7486 		ret = pos;
7487 bail:
7488 	return ret;
7489 }
7490 
7491 static int qib_r_update(struct qib_devdata *dd, int bisten)
7492 {
7493 	u64 val;
7494 	int ret;
7495 
7496 	val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
7497 	ret = qib_r_wait_for_rdy(dd);
7498 	if (ret >= 0) {
7499 		qib_write_kreg(dd, kr_r_access, val);
7500 		qib_read_kreg32(dd, kr_scratch);
7501 	}
7502 	return ret;
7503 }
7504 
7505 #define BISTEN_PORT_SEL 15
7506 #define LEN_PORT_SEL 625
7507 #define BISTEN_AT 17
7508 #define LEN_AT 156
7509 #define BISTEN_ETM 16
7510 #define LEN_ETM 632
7511 
7512 #define BIT2BYTE(x) (((x) +  BITS_PER_BYTE - 1) / BITS_PER_BYTE)
7513 
7514 /* these are common for all IB port use cases. */
7515 static u8 reset_at[BIT2BYTE(LEN_AT)] = {
7516 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7517 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
7518 };
7519 static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
7520 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7521 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7522 	0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
7523 	0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
7524 	0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
7525 	0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
7526 	0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7527 	0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
7528 };
7529 static u8 at[BIT2BYTE(LEN_AT)] = {
7530 	0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
7531 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
7532 };
7533 
7534 /* used for IB1 or IB2, only one in use */
7535 static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
7536 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7537 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7538 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7539 	0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
7540 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7541 	0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
7542 	0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
7543 	0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
7544 };
7545 
7546 /* used when both IB1 and IB2 are in use */
7547 static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
7548 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7549 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
7550 	0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7551 	0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
7552 	0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
7553 	0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
7554 	0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
7555 	0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
7556 };
7557 
7558 /* used when only IB1 is in use */
7559 static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
7560 	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
7561 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
7562 	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7563 	0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7564 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
7565 	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
7566 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
7567 	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
7568 };
7569 
7570 /* used when only IB2 is in use */
7571 static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
7572 	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
7573 	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
7574 	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
7575 	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
7576 	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
7577 	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
7578 	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
7579 	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
7580 };
7581 
7582 /* used when both IB1 and IB2 are in use */
7583 static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
7584 	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
7585 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
7586 	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7587 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7588 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
7589 	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
7590 	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
7591 	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
7592 };
7593 
7594 /*
7595  * Do setup to properly handle IB link recovery; if port is zero, we
7596  * are initializing to cover both ports; otherwise we are initializing
7597  * to cover a single port card, or the port has reached INIT and we may
7598  * need to switch coverage types.
7599  */
7600 static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
7601 {
7602 	u8 *portsel, *etm;
7603 	struct qib_devdata *dd = ppd->dd;
7604 
7605 	if (!ppd->dd->cspec->r1)
7606 		return;
7607 	if (!both) {
7608 		dd->cspec->recovery_ports_initted++;
7609 		ppd->cpspec->recovery_init = 1;
7610 	}
7611 	if (!both && dd->cspec->recovery_ports_initted == 1) {
7612 		portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
7613 		etm = atetm_1port;
7614 	} else {
7615 		portsel = portsel_2port;
7616 		etm = atetm_2port;
7617 	}
7618 
7619 	if (qib_r_grab(dd) < 0 ||
7620 		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
7621 		qib_r_update(dd, BISTEN_ETM) < 0 ||
7622 		qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
7623 		qib_r_update(dd, BISTEN_AT) < 0 ||
7624 		qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
7625 			    portsel, NULL) < 0 ||
7626 		qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
7627 		qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
7628 		qib_r_update(dd, BISTEN_AT) < 0 ||
7629 		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
7630 		qib_r_update(dd, BISTEN_ETM) < 0)
7631 		qib_dev_err(dd, "Failed IB link recovery setup\n");
7632 }
7633 
7634 static void check_7322_rxe_status(struct qib_pportdata *ppd)
7635 {
7636 	struct qib_devdata *dd = ppd->dd;
7637 	u64 fmask;
7638 
7639 	if (dd->cspec->recovery_ports_initted != 1)
7640 		return; /* rest doesn't apply to dualport */
7641 	qib_write_kreg(dd, kr_control, dd->control |
7642 		       SYM_MASK(Control, FreezeMode));
7643 	(void)qib_read_kreg64(dd, kr_scratch);
7644 	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
7645 	fmask = qib_read_kreg64(dd, kr_act_fmask);
7646 	if (!fmask) {
7647 		/*
7648 		 * require a powercycle before we'll work again, and make
7649 		 * sure we get no more interrupts, and don't turn off
7650 		 * freeze.
7651 		 */
7652 		ppd->dd->cspec->stay_in_freeze = 1;
7653 		qib_7322_set_intr_state(ppd->dd, 0);
7654 		qib_write_kreg(dd, kr_fmask, 0ULL);
7655 		qib_dev_err(dd, "HCA unusable until powercycled\n");
7656 		return; /* eventually reset */
7657 	}
7658 
7659 	qib_write_kreg(ppd->dd, kr_hwerrclear,
7660 	    SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
7661 
7662 	/* don't do the full clear_freeze(), not needed for this */
7663 	qib_write_kreg(dd, kr_control, dd->control);
7664 	qib_read_kreg32(dd, kr_scratch);
7665 	/* take IBC out of reset */
7666 	if (ppd->link_speed_supported) {
7667 		ppd->cpspec->ibcctrl_a &=
7668 			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
7669 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
7670 				    ppd->cpspec->ibcctrl_a);
7671 		qib_read_kreg32(dd, kr_scratch);
7672 		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
7673 			qib_set_ib_7322_lstate(ppd, 0,
7674 				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
7675 	}
7676 }
7677