1 /*
2  * Copyright (c) 2012 - 2017 Intel Corporation.  All rights reserved.
3  * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 /*
35  * This file contains all of the code that is specific to the
36  * InfiniPath 7322 chip
37  */
38 
39 #include <linux/interrupt.h>
40 #include <linux/pci.h>
41 #include <linux/delay.h>
42 #include <linux/io.h>
43 #include <linux/jiffies.h>
44 #include <linux/module.h>
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_smi.h>
47 #ifdef CONFIG_INFINIBAND_QIB_DCA
48 #include <linux/dca.h>
49 #endif
50 
51 #include "qib.h"
52 #include "qib_7322_regs.h"
53 #include "qib_qsfp.h"
54 
55 #include "qib_mad.h"
56 #include "qib_verbs.h"
57 
58 #undef pr_fmt
59 #define pr_fmt(fmt) QIB_DRV_NAME " " fmt
60 
61 static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
62 static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
63 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
64 static irqreturn_t qib_7322intr(int irq, void *data);
65 static irqreturn_t qib_7322bufavail(int irq, void *data);
66 static irqreturn_t sdma_intr(int irq, void *data);
67 static irqreturn_t sdma_idle_intr(int irq, void *data);
68 static irqreturn_t sdma_progress_intr(int irq, void *data);
69 static irqreturn_t sdma_cleanup_intr(int irq, void *data);
70 static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
71 				  struct qib_ctxtdata *rcd);
72 static u8 qib_7322_phys_portstate(u64);
73 static u32 qib_7322_iblink_state(u64);
74 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
75 				   u16 linitcmd);
76 static void force_h1(struct qib_pportdata *);
77 static void adj_tx_serdes(struct qib_pportdata *);
78 static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
79 static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
80 
81 static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
82 static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
83 static void serdes_7322_los_enable(struct qib_pportdata *, int);
84 static int serdes_7322_init_old(struct qib_pportdata *);
85 static int serdes_7322_init_new(struct qib_pportdata *);
86 static void dump_sdma_7322_state(struct qib_pportdata *);
87 
88 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
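
/*
 * For example, BMASK(7, 4) expands to ((1 << (7 + 1 - 4)) - 1) << 4,
 * i.e. 0xF << 4 == 0xF0, a mask covering bits 7..4.
 */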
89 
90 /* LE2 serdes values for different cases */
91 #define LE2_DEFAULT 5
92 #define LE2_5m 4
93 #define LE2_QME 0
94 
95 /* Below is special-purpose, so only really works for the IB SerDes blocks. */
96 #define IBSD(hw_pidx) (hw_pidx + 2)
97 
98 /* these are variables for documentation and experimentation purposes */
99 static const unsigned rcv_int_timeout = 375;
100 static const unsigned rcv_int_count = 16;
101 static const unsigned sdma_idle_cnt = 64;
102 
103 /* Time to stop altering Rx Equalization parameters, after link up. */
104 #define RXEQ_DISABLE_MSECS 2500
105 
106 /*
107  * Number of VLs we are configured to use (to allow for more
108  * credits per vl, etc.)
109  */
110 ushort qib_num_cfg_vls = 2;
111 module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
112 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
113 
114 static ushort qib_chase = 1;
115 module_param_named(chase, qib_chase, ushort, S_IRUGO);
116 MODULE_PARM_DESC(chase, "Enable state chase handling");
117 
118 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
119 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
120 MODULE_PARM_DESC(long_attenuation,
121 		 "attenuation cutoff (dB) for long copper cable setup");
122 
123 static ushort qib_singleport;
124 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
125 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
126 
127 static ushort qib_krcvq01_no_msi;
128 module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
129 MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
130 
131 /*
132  * Receive header queue sizes
133  */
134 static unsigned qib_rcvhdrcnt;
135 module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
136 MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
137 
138 static unsigned qib_rcvhdrsize;
139 module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
140 MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
141 
142 static unsigned qib_rcvhdrentsize;
143 module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
144 MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
145 
146 #define MAX_ATTEN_LEN 64 /* plenty for any real system */
147 /* for read back, default index is ~5m copper cable */
148 static char txselect_list[MAX_ATTEN_LEN] = "10";
149 static struct kparam_string kp_txselect = {
150 	.string = txselect_list,
151 	.maxlen = MAX_ATTEN_LEN
152 };
153 static int  setup_txselect(const char *, const struct kernel_param *);
154 module_param_call(txselect, setup_txselect, param_get_string,
155 		  &kp_txselect, S_IWUSR | S_IRUGO);
156 MODULE_PARM_DESC(txselect,
157 		 "Tx serdes indices (for no QSFP or invalid QSFP data)");
158 
159 #define BOARD_QME7342 5
160 #define BOARD_QMH7342 6
161 #define BOARD_QMH7360 9
162 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
163 		    BOARD_QMH7342)
164 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
165 		    BOARD_QME7342)
166 
167 #define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))
168 
169 #define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
170 
171 #define MASK_ACROSS(lsb, msb) \
172 	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
173 
174 #define SYM_RMASK(regname, fldname) ((u64)              \
175 	QIB_7322_##regname##_##fldname##_RMASK)
176 
177 #define SYM_MASK(regname, fldname) ((u64)               \
178 	QIB_7322_##regname##_##fldname##_RMASK <<       \
179 	 QIB_7322_##regname##_##fldname##_LSB)
180 
181 #define SYM_FIELD(value, regname, fldname) ((u64)	\
182 	(((value) >> SYM_LSB(regname, fldname)) &	\
183 	 SYM_RMASK(regname, fldname)))
184 
185 /* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
186 #define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
187 	(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
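
/*
 * As an illustration of the SYM_* helpers (the numbers are made up, not
 * a real field): if a field's generated constants were ..._LSB == 5 and
 * ..._RMASK == 0x3, then SYM_MASK() would be 0x3 << 5 == 0x60 and
 * SYM_FIELD(value, ...) would evaluate to (value >> 5) & 0x3.
 */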
188 
189 #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
190 #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
191 #define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
192 #define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
193 #define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
194 /* Below because most, but not all, fields of IntMask have that full suffix */
195 #define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
196 
197 
198 #define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
199 
200 /*
201  * the size bits give us 2^N, in KB units.  0 marks as invalid,
202  * and 7 is reserved.  We currently use only 2KB and 4KB
203  */
204 #define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
205 #define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
206 #define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
207 #define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
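
/*
 * Rough sketch of how a TID entry is composed from the above (the real
 * update is done in qib_7322_put_tid(), later in this file): the buffer's
 * physical address is stored with its low IBA7322_TID_PA_SHIFT bits
 * dropped, OR'd with the size code, e.g. for a 4KB buffer at physical
 * address pa, approximately:
 *
 *	tidentry = IBA7322_TID_SZ_4K | (pa >> IBA7322_TID_PA_SHIFT);
 */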
208 
209 #define SendIBSLIDAssignMask \
210 	QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
211 #define SendIBSLMCMask \
212 	QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
213 
214 #define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
215 #define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
216 #define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
217 #define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
218 #define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
219 #define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
220 
221 #define _QIB_GPIO_SDA_NUM 1
222 #define _QIB_GPIO_SCL_NUM 0
223 #define QIB_EEPROM_WEN_NUM 14
224 #define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
225 
226 /* HW counter clock is at 4nsec */
227 #define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
228 
229 /* full speed IB port 1 only */
230 #define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
231 #define PORT_SPD_CAP_SHIFT 3
232 
233 /* full speed featuremask, both ports */
234 #define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
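
/*
 * Assuming the usual one-bit-per-speed encoding from qib.h (QIB_IB_SDR,
 * QIB_IB_DDR, QIB_IB_QDR as 1, 2, 4), PORT_SPD_CAP is 0x7 in the low
 * three bits for port 1, and DUAL_PORT_CAP is 0x3f, with port 2's speed
 * bits shifted up by PORT_SPD_CAP_SHIFT.
 */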
235 
/*
 * This file contains almost all the chip-specific register information and
 * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
 */
240 
241 /* Use defines to tie machine-generated names to lower-case names */
242 #define kr_contextcnt KREG_IDX(ContextCnt)
243 #define kr_control KREG_IDX(Control)
244 #define kr_counterregbase KREG_IDX(CntrRegBase)
245 #define kr_errclear KREG_IDX(ErrClear)
246 #define kr_errmask KREG_IDX(ErrMask)
247 #define kr_errstatus KREG_IDX(ErrStatus)
248 #define kr_extctrl KREG_IDX(EXTCtrl)
249 #define kr_extstatus KREG_IDX(EXTStatus)
250 #define kr_gpio_clear KREG_IDX(GPIOClear)
251 #define kr_gpio_mask KREG_IDX(GPIOMask)
252 #define kr_gpio_out KREG_IDX(GPIOOut)
253 #define kr_gpio_status KREG_IDX(GPIOStatus)
254 #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
255 #define kr_debugportval KREG_IDX(DebugPortValueReg)
256 #define kr_fmask KREG_IDX(feature_mask)
257 #define kr_act_fmask KREG_IDX(active_feature_mask)
258 #define kr_hwerrclear KREG_IDX(HwErrClear)
259 #define kr_hwerrmask KREG_IDX(HwErrMask)
260 #define kr_hwerrstatus KREG_IDX(HwErrStatus)
261 #define kr_intclear KREG_IDX(IntClear)
262 #define kr_intmask KREG_IDX(IntMask)
263 #define kr_intredirect KREG_IDX(IntRedirect0)
264 #define kr_intstatus KREG_IDX(IntStatus)
265 #define kr_pagealign KREG_IDX(PageAlign)
266 #define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
267 #define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
268 #define kr_rcvegrbase KREG_IDX(RcvEgrBase)
269 #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
270 #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
271 #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
272 #define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
273 #define kr_rcvtidbase KREG_IDX(RcvTIDBase)
274 #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
275 #define kr_revision KREG_IDX(Revision)
276 #define kr_scratch KREG_IDX(Scratch)
277 #define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
278 #define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
279 #define kr_sendctrl KREG_IDX(SendCtrl)
280 #define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
281 #define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
282 #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
283 #define kr_sendpiobufbase KREG_IDX(SendBufBase)
284 #define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
285 #define kr_sendpiosize KREG_IDX(SendBufSize)
286 #define kr_sendregbase KREG_IDX(SendRegBase)
287 #define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
288 #define kr_userregbase KREG_IDX(UserRegBase)
289 #define kr_intgranted KREG_IDX(Int_Granted)
290 #define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
291 #define kr_intblocked KREG_IDX(IntBlocked)
292 #define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
293 
294 /*
295  * per-port kernel registers.  Access only with qib_read_kreg_port()
296  * or qib_write_kreg_port()
297  */
298 #define krp_errclear KREG_IBPORT_IDX(ErrClear)
299 #define krp_errmask KREG_IBPORT_IDX(ErrMask)
300 #define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
301 #define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
302 #define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
303 #define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
304 #define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
305 #define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
306 #define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
307 #define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
308 #define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
309 #define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
310 #define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
311 #define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
312 #define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
313 #define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
314 #define krp_psinterval KREG_IBPORT_IDX(PSInterval)
315 #define krp_psstart KREG_IBPORT_IDX(PSStart)
316 #define krp_psstat KREG_IBPORT_IDX(PSStat)
317 #define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
318 #define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
319 #define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
320 #define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
321 #define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
322 #define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
323 #define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
324 #define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
325 #define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
326 #define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
327 #define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
328 #define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
329 #define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
330 #define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
331 #define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
332 #define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
333 #define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
334 #define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
335 #define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
336 #define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
337 #define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
338 #define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
339 #define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
340 #define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
341 #define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
342 #define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
343 #define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
344 #define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
345 #define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
346 #define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
347 #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
348 
349 /*
350  * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
351  * or qib_write_kreg_ctxt()
352  */
353 #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
354 #define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
355 
/*
 * TID flow table, per context.  Reduces the number of hdrq updates to
 * one per flow (or on errors).  Contexts 0 and 1 share the same memory,
 * but have distinct addresses.  Since for now we never use expected
 * sends on kernel contexts, we don't worry about that (we initialize
 * those entries for ctxt 0/1 on driver load twice, for example).
 */
364 #define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
365 #define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
366 
367 /* these are the error bits in the tid flows, and are W1C */
368 #define TIDFLOW_ERRBITS  ( \
369 	(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
370 	SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
371 	(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
372 	SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
373 
/* Most (not all) counters are per IB port.
 * Requires that LBIntCnt be at offset 0 in the group
 */
377 #define CREG_IDX(regname) \
378 ((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
379 
380 #define crp_badformat CREG_IDX(RxVersionErrCnt)
381 #define crp_err_rlen CREG_IDX(RxLenErrCnt)
382 #define crp_erricrc CREG_IDX(RxICRCErrCnt)
383 #define crp_errlink CREG_IDX(RxLinkMalformCnt)
384 #define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
385 #define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
386 #define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
387 #define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
388 #define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
389 #define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
390 #define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
391 #define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
392 #define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
393 #define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
394 #define crp_pktrcv CREG_IDX(RxDataPktCnt)
395 #define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
396 #define crp_pktsend CREG_IDX(TxDataPktCnt)
397 #define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
398 #define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
399 #define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
400 #define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
401 #define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
402 #define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
403 #define crp_rcvebp CREG_IDX(RxEBPCnt)
404 #define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
405 #define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
406 #define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
407 #define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
408 #define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
409 #define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
410 #define crp_rxvlerr CREG_IDX(RxVlErrCnt)
411 #define crp_sendstall CREG_IDX(TxFlowStallCnt)
412 #define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
413 #define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
414 #define crp_txlenerr CREG_IDX(TxLenErrCnt)
415 #define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
416 #define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
417 #define crp_txunderrun CREG_IDX(TxUnderrunCnt)
418 #define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
419 #define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
420 #define crp_wordrcv CREG_IDX(RxDwordCnt)
421 #define crp_wordsend CREG_IDX(TxDwordCnt)
422 #define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
423 
424 /* these are the (few) counters that are not port-specific */
425 #define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
426 			QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
427 #define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
428 #define cr_lbint CREG_DEVIDX(LBIntCnt)
429 #define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
430 #define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
431 #define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
432 #define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
433 #define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
434 
435 /* no chip register for # of IB ports supported, so define */
436 #define NUM_IB_PORTS 2
437 
438 /* 1 VL15 buffer per hardware IB port, no register for this, so define */
439 #define NUM_VL15_BUFS NUM_IB_PORTS
440 
441 /*
442  * context 0 and 1 are special, and there is no chip register that
443  * defines this value, so we have to define it here.
444  * These are all allocated to either 0 or 1 for single port
445  * hardware configuration, otherwise each gets half
446  */
447 #define KCTXT0_EGRCNT 2048
448 
449 /* values for vl and port fields in PBC, 7322-specific */
450 #define PBC_PORT_SEL_LSB 26
451 #define PBC_PORT_SEL_RMASK 1
452 #define PBC_VL_NUM_LSB 27
453 #define PBC_VL_NUM_RMASK 7
454 #define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
455 #define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
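
/*
 * For example, flush_fifo() later in this file builds the PBC for a
 * dummy VL15 send as
 *
 *	pbc = PBC_7322_VL15_SEND |
 *		(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
 *		(hdrwords + SIZE_OF_CRC);
 *
 * i.e. the VL15/no-credit-check bit, the port select field (in the
 * upper 32-bit PBC word), and the packet length in 32-bit words.
 */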
456 
457 static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
458 	[IB_RATE_2_5_GBPS] = 16,
459 	[IB_RATE_5_GBPS] = 8,
460 	[IB_RATE_10_GBPS] = 4,
461 	[IB_RATE_20_GBPS] = 2,
462 	[IB_RATE_30_GBPS] = 2,
463 	[IB_RATE_40_GBPS] = 1
464 };
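
/*
 * The delay factors above scale inversely with the nominal rate:
 * 40 / 2.5 == 16, 40 / 5 == 8, 40 / 10 == 4, 40 / 20 == 2, 40 / 40 == 1,
 * with 30Gbps rounded up to 2; each entry is roughly the per-packet
 * delay relative to a 40Gbps path.
 */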
465 
466 #define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
467 #define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
468 
469 /* link training states, from IBC */
470 #define IB_7322_LT_STATE_DISABLED        0x00
471 #define IB_7322_LT_STATE_LINKUP          0x01
472 #define IB_7322_LT_STATE_POLLACTIVE      0x02
473 #define IB_7322_LT_STATE_POLLQUIET       0x03
474 #define IB_7322_LT_STATE_SLEEPDELAY      0x04
475 #define IB_7322_LT_STATE_SLEEPQUIET      0x05
476 #define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
477 #define IB_7322_LT_STATE_CFGRCVFCFG      0x09
478 #define IB_7322_LT_STATE_CFGWAITRMT      0x0a
479 #define IB_7322_LT_STATE_CFGIDLE         0x0b
480 #define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
481 #define IB_7322_LT_STATE_TXREVLANES      0x0d
482 #define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
483 #define IB_7322_LT_STATE_RECOVERIDLE     0x0f
484 #define IB_7322_LT_STATE_CFGENH          0x10
485 #define IB_7322_LT_STATE_CFGTEST         0x11
486 #define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
487 #define IB_7322_LT_STATE_CFGWAITENH      0x13
488 
489 /* link state machine states from IBC */
490 #define IB_7322_L_STATE_DOWN             0x0
491 #define IB_7322_L_STATE_INIT             0x1
492 #define IB_7322_L_STATE_ARM              0x2
493 #define IB_7322_L_STATE_ACTIVE           0x3
494 #define IB_7322_L_STATE_ACT_DEFER        0x4
495 
496 static const u8 qib_7322_physportstate[0x20] = {
497 	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
498 	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
499 	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
500 	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
501 	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
502 	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
503 	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
504 	[IB_7322_LT_STATE_CFGRCVFCFG] =
505 		IB_PHYSPORTSTATE_CFG_TRAIN,
506 	[IB_7322_LT_STATE_CFGWAITRMT] =
507 		IB_PHYSPORTSTATE_CFG_TRAIN,
508 	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
509 	[IB_7322_LT_STATE_RECOVERRETRAIN] =
510 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
511 	[IB_7322_LT_STATE_RECOVERWAITRMT] =
512 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
513 	[IB_7322_LT_STATE_RECOVERIDLE] =
514 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
515 	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
516 	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
517 	[IB_7322_LT_STATE_CFGWAITRMTTEST] =
518 		IB_PHYSPORTSTATE_CFG_TRAIN,
519 	[IB_7322_LT_STATE_CFGWAITENH] =
520 		IB_PHYSPORTSTATE_CFG_WAIT_ENH,
521 	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
522 	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
523 	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
524 	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
525 };
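
/*
 * A minimal sketch of how this table is consumed (see
 * qib_7322_phys_portstate(), declared above and defined later in this
 * file): the link training state field of IBCStatusA indexes the array
 * directly, roughly
 *
 *	state = SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
 *	return qib_7322_physportstate[state];
 */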
526 
527 #ifdef CONFIG_INFINIBAND_QIB_DCA
528 struct qib_irq_notify {
529 	int rcv;
530 	void *arg;
531 	struct irq_affinity_notify notify;
532 };
533 #endif
534 
535 struct qib_chip_specific {
536 	u64 __iomem *cregbase;
537 	u64 *cntrs;
538 	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
539 	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
540 	u64 main_int_mask;      /* clear bits which have dedicated handlers */
541 	u64 int_enable_mask;  /* for per port interrupts in single port mode */
542 	u64 errormask;
543 	u64 hwerrmask;
544 	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
545 	u64 gpio_mask; /* shadow the gpio mask register */
546 	u64 extctrl; /* shadow the gpio output enable, etc... */
547 	u32 ncntrs;
548 	u32 nportcntrs;
549 	u32 cntrnamelen;
550 	u32 portcntrnamelen;
551 	u32 numctxts;
552 	u32 rcvegrcnt;
553 	u32 updthresh; /* current AvailUpdThld */
554 	u32 updthresh_dflt; /* default AvailUpdThld */
555 	u32 r1;
556 	u32 num_msix_entries;
557 	u32 sdmabufcnt;
558 	u32 lastbuf_for_pio;
559 	u32 stay_in_freeze;
560 	u32 recovery_ports_initted;
561 #ifdef CONFIG_INFINIBAND_QIB_DCA
562 	u32 dca_ctrl;
563 	int rhdr_cpu[18];
564 	int sdma_cpu[2];
565 	u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
566 #endif
567 	struct qib_msix_entry *msix_entries;
568 	unsigned long *sendchkenable;
569 	unsigned long *sendgrhchk;
570 	unsigned long *sendibchk;
571 	u32 rcvavail_timeout[18];
572 	char emsgbuf[128]; /* for device error interrupt msg buffer */
573 };
574 
/* Table of Tx emphasis entries, in "human readable" form. */
576 struct txdds_ent {
577 	u8 amp;
578 	u8 pre;
579 	u8 main;
580 	u8 post;
581 };
582 
583 struct vendor_txdds_ent {
584 	u8 oui[QSFP_VOUI_LEN];
585 	u8 *partnum;
586 	struct txdds_ent sdr;
587 	struct txdds_ent ddr;
588 	struct txdds_ent qdr;
589 };
590 
591 static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
592 
593 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
594 #define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
595 #define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
596 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
597 
598 #define H1_FORCE_VAL 8
599 #define H1_FORCE_QME 1 /*  may be overridden via setup_txselect() */
600 #define H1_FORCE_QMH 7 /*  may be overridden via setup_txselect() */
601 
602 /* The static and dynamic registers are paired, and the pairs indexed by spd */
603 #define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
604 	+ ((spd) * 2))
605 
606 #define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
607 #define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
608 #define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
609 #define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
610 #define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
611 
612 struct qib_chippport_specific {
613 	u64 __iomem *kpregbase;
614 	u64 __iomem *cpregbase;
615 	u64 *portcntrs;
616 	struct qib_pportdata *ppd;
617 	wait_queue_head_t autoneg_wait;
618 	struct delayed_work autoneg_work;
619 	struct delayed_work ipg_work;
620 	struct timer_list chase_timer;
	/*
	 * These 5 fields are used to establish deltas for IB symbol
	 * errors and link recovery errors.  They can be reported on
	 * some chips during link negotiation prior to INIT, and with
	 * DDR when faking DDR negotiations with non-IBTA switches.
	 * The chip counters are adjusted at driver unload if there is
	 * a non-zero delta.
	 */
629 	u64 ibdeltainprog;
630 	u64 ibsymdelta;
631 	u64 ibsymsnap;
632 	u64 iblnkerrdelta;
633 	u64 iblnkerrsnap;
634 	u64 iblnkdownsnap;
635 	u64 iblnkdowndelta;
636 	u64 ibmalfdelta;
637 	u64 ibmalfsnap;
638 	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
639 	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
640 	unsigned long qdr_dfe_time;
641 	unsigned long chase_end;
642 	u32 autoneg_tries;
643 	u32 recovery_init;
644 	u32 qdr_dfe_on;
645 	u32 qdr_reforce;
646 	/*
647 	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
648 	 * entry zero is unused, to simplify indexing
649 	 */
650 	u8 h1_val;
651 	u8 no_eep;  /* txselect table index to use if no qsfp info */
652 	u8 ipg_tries;
653 	u8 ibmalfusesnap;
654 	struct qib_qsfp_data qsfp_data;
655 	char epmsgbuf[192]; /* for port error interrupt msg buffer */
656 	char sdmamsgbuf[192]; /* for per-port sdma error messages */
657 };
658 
659 static struct {
660 	const char *name;
661 	irq_handler_t handler;
662 	int lsb;
663 	int port; /* 0 if not port-specific, else port # */
664 	int dca;
665 } irq_table[] = {
666 	{ "", qib_7322intr, -1, 0, 0 },
667 	{ " (buf avail)", qib_7322bufavail,
668 		SYM_LSB(IntStatus, SendBufAvail), 0, 0},
669 	{ " (sdma 0)", sdma_intr,
670 		SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
671 	{ " (sdma 1)", sdma_intr,
672 		SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
673 	{ " (sdmaI 0)", sdma_idle_intr,
674 		SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1},
675 	{ " (sdmaI 1)", sdma_idle_intr,
676 		SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1},
677 	{ " (sdmaP 0)", sdma_progress_intr,
678 		SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
679 	{ " (sdmaP 1)", sdma_progress_intr,
680 		SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
681 	{ " (sdmaC 0)", sdma_cleanup_intr,
682 		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
683 	{ " (sdmaC 1)", sdma_cleanup_intr,
684 		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0},
685 };
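
/*
 * Rough sketch of how this table is used when the MSI-X vectors are set
 * up (later in this file): each entry's .handler is requested under a
 * name built from the driver name plus the .name suffix; .port selects
 * whether the handler argument is the devdata or a specific pportdata;
 * .lsb is used to program the IntRedirect registers so the source
 * reaches its dedicated vector; and .dca marks vectors that participate
 * in DCA affinity notification.
 */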
686 
687 #ifdef CONFIG_INFINIBAND_QIB_DCA
688 
689 static const struct dca_reg_map {
690 	int     shadow_inx;
691 	int     lsb;
692 	u64     mask;
693 	u16     regno;
694 } dca_rcvhdr_reg_map[] = {
695 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
696 	   ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
697 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
698 	   ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
699 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
700 	   ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
701 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
702 	   ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
703 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
704 	   ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
705 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
706 	   ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
707 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
708 	   ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
709 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
710 	   ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
711 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
712 	   ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
713 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
714 	   ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
715 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
716 	   ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
717 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
718 	   ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
719 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
720 	   ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
721 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
722 	   ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
723 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
724 	   ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
725 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
726 	   ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
727 	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
728 	   ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
729 	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
730 	   ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
731 };
732 #endif
733 
734 /* ibcctrl bits */
735 #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
736 /* cycle through TS1/TS2 till OK */
737 #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
738 /* wait for TS1, then go on */
739 #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
740 #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
741 
742 #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
743 #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
744 #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
745 
746 #define BLOB_7322_IBCHG 0x101
747 
748 static inline void qib_write_kreg(const struct qib_devdata *dd,
749 				  const u32 regno, u64 value);
750 static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
751 static void write_7322_initregs(struct qib_devdata *);
752 static void write_7322_init_portregs(struct qib_pportdata *);
753 static void setup_7322_link_recovery(struct qib_pportdata *, u32);
754 static void check_7322_rxe_status(struct qib_pportdata *);
755 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
756 #ifdef CONFIG_INFINIBAND_QIB_DCA
757 static void qib_setup_dca(struct qib_devdata *dd);
758 static void setup_dca_notifier(struct qib_devdata *dd, int msixnum);
759 static void reset_dca_notifier(struct qib_devdata *dd, int msixnum);
760 #endif
761 
762 /**
763  * qib_read_ureg32 - read 32-bit virtualized per-context register
764  * @dd: device
765  * @regno: register number
766  * @ctxt: context number
767  *
768  * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on errors (not distinguishable from valid contents at
 * runtime; we may add a separate error variable at some point).
771  */
772 static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
773 				  enum qib_ureg regno, int ctxt)
774 {
775 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
776 		return 0;
777 	return readl(regno + (u64 __iomem *)(
778 		(dd->ureg_align * ctxt) + (dd->userbase ?
779 		 (char __iomem *)dd->userbase :
780 		 (char __iomem *)dd->kregbase + dd->uregbase)));
781 }
782 
783 /**
784  * qib_read_ureg - read virtualized per-context register
785  * @dd: device
786  * @regno: register number
787  * @ctxt: context number
788  *
789  * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on errors (not distinguishable from valid contents at
 * runtime; we may add a separate error variable at some point).
792  */
793 static inline u64 qib_read_ureg(const struct qib_devdata *dd,
794 				enum qib_ureg regno, int ctxt)
795 {
796 
797 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
798 		return 0;
799 	return readq(regno + (u64 __iomem *)(
800 		(dd->ureg_align * ctxt) + (dd->userbase ?
801 		 (char __iomem *)dd->userbase :
802 		 (char __iomem *)dd->kregbase + dd->uregbase)));
803 }
804 
805 /**
806  * qib_write_ureg - write virtualized per-context register
807  * @dd: device
808  * @regno: register number
809  * @value: value
810  * @ctxt: context
811  *
812  * Write the contents of a register that is virtualized to be per context.
813  */
814 static inline void qib_write_ureg(const struct qib_devdata *dd,
815 				  enum qib_ureg regno, u64 value, int ctxt)
816 {
817 	u64 __iomem *ubase;
818 
819 	if (dd->userbase)
820 		ubase = (u64 __iomem *)
821 			((char __iomem *) dd->userbase +
822 			 dd->ureg_align * ctxt);
823 	else
824 		ubase = (u64 __iomem *)
825 			(dd->uregbase +
826 			 (char __iomem *) dd->kregbase +
827 			 dd->ureg_align * ctxt);
828 
829 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
830 		writeq(value, &ubase[regno]);
831 }
832 
833 static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
834 				  const u32 regno)
835 {
836 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
837 		return -1;
838 	return readl((u32 __iomem *) &dd->kregbase[regno]);
839 }
840 
841 static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
842 				  const u32 regno)
843 {
844 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
845 		return -1;
846 	return readq(&dd->kregbase[regno]);
847 }
848 
849 static inline void qib_write_kreg(const struct qib_devdata *dd,
850 				  const u32 regno, u64 value)
851 {
852 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
853 		writeq(value, &dd->kregbase[regno]);
854 }
855 
/*
 * Not many sanity checks for the port-specific kernel register routines,
 * since they are only used when it's known to be safe.
 */
860 static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
861 				     const u16 regno)
862 {
863 	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
864 		return 0ULL;
865 	return readq(&ppd->cpspec->kpregbase[regno]);
866 }
867 
868 static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
869 				       const u16 regno, u64 value)
870 {
871 	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
872 	    (ppd->dd->flags & QIB_PRESENT))
873 		writeq(value, &ppd->cpspec->kpregbase[regno]);
874 }
875 
876 /**
877  * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
878  * @dd: the qlogic_ib device
879  * @regno: the register number to write
880  * @ctxt: the context containing the register
881  * @value: the value to write
882  */
883 static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
884 				       const u16 regno, unsigned ctxt,
885 				       u64 value)
886 {
887 	qib_write_kreg(dd, regno + ctxt, value);
888 }
889 
static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(&dd->cspec->cregbase[regno]);
}
898 
static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(&dd->cspec->cregbase[regno]);
}
907 
908 static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
909 					u16 regno, u64 value)
910 {
911 	if (ppd->cpspec && ppd->cpspec->cpregbase &&
912 	    (ppd->dd->flags & QIB_PRESENT))
913 		writeq(value, &ppd->cpspec->cpregbase[regno]);
914 }
915 
916 static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
917 				      u16 regno)
918 {
919 	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
920 	    !(ppd->dd->flags & QIB_PRESENT))
921 		return 0;
922 	return readq(&ppd->cpspec->cpregbase[regno]);
923 }
924 
925 static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
926 					u16 regno)
927 {
928 	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
929 	    !(ppd->dd->flags & QIB_PRESENT))
930 		return 0;
931 	return readl(&ppd->cpspec->cpregbase[regno]);
932 }
933 
934 /* bits in Control register */
935 #define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
936 #define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
937 
938 /* bits in general interrupt regs */
939 #define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
940 #define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
941 #define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
942 #define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
943 #define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
944 #define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
945 #define QIB_I_C_ERROR INT_MASK(Err)
946 
947 #define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
948 #define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
949 #define QIB_I_GPIO INT_MASK(AssertGPIO)
950 #define QIB_I_P_SDMAINT(pidx) \
951 	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
952 	 INT_MASK_P(SDmaProgress, pidx) | \
953 	 INT_MASK_PM(SDmaCleanupDone, pidx))
954 
955 /* Interrupt bits that are "per port" */
956 #define QIB_I_P_BITSEXTANT(pidx) \
957 	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
958 	INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
959 	INT_MASK_P(SDmaProgress, pidx) | \
960 	INT_MASK_PM(SDmaCleanupDone, pidx))
961 
962 /* Interrupt bits that are common to a device */
963 /* currently unused: QIB_I_SPIOSENT */
964 #define QIB_I_C_BITSEXTANT \
965 	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
966 	QIB_I_SPIOSENT | \
967 	QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
968 
969 #define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
970 	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
971 
972 /*
973  * Error bits that are "per port".
974  */
975 #define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
976 #define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
977 #define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
978 #define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
979 #define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
980 #define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
981 #define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
982 #define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
983 #define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
984 #define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
985 #define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
986 #define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
987 #define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
988 #define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
989 #define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
990 #define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
991 #define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
992 #define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
993 #define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
994 #define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
995 #define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
996 #define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
997 #define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
998 #define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
999 #define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
1000 #define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
1001 #define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
1002 #define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
1003 
1004 #define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
1005 #define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
1006 #define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
1007 #define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
1008 #define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
1009 #define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
1010 #define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
1011 #define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
1012 #define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
1013 #define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
1014 #define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
1015 
1016 /* Error bits that are common to a device */
1017 #define QIB_E_RESET ERR_MASK(ResetNegated)
1018 #define QIB_E_HARDWARE ERR_MASK(HardwareErr)
1019 #define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
1020 
1021 
/*
 * Per chip (rather than per-port) errors.  Most either do
 * nothing but trigger a print (because they self-recover, or
 * always occur in tandem with other errors that handle the
 * issue), or indicate errors with no recovery, but where we
 * still want to know that they happened.
 */
1029 #define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
1030 #define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
1031 #define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
1032 #define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
1033 #define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
1034 #define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
1035 #define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
1036 #define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
1037 
1038 /* SDMA chip errors (not per port)
1039  * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
1040  * the SDMAHALT error immediately, so we just print the dup error via the
1041  * E_AUTO mechanism.  This is true of most of the per-port fatal errors
1042  * as well, but since this is port-independent, by definition, it's
1043  * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
1044  * packet send errors, and so are handled in the same manner as other
1045  * per-packet errors.
1046  */
1047 #define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
1048 #define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
1049 #define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
1050 
/*
 * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
 * it is used to print "common" packet errors.
 */
1055 #define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
1056 	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
1057 	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
1058 	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1059 	QIB_E_P_REBP)
1060 
/* Error bits that are packet-related (Receive, per-port) */
1062 #define QIB_E_P_RPKTERRS (\
1063 	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
1064 	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
1065 	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
1066 	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
1067 	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
1068 	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
1069 
1070 /*
1071  * Error bits that are Send-related (per port)
1072  * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
1073  * All of these potentially need to have a buffer disarmed
1074  */
1075 #define QIB_E_P_SPKTERRS (\
1076 	QIB_E_P_SUNEXP_PKTNUM |\
1077 	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1078 	QIB_E_P_SMAXPKTLEN |\
1079 	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1080 	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
1081 	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
1082 
1083 #define QIB_E_SPKTERRS ( \
1084 		QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
1085 		ERR_MASK_N(SendUnsupportedVLErr) |			\
1086 		QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
1087 
1088 #define QIB_E_P_SDMAERRS ( \
1089 	QIB_E_P_SDMAHALT | \
1090 	QIB_E_P_SDMADESCADDRMISALIGN | \
1091 	QIB_E_P_SDMAUNEXPDATA | \
1092 	QIB_E_P_SDMAMISSINGDW | \
1093 	QIB_E_P_SDMADWEN | \
1094 	QIB_E_P_SDMARPYTAG | \
1095 	QIB_E_P_SDMA1STDESC | \
1096 	QIB_E_P_SDMABASE | \
1097 	QIB_E_P_SDMATAILOUTOFBOUND | \
1098 	QIB_E_P_SDMAOUTOFBOUND | \
1099 	QIB_E_P_SDMAGENMISMATCH)
1100 
1101 /*
1102  * This sets some bits more than once, but makes it more obvious which
1103  * bits are not handled under other categories, and the repeat definition
1104  * is not a problem.
1105  */
1106 #define QIB_E_P_BITSEXTANT ( \
1107 	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1108 	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1109 	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1110 	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1111 	)
1112 
/*
 * These are errors that can occur when the link changes state while
 * a packet is being sent or received.  This doesn't cover things like
 * EBP or VCRC that can be the result of the sender having the link
 * change state, so we receive a "known bad" packet.
 * All of these are "per port", so renamed:
 */
1120 #define QIB_E_P_LINK_PKTERRS (\
1121 	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1122 	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1123 	QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1124 	QIB_E_P_RUNEXPCHAR)
1125 
1126 /*
1127  * This sets some bits more than once, but makes it more obvious which
1128  * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1129  * and the repeat definition is not a problem.
1130  */
1131 #define QIB_E_C_BITSEXTANT (\
1132 	QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1133 	QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1134 	QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1135 
/* Likewise, neuter E_SPKT_ERRS_IGNORE (no send packet errors are ignored) */
1137 #define E_SPKT_ERRS_IGNORE 0
1138 
1139 #define QIB_EXTS_MEMBIST_DISABLED \
1140 	SYM_MASK(EXTStatus, MemBISTDisabled)
1141 #define QIB_EXTS_MEMBIST_ENDTEST \
1142 	SYM_MASK(EXTStatus, MemBISTEndTest)
1143 
1144 #define QIB_E_SPIOARMLAUNCH \
1145 	ERR_MASK(SendArmLaunchErr)
1146 
1147 #define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1148 #define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1149 
1150 /*
1151  * IBTA_1_2 is set when multiple speeds are enabled (normal),
1152  * and also if forced QDR (only QDR enabled).  It's enabled for the
1153  * forced QDR case so that scrambling will be enabled by the TS3
1154  * exchange, when supported by both sides of the link.
1155  */
1156 #define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1157 #define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1158 #define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1159 #define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1160 #define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1161 #define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1162 	SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1163 #define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1164 
1165 #define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1166 #define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1167 
1168 #define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1169 #define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1170 #define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1171 
1172 #define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1173 #define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1174 #define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1175 	SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1176 #define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1177 	SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1178 #define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1179 
1180 #define IBA7322_REDIRECT_VEC_PER_REG 12
1181 
1182 #define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1183 #define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1184 #define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1185 #define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1186 #define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1187 
1188 #define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1189 
1190 #define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1191 	.msg = #fldname , .sz = sizeof(#fldname) }
1192 #define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1193 	fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
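
/*
 * For example, HWE_AUTO(MemoryErr) expands to
 *
 *	{ .mask = SYM_MASK(HwErrMask, MemoryErrMask),
 *	  .msg = "MemoryErr", .sz = sizeof("MemoryErr") }
 *
 * so err_decode() below can print the field name when that bit is set.
 */
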
1194 static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1195 	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1196 	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1197 	HWE_AUTO(PCIESerdesPClkNotDetect),
1198 	HWE_AUTO(PowerOnBISTFailed),
1199 	HWE_AUTO(TempsenseTholdReached),
1200 	HWE_AUTO(MemoryErr),
1201 	HWE_AUTO(PCIeBusParityErr),
1202 	HWE_AUTO(PcieCplTimeout),
1203 	HWE_AUTO(PciePoisonedTLP),
1204 	HWE_AUTO_P(SDmaMemReadErr, 1),
1205 	HWE_AUTO_P(SDmaMemReadErr, 0),
1206 	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1207 	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
1208 	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1209 	HWE_AUTO(statusValidNoEop),
1210 	HWE_AUTO(LATriggered),
1211 	{ .mask = 0, .sz = 0 }
1212 };
1213 
1214 #define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1215 	.msg = #fldname, .sz = sizeof(#fldname) }
1216 #define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1217 	.msg = #fldname, .sz = sizeof(#fldname) }
1218 static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1219 	E_AUTO(RcvEgrFullErr),
1220 	E_AUTO(RcvHdrFullErr),
1221 	E_AUTO(ResetNegated),
1222 	E_AUTO(HardwareErr),
1223 	E_AUTO(InvalidAddrErr),
1224 	E_AUTO(SDmaVL15Err),
1225 	E_AUTO(SBufVL15MisUseErr),
1226 	E_AUTO(InvalidEEPCmd),
1227 	E_AUTO(RcvContextShareErr),
1228 	E_AUTO(SendVLMismatchErr),
1229 	E_AUTO(SendArmLaunchErr),
1230 	E_AUTO(SendSpecialTriggerErr),
1231 	E_AUTO(SDmaWrongPortErr),
1232 	E_AUTO(SDmaBufMaskDuplicateErr),
1233 	{ .mask = 0, .sz = 0 }
1234 };
1235 
1236 static const struct  qib_hwerror_msgs qib_7322p_error_msgs[] = {
1237 	E_P_AUTO(IBStatusChanged),
1238 	E_P_AUTO(SHeadersErr),
1239 	E_P_AUTO(VL15BufMisuseErr),
	/*
	 * SDmaHaltErr is not really an error, so report it under a clearer name.
	 */
1243 	{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
1244 		.sz = 11},
1245 	E_P_AUTO(SDmaDescAddrMisalignErr),
1246 	E_P_AUTO(SDmaUnexpDataErr),
1247 	E_P_AUTO(SDmaMissingDwErr),
1248 	E_P_AUTO(SDmaDwEnErr),
1249 	E_P_AUTO(SDmaRpyTagErr),
1250 	E_P_AUTO(SDma1stDescErr),
1251 	E_P_AUTO(SDmaBaseErr),
1252 	E_P_AUTO(SDmaTailOutOfBoundErr),
1253 	E_P_AUTO(SDmaOutOfBoundErr),
1254 	E_P_AUTO(SDmaGenMismatchErr),
1255 	E_P_AUTO(SendBufMisuseErr),
1256 	E_P_AUTO(SendUnsupportedVLErr),
1257 	E_P_AUTO(SendUnexpectedPktNumErr),
1258 	E_P_AUTO(SendDroppedDataPktErr),
1259 	E_P_AUTO(SendDroppedSmpPktErr),
1260 	E_P_AUTO(SendPktLenErr),
1261 	E_P_AUTO(SendUnderRunErr),
1262 	E_P_AUTO(SendMaxPktLenErr),
1263 	E_P_AUTO(SendMinPktLenErr),
1264 	E_P_AUTO(RcvIBLostLinkErr),
1265 	E_P_AUTO(RcvHdrErr),
1266 	E_P_AUTO(RcvHdrLenErr),
1267 	E_P_AUTO(RcvBadTidErr),
1268 	E_P_AUTO(RcvBadVersionErr),
1269 	E_P_AUTO(RcvIBFlowErr),
1270 	E_P_AUTO(RcvEBPErr),
1271 	E_P_AUTO(RcvUnsupportedVLErr),
1272 	E_P_AUTO(RcvUnexpectedCharErr),
1273 	E_P_AUTO(RcvShortPktLenErr),
1274 	E_P_AUTO(RcvLongPktLenErr),
1275 	E_P_AUTO(RcvMaxPktLenErr),
1276 	E_P_AUTO(RcvMinPktLenErr),
1277 	E_P_AUTO(RcvICRCErr),
1278 	E_P_AUTO(RcvVCRCErr),
1279 	E_P_AUTO(RcvFormatErr),
1280 	{ .mask = 0, .sz = 0 }
1281 };
1282 
1283 /*
1284  * Below generates "auto-message" for interrupts not specific to any port or
1285  * context
1286  */
1287 #define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1288 	.msg = #fldname, .sz = sizeof(#fldname) }
1289 /* Below generates "auto-message" for interrupts specific to a port */
1290 #define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1291 	SYM_LSB(IntMask, fldname##Mask##_0), \
1292 	SYM_LSB(IntMask, fldname##Mask##_1)), \
1293 	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1294 /* For some reason, the SerDesTrimDone bits are reversed */
1295 #define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1296 	SYM_LSB(IntMask, fldname##Mask##_1), \
1297 	SYM_LSB(IntMask, fldname##Mask##_0)), \
1298 	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1299 /*
1300  * Below generates "auto-message" for interrupts specific to a context,
1301  * with ctxt-number appended
1302  */
1303 #define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1304 	SYM_LSB(IntMask, fldname##0IntMask), \
1305 	SYM_LSB(IntMask, fldname##17IntMask)), \
1306 	.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
1307 
1308 #define TXSYMPTOM_AUTO_P(fldname) \
1309 	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1310 	.msg = #fldname, .sz = sizeof(#fldname) }
1311 static const struct  qib_hwerror_msgs hdrchk_msgs[] = {
1312 	TXSYMPTOM_AUTO_P(NonKeyPacket),
1313 	TXSYMPTOM_AUTO_P(GRHFail),
1314 	TXSYMPTOM_AUTO_P(PkeyFail),
1315 	TXSYMPTOM_AUTO_P(QPFail),
1316 	TXSYMPTOM_AUTO_P(SLIDFail),
1317 	TXSYMPTOM_AUTO_P(RawIPV6),
1318 	TXSYMPTOM_AUTO_P(PacketTooSmall),
1319 	{ .mask = 0, .sz = 0 }
1320 };
1321 
1322 #define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1323 
/*
 * Called when we might have an error that is specific to a particular
 * PIO buffer, and may need to cancel that buffer, so it can be re-used;
 * this way we don't need to force the update of pioavail.
 */
1329 static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1330 {
1331 	struct qib_devdata *dd = ppd->dd;
1332 	u32 i;
1333 	int any;
1334 	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1335 	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1336 	unsigned long sbuf[4];
1337 
1338 	/*
1339 	 * It's possible that sendbuffererror could have bits set; might
1340 	 * have already done this as a result of hardware error handling.
1341 	 */
1342 	any = 0;
1343 	for (i = 0; i < regcnt; ++i) {
1344 		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1345 		if (sbuf[i]) {
1346 			any = 1;
1347 			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1348 		}
1349 	}
1350 
1351 	if (any)
1352 		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1353 }
1354 
1355 /* No txe_recover yet, if ever */
1356 
1357 /* No decode__errors yet */
1358 static void err_decode(char *msg, size_t len, u64 errs,
1359 		       const struct qib_hwerror_msgs *msp)
1360 {
1361 	u64 these, lmask;
1362 	int took, multi, n = 0;
1363 
1364 	while (errs && msp && msp->mask) {
1365 		multi = (msp->mask & (msp->mask - 1));
1366 		while (errs & msp->mask) {
1367 			these = (errs & msp->mask);
1368 			lmask = (these & (these - 1)) ^ these;
1369 			if (len) {
1370 				if (n++) {
1371 					/* separate the strings */
1372 					*msg++ = ',';
1373 					len--;
1374 				}
1375 				BUG_ON(!msp->sz);
1376 				/* msp->sz counts the nul */
1377 				took = min_t(size_t, msp->sz - (size_t)1, len);
1378 				memcpy(msg,  msp->msg, took);
1379 				len -= took;
1380 				msg += took;
1381 				if (len)
1382 					*msg = '\0';
1383 			}
1384 			errs &= ~lmask;
1385 			if (len && multi) {
				/* More than one bit in this mask */
1387 				int idx = -1;
1388 
1389 				while (lmask & msp->mask) {
1390 					++idx;
1391 					lmask >>= 1;
1392 				}
1393 				took = scnprintf(msg, len, "_%d", idx);
1394 				len -= took;
1395 				msg += took;
1396 			}
1397 		}
1398 		++msp;
1399 	}
1400 	/* If some bits are left, show in hex. */
1401 	if (len && errs)
1402 		snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1403 			(unsigned long long) errs);
1404 }
1405 
1406 /* only called if r1 set */
1407 static void flush_fifo(struct qib_pportdata *ppd)
1408 {
1409 	struct qib_devdata *dd = ppd->dd;
1410 	u32 __iomem *piobuf;
1411 	u32 bufn;
1412 	u32 *hdr;
1413 	u64 pbc;
1414 	const unsigned hdrwords = 7;
1415 	static struct ib_header ibhdr = {
1416 		.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1417 		.lrh[1] = IB_LID_PERMISSIVE,
1418 		.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1419 		.lrh[3] = IB_LID_PERMISSIVE,
1420 		.u.oth.bth[0] = cpu_to_be32(
1421 			(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1422 		.u.oth.bth[1] = cpu_to_be32(0),
1423 		.u.oth.bth[2] = cpu_to_be32(0),
1424 		.u.oth.u.ud.deth[0] = cpu_to_be32(0),
1425 		.u.oth.u.ud.deth[1] = cpu_to_be32(0),
1426 	};
1427 
1428 	/*
1429 	 * Send a dummy VL15 packet to flush the launch FIFO.
1430 	 * This will not actually be sent since the TxeBypassIbc bit is set.
1431 	 */
1432 	pbc = PBC_7322_VL15_SEND |
1433 		(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1434 		(hdrwords + SIZE_OF_CRC);
1435 	piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1436 	if (!piobuf)
1437 		return;
1438 	writeq(pbc, piobuf);
1439 	hdr = (u32 *) &ibhdr;
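	/*
	 * with write-combining, flush around the copy and write the
	 * last word separately
	 */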
1440 	if (dd->flags & QIB_PIO_FLUSH_WC) {
1441 		qib_flush_wc();
1442 		qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1443 		qib_flush_wc();
1444 		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1445 		qib_flush_wc();
1446 	} else
1447 		qib_pio_copy(piobuf + 2, hdr, hdrwords);
1448 	qib_sendbuf_done(dd, bufn);
1449 }
1450 
1451 /*
1452  * This is called with interrupts disabled and sdma_lock held.
1453  */
1454 static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1455 {
1456 	struct qib_devdata *dd = ppd->dd;
1457 	u64 set_sendctrl = 0;
1458 	u64 clr_sendctrl = 0;
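	/*
	 * set_sendctrl/clr_sendctrl accumulate the bits to set and clear;
	 * both are applied under sendctrl_lock below.
	 */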
1459 
1460 	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1461 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1462 	else
1463 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1464 
1465 	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1466 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1467 	else
1468 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1469 
1470 	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1471 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1472 	else
1473 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1474 
1475 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1476 		set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1477 				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1478 				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1479 	else
1480 		clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1481 				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1482 				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1483 
1484 	spin_lock(&dd->sendctrl_lock);
1485 
1486 	/* If we are draining everything, block sends first */
1487 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1488 		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1489 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1490 		qib_write_kreg(dd, kr_scratch, 0);
1491 	}
1492 
1493 	ppd->p_sendctrl |= set_sendctrl;
1494 	ppd->p_sendctrl &= ~clr_sendctrl;
1495 
1496 	if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1497 		qib_write_kreg_port(ppd, krp_sendctrl,
1498 				    ppd->p_sendctrl |
1499 				    SYM_MASK(SendCtrl_0, SDmaCleanup));
1500 	else
1501 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1502 	qib_write_kreg(dd, kr_scratch, 0);
1503 
1504 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1505 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1506 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1507 		qib_write_kreg(dd, kr_scratch, 0);
1508 	}
1509 
1510 	spin_unlock(&dd->sendctrl_lock);
1511 
1512 	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1513 		flush_fifo(ppd);
1514 }
1515 
1516 static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1517 {
1518 	__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1519 }
1520 
1521 static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1522 {
1523 	/*
	 * Set SendDmaLenGen, then clear and set the MSB of the generation
	 * count, to enable generation checking and load the internal
	 * generation counter.
1527 	 */
1528 	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1529 	qib_write_kreg_port(ppd, krp_senddmalengen,
1530 			    ppd->sdma_descq_cnt |
1531 			    (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1532 }
1533 
1534 /*
1535  * Must be called with sdma_lock held, or before init finished.
1536  */
1537 static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1538 {
1539 	/* Commit writes to memory and advance the tail on the chip */
1540 	wmb();
1541 	ppd->sdma_descq_tail = tail;
1542 	qib_write_kreg_port(ppd, krp_senddmatail, tail);
1543 }
1544 
1545 /*
1546  * This is called with interrupts disabled and sdma_lock held.
1547  */
1548 static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1549 {
1550 	/*
1551 	 * Drain all FIFOs.
1552 	 * The hardware doesn't require this but we do it so that verbs
1553 	 * and user applications don't wait for link active to send stale
1554 	 * data.
1555 	 */
1556 	sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1557 
1558 	qib_sdma_7322_setlengen(ppd);
1559 	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1560 	ppd->sdma_head_dma[0] = 0;
1561 	qib_7322_sdma_sendctrl(ppd,
1562 		ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1563 }
1564 
1565 #define DISABLES_SDMA ( \
1566 	QIB_E_P_SDMAHALT | \
1567 	QIB_E_P_SDMADESCADDRMISALIGN | \
1568 	QIB_E_P_SDMAMISSINGDW | \
1569 	QIB_E_P_SDMADWEN | \
1570 	QIB_E_P_SDMARPYTAG | \
1571 	QIB_E_P_SDMA1STDESC | \
1572 	QIB_E_P_SDMABASE | \
1573 	QIB_E_P_SDMATAILOUTOFBOUND | \
1574 	QIB_E_P_SDMAOUTOFBOUND | \
1575 	QIB_E_P_SDMAGENMISMATCH)
1576 
1577 static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1578 {
1579 	unsigned long flags;
1580 	struct qib_devdata *dd = ppd->dd;
1581 
1582 	errs &= QIB_E_P_SDMAERRS;
1583 	err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
1584 		   errs, qib_7322p_error_msgs);
1585 
1586 	if (errs & QIB_E_P_SDMAUNEXPDATA)
1587 		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1588 			    ppd->port);
1589 
1590 	spin_lock_irqsave(&ppd->sdma_lock, flags);
1591 
1592 	if (errs != QIB_E_P_SDMAHALT) {
1593 		/* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
1594 		qib_dev_porterr(dd, ppd->port,
1595 			"SDMA %s 0x%016llx %s\n",
1596 			qib_sdma_state_names[ppd->sdma_state.current_state],
1597 			errs, ppd->cpspec->sdmamsgbuf);
1598 		dump_sdma_7322_state(ppd);
1599 	}
1600 
1601 	switch (ppd->sdma_state.current_state) {
1602 	case qib_sdma_state_s00_hw_down:
1603 		break;
1604 
1605 	case qib_sdma_state_s10_hw_start_up_wait:
1606 		if (errs & QIB_E_P_SDMAHALT)
1607 			__qib_sdma_process_event(ppd,
1608 				qib_sdma_event_e20_hw_started);
1609 		break;
1610 
1611 	case qib_sdma_state_s20_idle:
1612 		break;
1613 
1614 	case qib_sdma_state_s30_sw_clean_up_wait:
1615 		break;
1616 
1617 	case qib_sdma_state_s40_hw_clean_up_wait:
1618 		if (errs & QIB_E_P_SDMAHALT)
1619 			__qib_sdma_process_event(ppd,
1620 				qib_sdma_event_e50_hw_cleaned);
1621 		break;
1622 
1623 	case qib_sdma_state_s50_hw_halt_wait:
1624 		if (errs & QIB_E_P_SDMAHALT)
1625 			__qib_sdma_process_event(ppd,
1626 				qib_sdma_event_e60_hw_halted);
1627 		break;
1628 
1629 	case qib_sdma_state_s99_running:
1630 		__qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1631 		__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1632 		break;
1633 	}
1634 
1635 	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1636 }
1637 
1638 /*
1639  * handle per-device errors (not per-port errors)
1640  */
1641 static noinline void handle_7322_errors(struct qib_devdata *dd)
1642 {
1643 	char *msg;
1644 	u64 iserr = 0;
1645 	u64 errs;
1646 	u64 mask;
1647 
1648 	qib_stats.sps_errints++;
1649 	errs = qib_read_kreg64(dd, kr_errstatus);
1650 	if (!errs) {
1651 		qib_devinfo(dd->pcidev,
1652 			"device error interrupt, but no error bits set!\n");
1653 		goto done;
1654 	}
1655 
1656 	/* don't report errors that are masked */
1657 	errs &= dd->cspec->errormask;
1658 	msg = dd->cspec->emsgbuf;
1659 
1660 	/* do these first, they are most important */
1661 	if (errs & QIB_E_HARDWARE) {
1662 		*msg = '\0';
1663 		qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1664 	}
1665 
1666 	if (errs & QIB_E_SPKTERRS) {
1667 		qib_disarm_7322_senderrbufs(dd->pport);
1668 		qib_stats.sps_txerrs++;
1669 	} else if (errs & QIB_E_INVALIDADDR)
1670 		qib_stats.sps_txerrs++;
1671 	else if (errs & QIB_E_ARMLAUNCH) {
1672 		qib_stats.sps_txerrs++;
1673 		qib_disarm_7322_senderrbufs(dd->pport);
1674 	}
1675 	qib_write_kreg(dd, kr_errclear, errs);
1676 
1677 	/*
1678 	 * The ones we mask off are handled specially below
1679 	 * or above.  Also mask SDMADISABLED by default as it
1680 	 * is too chatty.
1681 	 */
1682 	mask = QIB_E_HARDWARE;
1683 	*msg = '\0';
1684 
1685 	err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
1686 		   qib_7322error_msgs);
1687 
1688 	/*
1689 	 * Getting reset is a tragedy for all ports. Mark the device
	 * _and_ the ports as "offline" in a way meaningful to each.
1691 	 */
1692 	if (errs & QIB_E_RESET) {
1693 		int pidx;
1694 
1695 		qib_dev_err(dd,
1696 			"Got reset, requires re-init (unload and reload driver)\n");
1697 		dd->flags &= ~QIB_INITTED;  /* needs re-init */
1698 		/* mark as having had error */
1699 		*dd->devstatusp |= QIB_STATUS_HWERROR;
1700 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
1701 			if (dd->pport[pidx].link_speed_supported)
1702 				*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1703 	}
1704 
1705 	if (*msg && iserr)
1706 		qib_dev_err(dd, "%s error\n", msg);
1707 
1708 	/*
1709 	 * If there were hdrq or egrfull errors, wake up any processes
1710 	 * waiting in poll.  We used to try to check which contexts had
1711 	 * the overflow, but given the cost of that and the chip reads
1712 	 * to support it, it's better to just wake everybody up if we
1713 	 * get an overflow; waiters can poll again if it's not them.
1714 	 */
1715 	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1716 		qib_handle_urcv(dd, ~0U);
1717 		if (errs & ERR_MASK(RcvEgrFullErr))
1718 			qib_stats.sps_buffull++;
1719 		else
1720 			qib_stats.sps_hdrfull++;
1721 	}
1722 
1723 done:
1724 	return;
1725 }
1726 
1727 static void qib_error_tasklet(unsigned long data)
1728 {
1729 	struct qib_devdata *dd = (struct qib_devdata *)data;
1730 
1731 	handle_7322_errors(dd);
1732 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1733 }
1734 
1735 static void reenable_chase(struct timer_list *t)
1736 {
1737 	struct qib_chippport_specific *cp = from_timer(cp, t, chase_timer);
1738 	struct qib_pportdata *ppd = cp->ppd;
1739 
1740 	ppd->cpspec->chase_timer.expires = 0;
1741 	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1742 		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1743 }
1744 
1745 static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1746 		u8 ibclt)
1747 {
1748 	ppd->cpspec->chase_end = 0;
1749 
1750 	if (!qib_chase)
1751 		return;
1752 
1753 	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1754 		QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1755 	ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1756 	add_timer(&ppd->cpspec->chase_timer);
1757 }
1758 
1759 static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1760 {
1761 	u8 ibclt;
1762 	unsigned long tnow;
1763 
1764 	ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1765 
1766 	/*
1767 	 * Detect and handle the state chase issue, where we can
1768 	 * get stuck if we are unlucky on timing on both sides of
1769 	 * the link.   If we are, we disable, set a timer, and
1770 	 * then re-enable.
1771 	 */
1772 	switch (ibclt) {
1773 	case IB_7322_LT_STATE_CFGRCVFCFG:
1774 	case IB_7322_LT_STATE_CFGWAITRMT:
1775 	case IB_7322_LT_STATE_TXREVLANES:
1776 	case IB_7322_LT_STATE_CFGENH:
1777 		tnow = jiffies;
1778 		if (ppd->cpspec->chase_end &&
1779 		     time_after(tnow, ppd->cpspec->chase_end))
1780 			disable_chase(ppd, tnow, ibclt);
1781 		else if (!ppd->cpspec->chase_end)
1782 			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1783 		break;
1784 	default:
1785 		ppd->cpspec->chase_end = 0;
1786 		break;
1787 	}
1788 
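	/*
	 * While training at (or up at) QDR, force the H1 value; on
	 * non-r1 parts also disable LOS during that window.
	 */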
1789 	if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1790 	      ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1791 	     ibclt == IB_7322_LT_STATE_LINKUP) &&
1792 	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1793 		force_h1(ppd);
1794 		ppd->cpspec->qdr_reforce = 1;
1795 		if (!ppd->dd->cspec->r1)
1796 			serdes_7322_los_enable(ppd, 0);
1797 	} else if (ppd->cpspec->qdr_reforce &&
1798 		(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1799 		 (ibclt == IB_7322_LT_STATE_CFGENH ||
1800 		ibclt == IB_7322_LT_STATE_CFGIDLE ||
1801 		ibclt == IB_7322_LT_STATE_LINKUP))
1802 		force_h1(ppd);
1803 
1804 	if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1805 	    ppd->link_speed_enabled == QIB_IB_QDR &&
1806 	    (ibclt == IB_7322_LT_STATE_CFGTEST ||
1807 	     ibclt == IB_7322_LT_STATE_CFGENH ||
1808 	     (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1809 	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1810 		adj_tx_serdes(ppd);
1811 
1812 	if (ibclt != IB_7322_LT_STATE_LINKUP) {
1813 		u8 ltstate = qib_7322_phys_portstate(ibcst);
1814 		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1815 					  LinkTrainingState);
1816 		if (!ppd->dd->cspec->r1 &&
1817 		    pibclt == IB_7322_LT_STATE_LINKUP &&
1818 		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1819 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1820 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1821 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
			/* If the link went down (but not into recovery),
			 * turn LOS back on */
1824 			serdes_7322_los_enable(ppd, 1);
1825 		if (!ppd->cpspec->qdr_dfe_on &&
1826 		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1827 			ppd->cpspec->qdr_dfe_on = 1;
1828 			ppd->cpspec->qdr_dfe_time = 0;
1829 			/* On link down, reenable QDR adaptation */
1830 			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1831 					    ppd->dd->cspec->r1 ?
1832 					    QDR_STATIC_ADAPT_DOWN_R1 :
1833 					    QDR_STATIC_ADAPT_DOWN);
1834 			pr_info(
1835 				"IB%u:%u re-enabled QDR adaptation ibclt %x\n",
1836 				ppd->dd->unit, ppd->port, ibclt);
1837 		}
1838 	}
1839 }
1840 
1841 static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1842 
1843 /*
1844  * This is per-pport error handling.
 * It will likely get its own MSIx interrupt (one for each port,
1846  * although just a single handler).
1847  */
1848 static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1849 {
1850 	char *msg;
1851 	u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1852 	struct qib_devdata *dd = ppd->dd;
1853 
1854 	/* do this as soon as possible */
1855 	fmask = qib_read_kreg64(dd, kr_act_fmask);
1856 	if (!fmask)
1857 		check_7322_rxe_status(ppd);
1858 
1859 	errs = qib_read_kreg_port(ppd, krp_errstatus);
1860 	if (!errs)
1861 		qib_devinfo(dd->pcidev,
1862 			 "Port%d error interrupt, but no error bits set!\n",
1863 			 ppd->port);
1864 	if (!fmask)
1865 		errs &= ~QIB_E_P_IBSTATUSCHANGED;
1866 	if (!errs)
1867 		goto done;
1868 
1869 	msg = ppd->cpspec->epmsgbuf;
1870 	*msg = '\0';
1871 
1872 	if (errs & ~QIB_E_P_BITSEXTANT) {
1873 		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1874 			   errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1875 		if (!*msg)
1876 			snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
1877 				 "no others");
1878 		qib_dev_porterr(dd, ppd->port,
1879 			"error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1880 			(errs & ~QIB_E_P_BITSEXTANT), msg);
1881 		*msg = '\0';
1882 	}
1883 
1884 	if (errs & QIB_E_P_SHDR) {
1885 		u64 symptom;
1886 
1887 		/* determine cause, then write to clear */
1888 		symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1889 		qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1890 		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
1891 			   hdrchk_msgs);
1892 		*msg = '\0';
1893 		/* senderrbuf cleared in SPKTERRS below */
1894 	}
1895 
1896 	if (errs & QIB_E_P_SPKTERRS) {
1897 		if ((errs & QIB_E_P_LINK_PKTERRS) &&
1898 		    !(ppd->lflags & QIBL_LINKACTIVE)) {
1899 			/*
1900 			 * This can happen when trying to bring the link
1901 			 * up, but the IB link changes state at the "wrong"
1902 			 * time. The IB logic then complains that the packet
1903 			 * isn't valid.  We don't want to confuse people, so
1904 			 * we just don't print them, except at debug
1905 			 */
1906 			err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1907 				   (errs & QIB_E_P_LINK_PKTERRS),
1908 				   qib_7322p_error_msgs);
1909 			*msg = '\0';
1910 			ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1911 		}
1912 		qib_disarm_7322_senderrbufs(ppd);
1913 	} else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1914 		   !(ppd->lflags & QIBL_LINKACTIVE)) {
1915 		/*
1916 		 * This can happen when SMA is trying to bring the link
1917 		 * up, but the IB link changes state at the "wrong" time.
1918 		 * The IB logic then complains that the packet isn't
1919 		 * valid.  We don't want to confuse people, so we just
1920 		 * don't print them, except at debug
1921 		 */
1922 		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
1923 			   qib_7322p_error_msgs);
1924 		ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1925 		*msg = '\0';
1926 	}
1927 
1928 	qib_write_kreg_port(ppd, krp_errclear, errs);
1929 
1930 	errs &= ~ignore_this_time;
1931 	if (!errs)
1932 		goto done;
1933 
1934 	if (errs & QIB_E_P_RPKTERRS)
1935 		qib_stats.sps_rcverrs++;
1936 	if (errs & QIB_E_P_SPKTERRS)
1937 		qib_stats.sps_txerrs++;
1938 
1939 	iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1940 
1941 	if (errs & QIB_E_P_SDMAERRS)
1942 		sdma_7322_p_errors(ppd, errs);
1943 
1944 	if (errs & QIB_E_P_IBSTATUSCHANGED) {
1945 		u64 ibcs;
1946 		u8 ltstate;
1947 
1948 		ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1949 		ltstate = qib_7322_phys_portstate(ibcs);
1950 
1951 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1952 			handle_serdes_issues(ppd, ibcs);
1953 		if (!(ppd->cpspec->ibcctrl_a &
1954 		      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1955 			/*
1956 			 * We got our interrupt, so init code should be
1957 			 * happy and not try alternatives. Now squelch
1958 			 * other "chatter" from link-negotiation (pre Init)
1959 			 */
1960 			ppd->cpspec->ibcctrl_a |=
1961 				SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1962 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
1963 					    ppd->cpspec->ibcctrl_a);
1964 		}
1965 
1966 		/* Update our picture of width and speed from chip */
1967 		ppd->link_width_active =
1968 			(ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1969 			    IB_WIDTH_4X : IB_WIDTH_1X;
1970 		ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1971 			LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1972 			  SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1973 				   QIB_IB_DDR : QIB_IB_SDR;
1974 
1975 		if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1976 		    IB_PHYSPORTSTATE_DISABLED)
1977 			qib_set_ib_7322_lstate(ppd, 0,
1978 			       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1979 		else
1980 			/*
1981 			 * Since going into a recovery state causes the link
1982 			 * state to go down and since recovery is transitory,
1983 			 * it is better if we "miss" ever seeing the link
1984 			 * training state go into recovery (i.e., ignore this
1985 			 * transition for link state special handling purposes)
1986 			 * without updating lastibcstat.
1987 			 */
1988 			if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1989 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1990 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1991 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1992 				qib_handle_e_ibstatuschanged(ppd, ibcs);
1993 	}
1994 	if (*msg && iserr)
1995 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1996 
1997 	if (ppd->state_wanted & ppd->lflags)
1998 		wake_up_interruptible(&ppd->state_wait);
1999 done:
2000 	return;
2001 }
2002 
2003 /* enable/disable chip from delivering interrupts */
2004 static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
2005 {
2006 	if (enable) {
2007 		if (dd->flags & QIB_BADINTR)
2008 			return;
2009 		qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
2010 		/* cause any pending enabled interrupts to be re-delivered */
2011 		qib_write_kreg(dd, kr_intclear, 0ULL);
2012 		if (dd->cspec->num_msix_entries) {
2013 			/* and same for MSIx */
2014 			u64 val = qib_read_kreg64(dd, kr_intgranted);
2015 
2016 			if (val)
2017 				qib_write_kreg(dd, kr_intgranted, val);
2018 		}
2019 	} else
2020 		qib_write_kreg(dd, kr_intmask, 0ULL);
2021 }
2022 
2023 /*
2024  * Try to cleanup as much as possible for anything that might have gone
2025  * wrong while in freeze mode, such as pio buffers being written by user
2026  * processes (causing armlaunch), send errors due to going into freeze mode,
2027  * etc., and try to avoid causing extra interrupts while doing so.
2028  * Forcibly update the in-memory pioavail register copies after cleanup
2029  * because the chip won't do it while in freeze mode (the register values
2030  * themselves are kept correct).
2031  * Make sure that we don't lose any important interrupts by using the chip
2032  * feature that says that writing 0 to a bit in *clear that is set in
2033  * *status will cause an interrupt to be generated again (if allowed by
2034  * the *mask value).
2035  * This is in chip-specific code because of all of the register accesses,
2036  * even though the details are similar on most chips.
2037  */
2038 static void qib_7322_clear_freeze(struct qib_devdata *dd)
2039 {
2040 	int pidx;
2041 
2042 	/* disable error interrupts, to avoid confusion */
2043 	qib_write_kreg(dd, kr_errmask, 0ULL);
2044 
2045 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
2046 		if (dd->pport[pidx].link_speed_supported)
2047 			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2048 					    0ULL);
2049 
2050 	/* also disable interrupts; errormask is sometimes overwritten */
2051 	qib_7322_set_intr_state(dd, 0);
2052 
2053 	/* clear the freeze, and be sure chip saw it */
2054 	qib_write_kreg(dd, kr_control, dd->control);
2055 	qib_read_kreg32(dd, kr_scratch);
2056 
2057 	/*
2058 	 * Force new interrupt if any hwerr, error or interrupt bits are
2059 	 * still set, and clear "safe" send packet errors related to freeze
2060 	 * and cancelling sends.  Re-enable error interrupts before possible
2061 	 * force of re-interrupt on pending interrupts.
2062 	 */
2063 	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2064 	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2065 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2066 	/* We need to purge per-port errs and reset mask, too */
2067 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2068 		if (!dd->pport[pidx].link_speed_supported)
2069 			continue;
2070 		qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
2071 		qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
2072 	}
2073 	qib_7322_set_intr_state(dd, 1);
2074 }
2075 
2076 /* no error handling to speak of */
2077 /**
2078  * qib_7322_handle_hwerrors - display hardware errors.
2079  * @dd: the qlogic_ib device
2080  * @msg: the output buffer
2081  * @msgl: the size of the output buffer
2082  *
 * Most hardware errors are catastrophic, but for right now we'll print
 * them and continue.  We reuse the same message buffer as
 * qib_handle_errors() to avoid excessive stack use.
2087  */
2088 static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2089 				     size_t msgl)
2090 {
2091 	u64 hwerrs;
2092 	u32 ctrl;
2093 	int isfatal = 0;
2094 
2095 	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2096 	if (!hwerrs)
2097 		goto bail;
2098 	if (hwerrs == ~0ULL) {
2099 		qib_dev_err(dd,
2100 			"Read of hardware error status failed (all bits set); ignoring\n");
2101 		goto bail;
2102 	}
2103 	qib_stats.sps_hwerrs++;
2104 
2105 	/* Always clear the error status register, except BIST fail */
2106 	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2107 		       ~HWE_MASK(PowerOnBISTFailed));
2108 
2109 	hwerrs &= dd->cspec->hwerrmask;
2110 
2111 	/* no EEPROM logging, yet */
2112 
2113 	if (hwerrs)
2114 		qib_devinfo(dd->pcidev,
2115 			"Hardware error: hwerr=0x%llx (cleared)\n",
2116 			(unsigned long long) hwerrs);
2117 
2118 	ctrl = qib_read_kreg32(dd, kr_control);
2119 	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2120 		/*
2121 		 * No recovery yet...
2122 		 */
2123 		if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2124 		    dd->cspec->stay_in_freeze) {
2125 			/*
			 * If any bits we aren't ignoring are set, only make
			 * the complaint once, in case it's stuck or recurring
			 * and we get here multiple times.
			 * Force the link down, so the switch knows, and the
			 * LEDs are turned off.
2131 			 */
2132 			if (dd->flags & QIB_INITTED)
2133 				isfatal = 1;
2134 		} else
2135 			qib_7322_clear_freeze(dd);
2136 	}
2137 
2138 	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2139 		isfatal = 1;
2140 		strlcpy(msg,
2141 			"[Memory BIST test failed, InfiniPath hardware unusable]",
2142 			msgl);
2143 		/* ignore from now on, so disable until driver reloaded */
2144 		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2145 		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2146 	}
2147 
2148 	err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2149 
2150 	/* Ignore esoteric PLL failures et al. */
2151 
2152 	qib_dev_err(dd, "%s hardware error\n", msg);
2153 
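	/*
	 * On an SDMA memory read error, dump the SDMA state of the
	 * affected port(s).
	 */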
2154 	if (hwerrs &
2155 		   (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
2156 		    SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
2157 		int pidx = 0;
2158 		int err;
2159 		unsigned long flags;
2160 		struct qib_pportdata *ppd = dd->pport;
2161 
2162 		for (; pidx < dd->num_pports; ++pidx, ppd++) {
2163 			err = 0;
2164 			if (pidx == 0 && (hwerrs &
2165 				SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
2166 				err++;
2167 			if (pidx == 1 && (hwerrs &
2168 				SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
2169 				err++;
2170 			if (err) {
2171 				spin_lock_irqsave(&ppd->sdma_lock, flags);
2172 				dump_sdma_7322_state(ppd);
2173 				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
2174 			}
2175 		}
2176 	}
2177 
2178 	if (isfatal && !dd->diag_client) {
2179 		qib_dev_err(dd,
2180 			"Fatal Hardware Error, no longer usable, SN %.16s\n",
2181 			dd->serial);
2182 		/*
2183 		 * for /sys status file and user programs to print; if no
2184 		 * trailing brace is copied, we'll know it was truncated.
2185 		 */
2186 		if (dd->freezemsg)
2187 			snprintf(dd->freezemsg, dd->freezelen,
2188 				 "{%s}", msg);
2189 		qib_disable_after_error(dd);
2190 	}
2191 bail:;
2192 }
2193 
2194 /**
2195  * qib_7322_init_hwerrors - enable hardware errors
2196  * @dd: the qlogic_ib device
2197  *
2198  * now that we have finished initializing everything that might reasonably
 * cause a hardware error, and cleared those error bits as they occur,
2200  * we can enable hardware errors in the mask (potentially enabling
2201  * freeze mode), and enable hardware errors as errors (along with
2202  * everything else) in errormask
2203  */
2204 static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2205 {
2206 	int pidx;
2207 	u64 extsval;
2208 
2209 	extsval = qib_read_kreg64(dd, kr_extstatus);
2210 	if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2211 			 QIB_EXTS_MEMBIST_ENDTEST)))
2212 		qib_dev_err(dd, "MemBIST did not complete!\n");
2213 
2214 	/* never clear BIST failure, so reported on each driver load */
2215 	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2216 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2217 
2218 	/* clear all */
2219 	qib_write_kreg(dd, kr_errclear, ~0ULL);
2220 	/* enable errors that are masked, at least this first time. */
2221 	qib_write_kreg(dd, kr_errmask, ~0ULL);
2222 	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2223 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
2224 		if (dd->pport[pidx].link_speed_supported)
2225 			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2226 					    ~0ULL);
2227 }
2228 
2229 /*
2230  * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
2231  * on chips that are count-based, rather than trigger-based.  There is no
2232  * reference counting, but that's also fine, given the intended use.
2233  * Only chip-specific because it's all register accesses
2234  */
2235 static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2236 {
2237 	if (enable) {
2238 		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2239 		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2240 	} else
2241 		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2242 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2243 }
2244 
2245 /*
2246  * Formerly took parameter <which> in pre-shifted,
2247  * pre-merged form with LinkCmd and LinkInitCmd
2248  * together, and assuming the zero was NOP.
2249  */
2250 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2251 				   u16 linitcmd)
2252 {
2253 	u64 mod_wd;
2254 	struct qib_devdata *dd = ppd->dd;
2255 	unsigned long flags;
2256 
2257 	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2258 		/*
2259 		 * If we are told to disable, note that so link-recovery
2260 		 * code does not attempt to bring us back up.
2261 		 * Also reset everything that we can, so we start
2262 		 * completely clean when re-enabled (before we
2263 		 * actually issue the disable to the IBC)
2264 		 */
2265 		qib_7322_mini_pcs_reset(ppd);
2266 		spin_lock_irqsave(&ppd->lflags_lock, flags);
2267 		ppd->lflags |= QIBL_IB_LINK_DISABLED;
2268 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2269 	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2270 		/*
2271 		 * Any other linkinitcmd will lead to LINKDOWN and then
2272 		 * to INIT (if all is well), so clear flag to let
2273 		 * link-recovery code attempt to bring us back up.
2274 		 */
2275 		spin_lock_irqsave(&ppd->lflags_lock, flags);
2276 		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2277 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2278 		/*
2279 		 * Clear status change interrupt reduction so the
2280 		 * new state is seen.
2281 		 */
2282 		ppd->cpspec->ibcctrl_a &=
2283 			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2284 	}
2285 
2286 	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2287 		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2288 
2289 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2290 			    mod_wd);
2291 	/* write to chip to prevent back-to-back writes of ibc reg */
2292 	qib_write_kreg(dd, kr_scratch, 0);
2293 
2294 }
2295 
2296 /*
2297  * The total RCV buffer memory is 64KB, used for both ports, and is
2298  * in units of 64 bytes (same as IB flow control credit unit).
 * The consumedVL unit in the same registers is in 32 byte units!
2300  * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2301  * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2302  * in krp_rxcreditvl15, rather than 10.
2303  */
2304 #define RCV_BUF_UNITSZ 64
2305 #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
2306 
2307 static void set_vls(struct qib_pportdata *ppd)
2308 {
2309 	int i, numvls, totcred, cred_vl, vl0extra;
2310 	struct qib_devdata *dd = ppd->dd;
2311 	u64 val;
2312 
2313 	numvls = qib_num_vls(ppd->vls_operational);
2314 
2315 	/*
	 * Set up per-VL credits. Below is a kluge based on these assumptions:
2317 	 * 1) port is disabled at the time early_init is called.
2318 	 * 2) give VL15 17 credits, for two max-plausible packets.
2319 	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
2320 	 */
2321 	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
2322 	totcred = NUM_RCV_BUF_UNITS(dd);
2323 	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2324 	totcred -= cred_vl;
2325 	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2326 	cred_vl = totcred / numvls;
2327 	vl0extra = totcred - cred_vl * numvls;
2328 	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2329 	for (i = 1; i < numvls; i++)
2330 		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2331 	for (; i < 8; i++) /* no buffer space for other VLs */
2332 		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2333 
2334 	/* Notify IBC that credits need to be recalculated */
2335 	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2336 	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2337 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2338 	qib_write_kreg(dd, kr_scratch, 0ULL);
2339 	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2340 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2341 
2342 	for (i = 0; i < numvls; i++)
2343 		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2344 	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2345 
2346 	/* Change the number of operational VLs */
2347 	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2348 				~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2349 		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2350 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2351 	qib_write_kreg(dd, kr_scratch, 0ULL);
2352 }
2353 
2354 /*
2355  * The code that deals with actual SerDes is in serdes_7322_init().
2356  * Compared to the code for iba7220, it is minimal.
2357  */
2358 static int serdes_7322_init(struct qib_pportdata *ppd);
2359 
2360 /**
2361  * qib_7322_bringup_serdes - bring up the serdes
2362  * @ppd: physical port on the qlogic_ib device
2363  */
2364 static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2365 {
2366 	struct qib_devdata *dd = ppd->dd;
2367 	u64 val, guid, ibc;
2368 	unsigned long flags;
2369 	int ret = 0;
2370 
2371 	/*
2372 	 * SerDes model not in Pd, but still need to
2373 	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2374 	 * eventually.
2375 	 */
2376 	/* Put IBC in reset, sends disabled (should be in reset already) */
2377 	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2378 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2379 	qib_write_kreg(dd, kr_scratch, 0ULL);
2380 
2381 	/* ensure previous Tx parameters are not still forced */
2382 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
2383 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
2384 		reset_tx_deemphasis_override));
2385 
2386 	if (qib_compat_ddr_negotiate) {
2387 		ppd->cpspec->ibdeltainprog = 1;
2388 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2389 						crp_ibsymbolerr);
2390 		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2391 						crp_iblinkerrrecov);
2392 	}
2393 
2394 	/* flowcontrolwatermark is in units of KBytes */
2395 	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2396 	/*
2397 	 * Flow control is sent this often, even if no changes in
2398 	 * buffer space occur.  Units are 128ns for this chip.
2399 	 * Set to 3usec.
2400 	 */
2401 	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2402 	/* max error tolerance */
2403 	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2404 	/* IB credit flow control. */
2405 	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2406 	/*
2407 	 * set initial max size pkt IBC will send, including ICRC; it's the
2408 	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2409 	 */
2410 	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2411 		SYM_LSB(IBCCtrlA_0, MaxPktLen);
2412 	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2413 
2414 	/*
2415 	 * Reset the PCS interface to the serdes (and also ibc, which is still
2416 	 * in reset from above).  Writes new value of ibcctrl_a as last step.
2417 	 */
2418 	qib_7322_mini_pcs_reset(ppd);
2419 
2420 	if (!ppd->cpspec->ibcctrl_b) {
2421 		unsigned lse = ppd->link_speed_enabled;
2422 
2423 		/*
2424 		 * Not on re-init after reset, establish shadow
2425 		 * and force initial config.
2426 		 */
2427 		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2428 							     krp_ibcctrl_b);
2429 		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2430 				IBA7322_IBC_SPEED_DDR |
2431 				IBA7322_IBC_SPEED_SDR |
2432 				IBA7322_IBC_WIDTH_AUTONEG |
2433 				SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
		if (lse & (lse - 1)) /* Multiple speeds enabled */
2435 			ppd->cpspec->ibcctrl_b |=
2436 				(lse << IBA7322_IBC_SPEED_LSB) |
2437 				IBA7322_IBC_IBTA_1_2_MASK |
2438 				IBA7322_IBC_MAX_SPEED_MASK;
2439 		else
2440 			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2441 				IBA7322_IBC_SPEED_QDR |
2442 				 IBA7322_IBC_IBTA_1_2_MASK :
2443 				(lse == QIB_IB_DDR) ?
2444 					IBA7322_IBC_SPEED_DDR :
2445 					IBA7322_IBC_SPEED_SDR;
2446 		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2447 		    (IB_WIDTH_1X | IB_WIDTH_4X))
2448 			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2449 		else
2450 			ppd->cpspec->ibcctrl_b |=
2451 				ppd->link_width_enabled == IB_WIDTH_4X ?
2452 				IBA7322_IBC_WIDTH_4X_ONLY :
2453 				IBA7322_IBC_WIDTH_1X_ONLY;
2454 
2455 		/* always enable these on driver reload, not sticky */
2456 		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2457 			IBA7322_IBC_HRTBT_MASK);
2458 	}
2459 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2460 
2461 	/* setup so we have more time at CFGTEST to change H1 */
2462 	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2463 	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2464 	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2465 	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2466 
2467 	serdes_7322_init(ppd);
2468 
2469 	guid = be64_to_cpu(ppd->guid);
2470 	if (!guid) {
2471 		if (dd->base_guid)
2472 			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2473 		ppd->guid = cpu_to_be64(guid);
2474 	}
2475 
2476 	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2477 	/* write to chip to prevent back-to-back writes of ibc reg */
2478 	qib_write_kreg(dd, kr_scratch, 0);
2479 
2480 	/* Enable port */
2481 	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2482 	set_vls(ppd);
2483 
2484 	/* initially come up DISABLED, without sending anything. */
2485 	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2486 					QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2487 	qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2488 	qib_write_kreg(dd, kr_scratch, 0ULL);
2489 	/* clear the linkinit cmds */
2490 	ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2491 
2492 	/* be paranoid against later code motion, etc. */
2493 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2494 	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2495 	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2496 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2497 
2498 	/* Also enable IBSTATUSCHG interrupt.  */
2499 	val = qib_read_kreg_port(ppd, krp_errmask);
2500 	qib_write_kreg_port(ppd, krp_errmask,
2501 		val | ERR_MASK_N(IBStatusChanged));
2502 
2503 	/* Always zero until we start messing with SerDes for real */
2504 	return ret;
2505 }
2506 
2507 /**
 * qib_7322_mini_quiet_serdes - set serdes to txidle
 * @ppd: physical port on the qlogic_ib device
2510  * Called when driver is being unloaded
2511  */
2512 static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2513 {
2514 	u64 val;
2515 	unsigned long flags;
2516 
2517 	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2518 
2519 	spin_lock_irqsave(&ppd->lflags_lock, flags);
2520 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2521 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2522 	wake_up(&ppd->cpspec->autoneg_wait);
2523 	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2524 	if (ppd->dd->cspec->r1)
2525 		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2526 
2527 	ppd->cpspec->chase_end = 0;
2528 	if (ppd->cpspec->chase_timer.function) /* if initted */
2529 		del_timer_sync(&ppd->cpspec->chase_timer);
2530 
2531 	/*
2532 	 * Despite the name, actually disables IBC as well. Do it when
2533 	 * we are as sure as possible that no more packets can be
2534 	 * received, following the down and the PCS reset.
	 * The actual disabling happens in qib_7322_mini_pcs_reset(),
2536 	 * along with the PCS being reset.
2537 	 */
2538 	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2539 	qib_7322_mini_pcs_reset(ppd);
2540 
2541 	/*
2542 	 * Update the adjusted counters so the adjustment persists
2543 	 * across driver reload.
2544 	 */
2545 	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2546 	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2547 		struct qib_devdata *dd = ppd->dd;
2548 		u64 diagc;
2549 
2550 		/* enable counter writes */
2551 		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2552 		qib_write_kreg(dd, kr_hwdiagctrl,
2553 			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2554 
2555 		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2556 			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
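			/*
			 * A delta measurement still in progress rolls val
			 * back to the snapshot taken at bring-up.
			 */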
2557 			if (ppd->cpspec->ibdeltainprog)
2558 				val -= val - ppd->cpspec->ibsymsnap;
2559 			val -= ppd->cpspec->ibsymdelta;
2560 			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2561 		}
2562 		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2563 			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2564 			if (ppd->cpspec->ibdeltainprog)
2565 				val -= val - ppd->cpspec->iblnkerrsnap;
2566 			val -= ppd->cpspec->iblnkerrdelta;
2567 			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2568 		}
2569 		if (ppd->cpspec->iblnkdowndelta) {
2570 			val = read_7322_creg32_port(ppd, crp_iblinkdown);
2571 			val += ppd->cpspec->iblnkdowndelta;
2572 			write_7322_creg_port(ppd, crp_iblinkdown, val);
2573 		}
2574 		/*
2575 		 * No need to save ibmalfdelta since IB perfcounters
2576 		 * are cleared on driver reload.
2577 		 */
2578 
2579 		/* and disable counter writes */
2580 		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2581 	}
2582 }
2583 
2584 /**
2585  * qib_setup_7322_setextled - set the state of the two external LEDs
2586  * @ppd: physical port on the qlogic_ib device
2587  * @on: whether the link is up or not
2588  *
 * The exact combo of LEDs, if on is true, is determined by looking
 * at the ibcstatus.
2591  *
2592  * These LEDs indicate the physical and logical state of IB link.
2593  * For this chip (at least with recommended board pinouts), LED1
 * is Yellow (logical state) and LED2 is Green (physical state).
2595  *
2596  * Note:  We try to match the Mellanox HCA LED behavior as best
2597  * we can.  Green indicates physical link state is OK (something is
2598  * plugged in, and we can train).
2599  * Amber indicates the link is logically up (ACTIVE).
2600  * Mellanox further blinks the amber LED to indicate data packet
2601  * activity, but we have no hardware support for that, so it would
2602  * require waking up every 10-20 msecs and checking the counters
2603  * on the chip, and then turning the LED off if appropriate.  That's
2604  * visible overhead, so not something we will do.
2605  */
2606 static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2607 {
2608 	struct qib_devdata *dd = ppd->dd;
2609 	u64 extctl, ledblink = 0, val;
2610 	unsigned long flags;
2611 	int yel, grn;
2612 
2613 	/*
2614 	 * The diags use the LED to indicate diag info, so we leave
2615 	 * the external LED alone when the diags are running.
2616 	 */
2617 	if (dd->diag_client)
2618 		return;
2619 
	/* Allow override of LED display for, e.g., locating the system in a rack */
2621 	if (ppd->led_override) {
2622 		grn = (ppd->led_override & QIB_LED_PHYS);
2623 		yel = (ppd->led_override & QIB_LED_LOG);
2624 	} else if (on) {
2625 		val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2626 		grn = qib_7322_phys_portstate(val) ==
2627 			IB_PHYSPORTSTATE_LINKUP;
2628 		yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2629 	} else {
2630 		grn = 0;
2631 		yel = 0;
2632 	}
2633 
2634 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2635 	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2636 		~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2637 	if (grn) {
2638 		extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2639 		/*
2640 		 * Counts are in chip clock (4ns) periods.
2641 		 * This is 1/16 sec (66.6ms) on,
2642 		 * 3/16 sec (187.5 ms) off, with packets rcvd.
2643 		 */
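		/* usec * 1000 / 4: ns divided by the 4 ns clock period */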
2644 		ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2645 			((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
2646 	}
2647 	if (yel)
2648 		extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2649 	dd->cspec->extctrl = extctl;
2650 	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2651 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2652 
2653 	if (ledblink) /* blink the LED on packet receive */
2654 		qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2655 }
2656 
2657 #ifdef CONFIG_INFINIBAND_QIB_DCA
2658 
2659 static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
2660 {
2661 	switch (event) {
2662 	case DCA_PROVIDER_ADD:
2663 		if (dd->flags & QIB_DCA_ENABLED)
2664 			break;
2665 		if (!dca_add_requester(&dd->pcidev->dev)) {
2666 			qib_devinfo(dd->pcidev, "DCA enabled\n");
2667 			dd->flags |= QIB_DCA_ENABLED;
2668 			qib_setup_dca(dd);
2669 		}
2670 		break;
2671 	case DCA_PROVIDER_REMOVE:
2672 		if (dd->flags & QIB_DCA_ENABLED) {
2673 			dca_remove_requester(&dd->pcidev->dev);
2674 			dd->flags &= ~QIB_DCA_ENABLED;
2675 			dd->cspec->dca_ctrl = 0;
2676 			qib_write_kreg(dd, KREG_IDX(DCACtrlA),
2677 				dd->cspec->dca_ctrl);
2678 		}
2679 		break;
2680 	}
2681 	return 0;
2682 }
2683 
2684 static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
2685 {
2686 	struct qib_devdata *dd = rcd->dd;
2687 	struct qib_chip_specific *cspec = dd->cspec;
2688 
2689 	if (!(dd->flags & QIB_DCA_ENABLED))
2690 		return;
2691 	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2692 		const struct dca_reg_map *rmp;
2693 
2694 		cspec->rhdr_cpu[rcd->ctxt] = cpu;
2695 		rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2696 		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2697 		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2698 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2699 		qib_devinfo(dd->pcidev,
2700 			"Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
2701 			(long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2702 		qib_write_kreg(dd, rmp->regno,
2703 			       cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2704 		cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2705 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2706 	}
2707 }
2708 
2709 static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2710 {
2711 	struct qib_devdata *dd = ppd->dd;
2712 	struct qib_chip_specific *cspec = dd->cspec;
2713 	unsigned pidx = ppd->port - 1;
2714 
2715 	if (!(dd->flags & QIB_DCA_ENABLED))
2716 		return;
2717 	if (cspec->sdma_cpu[pidx] != cpu) {
2718 		cspec->sdma_cpu[pidx] = cpu;
2719 		cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2720 			SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2721 			SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2722 		cspec->dca_rcvhdr_ctrl[4] |=
2723 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2724 				(ppd->hw_pidx ?
2725 					SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2726 					SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2727 		qib_devinfo(dd->pcidev,
2728 			"sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2729 			(long long) cspec->dca_rcvhdr_ctrl[4]);
2730 		qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2731 			       cspec->dca_rcvhdr_ctrl[4]);
2732 		cspec->dca_ctrl |= ppd->hw_pidx ?
2733 			SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2734 			SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2735 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2736 	}
2737 }
2738 
2739 static void qib_setup_dca(struct qib_devdata *dd)
2740 {
2741 	struct qib_chip_specific *cspec = dd->cspec;
2742 	int i;
2743 
2744 	for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2745 		cspec->rhdr_cpu[i] = -1;
2746 	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2747 		cspec->sdma_cpu[i] = -1;
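	/* default each RcvHdrq DCA XfrCnt to 1; CPU tags are set later */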
2748 	cspec->dca_rcvhdr_ctrl[0] =
2749 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2750 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2751 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2752 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2753 	cspec->dca_rcvhdr_ctrl[1] =
2754 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2755 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2756 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2757 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2758 	cspec->dca_rcvhdr_ctrl[2] =
2759 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2760 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2761 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2762 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2763 	cspec->dca_rcvhdr_ctrl[3] =
2764 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2765 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2766 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2767 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2768 	cspec->dca_rcvhdr_ctrl[4] =
2769 		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2770 		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2771 	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2772 		qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2773 			       cspec->dca_rcvhdr_ctrl[i]);
2774 	for (i = 0; i < cspec->num_msix_entries; i++)
2775 		setup_dca_notifier(dd, i);
2776 }
2777 
2778 static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2779 			     const cpumask_t *mask)
2780 {
2781 	struct qib_irq_notify *n =
2782 		container_of(notify, struct qib_irq_notify, notify);
2783 	int cpu = cpumask_first(mask);
2784 
2785 	if (n->rcv) {
2786 		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2787 
2788 		qib_update_rhdrq_dca(rcd, cpu);
2789 	} else {
2790 		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2791 
2792 		qib_update_sdma_dca(ppd, cpu);
2793 	}
2794 }
2795 
2796 static void qib_irq_notifier_release(struct kref *ref)
2797 {
2798 	struct qib_irq_notify *n =
2799 		container_of(ref, struct qib_irq_notify, notify.kref);
2800 	struct qib_devdata *dd;
2801 
2802 	if (n->rcv) {
2803 		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2804 
2805 		dd = rcd->dd;
2806 	} else {
2807 		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2808 
2809 		dd = ppd->dd;
2810 	}
2811 	qib_devinfo(dd->pcidev,
2812 		"release on HCA notify 0x%p n 0x%p\n", ref, n);
2813 	kfree(n);
2814 }
2815 #endif
2816 
2817 static void qib_7322_free_irq(struct qib_devdata *dd)
2818 {
2819 	u64 intgranted;
2820 	int i;
2821 
2822 	dd->cspec->main_int_mask = ~0ULL;
2823 
2824 	for (i = 0; i < dd->cspec->num_msix_entries; i++) {
2825 		/* only free IRQs that were allocated */
2826 		if (dd->cspec->msix_entries[i].arg) {
2827 #ifdef CONFIG_INFINIBAND_QIB_DCA
2828 			reset_dca_notifier(dd, i);
2829 #endif
2830 			irq_set_affinity_hint(pci_irq_vector(dd->pcidev, i),
2831 					      NULL);
2832 			free_cpumask_var(dd->cspec->msix_entries[i].mask);
2833 			pci_free_irq(dd->pcidev, i,
2834 				     dd->cspec->msix_entries[i].arg);
2835 		}
2836 	}
2837 
2838 	/* If num_msix_entries was 0, disable the INTx IRQ */
2839 	if (!dd->cspec->num_msix_entries)
2840 		pci_free_irq(dd->pcidev, 0, dd);
2841 	else
2842 		dd->cspec->num_msix_entries = 0;
2843 
2844 	pci_free_irq_vectors(dd->pcidev);
2845 
2846 	/* make sure no MSIx interrupts are left pending */
2847 	intgranted = qib_read_kreg64(dd, kr_intgranted);
2848 	if (intgranted)
2849 		qib_write_kreg(dd, kr_intgranted, intgranted);
2850 }
2851 
2852 static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2853 {
2854 	int i;
2855 
2856 #ifdef CONFIG_INFINIBAND_QIB_DCA
2857 	if (dd->flags & QIB_DCA_ENABLED) {
2858 		dca_remove_requester(&dd->pcidev->dev);
2859 		dd->flags &= ~QIB_DCA_ENABLED;
2860 		dd->cspec->dca_ctrl = 0;
2861 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2862 	}
2863 #endif
2864 
2865 	qib_7322_free_irq(dd);
2866 	kfree(dd->cspec->cntrs);
2867 	kfree(dd->cspec->sendchkenable);
2868 	kfree(dd->cspec->sendgrhchk);
2869 	kfree(dd->cspec->sendibchk);
2870 	kfree(dd->cspec->msix_entries);
2871 	for (i = 0; i < dd->num_pports; i++) {
2872 		unsigned long flags;
2873 		u32 mask = QSFP_GPIO_MOD_PRS_N |
2874 			(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2875 
2876 		kfree(dd->pport[i].cpspec->portcntrs);
2877 		if (dd->flags & QIB_HAS_QSFP) {
2878 			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2879 			dd->cspec->gpio_mask &= ~mask;
2880 			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2881 			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2882 		}
2883 	}
2884 }
2885 
2886 /* handle SDMA interrupts */
2887 static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2888 {
2889 	struct qib_pportdata *ppd0 = &dd->pport[0];
2890 	struct qib_pportdata *ppd1 = &dd->pport[1];
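	/* pick out each port's SDMA, SDmaIdle and SDmaProgress bits */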
2891 	u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2892 		INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2893 	u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2894 		INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2895 
2896 	if (intr0)
2897 		qib_sdma_intr(ppd0);
2898 	if (intr1)
2899 		qib_sdma_intr(ppd1);
2900 
2901 	if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2902 		qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2903 	if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2904 		qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2905 }
2906 
2907 /*
2908  * Set or clear the Send buffer available interrupt enable bit.
2909  */
2910 static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2911 {
2912 	unsigned long flags;
2913 
2914 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
2915 	if (needint)
2916 		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2917 	else
2918 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2919 	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2920 	qib_write_kreg(dd, kr_scratch, 0ULL);
2921 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2922 }
2923 
2924 /*
2925  * Somehow got an interrupt with reserved bits set in interrupt status.
2926  * Print a message so we know it happened, then clear them.
 * Keep the mainline interrupt handler cache-friendly.
2928  */
2929 static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2930 {
	u64 kills;

	kills = istat & ~QIB_I_BITSEXTANT;
	qib_dev_err(dd,
		"Clearing reserved interrupt(s) 0x%016llx\n",
		(unsigned long long) kills);
2938 	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2939 }
2940 
2941 /* keep mainline interrupt handler cache-friendly */
2942 static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2943 {
2944 	u32 gpiostatus;
2945 	int handled = 0;
2946 	int pidx;
2947 
2948 	/*
2949 	 * Boards for this chip currently don't use GPIO interrupts,
2950 	 * so clear by writing GPIOstatus to GPIOclear, and complain
	 * to the developer.  To avoid endless repeats, clear
2952 	 * the bits in the mask, since there is some kind of
2953 	 * programming error or chip problem.
2954 	 */
2955 	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2956 	/*
2957 	 * In theory, writing GPIOstatus to GPIOclear could
2958 	 * have a bad side-effect on some diagnostic that wanted
2959 	 * to poll for a status-change, but the various shadows
2960 	 * make that problematic at best. Diags will just suppress
2961 	 * all GPIO interrupts during such tests.
2962 	 */
2963 	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2964 	/*
2965 	 * Check for QSFP MOD_PRS changes
2966 	 * only works for single port if IB1 != pidx1
2967 	 */
2968 	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2969 	     ++pidx) {
2970 		struct qib_pportdata *ppd;
2971 		struct qib_qsfp_data *qd;
2972 		u32 mask;
2973 
2974 		if (!dd->pport[pidx].link_speed_supported)
2975 			continue;
2976 		mask = QSFP_GPIO_MOD_PRS_N;
2977 		ppd = dd->pport + pidx;
2978 		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2979 		if (gpiostatus & dd->cspec->gpio_mask & mask) {
2980 			u64 pins;
2981 
2982 			qd = &ppd->cpspec->qsfp_data;
2983 			gpiostatus &= ~mask;
2984 			pins = qib_read_kreg64(dd, kr_extstatus);
2985 			pins >>= SYM_LSB(EXTStatus, GPIOIn);
2986 			if (!(pins & mask)) {
2987 				++handled;
2988 				qd->t_insert = jiffies;
2989 				queue_work(ib_wq, &qd->work);
2990 			}
2991 		}
2992 	}
2993 
2994 	if (gpiostatus && !handled) {
2995 		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
2996 		u32 gpio_irq = mask & gpiostatus;
2997 
2998 		/*
2999 		 * Clear any troublemakers, and update chip from shadow
3000 		 */
3001 		dd->cspec->gpio_mask &= ~gpio_irq;
3002 		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
3003 	}
3004 }
3005 
3006 /*
3007  * Handle errors and unusual events first, separate function
3008  * to improve cache hits for fast path interrupt handling.
3009  */
3010 static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
3011 {
3012 	if (istat & ~QIB_I_BITSEXTANT)
3013 		unknown_7322_ibits(dd, istat);
3014 	if (istat & QIB_I_GPIO)
3015 		unknown_7322_gpio_intr(dd);
3016 	if (istat & QIB_I_C_ERROR) {
3017 		qib_write_kreg(dd, kr_errmask, 0ULL);
3018 		tasklet_schedule(&dd->error_tasklet);
3019 	}
3020 	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
3021 		handle_7322_p_errors(dd->rcd[0]->ppd);
3022 	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
3023 		handle_7322_p_errors(dd->rcd[1]->ppd);
3024 }
3025 
3026 /*
3027  * Dynamically adjust the rcv int timeout for a context based on incoming
3028  * packet rate.
3029  */
3030 static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
3031 {
3032 	struct qib_devdata *dd = rcd->dd;
3033 	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
3034 
3035 	/*
3036 	 * Dynamically adjust idle timeout on chip
3037 	 * based on number of packets processed.
3038 	 */
3039 	if (npkts < rcv_int_count && timeout > 2)
3040 		timeout >>= 1;
3041 	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
3042 		timeout = min(timeout << 1, rcv_int_timeout);
3043 	else
3044 		return;
3045 
3046 	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
3047 	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
3048 }
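
/*
 * A worked example of the adjustment above (the module parameter values
 * are illustrative, not quoted from their definitions): with
 * rcv_int_count = 16 and rcv_int_timeout = 375, a context that handled
 * only npkts = 3 on this interrupt halves its timeout (64 -> 32) as long
 * as it is still above 2, while one that handled npkts = 40 doubles it,
 * clamped to 375.  The shadow in dd->cspec->rcvavail_timeout[] always
 * tracks what was last written to kr_rcvavailtimeout for that context.
 */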
3049 
3050 /*
3051  * This is the main interrupt handler.
3052  * It will normally only be used for low frequency interrupts but may
3053  * have to handle all interrupts if INTx is enabled or fewer than normal
3054  * MSIx interrupts were allocated.
3055  * This routine should ignore the interrupt bits for any of the
3056  * dedicated MSIx handlers.
3057  */
3058 static irqreturn_t qib_7322intr(int irq, void *data)
3059 {
3060 	struct qib_devdata *dd = data;
3061 	irqreturn_t ret;
3062 	u64 istat;
3063 	u64 ctxtrbits;
3064 	u64 rmask;
3065 	unsigned i;
3066 	u32 npkts;
3067 
3068 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
3069 		/*
3070 		 * This return value is not great, but we do not want the
3071 		 * interrupt core code to remove our interrupt handler
3072 		 * because we don't appear to be handling an interrupt
3073 		 * during a chip reset.
3074 		 */
3075 		ret = IRQ_HANDLED;
3076 		goto bail;
3077 	}
3078 
3079 	istat = qib_read_kreg64(dd, kr_intstatus);
3080 
3081 	if (unlikely(istat == ~0ULL)) {
3082 		qib_bad_intrstatus(dd);
3083 		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
3084 		/* don't know if it was our interrupt or not */
3085 		ret = IRQ_NONE;
3086 		goto bail;
3087 	}
3088 
3089 	istat &= dd->cspec->main_int_mask;
3090 	if (unlikely(!istat)) {
3091 		/* already handled, or shared and not us */
3092 		ret = IRQ_NONE;
3093 		goto bail;
3094 	}
3095 
3096 	this_cpu_inc(*dd->int_counter);
3097 
3098 	/* handle "errors" of various kinds first, device ahead of port */
3099 	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
3100 			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
3101 			      INT_MASK_P(Err, 1))))
3102 		unlikely_7322_intr(dd, istat);
3103 
3104 	/*
3105 	 * Clear the interrupt bits we found set, relatively early, so we
3106 	 * "know" the chip will have seen this by the time we process
3107 	 * the queue, and will re-interrupt if necessary.  The processor
3108 	 * itself won't take the interrupt again until we return.
3109 	 */
3110 	qib_write_kreg(dd, kr_intclear, istat);
3111 
3112 	/*
3113 	 * Handle kernel receive queues before checking for pio buffers
3114 	 * available since receives can overflow; piobuf waiters can afford
3115 	 * a few extra cycles, since they were waiting anyway.
3116 	 */
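	/*
	 * Layout note, for illustration: QIB_I_RCVAVAIL_LSB and
	 * QIB_I_RCVURG_LSB are the positions of context 0's bits; context i
	 * uses the same two positions shifted up by i.  That is why a single
	 * rmask covering one RcvAvail/RcvUrg pair can be walked across all
	 * kernel contexts with one "rmask <<= 1" per iteration below.
	 */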
3117 	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
3118 	if (ctxtrbits) {
3119 		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
3120 			(1ULL << QIB_I_RCVURG_LSB);
3121 		for (i = 0; i < dd->first_user_ctxt; i++) {
3122 			if (ctxtrbits & rmask) {
3123 				ctxtrbits &= ~rmask;
3124 				if (dd->rcd[i])
3125 					qib_kreceive(dd->rcd[i], NULL, &npkts);
3126 			}
3127 			rmask <<= 1;
3128 		}
3129 		if (ctxtrbits) {
3130 			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
3131 				(ctxtrbits >> QIB_I_RCVURG_LSB);
3132 			qib_handle_urcv(dd, ctxtrbits);
3133 		}
3134 	}
3135 
3136 	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
3137 		sdma_7322_intr(dd, istat);
3138 
3139 	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
3140 		qib_ib_piobufavail(dd);
3141 
3142 	ret = IRQ_HANDLED;
3143 bail:
3144 	return ret;
3145 }
3146 
3147 /*
3148  * Dedicated receive packet available interrupt handler.
3149  */
3150 static irqreturn_t qib_7322pintr(int irq, void *data)
3151 {
3152 	struct qib_ctxtdata *rcd = data;
3153 	struct qib_devdata *dd = rcd->dd;
3154 	u32 npkts;
3155 
3156 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3157 		/*
3158 		 * This return value is not great, but we do not want the
3159 		 * interrupt core code to remove our interrupt handler
3160 		 * because we don't appear to be handling an interrupt
3161 		 * during a chip reset.
3162 		 */
3163 		return IRQ_HANDLED;
3164 
3165 	this_cpu_inc(*dd->int_counter);
3166 
3167 	/* Clear the interrupt bit we expect to be set. */
3168 	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3169 		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3170 
3171 	qib_kreceive(rcd, NULL, &npkts);
3172 
3173 	return IRQ_HANDLED;
3174 }
3175 
3176 /*
3177  * Dedicated Send buffer available interrupt handler.
3178  */
3179 static irqreturn_t qib_7322bufavail(int irq, void *data)
3180 {
3181 	struct qib_devdata *dd = data;
3182 
3183 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3184 		/*
3185 		 * This return value is not great, but we do not want the
3186 		 * interrupt core code to remove our interrupt handler
3187 		 * because we don't appear to be handling an interrupt
3188 		 * during a chip reset.
3189 		 */
3190 		return IRQ_HANDLED;
3191 
3192 	this_cpu_inc(*dd->int_counter);
3193 
3194 	/* Clear the interrupt bit we expect to be set. */
3195 	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
3196 
3197 	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3198 	if (dd->flags & QIB_INITTED)
3199 		qib_ib_piobufavail(dd);
3200 	else
3201 		qib_wantpiobuf_7322_intr(dd, 0);
3202 
3203 	return IRQ_HANDLED;
3204 }
3205 
3206 /*
3207  * Dedicated Send DMA interrupt handler.
3208  */
3209 static irqreturn_t sdma_intr(int irq, void *data)
3210 {
3211 	struct qib_pportdata *ppd = data;
3212 	struct qib_devdata *dd = ppd->dd;
3213 
3214 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3215 		/*
3216 		 * This return value is not great, but we do not want the
3217 		 * interrupt core code to remove our interrupt handler
3218 		 * because we don't appear to be handling an interrupt
3219 		 * during a chip reset.
3220 		 */
3221 		return IRQ_HANDLED;
3222 
3223 	this_cpu_inc(*dd->int_counter);
3224 
3225 	/* Clear the interrupt bit we expect to be set. */
3226 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3227 		       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3228 	qib_sdma_intr(ppd);
3229 
3230 	return IRQ_HANDLED;
3231 }
3232 
3233 /*
3234  * Dedicated Send DMA idle interrupt handler.
3235  */
3236 static irqreturn_t sdma_idle_intr(int irq, void *data)
3237 {
3238 	struct qib_pportdata *ppd = data;
3239 	struct qib_devdata *dd = ppd->dd;
3240 
3241 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3242 		/*
3243 		 * This return value is not great, but we do not want the
3244 		 * interrupt core code to remove our interrupt handler
3245 		 * because we don't appear to be handling an interrupt
3246 		 * during a chip reset.
3247 		 */
3248 		return IRQ_HANDLED;
3249 
3250 	this_cpu_inc(*dd->int_counter);
3251 
3252 	/* Clear the interrupt bit we expect to be set. */
3253 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3254 		       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3255 	qib_sdma_intr(ppd);
3256 
3257 	return IRQ_HANDLED;
3258 }
3259 
3260 /*
3261  * Dedicated Send DMA progress interrupt handler.
3262  */
3263 static irqreturn_t sdma_progress_intr(int irq, void *data)
3264 {
3265 	struct qib_pportdata *ppd = data;
3266 	struct qib_devdata *dd = ppd->dd;
3267 
3268 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3269 		/*
3270 		 * This return value is not great, but we do not want the
3271 		 * interrupt core code to remove our interrupt handler
3272 		 * because we don't appear to be handling an interrupt
3273 		 * during a chip reset.
3274 		 */
3275 		return IRQ_HANDLED;
3276 
3277 	this_cpu_inc(*dd->int_counter);
3278 
3279 	/* Clear the interrupt bit we expect to be set. */
3280 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3281 		       INT_MASK_P(SDmaProgress, 1) :
3282 		       INT_MASK_P(SDmaProgress, 0));
3283 	qib_sdma_intr(ppd);
3284 
3285 	return IRQ_HANDLED;
3286 }
3287 
3288 /*
3289  * Dedicated Send DMA cleanup interrupt handler.
3290  */
3291 static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3292 {
3293 	struct qib_pportdata *ppd = data;
3294 	struct qib_devdata *dd = ppd->dd;
3295 
3296 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3297 		/*
3298 		 * This return value is not great, but we do not want the
3299 		 * interrupt core code to remove our interrupt handler
3300 		 * because we don't appear to be handling an interrupt
3301 		 * during a chip reset.
3302 		 */
3303 		return IRQ_HANDLED;
3304 
3305 	this_cpu_inc(*dd->int_counter);
3306 
3307 	/* Clear the interrupt bit we expect to be set. */
3308 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3309 		       INT_MASK_PM(SDmaCleanupDone, 1) :
3310 		       INT_MASK_PM(SDmaCleanupDone, 0));
3311 	qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3312 
3313 	return IRQ_HANDLED;
3314 }
3315 
3316 #ifdef CONFIG_INFINIBAND_QIB_DCA
3317 
3318 static void reset_dca_notifier(struct qib_devdata *dd, int msixnum)
3319 {
3320 	if (!dd->cspec->msix_entries[msixnum].dca)
3321 		return;
3322 
3323 	qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n",
3324 		    dd->unit, pci_irq_vector(dd->pcidev, msixnum));
3325 	irq_set_affinity_notifier(pci_irq_vector(dd->pcidev, msixnum), NULL);
3326 	dd->cspec->msix_entries[msixnum].notifier = NULL;
3327 }
3328 
3329 static void setup_dca_notifier(struct qib_devdata *dd, int msixnum)
3330 {
3331 	struct qib_msix_entry *m = &dd->cspec->msix_entries[msixnum];
3332 	struct qib_irq_notify *n;
3333 
3334 	if (!m->dca)
3335 		return;
3336 	n = kzalloc(sizeof(*n), GFP_KERNEL);
3337 	if (n) {
3338 		int ret;
3339 
3340 		m->notifier = n;
3341 		n->notify.irq = pci_irq_vector(dd->pcidev, msixnum);
3342 		n->notify.notify = qib_irq_notifier_notify;
3343 		n->notify.release = qib_irq_notifier_release;
3344 		n->arg = m->arg;
3345 		n->rcv = m->rcv;
3346 		qib_devinfo(dd->pcidev,
3347 			"set notifier irq %d rcv %d notify %p\n",
3348 			n->notify.irq, n->rcv, &n->notify);
3349 		ret = irq_set_affinity_notifier(
3350 				n->notify.irq,
3351 				&n->notify);
3352 		if (ret) {
3353 			m->notifier = NULL;
3354 			kfree(n);
3355 		}
3356 	}
3357 }
3358 
3359 #endif
3360 
3361 /*
3362  * Set up our chip-specific interrupt handler.
3363  * The interrupt type has already been setup, so
3364  * we just need to do the registration and error checking.
3365  * If we are using MSIx interrupts, we may fall back to
3366  * INTx later, if the interrupt handler doesn't get called
3367  * within 1/2 second (see verify_interrupt()).
3368  */
3369 static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3370 {
3371 	int ret, i, msixnum;
3372 	u64 redirect[6];
3373 	u64 mask;
3374 	const struct cpumask *local_mask;
3375 	int firstcpu, secondcpu = 0, currrcvcpu = 0;
3376 
3377 	if (!dd->num_pports)
3378 		return;
3379 
3380 	if (clearpend) {
3381 		/*
3382 		 * if not switching interrupt types, be sure interrupts are
3383 		 * disabled, and then clear anything pending at this point,
3384 		 * because we are starting clean.
3385 		 */
3386 		qib_7322_set_intr_state(dd, 0);
3387 
3388 		/* clear the reset error, init error/hwerror mask */
3389 		qib_7322_init_hwerrors(dd);
3390 
3391 		/* clear any interrupt bits that might be set */
3392 		qib_write_kreg(dd, kr_intclear, ~0ULL);
3393 
3394 		/* make sure no pending MSIx intr, and clear diag reg */
3395 		qib_write_kreg(dd, kr_intgranted, ~0ULL);
3396 		qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3397 	}
3398 
3399 	if (!dd->cspec->num_msix_entries) {
3400 		/* Try to get INTx interrupt */
3401 try_intx:
3402 		ret = pci_request_irq(dd->pcidev, 0, qib_7322intr, NULL, dd,
3403 				      QIB_DRV_NAME);
3404 		if (ret) {
3405 			qib_dev_err(
3406 				dd,
3407 				"Couldn't setup INTx interrupt (irq=%d): %d\n",
3408 				pci_irq_vector(dd->pcidev, 0), ret);
3409 			return;
3410 		}
3411 		dd->cspec->main_int_mask = ~0ULL;
3412 		return;
3413 	}
3414 
3415 	/* Try to get MSIx interrupts */
3416 	memset(redirect, 0, sizeof(redirect));
3417 	mask = ~0ULL;
3418 	msixnum = 0;
3419 	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3420 	firstcpu = cpumask_first(local_mask);
3421 	if (firstcpu >= nr_cpu_ids ||
3422 			cpumask_weight(local_mask) == num_online_cpus()) {
3423 		local_mask = topology_core_cpumask(0);
3424 		firstcpu = cpumask_first(local_mask);
3425 	}
3426 	if (firstcpu < nr_cpu_ids) {
3427 		secondcpu = cpumask_next(firstcpu, local_mask);
3428 		if (secondcpu >= nr_cpu_ids)
3429 			secondcpu = firstcpu;
3430 		currrcvcpu = secondcpu;
3431 	}
3432 	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3433 		irq_handler_t handler;
3434 		void *arg;
3435 		int lsb, reg, sh;
3436 #ifdef CONFIG_INFINIBAND_QIB_DCA
3437 		int dca = 0;
3438 #endif
3439 		if (i < ARRAY_SIZE(irq_table)) {
3440 			if (irq_table[i].port) {
3441 				/* skip if for a non-configured port */
3442 				if (irq_table[i].port > dd->num_pports)
3443 					continue;
3444 				arg = dd->pport + irq_table[i].port - 1;
3445 			} else
3446 				arg = dd;
3447 #ifdef CONFIG_INFINIBAND_QIB_DCA
3448 			dca = irq_table[i].dca;
3449 #endif
3450 			lsb = irq_table[i].lsb;
3451 			handler = irq_table[i].handler;
3452 			ret = pci_request_irq(dd->pcidev, msixnum, handler,
3453 					      NULL, arg, QIB_DRV_NAME "%d%s",
3454 					      dd->unit,
3455 					      irq_table[i].name);
3456 		} else {
3457 			unsigned ctxt;
3458 
3459 			ctxt = i - ARRAY_SIZE(irq_table);
3460 			/* per krcvq context receive interrupt */
3461 			arg = dd->rcd[ctxt];
3462 			if (!arg)
3463 				continue;
3464 			if (qib_krcvq01_no_msi && ctxt < 2)
3465 				continue;
3466 #ifdef CONFIG_INFINIBAND_QIB_DCA
3467 			dca = 1;
3468 #endif
3469 			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3470 			handler = qib_7322pintr;
3471 			ret = pci_request_irq(dd->pcidev, msixnum, handler,
3472 					      NULL, arg,
3473 					      QIB_DRV_NAME "%d (kctx)",
3474 					      dd->unit);
3475 		}
3476 
3477 		if (ret) {
3478 			/*
3479 			 * Shouldn't happen since the enable said we could
3480 			 * have as many as we are trying to setup here.
3481 			 */
3482 			qib_dev_err(dd,
3483 				    "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3484 				    msixnum,
3485 				    pci_irq_vector(dd->pcidev, msixnum),
3486 				    ret);
3487 			qib_7322_free_irq(dd);
3488 			pci_alloc_irq_vectors(dd->pcidev, 1, 1,
3489 					      PCI_IRQ_LEGACY);
3490 			goto try_intx;
3491 		}
3492 		dd->cspec->msix_entries[msixnum].arg = arg;
3493 #ifdef CONFIG_INFINIBAND_QIB_DCA
3494 		dd->cspec->msix_entries[msixnum].dca = dca;
3495 		dd->cspec->msix_entries[msixnum].rcv =
3496 			handler == qib_7322pintr;
3497 #endif
3498 		if (lsb >= 0) {
3499 			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3500 			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3501 				SYM_LSB(IntRedirect0, vec1);
3502 			mask &= ~(1ULL << lsb);
3503 			redirect[reg] |= ((u64) msixnum) << sh;
3504 		}
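		/*
		 * For illustration (vector-field counts are examples, the
		 * real value comes from IBA7322_REDIRECT_VEC_PER_REG): a
		 * source whose status bit is lsb = 30, with 12 fields per
		 * redirect register, lands in redirect[30 / 12] = redirect[2]
		 * at field 30 % 12 = 6, and the field value is simply
		 * msixnum.  Clearing bit 30 in "mask" keeps the catch-all
		 * qib_7322intr() from also claiming that source.
		 */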
3505 		qib_read_kreg64(dd, 2 * msixnum + 1 +
3506 				(QIB_7322_MsixTable_OFFS / sizeof(u64)));
3507 		if (firstcpu < nr_cpu_ids &&
3508 			zalloc_cpumask_var(
3509 				&dd->cspec->msix_entries[msixnum].mask,
3510 				GFP_KERNEL)) {
3511 			if (handler == qib_7322pintr) {
3512 				cpumask_set_cpu(currrcvcpu,
3513 					dd->cspec->msix_entries[msixnum].mask);
3514 				currrcvcpu = cpumask_next(currrcvcpu,
3515 					local_mask);
3516 				if (currrcvcpu >= nr_cpu_ids)
3517 					currrcvcpu = secondcpu;
3518 			} else {
3519 				cpumask_set_cpu(firstcpu,
3520 					dd->cspec->msix_entries[msixnum].mask);
3521 			}
3522 			irq_set_affinity_hint(
3523 				pci_irq_vector(dd->pcidev, msixnum),
3524 				dd->cspec->msix_entries[msixnum].mask);
3525 		}
3526 		msixnum++;
3527 	}
3528 	/* Initialize the vector mapping */
3529 	for (i = 0; i < ARRAY_SIZE(redirect); i++)
3530 		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3531 	dd->cspec->main_int_mask = mask;
3532 	tasklet_init(&dd->error_tasklet, qib_error_tasklet,
3533 		(unsigned long)dd);
3534 }
3535 
3536 /**
3537  * qib_7322_boardname - fill in the board name and note features
3538  * @dd: the qlogic_ib device
3539  *
3540  * info will be based on the board revision register
3541  */
3542 static unsigned qib_7322_boardname(struct qib_devdata *dd)
3543 {
3544 	/* Will need enumeration of board-types here */
3545 	u32 boardid;
3546 	unsigned int features = DUAL_PORT_CAP;
3547 
3548 	boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3549 
3550 	switch (boardid) {
3551 	case 0:
3552 		dd->boardname = "InfiniPath_QLE7342_Emulation";
3553 		break;
3554 	case 1:
3555 		dd->boardname = "InfiniPath_QLE7340";
3556 		dd->flags |= QIB_HAS_QSFP;
3557 		features = PORT_SPD_CAP;
3558 		break;
3559 	case 2:
3560 		dd->boardname = "InfiniPath_QLE7342";
3561 		dd->flags |= QIB_HAS_QSFP;
3562 		break;
3563 	case 3:
3564 		dd->boardname = "InfiniPath_QMI7342";
3565 		break;
3566 	case 4:
3567 		dd->boardname = "InfiniPath_Unsupported7342";
3568 		qib_dev_err(dd, "Unsupported version of QMH7342\n");
3569 		features = 0;
3570 		break;
3571 	case BOARD_QMH7342:
3572 		dd->boardname = "InfiniPath_QMH7342";
3573 		features = 0x24;
3574 		break;
3575 	case BOARD_QME7342:
3576 		dd->boardname = "InfiniPath_QME7342";
3577 		break;
3578 	case 8:
3579 		dd->boardname = "InfiniPath_QME7362";
3580 		dd->flags |= QIB_HAS_QSFP;
3581 		break;
3582 	case BOARD_QMH7360:
3583 		dd->boardname = "Intel IB QDR 1P FLR-QSFP Adptr";
3584 		dd->flags |= QIB_HAS_QSFP;
3585 		break;
3586 	case 15:
3587 		dd->boardname = "InfiniPath_QLE7342_TEST";
3588 		dd->flags |= QIB_HAS_QSFP;
3589 		break;
3590 	default:
3591 		dd->boardname = "InfiniPath_QLE73xy_UNKNOWN";
3592 		qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3593 		break;
3594 	}
3595 	dd->board_atten = 1; /* index into txdds_Xdr */
3596 
3597 	snprintf(dd->boardversion, sizeof(dd->boardversion),
3598 		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3599 		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3600 		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
3601 		 dd->majrev, dd->minrev,
3602 		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
3603 
3604 	if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3605 		qib_devinfo(dd->pcidev,
3606 			    "IB%u: Forced to single port mode by module parameter\n",
3607 			    dd->unit);
3608 		features &= PORT_SPD_CAP;
3609 	}
3610 
3611 	return features;
3612 }
3613 
3614 /*
3615  * This routine sleeps, so it can only be called from user context, not
3616  * from interrupt context.
3617  */
3618 static int qib_do_7322_reset(struct qib_devdata *dd)
3619 {
3620 	u64 val;
3621 	u64 *msix_vecsave = NULL;
3622 	int i, msix_entries, ret = 1;
3623 	u16 cmdval;
3624 	u8 int_line, clinesz;
3625 	unsigned long flags;
3626 
3627 	/* Use dev_err so it shows up in logs, etc. */
3628 	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3629 
3630 	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3631 
3632 	msix_entries = dd->cspec->num_msix_entries;
3633 
3634 	/* no interrupts till re-initted */
3635 	qib_7322_set_intr_state(dd, 0);
3636 
3637 	qib_7322_free_irq(dd);
3638 
3639 	if (msix_entries) {
3640 		/* can be up to 512 bytes, too big for stack */
3641 		msix_vecsave = kmalloc_array(2 * dd->cspec->num_msix_entries,
3642 					     sizeof(u64), GFP_KERNEL);
3643 	}
3644 
3645 	/*
3646 	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3647 	 * info that is set up by the BIOS, so we have to save and restore
3648 	 * it ourselves.   There is some risk something could change it,
3649 	 * after we save it, but since we have disabled the MSIx, it
3650 	 * shouldn't be touched...
3651 	 */
3652 	for (i = 0; i < msix_entries; i++) {
3653 		u64 vecaddr, vecdata;
3654 
3655 		vecaddr = qib_read_kreg64(dd, 2 * i +
3656 				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3657 		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3658 				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3659 		if (msix_vecsave) {
3660 			msix_vecsave[2 * i] = vecaddr;
3661 			/* save it without the masked bit set */
3662 			msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3663 		}
3664 	}
3665 
3666 	dd->pport->cpspec->ibdeltainprog = 0;
3667 	dd->pport->cpspec->ibsymdelta = 0;
3668 	dd->pport->cpspec->iblnkerrdelta = 0;
3669 	dd->pport->cpspec->ibmalfdelta = 0;
3670 	/* so we check interrupts work again */
3671 	dd->z_int_counter = qib_int_counter(dd);
3672 
3673 	/*
3674 	 * Keep chip from being accessed until we are ready.  Use
3675 	 * writeq() directly, to allow the write even though QIB_PRESENT
3676 	 * isn't set.
3677 	 */
3678 	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3679 	dd->flags |= QIB_DOING_RESET;
3680 	val = dd->control | QLOGIC_IB_C_RESET;
3681 	writeq(val, &dd->kregbase[kr_control]);
3682 
3683 	for (i = 1; i <= 5; i++) {
3684 		/*
3685 		 * Allow MBIST, etc. to complete; longer on each retry.
3686 		 * We sometimes get machine checks from bus timeout if no
3687 		 * response, so for now, make it *really* long.
3688 		 */
3689 		msleep(1000 + (1 + i) * 3000);
3690 
3691 		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3692 
3693 		/*
3694 		 * Use readq directly, so we don't need to mark it as PRESENT
3695 		 * until we get a successful indication that all is well.
3696 		 */
3697 		val = readq(&dd->kregbase[kr_revision]);
3698 		if (val == dd->revision)
3699 			break;
3700 		if (i == 5) {
3701 			qib_dev_err(dd,
3702 				"Failed to initialize after reset, unusable\n");
3703 			ret = 0;
3704 			goto  bail;
3705 		}
3706 	}
3707 
3708 	dd->flags |= QIB_PRESENT; /* it's back */
3709 
3710 	if (msix_entries) {
3711 		/* restore the MSIx vector address and data if saved above */
3712 		for (i = 0; i < msix_entries; i++) {
3713 			if (!msix_vecsave || !msix_vecsave[2 * i])
3714 				continue;
3715 			qib_write_kreg(dd, 2 * i +
3716 				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3717 				msix_vecsave[2 * i]);
3718 			qib_write_kreg(dd, 1 + 2 * i +
3719 				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3720 				msix_vecsave[1 + 2 * i]);
3721 		}
3722 	}
3723 
3724 	/* initialize the remaining registers.  */
3725 	for (i = 0; i < dd->num_pports; ++i)
3726 		write_7322_init_portregs(&dd->pport[i]);
3727 	write_7322_initregs(dd);
3728 
3729 	if (qib_pcie_params(dd, dd->lbus_width, &msix_entries))
3730 		qib_dev_err(dd,
3731 			"Reset failed to setup PCIe or interrupts; continuing anyway\n");
3732 
3733 	dd->cspec->num_msix_entries = msix_entries;
3734 	qib_setup_7322_interrupt(dd, 1);
3735 
3736 	for (i = 0; i < dd->num_pports; ++i) {
3737 		struct qib_pportdata *ppd = &dd->pport[i];
3738 
3739 		spin_lock_irqsave(&ppd->lflags_lock, flags);
3740 		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3741 		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3742 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3743 	}
3744 
3745 bail:
3746 	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3747 	kfree(msix_vecsave);
3748 	return ret;
3749 }
3750 
3751 /**
3752  * qib_7322_put_tid - write a TID to the chip
3753  * @dd: the qlogic_ib device
3754  * @tidptr: pointer to the expected TID (in chip) to update
3755  * @type: 0 for eager, 1 for expected
3756  * @pa: physical address of in-memory buffer; tidinvalid if freeing
3757  */
3758 static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3759 			     u32 type, unsigned long pa)
3760 {
3761 	if (!(dd->flags & QIB_PRESENT))
3762 		return;
3763 	if (pa != dd->tidinvalid) {
3764 		u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3765 
3766 		/* paranoia checks */
3767 		if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3768 			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3769 				    pa);
3770 			return;
3771 		}
3772 		if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3773 			qib_dev_err(dd,
3774 				"Physical page address 0x%lx larger than supported\n",
3775 				pa);
3776 			return;
3777 		}
3778 
3779 		if (type == RCVHQ_RCV_TYPE_EAGER)
3780 			chippa |= dd->tidtemplate;
3781 		else /* for now, always full 4KB page */
3782 			chippa |= IBA7322_TID_SZ_4K;
3783 		pa = chippa;
3784 	}
3785 	writeq(pa, tidptr);
3786 	mmiowb();
3787 }
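
/*
 * Illustration of the packing above (the address value is made up): for
 * a 2KB-aligned buffer at pa = 0x7f3cf800, chippa is pa shifted down by
 * IBA7322_TID_PA_SHIFT; the first check verifies that shifting back up
 * reproduces pa (i.e. the alignment bits really were zero), the second
 * that the resulting page number fits the chip's TID field, and then a
 * buffer-size code (tidtemplate for eager, IBA7322_TID_SZ_4K for
 * expected) is OR'd in before the 64-bit value is written to the TID
 * slot.
 */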
3788 
3789 /**
3790  * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3791  * @dd: the qlogic_ib device
3792  * @rcd: the context (ctxt) data
3793  *
3794  * clear all TID entries for a ctxt, expected and eager.
3795  * Used from qib_close().
3796  */
3797 static void qib_7322_clear_tids(struct qib_devdata *dd,
3798 				struct qib_ctxtdata *rcd)
3799 {
3800 	u64 __iomem *tidbase;
3801 	unsigned long tidinv;
3802 	u32 ctxt;
3803 	int i;
3804 
3805 	if (!dd->kregbase || !rcd)
3806 		return;
3807 
3808 	ctxt = rcd->ctxt;
3809 
3810 	tidinv = dd->tidinvalid;
3811 	tidbase = (u64 __iomem *)
3812 		((char __iomem *) dd->kregbase +
3813 		 dd->rcvtidbase +
3814 		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3815 
3816 	for (i = 0; i < dd->rcvtidcnt; i++)
3817 		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3818 				 tidinv);
3819 
3820 	tidbase = (u64 __iomem *)
3821 		((char __iomem *) dd->kregbase +
3822 		 dd->rcvegrbase +
3823 		 rcd->rcvegr_tid_base * sizeof(*tidbase));
3824 
3825 	for (i = 0; i < rcd->rcvegrcnt; i++)
3826 		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3827 				 tidinv);
3828 }
3829 
3830 /**
3831  * qib_7322_tidtemplate - setup constants for TID updates
3832  * @dd: the qlogic_ib device
3833  *
3834  * We set up values that we use a lot, to avoid recalculating each time
3835  */
3836 static void qib_7322_tidtemplate(struct qib_devdata *dd)
3837 {
3838 	/*
3839 	 * For now, we always allocate 4KB buffers (at init) so we can
3840 	 * receive max size packets.  We may want a module parameter to
3841 	 * specify 2KB or 4KB and/or make it per port instead of per device
3842 	 * for those who want to reduce memory footprint.  Note that the
3843 	 * rcvhdrentsize size must be large enough to hold the largest
3844 	 * IB header (currently 96 bytes) that we expect to handle (plus of
3845 	 * course the 2 dwords of RHF).
3846 	 */
3847 	if (dd->rcvegrbufsize == 2048)
3848 		dd->tidtemplate = IBA7322_TID_SZ_2K;
3849 	else if (dd->rcvegrbufsize == 4096)
3850 		dd->tidtemplate = IBA7322_TID_SZ_4K;
3851 	dd->tidinvalid = 0;
3852 }
3853 
3854 /**
3855  * qib_7322_get_base_info - set chip-specific flags for user code
3856  * @rcd: the qlogic_ib ctxt
3857  * @kinfo: qib_base_info pointer
3858  *
3859  * We set the PCIE flag because the lower bandwidth on PCIe vs
3860  * HyperTransport can affect some user packet algorithms.
3861  */
3862 
3863 static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3864 				  struct qib_base_info *kinfo)
3865 {
3866 	kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3867 		QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3868 		QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3869 	if (rcd->dd->cspec->r1)
3870 		kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3871 	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3872 		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3873 
3874 	return 0;
3875 }
3876 
3877 static struct qib_message_header *
3878 qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3879 {
3880 	u32 offset = qib_hdrget_offset(rhf_addr);
3881 
3882 	return (struct qib_message_header *)
3883 		(rhf_addr - dd->rhf_offset + offset);
3884 }
3885 
3886 /*
3887  * Configure number of contexts.
3888  */
3889 static void qib_7322_config_ctxts(struct qib_devdata *dd)
3890 {
3891 	unsigned long flags;
3892 	u32 nchipctxts;
3893 
3894 	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3895 	dd->cspec->numctxts = nchipctxts;
3896 	if (qib_n_krcv_queues > 1 && dd->num_pports) {
3897 		dd->first_user_ctxt = NUM_IB_PORTS +
3898 			(qib_n_krcv_queues - 1) * dd->num_pports;
3899 		if (dd->first_user_ctxt > nchipctxts)
3900 			dd->first_user_ctxt = nchipctxts;
3901 		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3902 	} else {
3903 		dd->first_user_ctxt = NUM_IB_PORTS;
3904 		dd->n_krcv_queues = 1;
3905 	}
3906 
3907 	if (!qib_cfgctxts) {
3908 		int nctxts = dd->first_user_ctxt + num_online_cpus();
3909 
3910 		if (nctxts <= 6)
3911 			dd->ctxtcnt = 6;
3912 		else if (nctxts <= 10)
3913 			dd->ctxtcnt = 10;
3914 		else if (nctxts <= nchipctxts)
3915 			dd->ctxtcnt = nchipctxts;
3916 	} else if (qib_cfgctxts < dd->num_pports)
3917 		dd->ctxtcnt = dd->num_pports;
3918 	else if (qib_cfgctxts <= nchipctxts)
3919 		dd->ctxtcnt = qib_cfgctxts;
3920 	if (!dd->ctxtcnt) /* none of the above, set to max */
3921 		dd->ctxtcnt = nchipctxts;
3922 
3923 	/*
3924 	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
3925 	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3926 	 * Lock to be paranoid about later motion, etc.
3927 	 */
3928 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3929 	if (dd->ctxtcnt > 10)
3930 		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3931 	else if (dd->ctxtcnt > 6)
3932 		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3933 	/* else configure for default 6 receive ctxts */
3934 
3935 	/* The XRC opcode is 5. */
3936 	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3937 
3938 	/*
3939 	 * RcvCtrl *must* be written here so that the
3940 	 * chip understands how to change rcvegrcnt below.
3941 	 */
3942 	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3943 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3944 
3945 	/* kr_rcvegrcnt changes based on the number of contexts enabled */
3946 	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3947 	if (qib_rcvhdrcnt)
3948 		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3949 	else
3950 		dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
3951 				    dd->num_pports > 1 ? 1024U : 2048U);
3952 }
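
/*
 * Sizing example for the logic above (CPU count and module parameters
 * are illustrative): a dual-port board with qib_n_krcv_queues = 2 gets
 * first_user_ctxt = NUM_IB_PORTS + (2 - 1) * 2 = 4 kernel contexts
 * (taking NUM_IB_PORTS as 2 for this dual-port chip).  With qib_cfgctxts
 * unset and 8 online CPUs, nctxts = 4 + 8 = 12, which is above 10, so
 * ctxtcnt becomes the full chip context count and ContextCfg is
 * programmed accordingly; kr_rcvegrcnt is then re-read because the
 * eager-TID split depends on that choice.
 */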
3953 
3954 static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3955 {
3956 
3957 	int lsb, ret = 0;
3958 	u64 maskr; /* right-justified mask */
3959 
3960 	switch (which) {
3961 
3962 	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
3963 		ret = ppd->link_width_enabled;
3964 		goto done;
3965 
3966 	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
3967 		ret = ppd->link_width_active;
3968 		goto done;
3969 
3970 	case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
3971 		ret = ppd->link_speed_enabled;
3972 		goto done;
3973 
3974 	case QIB_IB_CFG_SPD: /* Get current Link spd */
3975 		ret = ppd->link_speed_active;
3976 		goto done;
3977 
3978 	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
3979 		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3980 		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3981 		break;
3982 
3983 	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
3984 		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3985 		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3986 		break;
3987 
3988 	case QIB_IB_CFG_LINKLATENCY:
3989 		ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
3990 			SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
3991 		goto done;
3992 
3993 	case QIB_IB_CFG_OP_VLS:
3994 		ret = ppd->vls_operational;
3995 		goto done;
3996 
3997 	case QIB_IB_CFG_VL_HIGH_CAP:
3998 		ret = 16;
3999 		goto done;
4000 
4001 	case QIB_IB_CFG_VL_LOW_CAP:
4002 		ret = 16;
4003 		goto done;
4004 
4005 	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4006 		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4007 				OverrunThreshold);
4008 		goto done;
4009 
4010 	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4011 		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4012 				PhyerrThreshold);
4013 		goto done;
4014 
4015 	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4016 		/* will only take effect when the link state changes */
4017 		ret = (ppd->cpspec->ibcctrl_a &
4018 		       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
4019 			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
4020 		goto done;
4021 
4022 	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
4023 		lsb = IBA7322_IBC_HRTBT_LSB;
4024 		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4025 		break;
4026 
4027 	case QIB_IB_CFG_PMA_TICKS:
4028 		/*
4029 		 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
4030 		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
4031 		 */
4032 		if (ppd->link_speed_active == QIB_IB_QDR)
4033 			ret = 3;
4034 		else if (ppd->link_speed_active == QIB_IB_DDR)
4035 			ret = 1;
4036 		else
4037 			ret = 0;
4038 		goto done;
4039 
4040 	default:
4041 		ret = -EINVAL;
4042 		goto done;
4043 	}
4044 	ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
4045 done:
4046 	return ret;
4047 }
4048 
4049 /*
4050  * Below again cribbed liberally from older version. Do not lean
4051  * heavily on it.
4052  */
4053 #define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
4054 #define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
4055 	| (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
4056 
4057 static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4058 {
4059 	struct qib_devdata *dd = ppd->dd;
4060 	u64 maskr; /* right-justified mask */
4061 	int lsb, ret = 0;
4062 	u16 lcmd, licmd;
4063 	unsigned long flags;
4064 
4065 	switch (which) {
4066 	case QIB_IB_CFG_LIDLMC:
4067 		/*
4068 		 * Set LID and LMC. Combined to avoid a possible hazard; the
4069 		 * caller puts LMC in the 16 MSbits, DLID in the 16 LSbits of val.
4070 		 */
4071 		lsb = IBA7322_IBC_DLIDLMC_SHIFT;
4072 		maskr = IBA7322_IBC_DLIDLMC_MASK;
4073 		/*
4074 		 * For header-checking, the SLID in the packet will
4075 		 * be masked with SendIBSLMCMask, and compared
4076 		 * with SendIBSLIDAssignMask. Make sure we do not
4077 		 * set any bits not covered by the mask, or we get
4078 		 * false-positives.
4079 		 */
4080 		qib_write_kreg_port(ppd, krp_sendslid,
4081 				    val & (val >> 16) & SendIBSLIDAssignMask);
4082 		qib_write_kreg_port(ppd, krp_sendslidmask,
4083 				    (val >> 16) & SendIBSLMCMask);
4084 		break;
4085 
4086 	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
4087 		ppd->link_width_enabled = val;
4088 		/* convert IB value to chip register value */
4089 		if (val == IB_WIDTH_1X)
4090 			val = 0;
4091 		else if (val == IB_WIDTH_4X)
4092 			val = 1;
4093 		else
4094 			val = 3;
4095 		maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
4096 		lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
4097 		break;
4098 
4099 	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
4100 		/*
4101 		 * As with width, only write the actual register if the
4102 		 * link is currently down, otherwise takes effect on next
4103 		 * link change.  Since setting is being explicitly requested
4104 		 * (via MAD or sysfs), clear autoneg failure status if speed
4105 		 * autoneg is enabled.
4106 		 */
4107 		ppd->link_speed_enabled = val;
4108 		val <<= IBA7322_IBC_SPEED_LSB;
4109 		maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
4110 			IBA7322_IBC_MAX_SPEED_MASK;
4111 		if (val & (val - 1)) {
4112 			/* Multiple speeds enabled */
4113 			val |= IBA7322_IBC_IBTA_1_2_MASK |
4114 				IBA7322_IBC_MAX_SPEED_MASK;
4115 			spin_lock_irqsave(&ppd->lflags_lock, flags);
4116 			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4117 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4118 		} else if (val & IBA7322_IBC_SPEED_QDR)
4119 			val |= IBA7322_IBC_IBTA_1_2_MASK;
4120 		/* IBTA 1.2 mode + min/max + speed bits are contiguous */
4121 		lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
4122 		break;
4123 
4124 	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
4125 		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4126 		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4127 		break;
4128 
4129 	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
4130 		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4131 		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4132 		break;
4133 
4134 	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4135 		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4136 				  OverrunThreshold);
4137 		if (maskr != val) {
4138 			ppd->cpspec->ibcctrl_a &=
4139 				~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
4140 			ppd->cpspec->ibcctrl_a |= (u64) val <<
4141 				SYM_LSB(IBCCtrlA_0, OverrunThreshold);
4142 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
4143 					    ppd->cpspec->ibcctrl_a);
4144 			qib_write_kreg(dd, kr_scratch, 0ULL);
4145 		}
4146 		goto bail;
4147 
4148 	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4149 		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4150 				  PhyerrThreshold);
4151 		if (maskr != val) {
4152 			ppd->cpspec->ibcctrl_a &=
4153 				~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
4154 			ppd->cpspec->ibcctrl_a |= (u64) val <<
4155 				SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
4156 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
4157 					    ppd->cpspec->ibcctrl_a);
4158 			qib_write_kreg(dd, kr_scratch, 0ULL);
4159 		}
4160 		goto bail;
4161 
4162 	case QIB_IB_CFG_PKEYS: /* update pkeys */
4163 		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4164 			((u64) ppd->pkeys[2] << 32) |
4165 			((u64) ppd->pkeys[3] << 48);
4166 		qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4167 		goto bail;
4168 
4169 	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4170 		/* will only take effect when the link state changes */
4171 		if (val == IB_LINKINITCMD_POLL)
4172 			ppd->cpspec->ibcctrl_a &=
4173 				~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4174 		else /* SLEEP */
4175 			ppd->cpspec->ibcctrl_a |=
4176 				SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4177 		qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4178 		qib_write_kreg(dd, kr_scratch, 0ULL);
4179 		goto bail;
4180 
4181 	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
4182 		/*
4183 		 * Update our housekeeping variables, and set IBC max
4184 		 * size, same as init code; max IBC is max we allow in
4185 		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
4186 		 * Set even if it's unchanged; print a debug message only
4187 		 * on changes.
4188 		 */
4189 		val = (ppd->ibmaxlen >> 2) + 1;
4190 		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4191 		ppd->cpspec->ibcctrl_a |= (u64)val <<
4192 			SYM_LSB(IBCCtrlA_0, MaxPktLen);
4193 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
4194 				    ppd->cpspec->ibcctrl_a);
4195 		qib_write_kreg(dd, kr_scratch, 0ULL);
4196 		goto bail;
4197 
4198 	case QIB_IB_CFG_LSTATE: /* set the IB link state */
4199 		switch (val & 0xffff0000) {
4200 		case IB_LINKCMD_DOWN:
4201 			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
4202 			ppd->cpspec->ibmalfusesnap = 1;
4203 			ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4204 				crp_errlink);
4205 			if (!ppd->cpspec->ibdeltainprog &&
4206 			    qib_compat_ddr_negotiate) {
4207 				ppd->cpspec->ibdeltainprog = 1;
4208 				ppd->cpspec->ibsymsnap =
4209 					read_7322_creg32_port(ppd,
4210 							      crp_ibsymbolerr);
4211 				ppd->cpspec->iblnkerrsnap =
4212 					read_7322_creg32_port(ppd,
4213 						      crp_iblinkerrrecov);
4214 			}
4215 			break;
4216 
4217 		case IB_LINKCMD_ARMED:
4218 			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4219 			if (ppd->cpspec->ibmalfusesnap) {
4220 				ppd->cpspec->ibmalfusesnap = 0;
4221 				ppd->cpspec->ibmalfdelta +=
4222 					read_7322_creg32_port(ppd,
4223 							      crp_errlink) -
4224 					ppd->cpspec->ibmalfsnap;
4225 			}
4226 			break;
4227 
4228 		case IB_LINKCMD_ACTIVE:
4229 			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4230 			break;
4231 
4232 		default:
4233 			ret = -EINVAL;
4234 			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4235 			goto bail;
4236 		}
4237 		switch (val & 0xffff) {
4238 		case IB_LINKINITCMD_NOP:
4239 			licmd = 0;
4240 			break;
4241 
4242 		case IB_LINKINITCMD_POLL:
4243 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4244 			break;
4245 
4246 		case IB_LINKINITCMD_SLEEP:
4247 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4248 			break;
4249 
4250 		case IB_LINKINITCMD_DISABLE:
4251 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4252 			ppd->cpspec->chase_end = 0;
4253 			/*
4254 			 * stop state chase counter and timer, if running.
4255 			 * wait for pending timer, but don't clear .data (ppd)!
4256 			 */
4257 			if (ppd->cpspec->chase_timer.expires) {
4258 				del_timer_sync(&ppd->cpspec->chase_timer);
4259 				ppd->cpspec->chase_timer.expires = 0;
4260 			}
4261 			break;
4262 
4263 		default:
4264 			ret = -EINVAL;
4265 			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4266 				    val & 0xffff);
4267 			goto bail;
4268 		}
4269 		qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4270 		goto bail;
4271 
4272 	case QIB_IB_CFG_OP_VLS:
4273 		if (ppd->vls_operational != val) {
4274 			ppd->vls_operational = val;
4275 			set_vls(ppd);
4276 		}
4277 		goto bail;
4278 
4279 	case QIB_IB_CFG_VL_HIGH_LIMIT:
4280 		qib_write_kreg_port(ppd, krp_highprio_limit, val);
4281 		goto bail;
4282 
4283 	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
4284 		if (val > 3) {
4285 			ret = -EINVAL;
4286 			goto bail;
4287 		}
4288 		lsb = IBA7322_IBC_HRTBT_LSB;
4289 		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4290 		break;
4291 
4292 	case QIB_IB_CFG_PORT:
4293 		/* val is the port number of the switch we are connected to. */
4294 		if (ppd->dd->cspec->r1) {
4295 			cancel_delayed_work(&ppd->cpspec->ipg_work);
4296 			ppd->cpspec->ipg_tries = 0;
4297 		}
4298 		goto bail;
4299 
4300 	default:
4301 		ret = -EINVAL;
4302 		goto bail;
4303 	}
4304 	ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4305 	ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4306 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4307 	qib_write_kreg(dd, kr_scratch, 0);
4308 bail:
4309 	return ret;
4310 }
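
/*
 * Usage sketch for the read-modify-write tail above (not an actual call
 * site; callers live elsewhere in the driver):
 *
 *	qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, 1);
 *
 * clears the heartbeat field in the ibcctrl_b shadow, ORs the new value
 * in at IBA7322_IBC_HRTBT_LSB, writes krp_ibcctrl_b, and flushes with a
 * scratch write.  Values above 3 are rejected with -EINVAL, matching the
 * off/enable/auto encoding noted in qib_7322_get_ib_cfg().
 */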
4311 
4312 static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4313 {
4314 	int ret = 0;
4315 	u64 val, ctrlb;
4316 
4317 	/* only IBC loopback, may add serdes and xgxs loopbacks later */
4318 	if (!strncmp(what, "ibc", 3)) {
4319 		ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4320 						       Loopback);
4321 		val = 0; /* disable heart beat, so link will come up */
4322 		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4323 			 ppd->dd->unit, ppd->port);
4324 	} else if (!strncmp(what, "off", 3)) {
4325 		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4326 							Loopback);
4327 		/* enable heart beat again */
4328 		val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4329 		qib_devinfo(ppd->dd->pcidev,
4330 			"Disabling IB%u:%u IBC loopback (normal)\n",
4331 			ppd->dd->unit, ppd->port);
4332 	} else
4333 		ret = -EINVAL;
4334 	if (!ret) {
4335 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
4336 				    ppd->cpspec->ibcctrl_a);
4337 		ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4338 					     << IBA7322_IBC_HRTBT_LSB);
4339 		ppd->cpspec->ibcctrl_b = ctrlb | val;
4340 		qib_write_kreg_port(ppd, krp_ibcctrl_b,
4341 				    ppd->cpspec->ibcctrl_b);
4342 		qib_write_kreg(ppd->dd, kr_scratch, 0);
4343 	}
4344 	return ret;
4345 }
4346 
4347 static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4348 			   struct ib_vl_weight_elem *vl)
4349 {
4350 	unsigned i;
4351 
4352 	for (i = 0; i < 16; i++, regno++, vl++) {
4353 		u32 val = qib_read_kreg_port(ppd, regno);
4354 
4355 		vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4356 			SYM_RMASK(LowPriority0_0, VirtualLane);
4357 		vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4358 			SYM_RMASK(LowPriority0_0, Weight);
4359 	}
4360 }
4361 
4362 static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4363 			   struct ib_vl_weight_elem *vl)
4364 {
4365 	unsigned i;
4366 
4367 	for (i = 0; i < 16; i++, regno++, vl++) {
4368 		u64 val;
4369 
4370 		val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4371 			SYM_LSB(LowPriority0_0, VirtualLane)) |
4372 		      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4373 			SYM_LSB(LowPriority0_0, Weight));
4374 		qib_write_kreg_port(ppd, regno, val);
4375 	}
4376 	if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4377 		struct qib_devdata *dd = ppd->dd;
4378 		unsigned long flags;
4379 
4380 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
4381 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4382 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4383 		qib_write_kreg(dd, kr_scratch, 0);
4384 		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4385 	}
4386 }
4387 
4388 static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4389 {
4390 	switch (which) {
4391 	case QIB_IB_TBL_VL_HIGH_ARB:
4392 		get_vl_weights(ppd, krp_highprio_0, t);
4393 		break;
4394 
4395 	case QIB_IB_TBL_VL_LOW_ARB:
4396 		get_vl_weights(ppd, krp_lowprio_0, t);
4397 		break;
4398 
4399 	default:
4400 		return -EINVAL;
4401 	}
4402 	return 0;
4403 }
4404 
4405 static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4406 {
4407 	switch (which) {
4408 	case QIB_IB_TBL_VL_HIGH_ARB:
4409 		set_vl_weights(ppd, krp_highprio_0, t);
4410 		break;
4411 
4412 	case QIB_IB_TBL_VL_LOW_ARB:
4413 		set_vl_weights(ppd, krp_lowprio_0, t);
4414 		break;
4415 
4416 	default:
4417 		return -EINVAL;
4418 	}
4419 	return 0;
4420 }
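
/*
 * Sketch of how a VL arbitration table update reaches the chip (the
 * weights below are arbitrary example values):
 *
 *	struct ib_vl_weight_elem vl[16] = {
 *		{ .vl = 0, .weight = 255 },
 *		{ .vl = 1, .weight = 64 },
 *	};
 *
 *	qib_7322_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB, vl);
 *
 * Each of the 16 low-priority slots is packed into its own per-port
 * register by set_vl_weights(), which also turns on the IB VL arbiter
 * in SendCtrl_0 if it is not already enabled.
 */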
4421 
4422 static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4423 				    u32 updegr, u32 egrhd, u32 npkts)
4424 {
4425 	/*
4426 	 * Need to write timeout register before updating rcvhdrhead to ensure
4427 	 * that the timer is enabled on reception of a packet.
4428 	 */
4429 	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4430 		adjust_rcv_timeout(rcd, npkts);
4431 	if (updegr)
4432 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4433 	mmiowb();
4434 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4435 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4436 	mmiowb();
4437 }
4438 
4439 static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4440 {
4441 	u32 head, tail;
4442 
4443 	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4444 	if (rcd->rcvhdrtail_kvaddr)
4445 		tail = qib_get_rcvhdrtail(rcd);
4446 	else
4447 		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4448 	return head == tail;
4449 }
4450 
4451 #define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4452 	QIB_RCVCTRL_CTXT_DIS | \
4453 	QIB_RCVCTRL_TIDFLOW_ENB | \
4454 	QIB_RCVCTRL_TIDFLOW_DIS | \
4455 	QIB_RCVCTRL_TAILUPD_ENB | \
4456 	QIB_RCVCTRL_TAILUPD_DIS | \
4457 	QIB_RCVCTRL_INTRAVAIL_ENB | \
4458 	QIB_RCVCTRL_INTRAVAIL_DIS | \
4459 	QIB_RCVCTRL_BP_ENB | \
4460 	QIB_RCVCTRL_BP_DIS)
4461 
4462 #define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4463 	QIB_RCVCTRL_CTXT_DIS | \
4464 	QIB_RCVCTRL_PKEY_DIS | \
4465 	QIB_RCVCTRL_PKEY_ENB)
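
/*
 * Illustrative combination for the routine below (a sketch, not a call
 * site in this file): bringing up a kernel receive context with
 * interrupt-on-packet enabled can be requested in one pass,
 *
 *	rcvctrl_7322_mod(ppd, QIB_RCVCTRL_CTXT_ENB |
 *			      QIB_RCVCTRL_INTRAVAIL_ENB, ctxt);
 *
 * and because CTXT_ENB is in both mod sets while INTRAVAIL_ENB is only
 * in the common set, the shared RcvCtrl and the per-port RcvCtrl_0 each
 * get written once for the whole request.
 */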
4466 
4467 /*
4468  * Modify the RCVCTRL register in a chip-specific way. This
4469  * is a function because bit positions and (future) register
4470  * locations are chip-specific, but the needed operations are
4471  * generic. <op> is a bit-mask because we often want to
4472  * do multiple modifications.
4473  */
4474 static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4475 			     int ctxt)
4476 {
4477 	struct qib_devdata *dd = ppd->dd;
4478 	struct qib_ctxtdata *rcd;
4479 	u64 mask, val;
4480 	unsigned long flags;
4481 
4482 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4483 
4484 	if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4485 		dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4486 	if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4487 		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4488 	if (op & QIB_RCVCTRL_TAILUPD_ENB)
4489 		dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4490 	if (op & QIB_RCVCTRL_TAILUPD_DIS)
4491 		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4492 	if (op & QIB_RCVCTRL_PKEY_ENB)
4493 		ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4494 	if (op & QIB_RCVCTRL_PKEY_DIS)
4495 		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4496 	if (ctxt < 0) {
4497 		mask = (1ULL << dd->ctxtcnt) - 1;
4498 		rcd = NULL;
4499 	} else {
4500 		mask = (1ULL << ctxt);
4501 		rcd = dd->rcd[ctxt];
4502 	}
4503 	if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4504 		ppd->p_rcvctrl |=
4505 			(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4506 		if (!(dd->flags & QIB_NODMA_RTAIL)) {
4507 			op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4508 			dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4509 		}
4510 		/* Write these registers before the context is enabled. */
4511 		qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4512 				    rcd->rcvhdrqtailaddr_phys);
4513 		qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4514 				    rcd->rcvhdrq_phys);
4515 		rcd->seq_cnt = 1;
4516 	}
4517 	if (op & QIB_RCVCTRL_CTXT_DIS)
4518 		ppd->p_rcvctrl &=
4519 			~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4520 	if (op & QIB_RCVCTRL_BP_ENB)
4521 		dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4522 	if (op & QIB_RCVCTRL_BP_DIS)
4523 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4524 	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4525 		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4526 	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4527 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4528 	/*
4529 	 * Decide which registers to write depending on the ops enabled.
4530 	 * Special case is "flush" (no bits set at all)
4531 	 * which needs to write both.
4532 	 */
4533 	if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4534 		qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4535 	if (op == 0 || (op & RCVCTRL_PORT_MODS))
4536 		qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4537 	if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4538 		/*
4539 		 * Init the context registers also; if we were
4540 		 * disabled, tail and head should both be zero
4541 		 * already from the enable, but since we don't
4542 		 * know, we have to do it explicitly.
4543 		 */
4544 		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4545 		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4546 
4547 		/* be sure enabling write seen; hd/tl should be 0 */
4548 		(void) qib_read_kreg32(dd, kr_scratch);
4549 		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4550 		dd->rcd[ctxt]->head = val;
4551 		/* If kctxt, interrupt on next receive. */
4552 		if (ctxt < dd->first_user_ctxt)
4553 			val |= dd->rhdrhead_intr_off;
4554 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4555 	} else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4556 		dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4557 		/* arm rcv interrupt */
4558 		val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4559 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4560 	}
4561 	if (op & QIB_RCVCTRL_CTXT_DIS) {
4562 		unsigned f;
4563 
4564 		/* Now that the context is disabled, clear these registers. */
4565 		if (ctxt >= 0) {
4566 			qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4567 			qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4568 			for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4569 				qib_write_ureg(dd, ur_rcvflowtable + f,
4570 					       TIDFLOW_ERRBITS, ctxt);
4571 		} else {
4572 			unsigned i;
4573 
4574 			for (i = 0; i < dd->cfgctxts; i++) {
4575 				qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4576 						    i, 0);
4577 				qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4578 				for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4579 					qib_write_ureg(dd, ur_rcvflowtable + f,
4580 						       TIDFLOW_ERRBITS, i);
4581 			}
4582 		}
4583 	}
4584 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4585 }
4586 
4587 /*
4588  * Modify the SENDCTRL register in a chip-specific way. This
4589  * is a function because there are multiple such registers with
4590  * slightly different layouts.
4591  * The chip doesn't allow back-to-back sendctrl writes, so write
4592  * the scratch register after writing sendctrl.
4593  *
4594  * Which register is written depends on the operation.
4595  * Most operate on the common register, while
4596  * SEND_ENB and SEND_DIS operate on the per-port ones.
4597  * SEND_ENB is included in common because it can change SPCL_TRIG
4598  */
4599 #define SENDCTRL_COMMON_MODS (\
4600 	QIB_SENDCTRL_CLEAR | \
4601 	QIB_SENDCTRL_AVAIL_DIS | \
4602 	QIB_SENDCTRL_AVAIL_ENB | \
4603 	QIB_SENDCTRL_AVAIL_BLIP | \
4604 	QIB_SENDCTRL_DISARM | \
4605 	QIB_SENDCTRL_DISARM_ALL | \
4606 	QIB_SENDCTRL_SEND_ENB)
4607 
4608 #define SENDCTRL_PORT_MODS (\
4609 	QIB_SENDCTRL_CLEAR | \
4610 	QIB_SENDCTRL_SEND_ENB | \
4611 	QIB_SENDCTRL_SEND_DIS | \
4612 	QIB_SENDCTRL_FLUSH)
4613 
4614 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4615 {
4616 	struct qib_devdata *dd = ppd->dd;
4617 	u64 tmp_dd_sendctrl;
4618 	unsigned long flags;
4619 
4620 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
4621 
4622 	/* First the dd ones that are "sticky", saved in shadow */
4623 	if (op & QIB_SENDCTRL_CLEAR)
4624 		dd->sendctrl = 0;
4625 	if (op & QIB_SENDCTRL_AVAIL_DIS)
4626 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4627 	else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4628 		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4629 		if (dd->flags & QIB_USE_SPCL_TRIG)
4630 			dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4631 	}
4632 
4633 	/* Then the ppd ones that are "sticky", saved in shadow */
4634 	if (op & QIB_SENDCTRL_SEND_DIS)
4635 		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4636 	else if (op & QIB_SENDCTRL_SEND_ENB)
4637 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4638 
4639 	if (op & QIB_SENDCTRL_DISARM_ALL) {
4640 		u32 i, last;
4641 
4642 		tmp_dd_sendctrl = dd->sendctrl;
4643 		last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4644 		/*
4645 		 * Disarm any buffers that are not yet launched,
4646 		 * disabling updates until done.
4647 		 */
4648 		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4649 		for (i = 0; i < last; i++) {
4650 			qib_write_kreg(dd, kr_sendctrl,
4651 				       tmp_dd_sendctrl |
4652 				       SYM_MASK(SendCtrl, Disarm) | i);
4653 			qib_write_kreg(dd, kr_scratch, 0);
4654 		}
4655 	}
4656 
4657 	if (op & QIB_SENDCTRL_FLUSH) {
4658 		u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4659 
4660 		/*
4661 		 * Now drain all the fifos.  The Abort bit should never be
4662 		 * needed, so for now, at least, we don't use it.
4663 		 */
4664 		tmp_ppd_sendctrl |=
4665 			SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4666 			SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4667 			SYM_MASK(SendCtrl_0, TxeBypassIbc);
4668 		qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4669 		qib_write_kreg(dd, kr_scratch, 0);
4670 	}
4671 
4672 	tmp_dd_sendctrl = dd->sendctrl;
4673 
4674 	if (op & QIB_SENDCTRL_DISARM)
4675 		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4676 			((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4677 			 SYM_LSB(SendCtrl, DisarmSendBuf));
4678 	if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4679 	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4680 		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4681 
4682 	if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4683 		qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4684 		qib_write_kreg(dd, kr_scratch, 0);
4685 	}
4686 
4687 	if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4688 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4689 		qib_write_kreg(dd, kr_scratch, 0);
4690 	}
4691 
4692 	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4693 		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4694 		qib_write_kreg(dd, kr_scratch, 0);
4695 	}
4696 
4697 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4698 
4699 	if (op & QIB_SENDCTRL_FLUSH) {
4700 		u32 v;
4701 		/*
4702 		 * ensure writes have hit chip, then do a few
4703 		 * more reads, to allow DMA of pioavail registers
4704 		 * to occur, so in-memory copy is in sync with
4705 		 * the chip.  Not always safe to sleep.
4706 		 */
4707 		v = qib_read_kreg32(dd, kr_scratch);
4708 		qib_write_kreg(dd, kr_scratch, v);
4709 		v = qib_read_kreg32(dd, kr_scratch);
4710 		qib_write_kreg(dd, kr_scratch, v);
4711 		qib_read_kreg32(dd, kr_scratch);
4712 	}
4713 }
4714 
4715 #define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4716 #define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4717 #define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
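/*
 * These flags are ORed into the entries of the counter index tables
 * below; they are masked back off with _PORT_CNTR_IDXMASK before the
 * index is used as a chip counter offset.
 */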
4718 
4719 /**
4720  * qib_portcntr_7322 - read a per-port chip counter
4721  * @ppd: the qlogic_ib pport
 * @reg: the counter to read (not a chip offset)
4723  */
4724 static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4725 {
4726 	struct qib_devdata *dd = ppd->dd;
4727 	u64 ret = 0ULL;
4728 	u16 creg;
4729 	/* 0xffff for unimplemented or synthesized counters */
4730 	static const u32 xlator[] = {
4731 		[QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4732 		[QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4733 		[QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4734 		[QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4735 		[QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4736 		[QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4737 		[QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4738 		[QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4739 		[QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4740 		[QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4741 		[QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4742 		[QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4743 		[QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed  for 7322 */
4744 		[QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4745 		[QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4746 		[QIBPORTCNTR_ERRICRC] = crp_erricrc,
4747 		[QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4748 		[QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4749 		[QIBPORTCNTR_BADFORMAT] = crp_badformat,
4750 		[QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4751 		[QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4752 		[QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4753 		[QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4754 		[QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4755 		[QIBPORTCNTR_ERRLINK] = crp_errlink,
4756 		[QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4757 		[QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4758 		[QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4759 		[QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4760 		[QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4761 		/*
		 * the next 3 aren't really counters, but were implemented
		 * as counters in older chips, so this code still accesses
		 * them as though they were counters.
4765 		 */
4766 		[QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4767 		[QIBPORTCNTR_PSSTART] = krp_psstart,
4768 		[QIBPORTCNTR_PSSTAT] = krp_psstat,
4769 		/* pseudo-counter, summed for all ports */
4770 		[QIBPORTCNTR_KHDROVFL] = 0xffff,
4771 	};
4772 
4773 	if (reg >= ARRAY_SIZE(xlator)) {
4774 		qib_devinfo(ppd->dd->pcidev,
4775 			 "Unimplemented portcounter %u\n", reg);
4776 		goto done;
4777 	}
4778 	creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4779 
4780 	/* handle non-counters and special cases first */
4781 	if (reg == QIBPORTCNTR_KHDROVFL) {
4782 		int i;
4783 
4784 		/* sum over all kernel contexts (skip if mini_init) */
4785 		for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4786 			struct qib_ctxtdata *rcd = dd->rcd[i];
4787 
4788 			if (!rcd || rcd->ppd != ppd)
4789 				continue;
4790 			ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4791 		}
4792 		goto done;
4793 	} else if (reg == QIBPORTCNTR_RXDROPPKT) {
4794 		/*
4795 		 * Used as part of the synthesis of port_rcv_errors
4796 		 * in the verbs code for IBTA counters.  Not needed for 7322,
4797 		 * because all the errors are already counted by other cntrs.
4798 		 */
4799 		goto done;
4800 	} else if (reg == QIBPORTCNTR_PSINTERVAL ||
4801 		   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4802 		/* were counters in older chips, now per-port kernel regs */
4803 		ret = qib_read_kreg_port(ppd, creg);
4804 		goto done;
4805 	}
4806 
4807 	/*
4808 	 * Only fast increment counters are 64 bits; use 32 bit reads to
4809 	 * avoid two independent reads when on Opteron.
4810 	 */
4811 	if (xlator[reg] & _PORT_64BIT_FLAG)
4812 		ret = read_7322_creg_port(ppd, creg);
4813 	else
4814 		ret = read_7322_creg32_port(ppd, creg);
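	/*
	 * Adjust for any snapshot/delta accounting in progress, so counter
	 * bumps caused by the driver's own link transitions (e.g. during
	 * autoneg) are hidden from the value we report.
	 */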
4815 	if (creg == crp_ibsymbolerr) {
4816 		if (ppd->cpspec->ibdeltainprog)
4817 			ret -= ret - ppd->cpspec->ibsymsnap;
4818 		ret -= ppd->cpspec->ibsymdelta;
4819 	} else if (creg == crp_iblinkerrrecov) {
4820 		if (ppd->cpspec->ibdeltainprog)
4821 			ret -= ret - ppd->cpspec->iblnkerrsnap;
4822 		ret -= ppd->cpspec->iblnkerrdelta;
4823 	} else if (creg == crp_errlink)
4824 		ret -= ppd->cpspec->ibmalfdelta;
4825 	else if (creg == crp_iblinkdown)
4826 		ret += ppd->cpspec->iblnkdowndelta;
4827 done:
4828 	return ret;
4829 }
4830 
4831 /*
4832  * Device counter names (not port-specific), one line per stat,
4833  * single string.  Used by utilities like ipathstats to print the stats
4834  * in a way which works for different versions of drivers, without changing
4835  * the utility.  Names need to be 12 chars or less (w/o newline), for proper
 * display by the utility.
 * Non-error counters are first.
 * Start of "error" counters is indicated by a leading "E " on the first
 * "error" counter, and doesn't count in label length.
4840  * The EgrOvfl list needs to be last so we truncate them at the configured
4841  * context count for the device.
4842  * cntr7322indices contains the corresponding register indices.
4843  */
4844 static const char cntr7322names[] =
4845 	"Interrupts\n"
4846 	"HostBusStall\n"
4847 	"E RxTIDFull\n"
4848 	"RxTIDInvalid\n"
4849 	"RxTIDFloDrop\n" /* 7322 only */
4850 	"Ctxt0EgrOvfl\n"
4851 	"Ctxt1EgrOvfl\n"
4852 	"Ctxt2EgrOvfl\n"
4853 	"Ctxt3EgrOvfl\n"
4854 	"Ctxt4EgrOvfl\n"
4855 	"Ctxt5EgrOvfl\n"
4856 	"Ctxt6EgrOvfl\n"
4857 	"Ctxt7EgrOvfl\n"
4858 	"Ctxt8EgrOvfl\n"
4859 	"Ctxt9EgrOvfl\n"
4860 	"Ctx10EgrOvfl\n"
4861 	"Ctx11EgrOvfl\n"
4862 	"Ctx12EgrOvfl\n"
4863 	"Ctx13EgrOvfl\n"
4864 	"Ctx14EgrOvfl\n"
4865 	"Ctx15EgrOvfl\n"
4866 	"Ctx16EgrOvfl\n"
4867 	"Ctx17EgrOvfl\n"
4868 	;
4869 
4870 static const u32 cntr7322indices[] = {
4871 	cr_lbint | _PORT_64BIT_FLAG,
4872 	cr_lbstall | _PORT_64BIT_FLAG,
4873 	cr_tidfull,
4874 	cr_tidinvalid,
4875 	cr_rxtidflowdrop,
4876 	cr_base_egrovfl + 0,
4877 	cr_base_egrovfl + 1,
4878 	cr_base_egrovfl + 2,
4879 	cr_base_egrovfl + 3,
4880 	cr_base_egrovfl + 4,
4881 	cr_base_egrovfl + 5,
4882 	cr_base_egrovfl + 6,
4883 	cr_base_egrovfl + 7,
4884 	cr_base_egrovfl + 8,
4885 	cr_base_egrovfl + 9,
4886 	cr_base_egrovfl + 10,
4887 	cr_base_egrovfl + 11,
4888 	cr_base_egrovfl + 12,
4889 	cr_base_egrovfl + 13,
4890 	cr_base_egrovfl + 14,
4891 	cr_base_egrovfl + 15,
4892 	cr_base_egrovfl + 16,
4893 	cr_base_egrovfl + 17,
4894 };
4895 
4896 /*
4897  * same as cntr7322names and cntr7322indices, but for port-specific counters.
4898  * portcntr7322indices is somewhat complicated by some registers needing
4899  * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4900  */
4901 static const char portcntr7322names[] =
4902 	"TxPkt\n"
4903 	"TxFlowPkt\n"
4904 	"TxWords\n"
4905 	"RxPkt\n"
4906 	"RxFlowPkt\n"
4907 	"RxWords\n"
4908 	"TxFlowStall\n"
4909 	"TxDmaDesc\n"  /* 7220 and 7322-only */
4910 	"E RxDlidFltr\n"  /* 7220 and 7322-only */
4911 	"IBStatusChng\n"
4912 	"IBLinkDown\n"
4913 	"IBLnkRecov\n"
4914 	"IBRxLinkErr\n"
4915 	"IBSymbolErr\n"
4916 	"RxLLIErr\n"
4917 	"RxBadFormat\n"
4918 	"RxBadLen\n"
4919 	"RxBufOvrfl\n"
4920 	"RxEBP\n"
4921 	"RxFlowCtlErr\n"
4922 	"RxICRCerr\n"
4923 	"RxLPCRCerr\n"
4924 	"RxVCRCerr\n"
4925 	"RxInvalLen\n"
4926 	"RxInvalPKey\n"
4927 	"RxPktDropped\n"
4928 	"TxBadLength\n"
4929 	"TxDropped\n"
4930 	"TxInvalLen\n"
4931 	"TxUnderrun\n"
4932 	"TxUnsupVL\n"
4933 	"RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4934 	"RxVL15Drop\n"
4935 	"RxVlErr\n"
4936 	"XcessBufOvfl\n"
4937 	"RxQPBadCtxt\n" /* 7322-only from here down */
4938 	"TXBadHeader\n"
4939 	;
4940 
4941 static const u32 portcntr7322indices[] = {
4942 	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4943 	crp_pktsendflow,
4944 	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4945 	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4946 	crp_pktrcvflowctrl,
4947 	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4948 	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4949 	crp_txsdmadesc | _PORT_64BIT_FLAG,
4950 	crp_rxdlidfltr,
4951 	crp_ibstatuschange,
4952 	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4953 	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4954 	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4955 	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4956 	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4957 	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4958 	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4959 	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4960 	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4961 	crp_rcvflowctrlviol,
4962 	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
4963 	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
4964 	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
4965 	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
4966 	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
4967 	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
4968 	crp_txminmaxlenerr,
4969 	crp_txdroppedpkt,
4970 	crp_txlenerr,
4971 	crp_txunderrun,
4972 	crp_txunsupvl,
4973 	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
4974 	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
4975 	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
4976 	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
4977 	crp_rxqpinvalidctxt,
4978 	crp_txhdrerr,
4979 };
4980 
4981 /* do all the setup to make the counter reads efficient later */
4982 static void init_7322_cntrnames(struct qib_devdata *dd)
4983 {
4984 	int i, j = 0;
4985 	char *s;
4986 
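	/*
	 * Count the device counter names; once the per-context EgrOvfl
	 * names begin, stop after the configured context count so the
	 * exported list matches this device.
	 */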
4987 	for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
4988 	     i++) {
4989 		/* we always have at least one counter before the egrovfl */
4990 		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
4991 			j = 1;
4992 		s = strchr(s + 1, '\n');
4993 		if (s && j)
4994 			j++;
4995 	}
4996 	dd->cspec->ncntrs = i;
4997 	if (!s)
4998 		/* full list; size is without terminating null */
4999 		dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
5000 	else
5001 		dd->cspec->cntrnamelen = 1 + s - cntr7322names;
5002 	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
5003 		* sizeof(u64), GFP_KERNEL);
5004 
5005 	for (i = 0, s = (char *)portcntr7322names; s; i++)
5006 		s = strchr(s + 1, '\n');
5007 	dd->cspec->nportcntrs = i - 1;
5008 	dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
5009 	for (i = 0; i < dd->num_pports; ++i) {
5010 		dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
5011 			* sizeof(u64), GFP_KERNEL);
5012 	}
5013 }
5014 
5015 static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
5016 			      u64 **cntrp)
5017 {
5018 	u32 ret;
5019 
5020 	if (namep) {
5021 		ret = dd->cspec->cntrnamelen;
5022 		if (pos >= ret)
5023 			ret = 0; /* final read after getting everything */
5024 		else
5025 			*namep = (char *) cntr7322names;
5026 	} else {
5027 		u64 *cntr = dd->cspec->cntrs;
5028 		int i;
5029 
5030 		ret = dd->cspec->ncntrs * sizeof(u64);
5031 		if (!cntr || pos >= ret) {
5032 			/* everything read, or couldn't get memory */
5033 			ret = 0;
5034 			goto done;
5035 		}
5036 		*cntrp = cntr;
5037 		for (i = 0; i < dd->cspec->ncntrs; i++)
5038 			if (cntr7322indices[i] & _PORT_64BIT_FLAG)
5039 				*cntr++ = read_7322_creg(dd,
5040 							 cntr7322indices[i] &
5041 							 _PORT_CNTR_IDXMASK);
5042 			else
5043 				*cntr++ = read_7322_creg32(dd,
5044 							   cntr7322indices[i]);
5045 	}
5046 done:
5047 	return ret;
5048 }
5049 
5050 static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
5051 				  char **namep, u64 **cntrp)
5052 {
5053 	u32 ret;
5054 
5055 	if (namep) {
5056 		ret = dd->cspec->portcntrnamelen;
5057 		if (pos >= ret)
5058 			ret = 0; /* final read after getting everything */
5059 		else
5060 			*namep = (char *)portcntr7322names;
5061 	} else {
5062 		struct qib_pportdata *ppd = &dd->pport[port];
5063 		u64 *cntr = ppd->cpspec->portcntrs;
5064 		int i;
5065 
5066 		ret = dd->cspec->nportcntrs * sizeof(u64);
5067 		if (!cntr || pos >= ret) {
5068 			/* everything read, or couldn't get memory */
5069 			ret = 0;
5070 			goto done;
5071 		}
5072 		*cntrp = cntr;
5073 		for (i = 0; i < dd->cspec->nportcntrs; i++) {
5074 			if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
5075 				*cntr++ = qib_portcntr_7322(ppd,
5076 					portcntr7322indices[i] &
5077 					_PORT_CNTR_IDXMASK);
5078 			else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
5079 				*cntr++ = read_7322_creg_port(ppd,
5080 					   portcntr7322indices[i] &
5081 					    _PORT_CNTR_IDXMASK);
5082 			else
5083 				*cntr++ = read_7322_creg32_port(ppd,
5084 					   portcntr7322indices[i]);
5085 		}
5086 	}
5087 done:
5088 	return ret;
5089 }
5090 
5091 /**
5092  * qib_get_7322_faststats - get word counters from chip before they overflow
 * @t: the stats timer embedded in the qlogic_ib device qib_devdata
 *
 * Vestigial: the IBA7322 has no "small fast counters", so the only
 * real purpose of this function is to maintain the notion of
 * "active time", which in turn is only logged into the eeprom,
 * which we don't have yet for 7322-based boards.
 *
 * Called from the stats timer.
5101  */
5102 static void qib_get_7322_faststats(struct timer_list *t)
5103 {
5104 	struct qib_devdata *dd = from_timer(dd, t, stats_timer);
5105 	struct qib_pportdata *ppd;
5106 	unsigned long flags;
5107 	u64 traffic_wds;
5108 	int pidx;
5109 
5110 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5111 		ppd = dd->pport + pidx;
5112 
5113 		/*
		 * If the port isn't enabled, isn't operational, or
		 * diags are running (which can cause memory diags to fail),
		 * skip this port this time.
5117 		 */
5118 		if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
5119 		    || dd->diag_client)
5120 			continue;
5121 
5122 		/*
5123 		 * Maintain an activity timer, based on traffic
5124 		 * exceeding a threshold, so we need to check the word-counts
5125 		 * even if they are 64-bit.
5126 		 */
5127 		traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
5128 			qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
5129 		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5130 		traffic_wds -= ppd->dd->traffic_wds;
5131 		ppd->dd->traffic_wds += traffic_wds;
5132 		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
5133 		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5134 						QIB_IB_QDR) &&
5135 		    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
5136 				    QIBL_LINKACTIVE)) &&
5137 		    ppd->cpspec->qdr_dfe_time &&
5138 		    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5139 			ppd->cpspec->qdr_dfe_on = 0;
5140 
5141 			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
5142 					    ppd->dd->cspec->r1 ?
5143 					    QDR_STATIC_ADAPT_INIT_R1 :
5144 					    QDR_STATIC_ADAPT_INIT);
5145 			force_h1(ppd);
5146 		}
5147 	}
5148 	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
5149 }
5150 
5151 /*
 * If we were using MSIx, try to fall back to INTx.
5153  */
5154 static int qib_7322_intr_fallback(struct qib_devdata *dd)
5155 {
5156 	if (!dd->cspec->num_msix_entries)
5157 		return 0; /* already using INTx */
5158 
5159 	qib_devinfo(dd->pcidev,
5160 		"MSIx interrupt not detected, trying INTx interrupts\n");
5161 	qib_7322_free_irq(dd);
5162 	if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
5163 		qib_dev_err(dd, "Failed to enable INTx\n");
5164 	qib_setup_7322_interrupt(dd, 0);
5165 	return 1;
5166 }
5167 
5168 /*
5169  * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
5170  * than resetting the IBC or external link state, and useful in some
5171  * cases to cause some retraining.  To do this right, we reset IBC
 * as well, then return to the previous state (which may still be in reset).
 * NOTE: some callers of this "know" this writes the current value
 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
5175  * check all callers.
5176  */
5177 static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
5178 {
5179 	u64 val;
5180 	struct qib_devdata *dd = ppd->dd;
5181 	const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
5182 		SYM_MASK(IBPCSConfig_0, xcv_treset) |
5183 		SYM_MASK(IBPCSConfig_0, tx_rx_reset);
5184 
5185 	val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
5186 	qib_write_kreg(dd, kr_hwerrmask,
5187 		       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
5188 	qib_write_kreg_port(ppd, krp_ibcctrl_a,
5189 			    ppd->cpspec->ibcctrl_a &
5190 			    ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
5191 
5192 	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
5193 	qib_read_kreg32(dd, kr_scratch);
5194 	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
5195 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
5196 	qib_write_kreg(dd, kr_scratch, 0ULL);
5197 	qib_write_kreg(dd, kr_hwerrclear,
5198 		       SYM_MASK(HwErrClear, statusValidNoEopClear));
5199 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
5200 }
5201 
5202 /*
5203  * This code for non-IBTA-compliant IB speed negotiation is only known to
5204  * work for the SDR to DDR transition, and only between an HCA and a switch
5205  * with recent firmware.  It is based on observed heuristics, rather than
5206  * actual knowledge of the non-compliant speed negotiation.
5207  * It has a number of hard-coded fields, since the hope is to rewrite this
 * when a spec is available on how the negotiation is intended to work.
5209  */
5210 static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5211 				 u32 dcnt, u32 *data)
5212 {
5213 	int i;
5214 	u64 pbc;
5215 	u32 __iomem *piobuf;
5216 	u32 pnum, control, len;
5217 	struct qib_devdata *dd = ppd->dd;
5218 
5219 	i = 0;
5220 	len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
5221 	control = qib_7322_setpbc_control(ppd, len, 0, 15);
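	/* PBC word: control bits go in the upper 32 bits, dword length below */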
5222 	pbc = ((u64) control << 32) | len;
5223 	while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5224 		if (i++ > 15)
5225 			return;
5226 		udelay(2);
5227 	}
5228 	/* disable header check on this packet, since it can't be valid */
5229 	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
5230 	writeq(pbc, piobuf);
5231 	qib_flush_wc();
5232 	qib_pio_copy(piobuf + 2, hdr, 7);
5233 	qib_pio_copy(piobuf + 9, data, dcnt);
5234 	if (dd->flags & QIB_USE_SPCL_TRIG) {
5235 		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
5236 
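		/*
		 * Writing the magic value at the special-trigger offset
		 * within the buffer mapping is what launches the send on
		 * boards that use the special trigger (QIB_USE_SPCL_TRIG).
		 */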
5237 		qib_flush_wc();
5238 		__raw_writel(0xaebecede, piobuf + spcl_off);
5239 	}
5240 	qib_flush_wc();
5241 	qib_sendbuf_done(dd, pnum);
5242 	/* and re-enable hdr check */
5243 	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
5244 }
5245 
5246 /*
5247  * _start packet gets sent twice at start, _done gets sent twice at end
5248  */
5249 static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5250 {
5251 	struct qib_devdata *dd = ppd->dd;
5252 	static u32 swapped;
5253 	u32 dw, i, hcnt, dcnt, *data;
5254 	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5255 	static u32 madpayload_start[0x40] = {
5256 		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5257 		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5258 		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
5259 		};
5260 	static u32 madpayload_done[0x40] = {
5261 		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5262 		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5263 		0x40000001, 0x1388, 0x15e, /* rest 0's */
5264 		};
5265 
5266 	dcnt = ARRAY_SIZE(madpayload_start);
5267 	hcnt = ARRAY_SIZE(hdr);
5268 	if (!swapped) {
5269 		/* for maintainability, do it at runtime */
5270 		for (i = 0; i < hcnt; i++) {
5271 			dw = (__force u32) cpu_to_be32(hdr[i]);
5272 			hdr[i] = dw;
5273 		}
5274 		for (i = 0; i < dcnt; i++) {
5275 			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5276 			madpayload_start[i] = dw;
5277 			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5278 			madpayload_done[i] = dw;
5279 		}
5280 		swapped = 1;
5281 	}
5282 
5283 	data = which ? madpayload_done : madpayload_start;
5284 
5285 	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5286 	qib_read_kreg64(dd, kr_scratch);
5287 	udelay(2);
5288 	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5289 	qib_read_kreg64(dd, kr_scratch);
5290 	udelay(2);
5291 }
5292 
5293 /*
5294  * Do the absolute minimum to cause an IB speed change, and make it
5295  * ready, but don't actually trigger the change.   The caller will
5296  * do that when ready (if link is in Polling training state, it will
5297  * happen immediately, otherwise when link next goes down)
5298  *
 * This routine should only be used as part of the DDR autonegotiation
5300  * code for devices that are not compliant with IB 1.2 (or code that
5301  * fixes things up for same).
5302  *
 * When the link has gone down and autoneg is enabled, or autoneg has
 * failed and we give up until next time, we set both speeds, and
 * then we want IBTA negotiation enabled as well as "use max enabled speed".
5306  */
5307 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5308 {
5309 	u64 newctrlb;
5310 
5311 	newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5312 				    IBA7322_IBC_IBTA_1_2_MASK |
5313 				    IBA7322_IBC_MAX_SPEED_MASK);
5314 
5315 	if (speed & (speed - 1)) /* multiple speeds */
5316 		newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5317 				    IBA7322_IBC_IBTA_1_2_MASK |
5318 				    IBA7322_IBC_MAX_SPEED_MASK;
5319 	else
5320 		newctrlb |= speed == QIB_IB_QDR ?
5321 			IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5322 			((speed == QIB_IB_DDR ?
5323 			  IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5324 
5325 	if (newctrlb == ppd->cpspec->ibcctrl_b)
5326 		return;
5327 
5328 	ppd->cpspec->ibcctrl_b = newctrlb;
5329 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5330 	qib_write_kreg(ppd->dd, kr_scratch, 0);
5331 }
5332 
5333 /*
5334  * This routine is only used when we are not talking to another
5335  * IB 1.2-compliant device that we think can do DDR.
5336  * (This includes all existing switch chips as of Oct 2007.)
5337  * 1.2-compliant devices go directly to DDR prior to reaching INIT
5338  */
5339 static void try_7322_autoneg(struct qib_pportdata *ppd)
5340 {
5341 	unsigned long flags;
5342 
5343 	spin_lock_irqsave(&ppd->lflags_lock, flags);
5344 	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5345 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5346 	qib_autoneg_7322_send(ppd, 0);
5347 	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5348 	qib_7322_mini_pcs_reset(ppd);
5349 	/* 2 msec is minimum length of a poll cycle */
5350 	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5351 			   msecs_to_jiffies(2));
5352 }
5353 
5354 /*
5355  * Handle the empirically determined mechanism for auto-negotiation
5356  * of DDR speed with switches.
5357  */
5358 static void autoneg_7322_work(struct work_struct *work)
5359 {
5360 	struct qib_pportdata *ppd;
5361 	u32 i;
5362 	unsigned long flags;
5363 
5364 	ppd = container_of(work, struct qib_chippport_specific,
5365 			    autoneg_work.work)->ppd;
5366 
5367 	/*
5368 	 * Busy wait for this first part, it should be at most a
5369 	 * few hundred usec, since we scheduled ourselves for 2msec.
5370 	 */
5371 	for (i = 0; i < 25; i++) {
5372 		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5373 		     == IB_7322_LT_STATE_POLLQUIET) {
5374 			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5375 			break;
5376 		}
5377 		udelay(100);
5378 	}
5379 
5380 	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5381 		goto done; /* we got there early or told to stop */
5382 
5383 	/* we expect this to timeout */
5384 	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5385 			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5386 			       msecs_to_jiffies(90)))
5387 		goto done;
5388 	qib_7322_mini_pcs_reset(ppd);
5389 
5390 	/* we expect this to timeout */
5391 	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5392 			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5393 			       msecs_to_jiffies(1700)))
5394 		goto done;
5395 	qib_7322_mini_pcs_reset(ppd);
5396 
5397 	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5398 
5399 	/*
5400 	 * Wait up to 250 msec for link to train and get to INIT at DDR;
5401 	 * this should terminate early.
5402 	 */
5403 	wait_event_timeout(ppd->cpspec->autoneg_wait,
5404 		!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5405 		msecs_to_jiffies(250));
5406 done:
5407 	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5408 		spin_lock_irqsave(&ppd->lflags_lock, flags);
5409 		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5410 		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5411 			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5412 			ppd->cpspec->autoneg_tries = 0;
5413 		}
5414 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5415 		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5416 	}
5417 }
5418 
5419 /*
 * This routine is used to request that IPG be set in the QLogic switch.
5421  * Only called if r1.
5422  */
5423 static void try_7322_ipg(struct qib_pportdata *ppd)
5424 {
5425 	struct qib_ibport *ibp = &ppd->ibport_data;
5426 	struct ib_mad_send_buf *send_buf;
5427 	struct ib_mad_agent *agent;
5428 	struct ib_smp *smp;
5429 	unsigned delay;
5430 	int ret;
5431 
5432 	agent = ibp->rvp.send_agent;
5433 	if (!agent)
5434 		goto retry;
5435 
5436 	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5437 				      IB_MGMT_MAD_DATA, GFP_ATOMIC,
5438 				      IB_MGMT_BASE_VERSION);
5439 	if (IS_ERR(send_buf))
5440 		goto retry;
5441 
5442 	if (!ibp->smi_ah) {
5443 		struct ib_ah *ah;
5444 
5445 		ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
5446 		if (IS_ERR(ah))
5447 			ret = PTR_ERR(ah);
5448 		else {
5449 			send_buf->ah = ah;
5450 			ibp->smi_ah = ibah_to_rvtah(ah);
5451 			ret = 0;
5452 		}
5453 	} else {
5454 		send_buf->ah = &ibp->smi_ah->ibah;
5455 		ret = 0;
5456 	}
5457 
5458 	smp = send_buf->mad;
5459 	smp->base_version = IB_MGMT_BASE_VERSION;
5460 	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5461 	smp->class_version = 1;
5462 	smp->method = IB_MGMT_METHOD_SEND;
5463 	smp->hop_cnt = 1;
5464 	smp->attr_id = QIB_VENDOR_IPG;
5465 	smp->attr_mod = 0;
5466 
5467 	if (!ret)
5468 		ret = ib_post_send_mad(send_buf, NULL);
5469 	if (ret)
5470 		ib_free_send_mad(send_buf);
5471 retry:
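	/* exponential backoff: 2, 4, 8, ... msec between attempts */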
5472 	delay = 2 << ppd->cpspec->ipg_tries;
5473 	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5474 			   msecs_to_jiffies(delay));
5475 }
5476 
5477 /*
5478  * Timeout handler for setting IPG.
5479  * Only called if r1.
5480  */
5481 static void ipg_7322_work(struct work_struct *work)
5482 {
5483 	struct qib_pportdata *ppd;
5484 
5485 	ppd = container_of(work, struct qib_chippport_specific,
5486 			   ipg_work.work)->ppd;
5487 	if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5488 	    && ++ppd->cpspec->ipg_tries <= 10)
5489 		try_7322_ipg(ppd);
5490 }
5491 
5492 static u32 qib_7322_iblink_state(u64 ibcs)
5493 {
5494 	u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5495 
5496 	switch (state) {
5497 	case IB_7322_L_STATE_INIT:
5498 		state = IB_PORT_INIT;
5499 		break;
5500 	case IB_7322_L_STATE_ARM:
5501 		state = IB_PORT_ARMED;
5502 		break;
5503 	case IB_7322_L_STATE_ACTIVE:
5504 		/* fall through */
5505 	case IB_7322_L_STATE_ACT_DEFER:
5506 		state = IB_PORT_ACTIVE;
5507 		break;
5508 	default: /* fall through */
5509 	case IB_7322_L_STATE_DOWN:
5510 		state = IB_PORT_DOWN;
5511 		break;
5512 	}
5513 	return state;
5514 }
5515 
5516 /* returns the IBTA port state, rather than the IBC link training state */
5517 static u8 qib_7322_phys_portstate(u64 ibcs)
5518 {
5519 	u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5520 	return qib_7322_physportstate[state];
5521 }
5522 
5523 static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5524 {
5525 	int ret = 0, symadj = 0;
5526 	unsigned long flags;
5527 	int mult;
5528 
5529 	spin_lock_irqsave(&ppd->lflags_lock, flags);
5530 	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5531 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5532 
5533 	/* Update our picture of width and speed from chip */
5534 	if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5535 		ppd->link_speed_active = QIB_IB_QDR;
5536 		mult = 4;
5537 	} else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5538 		ppd->link_speed_active = QIB_IB_DDR;
5539 		mult = 2;
5540 	} else {
5541 		ppd->link_speed_active = QIB_IB_SDR;
5542 		mult = 1;
5543 	}
5544 	if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5545 		ppd->link_width_active = IB_WIDTH_4X;
5546 		mult *= 4;
5547 	} else
5548 		ppd->link_width_active = IB_WIDTH_1X;
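	/* mult now combines the speed and width factors; map to a rate delay */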
5549 	ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5550 
5551 	if (!ibup) {
5552 		u64 clr;
5553 
5554 		/* Link went down. */
5555 		/* do IPG MAD again after linkdown, even if last time failed */
5556 		ppd->cpspec->ipg_tries = 0;
5557 		clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5558 			(SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5559 			 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5560 		if (clr)
5561 			qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5562 		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5563 				     QIBL_IB_AUTONEG_INPROG)))
5564 			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5565 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5566 			struct qib_qsfp_data *qd =
5567 				&ppd->cpspec->qsfp_data;
5568 			/* unlock the Tx settings, speed may change */
5569 			qib_write_kreg_port(ppd, krp_tx_deemph_override,
5570 				SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5571 				reset_tx_deemphasis_override));
5572 			qib_cancel_sends(ppd);
5573 			/* on link down, ensure sane pcs state */
5574 			qib_7322_mini_pcs_reset(ppd);
5575 			/* schedule the qsfp refresh which should turn the link
5576 			   off */
5577 			if (ppd->dd->flags & QIB_HAS_QSFP) {
5578 				qd->t_insert = jiffies;
5579 				queue_work(ib_wq, &qd->work);
5580 			}
5581 			spin_lock_irqsave(&ppd->sdma_lock, flags);
5582 			if (__qib_sdma_running(ppd))
5583 				__qib_sdma_process_event(ppd,
5584 					qib_sdma_event_e70_go_idle);
5585 			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5586 		}
5587 		clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5588 		if (clr == ppd->cpspec->iblnkdownsnap)
5589 			ppd->cpspec->iblnkdowndelta++;
5590 	} else {
5591 		if (qib_compat_ddr_negotiate &&
5592 		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5593 				     QIBL_IB_AUTONEG_INPROG)) &&
5594 		    ppd->link_speed_active == QIB_IB_SDR &&
5595 		    (ppd->link_speed_enabled & QIB_IB_DDR)
5596 		    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5597 			/* we are SDR, and auto-negotiation enabled */
5598 			++ppd->cpspec->autoneg_tries;
5599 			if (!ppd->cpspec->ibdeltainprog) {
5600 				ppd->cpspec->ibdeltainprog = 1;
5601 				ppd->cpspec->ibsymdelta +=
5602 					read_7322_creg32_port(ppd,
5603 						crp_ibsymbolerr) -
5604 						ppd->cpspec->ibsymsnap;
5605 				ppd->cpspec->iblnkerrdelta +=
5606 					read_7322_creg32_port(ppd,
5607 						crp_iblinkerrrecov) -
5608 						ppd->cpspec->iblnkerrsnap;
5609 			}
5610 			try_7322_autoneg(ppd);
5611 			ret = 1; /* no other IB status change processing */
5612 		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5613 			   ppd->link_speed_active == QIB_IB_SDR) {
5614 			qib_autoneg_7322_send(ppd, 1);
5615 			set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5616 			qib_7322_mini_pcs_reset(ppd);
5617 			udelay(2);
5618 			ret = 1; /* no other IB status change processing */
5619 		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5620 			   (ppd->link_speed_active & QIB_IB_DDR)) {
5621 			spin_lock_irqsave(&ppd->lflags_lock, flags);
5622 			ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5623 					 QIBL_IB_AUTONEG_FAILED);
5624 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5625 			ppd->cpspec->autoneg_tries = 0;
5626 			/* re-enable SDR, for next link down */
5627 			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5628 			wake_up(&ppd->cpspec->autoneg_wait);
5629 			symadj = 1;
5630 		} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5631 			/*
5632 			 * Clear autoneg failure flag, and do setup
5633 			 * so we'll try next time link goes down and
5634 			 * back to INIT (possibly connected to a
5635 			 * different device).
5636 			 */
5637 			spin_lock_irqsave(&ppd->lflags_lock, flags);
5638 			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5639 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5640 			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5641 			symadj = 1;
5642 		}
5643 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5644 			symadj = 1;
5645 			if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5646 				try_7322_ipg(ppd);
5647 			if (!ppd->cpspec->recovery_init)
5648 				setup_7322_link_recovery(ppd, 0);
5649 			ppd->cpspec->qdr_dfe_time = jiffies +
5650 				msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5651 		}
5652 		ppd->cpspec->ibmalfusesnap = 0;
5653 		ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5654 			crp_errlink);
5655 	}
5656 	if (symadj) {
5657 		ppd->cpspec->iblnkdownsnap =
5658 			read_7322_creg32_port(ppd, crp_iblinkdown);
5659 		if (ppd->cpspec->ibdeltainprog) {
5660 			ppd->cpspec->ibdeltainprog = 0;
5661 			ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5662 				crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5663 			ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5664 				crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5665 		}
5666 	} else if (!ibup && qib_compat_ddr_negotiate &&
5667 		   !ppd->cpspec->ibdeltainprog &&
5668 			!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5669 		ppd->cpspec->ibdeltainprog = 1;
5670 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5671 			crp_ibsymbolerr);
5672 		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5673 			crp_iblinkerrrecov);
5674 	}
5675 
5676 	if (!ret)
5677 		qib_setup_7322_setextled(ppd, ibup);
5678 	return ret;
5679 }
5680 
5681 /*
5682  * Does read/modify/write to appropriate registers to
5683  * set output and direction bits selected by mask.
 * These are in their canonical positions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * Returns contents of GP Inputs.
5687  */
5688 static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5689 {
5690 	u64 read_val, new_out;
5691 	unsigned long flags;
5692 
5693 	if (mask) {
5694 		/* some bits being written, lock access to GPIO */
5695 		dir &= mask;
5696 		out &= mask;
5697 		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5698 		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5699 		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5700 		new_out = (dd->cspec->gpio_out & ~mask) | out;
5701 
5702 		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5703 		qib_write_kreg(dd, kr_gpio_out, new_out);
5704 		dd->cspec->gpio_out = new_out;
5705 		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5706 	}
5707 	/*
5708 	 * It is unlikely that a read at this time would get valid
5709 	 * data on a pin whose direction line was set in the same
5710 	 * call to this function. We include the read here because
5711 	 * that allows us to potentially combine a change on one pin with
5712 	 * a read on another, and because the old code did something like
5713 	 * this.
5714 	 */
5715 	read_val = qib_read_kreg64(dd, kr_extstatus);
5716 	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5717 }
5718 
5719 /* Enable writes to config EEPROM, if possible. Returns previous state */
5720 static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5721 {
5722 	int prev_wen;
5723 	u32 mask;
5724 
5725 	mask = 1 << QIB_EEPROM_WEN_NUM;
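	/* the write-enable pin is active low, hence the inversions below */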
5726 	prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5727 	gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5728 
5729 	return prev_wen & 1;
5730 }
5731 
5732 /*
5733  * Read fundamental info we need to use the chip.  These are
5734  * the registers that describe chip capabilities, and are
5735  * saved in shadow registers.
5736  */
5737 static void get_7322_chip_params(struct qib_devdata *dd)
5738 {
5739 	u64 val;
5740 	u32 piobufs;
5741 	int mtu;
5742 
5743 	dd->palign = qib_read_kreg32(dd, kr_pagealign);
5744 
5745 	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5746 
5747 	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5748 	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5749 	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5750 	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5751 	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5752 
5753 	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5754 	dd->piobcnt2k = val & ~0U;
5755 	dd->piobcnt4k = val >> 32;
5756 	val = qib_read_kreg64(dd, kr_sendpiosize);
5757 	dd->piosize2k = val & ~0U;
5758 	dd->piosize4k = val >> 32;
5759 
5760 	mtu = ib_mtu_enum_to_int(qib_ibmtu);
5761 	if (mtu == -1)
5762 		mtu = QIB_DEFAULT_MTU;
5763 	dd->pport[0].ibmtu = (u32)mtu;
5764 	dd->pport[1].ibmtu = (u32)mtu;
5765 
5766 	/* these may be adjusted in init_chip_wc_pat() */
5767 	dd->pio2kbase = (u32 __iomem *)
5768 		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5769 	dd->pio4kbase = (u32 __iomem *)
5770 		((char __iomem *) dd->kregbase +
5771 		 (dd->piobufbase >> 32));
5772 	/*
5773 	 * 4K buffers take 2 pages; we use roundup just to be
5774 	 * paranoid; we calculate it once here, rather than on
	 * every buffer allocation.
5776 	 */
5777 	dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5778 
5779 	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5780 
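	/*
	 * Each 64-bit pioavail register covers 32 buffers (2 status bits
	 * per buffer), so round buffers up to a multiple of 32 and divide.
	 */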
5781 	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5782 		(sizeof(u64) * BITS_PER_BYTE / 2);
5783 }
5784 
5785 /*
5786  * The chip base addresses in cspec and cpspec have to be set
5787  * after possible init_chip_wc_pat(), rather than in
5788  * get_7322_chip_params(), so split out as separate function
5789  */
5790 static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5791 {
5792 	u32 cregbase;
5793 
5794 	cregbase = qib_read_kreg32(dd, kr_counterregbase);
5795 
5796 	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5797 		(char __iomem *)dd->kregbase);
5798 
5799 	dd->egrtidbase = (u64 __iomem *)
5800 		((char __iomem *) dd->kregbase + dd->rcvegrbase);
5801 
5802 	/* port registers are defined as relative to base of chip */
5803 	dd->pport[0].cpspec->kpregbase =
5804 		(u64 __iomem *)((char __iomem *)dd->kregbase);
5805 	dd->pport[1].cpspec->kpregbase =
5806 		(u64 __iomem *)(dd->palign +
5807 		(char __iomem *)dd->kregbase);
5808 	dd->pport[0].cpspec->cpregbase =
5809 		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5810 		kr_counterregbase) + (char __iomem *)dd->kregbase);
5811 	dd->pport[1].cpspec->cpregbase =
5812 		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5813 		kr_counterregbase) + (char __iomem *)dd->kregbase);
5814 }
5815 
5816 /*
5817  * This is a fairly special-purpose observer, so we only support
5818  * the port-specific parts of SendCtrl
5819  */
5820 
5821 #define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |		\
5822 			   SYM_MASK(SendCtrl_0, SDmaEnable) |		\
5823 			   SYM_MASK(SendCtrl_0, SDmaIntEnable) |	\
5824 			   SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5825 			   SYM_MASK(SendCtrl_0, SDmaHalt) |		\
5826 			   SYM_MASK(SendCtrl_0, IBVLArbiterEn) |	\
5827 			   SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5828 
5829 static int sendctrl_hook(struct qib_devdata *dd,
5830 			 const struct diag_observer *op, u32 offs,
5831 			 u64 *data, u64 mask, int only_32)
5832 {
5833 	unsigned long flags;
5834 	unsigned idx;
5835 	unsigned pidx;
5836 	struct qib_pportdata *ppd = NULL;
5837 	u64 local_data, all_bits;
5838 
5839 	/*
5840 	 * The fixed correspondence between Physical ports and pports is
5841 	 * severed. We need to hunt for the ppd that corresponds
5842 	 * to the offset we got. And we have to do that without admitting
5843 	 * we know the stride, apparently.
5844 	 */
5845 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5846 		u64 __iomem *psptr;
5847 		u32 psoffs;
5848 
5849 		ppd = dd->pport + pidx;
5850 		if (!ppd->cpspec->kpregbase)
5851 			continue;
5852 
5853 		psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5854 		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5855 		if (psoffs == offs)
5856 			break;
5857 	}
5858 
5859 	/* If pport is not being managed by driver, just avoid shadows. */
5860 	if (pidx >= dd->num_pports)
5861 		ppd = NULL;
5862 
5863 	/* In any case, "idx" is flat index in kreg space */
5864 	idx = offs / sizeof(u64);
5865 
5866 	all_bits = ~0ULL;
5867 	if (only_32)
5868 		all_bits >>= 32;
5869 
5870 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
5871 	if (!ppd || (mask & all_bits) != all_bits) {
5872 		/*
5873 		 * At least some mask bits are zero, so we need
5874 		 * to read. The judgement call is whether from
5875 		 * reg or shadow. First-cut: read reg, and complain
5876 		 * if any bits which should be shadowed are different
5877 		 * from their shadowed value.
5878 		 */
5879 		if (only_32)
5880 			local_data = (u64)qib_read_kreg32(dd, idx);
5881 		else
5882 			local_data = qib_read_kreg64(dd, idx);
5883 		*data = (local_data & ~mask) | (*data & mask);
5884 	}
5885 	if (mask) {
5886 		/*
5887 		 * At least some mask bits are one, so we need
5888 		 * to write, but only shadow some bits.
5889 		 */
5890 		u64 sval, tval; /* Shadowed, transient */
5891 
5892 		/*
5893 		 * New shadow val is bits we don't want to touch,
5894 		 * ORed with bits we do, that are intended for shadow.
5895 		 */
5896 		if (ppd) {
5897 			sval = ppd->p_sendctrl & ~mask;
5898 			sval |= *data & SENDCTRL_SHADOWED & mask;
5899 			ppd->p_sendctrl = sval;
5900 		} else
5901 			sval = *data & SENDCTRL_SHADOWED & mask;
5902 		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5903 		qib_write_kreg(dd, idx, tval);
5904 		qib_write_kreg(dd, kr_scratch, 0Ull);
5905 	}
5906 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5907 	return only_32 ? 4 : 8;
5908 }
5909 
5910 static const struct diag_observer sendctrl_0_observer = {
5911 	sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5912 	KREG_IDX(SendCtrl_0) * sizeof(u64)
5913 };
5914 
5915 static const struct diag_observer sendctrl_1_observer = {
5916 	sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5917 	KREG_IDX(SendCtrl_1) * sizeof(u64)
5918 };
5919 
5920 static ushort sdma_fetch_prio = 8;
5921 module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5922 MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5923 
5924 /* Besides logging QSFP events, we set appropriate TxDDS values */
5925 static void init_txdds_table(struct qib_pportdata *ppd, int override);
5926 
5927 static void qsfp_7322_event(struct work_struct *work)
5928 {
5929 	struct qib_qsfp_data *qd;
5930 	struct qib_pportdata *ppd;
5931 	unsigned long pwrup;
5932 	unsigned long flags;
5933 	int ret;
5934 	u32 le2;
5935 
5936 	qd = container_of(work, struct qib_qsfp_data, work);
5937 	ppd = qd->ppd;
5938 	pwrup = qd->t_insert +
5939 		msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
5940 
5941 	/* Delay for 20 msecs to allow ModPrs resistor to setup */
5942 	mdelay(QSFP_MODPRS_LAG_MSEC);
5943 
5944 	if (!qib_qsfp_mod_present(ppd)) {
5945 		ppd->cpspec->qsfp_data.modpresent = 0;
5946 		/* Set the physical link to disabled */
5947 		qib_set_ib_7322_lstate(ppd, 0,
5948 				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
5949 		spin_lock_irqsave(&ppd->lflags_lock, flags);
5950 		ppd->lflags &= ~QIBL_LINKV;
5951 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5952 	} else {
5953 		/*
		 * Some QSFPs not only do not respond until the full power-up
		 * time, but may behave badly if we try too soon. So hold off
		 * responding to insertion.
5957 		 */
5958 		while (1) {
5959 			if (time_is_before_jiffies(pwrup))
5960 				break;
5961 			msleep(20);
5962 		}
5963 
5964 		ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5965 
5966 		/*
5967 		 * Need to change LE2 back to defaults if we couldn't
5968 		 * read the cable type (to handle cable swaps), so do this
5969 		 * even on failure to read cable information.  We don't
5970 		 * get here for QME, so IS_QME check not needed here.
5971 		 */
5972 		if (!ret && !ppd->dd->cspec->r1) {
5973 			if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
5974 				le2 = LE2_QME;
5975 			else if (qd->cache.atten[1] >= qib_long_atten &&
5976 				 QSFP_IS_CU(qd->cache.tech))
5977 				le2 = LE2_5m;
5978 			else
5979 				le2 = LE2_DEFAULT;
5980 		} else
5981 			le2 = LE2_DEFAULT;
5982 		ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
5983 		/*
		 * We always change parameters, since we can choose
5985 		 * values for cables without eeproms, and the cable may have
5986 		 * changed from a cable with full or partial eeprom content
5987 		 * to one with partial or no content.
5988 		 */
5989 		init_txdds_table(ppd, 0);
5990 		/* The physical link is being re-enabled only when the
5991 		 * previous state was DISABLED and the VALID bit is not
5992 		 * set. This should only happen when  the cable has been
5993 		 * physically pulled. */
5994 		if (!ppd->cpspec->qsfp_data.modpresent &&
5995 		    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
5996 			ppd->cpspec->qsfp_data.modpresent = 1;
5997 			qib_set_ib_7322_lstate(ppd, 0,
5998 				QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
5999 			spin_lock_irqsave(&ppd->lflags_lock, flags);
6000 			ppd->lflags |= QIBL_LINKV;
6001 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
6002 		}
6003 	}
6004 }
6005 
6006 /*
6007  * There is little we can do but complain to the user if QSFP
6008  * initialization fails.
6009  */
6010 static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
6011 {
6012 	unsigned long flags;
6013 	struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
6014 	struct qib_devdata *dd = ppd->dd;
6015 	u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
6016 
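	/* shift the module-present GPIO bit to this port's bit position */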
6017 	mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
6018 	qd->ppd = ppd;
6019 	qib_qsfp_init(qd, qsfp_7322_event);
6020 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
6021 	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
6022 	dd->cspec->gpio_mask |= mod_prs_bit;
6023 	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
6024 	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
6025 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
6026 }
6027 
6028 /*
6029  * called at device initialization time, and also if the txselect
6030  * module parameter is changed.  This is used for cables that don't
6031  * have valid QSFP EEPROMs (not present, or attenuation is zero).
6032  * We initialize to the default, then if there is a specific
6033  * unit,port match, we use that (and set it immediately, for the
6034  * current speed, if the link is at INIT or better).
6035  * String format is "default# unit#,port#=# ... u,p=#", separators must
6036  * be a SPACE character.  A newline terminates.  The u,p=# tuples may
 * optionally have "u,p=#,#", where the final # is the H1 value.
6038  * The last specific match is used (actually, all are used, but last
6039  * one is the one that winds up set); if none at all, fall back on default.
6040  */
6041 static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
6042 {
6043 	char *nxt, *str;
6044 	u32 pidx, unit, port, deflt, h1;
6045 	unsigned long val;
6046 	int any = 0, seth1;
6047 	int txdds_size;
6048 
6049 	str = txselect_list;
6050 
6051 	/* default number is validated in setup_txselect() */
6052 	deflt = simple_strtoul(str, &nxt, 0);
6053 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
6054 		dd->pport[pidx].cpspec->no_eep = deflt;
6055 
6056 	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
6057 	if (IS_QME(dd) || IS_QMH(dd))
6058 		txdds_size += TXDDS_MFG_SZ;
6059 
6060 	while (*nxt && nxt[1]) {
6061 		str = ++nxt;
6062 		unit = simple_strtoul(str, &nxt, 0);
6063 		if (nxt == str || !*nxt || *nxt != ',') {
6064 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6065 				;
6066 			continue;
6067 		}
6068 		str = ++nxt;
6069 		port = simple_strtoul(str, &nxt, 0);
6070 		if (nxt == str || *nxt != '=') {
6071 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6072 				;
6073 			continue;
6074 		}
6075 		str = ++nxt;
6076 		val = simple_strtoul(str, &nxt, 0);
6077 		if (nxt == str) {
6078 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6079 				;
6080 			continue;
6081 		}
6082 		if (val >= txdds_size)
6083 			continue;
6084 		seth1 = 0;
6085 		h1 = 0; /* gcc thinks it might be used uninitted */
6086 		if (*nxt == ',' && nxt[1]) {
6087 			str = ++nxt;
6088 			h1 = (u32)simple_strtoul(str, &nxt, 0);
6089 			if (nxt == str)
6090 				while (*nxt && *nxt++ != ' ') /* skip */
6091 					;
6092 			else
6093 				seth1 = 1;
6094 		}
6095 		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
6096 		     ++pidx) {
6097 			struct qib_pportdata *ppd = &dd->pport[pidx];
6098 
6099 			if (ppd->port != port || !ppd->link_speed_supported)
6100 				continue;
6101 			ppd->cpspec->no_eep = val;
6102 			if (seth1)
6103 				ppd->cpspec->h1_val = h1;
6104 			/* now change the IBC and serdes, overriding generic */
6105 			init_txdds_table(ppd, 1);
6106 			/* Re-enable the physical state machine on mezz boards
6107 			 * now that the correct settings have been set.
			 * QSFP boards are handled by the QSFP event handler */
6109 			if (IS_QMH(dd) || IS_QME(dd))
6110 				qib_set_ib_7322_lstate(ppd, 0,
6111 					    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6112 			any++;
6113 		}
6114 		if (*nxt == '\n')
6115 			break; /* done */
6116 	}
6117 	if (change && !any) {
6118 		/* no specific setting, use the default.
6119 		 * Change the IBC and serdes, but since it's
6120 		 * general, don't override specific settings.
6121 		 */
6122 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
6123 			if (dd->pport[pidx].link_speed_supported)
6124 				init_txdds_table(&dd->pport[pidx], 0);
6125 	}
6126 }
6127 
6128 /* handle the txselect parameter changing */
6129 static int setup_txselect(const char *str, const struct kernel_param *kp)
6130 {
6131 	struct qib_devdata *dd;
6132 	unsigned long val;
6133 	char *n;
6134 
6135 	if (strlen(str) >= ARRAY_SIZE(txselect_list)) {
6136 		pr_info("txselect_values string too long\n");
6137 		return -ENOSPC;
6138 	}
6139 	val = simple_strtoul(str, &n, 0);
6140 	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
6141 				TXDDS_MFG_SZ)) {
6142 		pr_info("txselect_values must start with a number < %d\n",
6143 			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
6144 		return -EINVAL;
6145 	}
6146 	strncpy(txselect_list, str, ARRAY_SIZE(txselect_list) - 1);
6147 
6148 	list_for_each_entry(dd, &qib_dev_list, list)
6149 		if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
6150 			set_no_qsfp_atten(dd, 1);
6151 	return 0;
6152 }
6153 
6154 /*
6155  * Write the final few registers that depend on some of the
6156  * init setup.  Done late in init, just before bringing up
6157  * the serdes.
6158  */
6159 static int qib_late_7322_initreg(struct qib_devdata *dd)
6160 {
6161 	int ret = 0, n;
6162 	u64 val;
6163 
6164 	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
6165 	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
6166 	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
6167 	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
6168 	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
6169 	if (val != dd->pioavailregs_phys) {
6170 		qib_dev_err(dd,
6171 			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
6172 			(unsigned long) dd->pioavailregs_phys,
6173 			(unsigned long long) val);
6174 		ret = -EINVAL;
6175 	}
6176 
6177 	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
6178 	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
6179 	/* driver sends get pkey, lid, etc. checking also, to catch bugs */
6180 	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
6181 
6182 	qib_register_observer(dd, &sendctrl_0_observer);
6183 	qib_register_observer(dd, &sendctrl_1_observer);
6184 
6185 	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
6186 	qib_write_kreg(dd, kr_control, dd->control);
6187 	/*
6188 	 * Set SendDmaFetchPriority and init Tx params, including
6189 	 * QSFP handler on boards that have QSFP.
6190 	 * First set our default attenuation entry for cables that
6191 	 * don't have valid attenuation.
6192 	 */
6193 	set_no_qsfp_atten(dd, 0);
6194 	for (n = 0; n < dd->num_pports; ++n) {
6195 		struct qib_pportdata *ppd = dd->pport + n;
6196 
6197 		qib_write_kreg_port(ppd, krp_senddmaprioritythld,
6198 				    sdma_fetch_prio & 0xf);
6199 		/* Initialize qsfp if present on board. */
6200 		if (dd->flags & QIB_HAS_QSFP)
6201 			qib_init_7322_qsfp(ppd);
6202 	}
6203 	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
6204 	qib_write_kreg(dd, kr_control, dd->control);
6205 
6206 	return ret;
6207 }
6208 
6209 /* per IB port errors.  */
6210 #define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
6211 	MASK_ACROSS(8, 15))
6212 #define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
6213 #define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
6214 	MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
6215 	MASK_ACROSS(0, 11))
6216 
6217 /*
6218  * Write the initialization per-port registers that need to be done at
6219  * driver load and after reset completes (i.e., that aren't done as part
6220  * of other init procedures called from qib_init.c).
6221  * Some of these should be redundant on reset, but play safe.
6222  */
6223 static void write_7322_init_portregs(struct qib_pportdata *ppd)
6224 {
6225 	u64 val;
6226 	int i;
6227 
6228 	if (!ppd->link_speed_supported) {
6229 		/* no buffer credits for this port */
6230 		for (i = 1; i < 8; i++)
6231 			qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
6232 		qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
6233 		qib_write_kreg(ppd->dd, kr_scratch, 0);
6234 		return;
6235 	}
6236 
6237 	/*
6238 	 * Set the number of supported virtual lanes in IBC,
6239 	 * for flow control packet handling on unsupported VLs
6240 	 */
6241 	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
6242 	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
6243 	val |= (u64)(ppd->vls_supported - 1) <<
6244 		SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
6245 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
6246 
6247 	qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
6248 
6249 	/* enable tx header checking */
6250 	qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
6251 			    IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
6252 			    IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
6253 
6254 	qib_write_kreg_port(ppd, krp_ncmodectrl,
6255 		SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
6256 
6257 	/*
6258 	 * Unconditionally clear the bufmask bits.  If SDMA is
6259 	 * enabled, we'll set them appropriately later.
6260 	 */
6261 	qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
6262 	qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
6263 	qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
6264 	if (ppd->dd->cspec->r1)
6265 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
6266 }
6267 
6268 /*
6269  * Write the initialization per-device registers that need to be done at
6270  * driver load and after reset completes (i.e., that aren't done as part
6271  * of other init procedures called from qib_init.c).  Also write per-port
6272  * registers that are affected by overall device config, such as QP mapping
6273  * Some of these should be redundant on reset, but play safe.
6274  */
6275 static void write_7322_initregs(struct qib_devdata *dd)
6276 {
6277 	struct qib_pportdata *ppd;
6278 	int i, pidx;
6279 	u64 val;
6280 
6281 	/* Set Multicast QPs received by port 2 to map to context one. */
6282 	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
6283 
6284 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
6285 		unsigned n, regno;
6286 		unsigned long flags;
6287 
6288 		if (dd->n_krcv_queues < 2 ||
6289 			!dd->pport[pidx].link_speed_supported)
6290 			continue;
6291 
6292 		ppd = &dd->pport[pidx];
6293 
6294 		/* be paranoid against later code motion, etc. */
6295 		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6296 		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6297 		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
6298 
6299 		/* Initialize QP to context mapping */
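		/*
		 * Each QP-map register holds six 5-bit context numbers;
		 * the loop below packs six entries per register, then
		 * writes the final, partially filled register after the
		 * loop.
		 */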
6300 		regno = krp_rcvqpmaptable;
6301 		val = 0;
6302 		if (dd->num_pports > 1)
6303 			n = dd->first_user_ctxt / dd->num_pports;
6304 		else
6305 			n = dd->first_user_ctxt - 1;
6306 		for (i = 0; i < 32; ) {
6307 			unsigned ctxt;
6308 
6309 			if (dd->num_pports > 1)
6310 				ctxt = (i % n) * dd->num_pports + pidx;
6311 			else if (i % n)
6312 				ctxt = (i % n) + 1;
6313 			else
6314 				ctxt = ppd->hw_pidx;
6315 			val |= ctxt << (5 * (i % 6));
6316 			i++;
6317 			if (i % 6 == 0) {
6318 				qib_write_kreg_port(ppd, regno, val);
6319 				val = 0;
6320 				regno++;
6321 			}
6322 		}
6323 		qib_write_kreg_port(ppd, regno, val);
6324 	}
6325 
6326 	/*
	 * Set up interrupt mitigation for kernel contexts, but not for
	 * user contexts (user contexts use interrupts when stalled
	 * waiting for any packet, so they want those interrupts right
	 * away).
6331 	 */
6332 	for (i = 0; i < dd->first_user_ctxt; i++) {
6333 		dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6334 		qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6335 	}
6336 
6337 	/*
	 * Initialize the rcvflow tables as disabled.  Application code
	 * will set up each flow as it uses it.
	 * This doesn't clear any of the error bits that might be set.
6341 	 */
6342 	val = TIDFLOW_ERRBITS; /* these are W1C */
6343 	for (i = 0; i < dd->cfgctxts; i++) {
6344 		int flow;
6345 
6346 		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6347 			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6348 	}
6349 
6350 	/*
	 * Dual-port cards init to dual-port recovery, single-port cards to
	 * the one port.  Dual-port cards may later adjust to one port, and
	 * then back to dual port if both ports are connected.
	 */
6355 	if (dd->num_pports)
6356 		setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6357 }
6358 
6359 static int qib_init_7322_variables(struct qib_devdata *dd)
6360 {
6361 	struct qib_pportdata *ppd;
6362 	unsigned features, pidx, sbufcnt;
6363 	int ret, mtu;
6364 	u32 sbufs, updthresh;
6365 	resource_size_t vl15off;
6366 
6367 	/* pport structs are contiguous, allocated after devdata */
6368 	ppd = (struct qib_pportdata *)(dd + 1);
6369 	dd->pport = ppd;
6370 	ppd[0].dd = dd;
6371 	ppd[1].dd = dd;
6372 
6373 	dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6374 
6375 	ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6376 	ppd[1].cpspec = &ppd[0].cpspec[1];
6377 	ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6378 	ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6379 
6380 	spin_lock_init(&dd->cspec->rcvmod_lock);
6381 	spin_lock_init(&dd->cspec->gpio_lock);
6382 
6383 	/* we haven't yet set QIB_PRESENT, so use read directly */
6384 	dd->revision = readq(&dd->kregbase[kr_revision]);
6385 
6386 	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6387 		qib_dev_err(dd,
6388 			"Revision register read failure, giving up initialization\n");
6389 		ret = -ENODEV;
6390 		goto bail;
6391 	}
6392 	dd->flags |= QIB_PRESENT;  /* now register routines work */
6393 
6394 	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6395 	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6396 	dd->cspec->r1 = dd->minrev == 1;
6397 
6398 	get_7322_chip_params(dd);
6399 	features = qib_7322_boardname(dd);
6400 
6401 	/* now that piobcnt2k and 4k set, we can allocate these */
6402 	sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6403 		NUM_VL15_BUFS + BITS_PER_LONG - 1;
6404 	sbufcnt /= BITS_PER_LONG;
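	/*
	 * sbufcnt is now the number of longs needed for bitmaps with
	 * one bit per send buffer (2k, 4k, and VL15 buffers).
	 */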
6405 	dd->cspec->sendchkenable = kmalloc(sbufcnt *
6406 		sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
6407 	dd->cspec->sendgrhchk = kmalloc(sbufcnt *
6408 		sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
6409 	dd->cspec->sendibchk = kmalloc(sbufcnt *
6410 		sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
6411 	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6412 		!dd->cspec->sendibchk) {
6413 		ret = -ENOMEM;
6414 		goto bail;
6415 	}
6416 
6417 	ppd = dd->pport;
6418 
6419 	/*
6420 	 * GPIO bits for TWSI data and clock,
6421 	 * used for serial EEPROM.
6422 	 */
6423 	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6424 	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6425 	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6426 
6427 	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6428 		QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6429 		QIB_HAS_THRESH_UPDATE |
6430 		(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6431 	dd->flags |= qib_special_trigger ?
6432 		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6433 
6434 	/*
6435 	 * Setup initial values.  These may change when PAT is enabled, but
6436 	 * we need these to do initial chip register accesses.
6437 	 */
6438 	qib_7322_set_baseaddrs(dd);
6439 
6440 	mtu = ib_mtu_enum_to_int(qib_ibmtu);
6441 	if (mtu == -1)
6442 		mtu = QIB_DEFAULT_MTU;
6443 
6444 	dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6445 	/* all hwerrors become interrupts, unless special purposed */
6446 	dd->cspec->hwerrmask = ~0ULL;
	/*
	 * link_recovery setup causes these errors, so ignore them,
	 * other than clearing them when they occur.
	 */
6449 	dd->cspec->hwerrmask &=
6450 		~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6451 		  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6452 		  HWE_MASK(LATriggered));
6453 
6454 	for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6455 		struct qib_chippport_specific *cp = ppd->cpspec;
6456 
6457 		ppd->link_speed_supported = features & PORT_SPD_CAP;
6458 		features >>=  PORT_SPD_CAP_SHIFT;
6459 		if (!ppd->link_speed_supported) {
6460 			/* single port mode (7340, or configured) */
6461 			dd->skip_kctxt_mask |= 1 << pidx;
6462 			if (pidx == 0) {
6463 				/* Make sure port is disabled. */
6464 				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6465 				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6466 				ppd[0] = ppd[1];
6467 				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6468 						  IBSerdesPClkNotDetectMask_0)
6469 						  | SYM_MASK(HwErrMask,
6470 						  SDmaMemReadErrMask_0));
6471 				dd->cspec->int_enable_mask &= ~(
6472 				     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6473 				     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6474 				     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6475 				     SYM_MASK(IntMask, SDmaIntMask_0) |
6476 				     SYM_MASK(IntMask, ErrIntMask_0) |
6477 				     SYM_MASK(IntMask, SendDoneIntMask_0));
6478 			} else {
6479 				/* Make sure port is disabled. */
6480 				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6481 				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6482 				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6483 						  IBSerdesPClkNotDetectMask_1)
6484 						  | SYM_MASK(HwErrMask,
6485 						  SDmaMemReadErrMask_1));
6486 				dd->cspec->int_enable_mask &= ~(
6487 				     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6488 				     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6489 				     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6490 				     SYM_MASK(IntMask, SDmaIntMask_1) |
6491 				     SYM_MASK(IntMask, ErrIntMask_1) |
6492 				     SYM_MASK(IntMask, SendDoneIntMask_1));
6493 			}
6494 			continue;
6495 		}
6496 
6497 		dd->num_pports++;
6498 		ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6499 		if (ret) {
6500 			dd->num_pports--;
6501 			goto bail;
6502 		}
6503 
6504 		ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6505 		ppd->link_width_enabled = IB_WIDTH_4X;
6506 		ppd->link_speed_enabled = ppd->link_speed_supported;
6507 		/*
6508 		 * Set the initial values to reasonable default, will be set
6509 		 * for real when link is up.
6510 		 */
6511 		ppd->link_width_active = IB_WIDTH_4X;
6512 		ppd->link_speed_active = QIB_IB_SDR;
6513 		ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6514 		switch (qib_num_cfg_vls) {
6515 		case 1:
6516 			ppd->vls_supported = IB_VL_VL0;
6517 			break;
6518 		case 2:
6519 			ppd->vls_supported = IB_VL_VL0_1;
6520 			break;
6521 		default:
6522 			qib_devinfo(dd->pcidev,
6523 				    "Invalid num_vls %u, using 4 VLs\n",
6524 				    qib_num_cfg_vls);
6525 			qib_num_cfg_vls = 4;
6526 			/* fall through */
6527 		case 4:
6528 			ppd->vls_supported = IB_VL_VL0_3;
6529 			break;
6530 		case 8:
6531 			if (mtu <= 2048)
6532 				ppd->vls_supported = IB_VL_VL0_7;
6533 			else {
6534 				qib_devinfo(dd->pcidev,
6535 					    "Invalid num_vls %u for MTU %d , using 4 VLs\n",
6536 					    qib_num_cfg_vls, mtu);
6537 				ppd->vls_supported = IB_VL_VL0_3;
6538 				qib_num_cfg_vls = 4;
6539 			}
6540 			break;
6541 		}
6542 		ppd->vls_operational = ppd->vls_supported;
6543 
6544 		init_waitqueue_head(&cp->autoneg_wait);
6545 		INIT_DELAYED_WORK(&cp->autoneg_work,
6546 				  autoneg_7322_work);
6547 		if (ppd->dd->cspec->r1)
6548 			INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6549 
6550 		/*
6551 		 * For Mez and similar cards, no qsfp info, so do
6552 		 * the "cable info" setup here.  Can be overridden
6553 		 * in adapter-specific routines.
6554 		 */
6555 		if (!(dd->flags & QIB_HAS_QSFP)) {
6556 			if (!IS_QMH(dd) && !IS_QME(dd))
6557 				qib_devinfo(dd->pcidev,
6558 					"IB%u:%u: Unknown mezzanine card type\n",
6559 					dd->unit, ppd->port);
6560 			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6561 			/*
6562 			 * Choose center value as default tx serdes setting
6563 			 * until changed through module parameter.
6564 			 */
6565 			ppd->cpspec->no_eep = IS_QMH(dd) ?
6566 				TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6567 		} else
6568 			cp->h1_val = H1_FORCE_VAL;
6569 
6570 		/* Avoid writes to chip for mini_init */
6571 		if (!qib_mini_init)
6572 			write_7322_init_portregs(ppd);
6573 
6574 		timer_setup(&cp->chase_timer, reenable_chase, 0);
6575 
6576 		ppd++;
6577 	}
6578 
6579 	dd->rcvhdrentsize = qib_rcvhdrentsize ?
6580 		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6581 	dd->rcvhdrsize = qib_rcvhdrsize ?
6582 		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
6583 	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
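	/* the RHF is in the last two 32-bit words of each rcvhdrq entry */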
6584 
6585 	/* we always allocate at least 2048 bytes for eager buffers */
6586 	dd->rcvegrbufsize = max(mtu, 2048);
6587 	BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
6588 	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
6589 
6590 	qib_7322_tidtemplate(dd);
6591 
6592 	/*
6593 	 * We can request a receive interrupt for 1 or
6594 	 * more packets from current offset.
6595 	 */
6596 	dd->rhdrhead_intr_off =
6597 		(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6598 
6599 	/* setup the stats timer; the add_timer is done at end of init */
6600 	timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);
6601 
6602 	dd->ureg_align = 0x10000;  /* 64KB alignment */
6603 
6604 	dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6605 
6606 	qib_7322_config_ctxts(dd);
6607 	qib_set_ctxtcnt(dd);
6608 
6609 	/*
6610 	 * We do not set WC on the VL15 buffers to avoid
6611 	 * a rare problem with unaligned writes from
6612 	 * interrupt-flushed store buffers, so we need
6613 	 * to map those separately here.  We can't solve
6614 	 * this for the rarely used mtrr case.
6615 	 */
6616 	ret = init_chip_wc_pat(dd, 0);
6617 	if (ret)
6618 		goto bail;
6619 
6620 	/* vl15 buffers start just after the 4k buffers */
6621 	vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6622 		  dd->piobcnt4k * dd->align4k;
6623 	dd->piovl15base	= ioremap_nocache(vl15off,
6624 					  NUM_VL15_BUFS * dd->align4k);
6625 	if (!dd->piovl15base) {
6626 		ret = -ENOMEM;
6627 		goto bail;
6628 	}
6629 
6630 	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6631 
6632 	ret = 0;
6633 	if (qib_mini_init)
6634 		goto bail;
6635 	if (!dd->num_pports) {
6636 		qib_dev_err(dd, "No ports enabled, giving up initialization\n");
		goto bail; /* no error, so we can still figure out why */
6638 	}
6639 
6640 	write_7322_initregs(dd);
6641 	ret = qib_create_ctxts(dd);
6642 	init_7322_cntrnames(dd);
6643 
6644 	updthresh = 8U; /* update threshold */
6645 
	/*
	 * Use all of the 4 KB buffers for kernel SDMA, zero if !SDMA.
	 * Reserve the update threshold amount, or 3, whichever is
	 * greater, for other kernel use such as sending SMI, MAD, and
	 * ACK packets; if we aren't enabling SDMA, all the 4 KB buffers
	 * go to the kernel instead.
	 * If the reservation were less than the update threshold, we
	 * could wait a long time for an update.  Coded this way because
	 * we sometimes change the update threshold for various reasons,
	 * and we want this to remain robust.
	 */
6656 	if (dd->flags & QIB_HAS_SEND_DMA) {
6657 		dd->cspec->sdmabufcnt = dd->piobcnt4k;
6658 		sbufs = updthresh > 3 ? updthresh : 3;
6659 	} else {
6660 		dd->cspec->sdmabufcnt = 0;
6661 		sbufs = dd->piobcnt4k;
6662 	}
6663 	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6664 		dd->cspec->sdmabufcnt;
6665 	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6666 	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6667 	dd->last_pio = dd->cspec->lastbuf_for_pio;
6668 	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6669 		dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
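	/* pbufsctxt is the number of PIO send buffers per user context */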
6670 
6671 	/*
6672 	 * If we have 16 user contexts, we will have 7 sbufs
6673 	 * per context, so reduce the update threshold to match.  We
6674 	 * want to update before we actually run out, at low pbufs/ctxt
6675 	 * so give ourselves some margin.
6676 	 */
6677 	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6678 		updthresh = dd->pbufsctxt - 2;
6679 	dd->cspec->updthresh_dflt = updthresh;
6680 	dd->cspec->updthresh = updthresh;
6681 
6682 	/* before full enable, no interrupts, no locking needed */
6683 	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6684 			     << SYM_LSB(SendCtrl, AvailUpdThld)) |
6685 			SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6686 
6687 	dd->psxmitwait_supported = 1;
6688 	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6689 bail:
6690 	if (!dd->ctxtcnt)
6691 		dd->ctxtcnt = 1; /* for other initialization code */
6692 
6693 	return ret;
6694 }
6695 
6696 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6697 					u32 *pbufnum)
6698 {
6699 	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6700 	struct qib_devdata *dd = ppd->dd;
6701 
6702 	/* last is same for 2k and 4k, because we use 4k if all 2k busy */
6703 	if (pbc & PBC_7322_VL15_SEND) {
6704 		first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6705 		last = first;
6706 	} else {
6707 		if ((plen + 1) > dd->piosize2kmax_dwords)
6708 			first = dd->piobcnt2k;
6709 		else
6710 			first = 0;
6711 		last = dd->cspec->lastbuf_for_pio;
6712 	}
6713 	return qib_getsendbuf_range(dd, pbufnum, first, last);
6714 }
6715 
6716 static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6717 				     u32 start)
6718 {
6719 	qib_write_kreg_port(ppd, krp_psinterval, intv);
6720 	qib_write_kreg_port(ppd, krp_psstart, start);
6721 }
6722 
6723 /*
6724  * Must be called with sdma_lock held, or before init finished.
6725  */
6726 static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6727 {
6728 	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6729 }
6730 
6731 /*
6732  * sdma_lock should be acquired before calling this routine
6733  */
6734 static void dump_sdma_7322_state(struct qib_pportdata *ppd)
6735 {
6736 	u64 reg, reg1, reg2;
6737 
6738 	reg = qib_read_kreg_port(ppd, krp_senddmastatus);
6739 	qib_dev_porterr(ppd->dd, ppd->port,
6740 		"SDMA senddmastatus: 0x%016llx\n", reg);
6741 
6742 	reg = qib_read_kreg_port(ppd, krp_sendctrl);
6743 	qib_dev_porterr(ppd->dd, ppd->port,
6744 		"SDMA sendctrl: 0x%016llx\n", reg);
6745 
6746 	reg = qib_read_kreg_port(ppd, krp_senddmabase);
6747 	qib_dev_porterr(ppd->dd, ppd->port,
6748 		"SDMA senddmabase: 0x%016llx\n", reg);
6749 
6750 	reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
6751 	reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
6752 	reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
6753 	qib_dev_porterr(ppd->dd, ppd->port,
6754 		"SDMA senddmabufmask 0:%llx  1:%llx  2:%llx\n",
6755 		 reg, reg1, reg2);
6756 
6757 	/* get bufuse bits, clear them, and print them again if non-zero */
6758 	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6759 	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
	qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
	qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
6764 	/* 0 and 1 should always be zero, so print as short form */
6765 	qib_dev_porterr(ppd->dd, ppd->port,
6766 		 "SDMA current senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6767 		 reg, reg1, reg2);
6768 	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6769 	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6770 	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6771 	/* 0 and 1 should always be zero, so print as short form */
6772 	qib_dev_porterr(ppd->dd, ppd->port,
6773 		 "SDMA cleared senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6774 		 reg, reg1, reg2);
6775 
6776 	reg = qib_read_kreg_port(ppd, krp_senddmatail);
6777 	qib_dev_porterr(ppd->dd, ppd->port,
6778 		"SDMA senddmatail: 0x%016llx\n", reg);
6779 
6780 	reg = qib_read_kreg_port(ppd, krp_senddmahead);
6781 	qib_dev_porterr(ppd->dd, ppd->port,
6782 		"SDMA senddmahead: 0x%016llx\n", reg);
6783 
6784 	reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
6785 	qib_dev_porterr(ppd->dd, ppd->port,
6786 		"SDMA senddmaheadaddr: 0x%016llx\n", reg);
6787 
6788 	reg = qib_read_kreg_port(ppd, krp_senddmalengen);
6789 	qib_dev_porterr(ppd->dd, ppd->port,
6790 		"SDMA senddmalengen: 0x%016llx\n", reg);
6791 
6792 	reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
6793 	qib_dev_porterr(ppd->dd, ppd->port,
6794 		"SDMA senddmadesccnt: 0x%016llx\n", reg);
6795 
6796 	reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
6797 	qib_dev_porterr(ppd->dd, ppd->port,
6798 		"SDMA senddmaidlecnt: 0x%016llx\n", reg);
6799 
6800 	reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
6801 	qib_dev_porterr(ppd->dd, ppd->port,
6802 		"SDMA senddmapriorityhld: 0x%016llx\n", reg);
6803 
6804 	reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
6805 	qib_dev_porterr(ppd->dd, ppd->port,
6806 		"SDMA senddmareloadcnt: 0x%016llx\n", reg);
6807 
6808 	dump_sdma_state(ppd);
6809 }
6810 
6811 static struct sdma_set_state_action sdma_7322_action_table[] = {
6812 	[qib_sdma_state_s00_hw_down] = {
6813 		.go_s99_running_tofalse = 1,
6814 		.op_enable = 0,
6815 		.op_intenable = 0,
6816 		.op_halt = 0,
6817 		.op_drain = 0,
6818 	},
6819 	[qib_sdma_state_s10_hw_start_up_wait] = {
6820 		.op_enable = 0,
6821 		.op_intenable = 1,
6822 		.op_halt = 1,
6823 		.op_drain = 0,
6824 	},
6825 	[qib_sdma_state_s20_idle] = {
6826 		.op_enable = 1,
6827 		.op_intenable = 1,
6828 		.op_halt = 1,
6829 		.op_drain = 0,
6830 	},
6831 	[qib_sdma_state_s30_sw_clean_up_wait] = {
6832 		.op_enable = 0,
6833 		.op_intenable = 1,
6834 		.op_halt = 1,
6835 		.op_drain = 0,
6836 	},
6837 	[qib_sdma_state_s40_hw_clean_up_wait] = {
6838 		.op_enable = 1,
6839 		.op_intenable = 1,
6840 		.op_halt = 1,
6841 		.op_drain = 0,
6842 	},
6843 	[qib_sdma_state_s50_hw_halt_wait] = {
6844 		.op_enable = 1,
6845 		.op_intenable = 1,
6846 		.op_halt = 1,
6847 		.op_drain = 1,
6848 	},
6849 	[qib_sdma_state_s99_running] = {
6850 		.op_enable = 1,
6851 		.op_intenable = 1,
6852 		.op_halt = 0,
6853 		.op_drain = 0,
6854 		.go_s99_running_totrue = 1,
6855 	},
6856 };
6857 
6858 static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6859 {
6860 	ppd->sdma_state.set_state_action = sdma_7322_action_table;
6861 }
6862 
6863 static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6864 {
6865 	struct qib_devdata *dd = ppd->dd;
6866 	unsigned lastbuf, erstbuf;
6867 	u64 senddmabufmask[3] = { 0 };
6868 	int n, ret = 0;
6869 
6870 	qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6871 	qib_sdma_7322_setlengen(ppd);
6872 	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6873 	qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6874 	qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6875 	qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6876 
6877 	if (dd->num_pports)
6878 		n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6879 	else
6880 		n = dd->cspec->sdmabufcnt; /* failsafe for init */
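
	/*
	 * SDMA send buffers occupy the top of the PIO buffer space;
	 * with two ports, port 1 gets the lower half of that range and
	 * port 2 the upper half.
	 */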
6881 	erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6882 		((dd->num_pports == 1 || ppd->port == 2) ? n :
6883 		dd->cspec->sdmabufcnt);
6884 	lastbuf = erstbuf + n;
6885 
6886 	ppd->sdma_state.first_sendbuf = erstbuf;
6887 	ppd->sdma_state.last_sendbuf = lastbuf;
6888 	for (; erstbuf < lastbuf; ++erstbuf) {
6889 		unsigned word = erstbuf / BITS_PER_LONG;
6890 		unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6891 
6892 		BUG_ON(word >= 3);
6893 		senddmabufmask[word] |= 1ULL << bit;
6894 	}
6895 	qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6896 	qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6897 	qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6898 	return ret;
6899 }
6900 
6901 /* sdma_lock must be held */
6902 static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6903 {
6904 	struct qib_devdata *dd = ppd->dd;
6905 	int sane;
6906 	int use_dmahead;
6907 	u16 swhead;
6908 	u16 swtail;
6909 	u16 cnt;
6910 	u16 hwhead;
6911 
6912 	use_dmahead = __qib_sdma_running(ppd) &&
6913 		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
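	/*
	 * Use the DMA'd shadow of the head only while SDMA is running
	 * and the chip supports the SDMA timeout; otherwise read the
	 * head register.  If the shadow fails the sanity check below,
	 * retry once from the register.
	 */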
6914 retry:
6915 	hwhead = use_dmahead ?
6916 		(u16) le64_to_cpu(*ppd->sdma_head_dma) :
6917 		(u16) qib_read_kreg_port(ppd, krp_senddmahead);
6918 
6919 	swhead = ppd->sdma_descq_head;
6920 	swtail = ppd->sdma_descq_tail;
6921 	cnt = ppd->sdma_descq_cnt;
6922 
6923 	if (swhead < swtail)
6924 		/* not wrapped */
		sane = (hwhead >= swhead) && (hwhead <= swtail);
6926 	else if (swhead > swtail)
6927 		/* wrapped around */
6928 		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6929 			(hwhead <= swtail);
6930 	else
6931 		/* empty */
6932 		sane = (hwhead == swhead);
6933 
6934 	if (unlikely(!sane)) {
6935 		if (use_dmahead) {
6936 			/* try one more time, directly from the register */
6937 			use_dmahead = 0;
6938 			goto retry;
6939 		}
6940 		/* proceed as if no progress */
6941 		hwhead = swhead;
6942 	}
6943 
6944 	return hwhead;
6945 }
6946 
6947 static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6948 {
6949 	u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6950 
6951 	return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6952 	       (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6953 	       !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6954 	       !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6955 }
6956 
6957 /*
6958  * Compute the amount of delay before sending the next packet if the
6959  * port's send rate differs from the static rate set for the QP.
6960  * The delay affects the next packet and the amount of the delay is
 * based on the length of this packet.
6962  */
6963 static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6964 				   u8 srate, u8 vl)
6965 {
6966 	u8 snd_mult = ppd->delay_mult;
6967 	u8 rcv_mult = ib_rate_to_delay[srate];
6968 	u32 ret;
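
	/*
	 * Delay is nonzero only when the QP's static rate is slower
	 * than the port's (larger delay multiplier), and it scales with
	 * roughly half the PBC packet length.
	 */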
6969 
6970 	ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6971 
6972 	/* Indicate VL15, else set the VL in the control word */
6973 	if (vl == 15)
6974 		ret |= PBC_7322_VL15_SEND_CTRL;
6975 	else
6976 		ret |= vl << PBC_VL_NUM_LSB;
6977 	ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
6978 
6979 	return ret;
6980 }
6981 
6982 /*
6983  * Enable the per-port VL15 send buffers for use.
6984  * They follow the rest of the buffers, without a config parameter.
6985  * This was in initregs, but that is done before the shadow
6986  * is set up, and this has to be done after the shadow is
6987  * set up.
6988  */
6989 static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
6990 {
6991 	unsigned vl15bufs;
6992 
6993 	vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
6994 	qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
6995 			       TXCHK_CHG_TYPE_KERN, NULL);
6996 }
6997 
6998 static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
6999 {
7000 	if (rcd->ctxt < NUM_IB_PORTS) {
7001 		if (rcd->dd->num_pports > 1) {
7002 			rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
7003 			rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
7004 		} else {
7005 			rcd->rcvegrcnt = KCTXT0_EGRCNT;
7006 			rcd->rcvegr_tid_base = 0;
7007 		}
7008 	} else {
7009 		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
7010 		rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
7011 			(rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
7012 	}
7013 }
7014 
7015 #define QTXSLEEPS 5000
7016 static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
7017 				  u32 len, u32 which, struct qib_ctxtdata *rcd)
7018 {
7019 	int i;
7020 	const int last = start + len - 1;
7021 	const int lastr = last / BITS_PER_LONG;
7022 	u32 sleeps = 0;
7023 	int wait = rcd != NULL;
7024 	unsigned long flags;
7025 
7026 	while (wait) {
7027 		unsigned long shadow = 0;
7028 		int cstart, previ = -1;
7029 
7030 		/*
		 * When flipping from kernel to user, we can't change
		 * the checking type if the buffer is allocated to the
		 * driver.  It's OK in the other direction, because that
		 * happens at close, and we have just disarmed all the
		 * buffers.  All the kernel-to-kernel changes are also
		 * OK.
7037 		 */
7038 		for (cstart = start; cstart <= last; cstart++) {
7039 			i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7040 				/ BITS_PER_LONG;
7041 			if (i != previ) {
7042 				shadow = (unsigned long)
7043 					le64_to_cpu(dd->pioavailregs_dma[i]);
7044 				previ = i;
7045 			}
7046 			if (test_bit(((2 * cstart) +
7047 				      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7048 				     % BITS_PER_LONG, &shadow))
7049 				break;
7050 		}
7051 
7052 		if (cstart > last)
7053 			break;
7054 
7055 		if (sleeps == QTXSLEEPS)
7056 			break;
7057 		/* make sure we see an updated copy next time around */
7058 		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7059 		sleeps++;
7060 		msleep(20);
7061 	}
7062 
7063 	switch (which) {
7064 	case TXCHK_CHG_TYPE_DIS1:
7065 		/*
7066 		 * disable checking on a range; used by diags; just
7067 		 * one buffer, but still written generically
7068 		 */
7069 		for (i = start; i <= last; i++)
7070 			clear_bit(i, dd->cspec->sendchkenable);
7071 		break;
7072 
7073 	case TXCHK_CHG_TYPE_ENAB1:
7074 		/*
7075 		 * (re)enable checking on a range; used by diags; just
7076 		 * one buffer, but still written generically; read
7077 		 * scratch to be sure buffer actually triggered, not
7078 		 * just flushed from processor.
7079 		 */
7080 		qib_read_kreg32(dd, kr_scratch);
7081 		for (i = start; i <= last; i++)
7082 			set_bit(i, dd->cspec->sendchkenable);
7083 		break;
7084 
7085 	case TXCHK_CHG_TYPE_KERN:
7086 		/* usable by kernel */
7087 		for (i = start; i <= last; i++) {
7088 			set_bit(i, dd->cspec->sendibchk);
7089 			clear_bit(i, dd->cspec->sendgrhchk);
7090 		}
7091 		spin_lock_irqsave(&dd->uctxt_lock, flags);
7092 		/* see if we need to raise avail update threshold */
7093 		for (i = dd->first_user_ctxt;
7094 		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
7095 		     && i < dd->cfgctxts; i++)
7096 			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
7097 			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
7098 			   < dd->cspec->updthresh_dflt)
7099 				break;
7100 		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
7101 		if (i == dd->cfgctxts) {
7102 			spin_lock_irqsave(&dd->sendctrl_lock, flags);
7103 			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
7104 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7105 			dd->sendctrl |= (dd->cspec->updthresh &
7106 					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
7107 					   SYM_LSB(SendCtrl, AvailUpdThld);
7108 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7109 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7110 		}
7111 		break;
7112 
7113 	case TXCHK_CHG_TYPE_USER:
7114 		/* for user process */
7115 		for (i = start; i <= last; i++) {
7116 			clear_bit(i, dd->cspec->sendibchk);
7117 			set_bit(i, dd->cspec->sendgrhchk);
7118 		}
7119 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
7120 		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
7121 			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
7122 			dd->cspec->updthresh = (rcd->piocnt /
7123 						rcd->subctxt_cnt) - 1;
7124 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7125 			dd->sendctrl |= (dd->cspec->updthresh &
7126 					SYM_RMASK(SendCtrl, AvailUpdThld))
7127 					<< SYM_LSB(SendCtrl, AvailUpdThld);
7128 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7129 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7130 		} else
7131 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7132 		break;
7133 
7134 	default:
7135 		break;
7136 	}
7137 
7138 	for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
7139 		qib_write_kreg(dd, kr_sendcheckmask + i,
7140 			       dd->cspec->sendchkenable[i]);
7141 
7142 	for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
7143 		qib_write_kreg(dd, kr_sendgrhcheckmask + i,
7144 			       dd->cspec->sendgrhchk[i]);
7145 		qib_write_kreg(dd, kr_sendibpktmask + i,
7146 			       dd->cspec->sendibchk[i]);
7147 	}
7148 
7149 	/*
7150 	 * Be sure whatever we did was seen by the chip and acted upon,
7151 	 * before we return.  Mostly important for which >= 2.
7152 	 */
7153 	qib_read_kreg32(dd, kr_scratch);
7154 }
7155 
7156 
7157 /* useful for trigger analyzers, etc. */
7158 static void writescratch(struct qib_devdata *dd, u32 val)
7159 {
7160 	qib_write_kreg(dd, kr_scratch, val);
7161 }
7162 
7163 /* Dummy for now, use chip regs soon */
7164 static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
7165 {
7166 	return -ENXIO;
7167 }
7168 
7169 /**
7170  * qib_init_iba7322_funcs - set up the chip-specific function pointers
 * @pdev: the pci_dev for the qlogic_ib device
7172  * @ent: pci_device_id struct for this dev
7173  *
7174  * Also allocates, inits, and returns the devdata struct for this
7175  * device instance
7176  *
7177  * This is global, and is called directly at init to set up the
7178  * chip-specific function pointers for later use.
7179  */
7180 struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
7181 					   const struct pci_device_id *ent)
7182 {
7183 	struct qib_devdata *dd;
7184 	int ret, i;
7185 	u32 tabsize, actual_cnt = 0;
7186 
7187 	dd = qib_alloc_devdata(pdev,
7188 		NUM_IB_PORTS * sizeof(struct qib_pportdata) +
7189 		sizeof(struct qib_chip_specific) +
7190 		NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
7191 	if (IS_ERR(dd))
7192 		goto bail;
7193 
7194 	dd->f_bringup_serdes    = qib_7322_bringup_serdes;
7195 	dd->f_cleanup           = qib_setup_7322_cleanup;
7196 	dd->f_clear_tids        = qib_7322_clear_tids;
7197 	dd->f_free_irq          = qib_7322_free_irq;
7198 	dd->f_get_base_info     = qib_7322_get_base_info;
7199 	dd->f_get_msgheader     = qib_7322_get_msgheader;
7200 	dd->f_getsendbuf        = qib_7322_getsendbuf;
7201 	dd->f_gpio_mod          = gpio_7322_mod;
7202 	dd->f_eeprom_wen        = qib_7322_eeprom_wen;
7203 	dd->f_hdrqempty         = qib_7322_hdrqempty;
7204 	dd->f_ib_updown         = qib_7322_ib_updown;
7205 	dd->f_init_ctxt         = qib_7322_init_ctxt;
7206 	dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
7207 	dd->f_intr_fallback     = qib_7322_intr_fallback;
7208 	dd->f_late_initreg      = qib_late_7322_initreg;
7209 	dd->f_setpbc_control    = qib_7322_setpbc_control;
7210 	dd->f_portcntr          = qib_portcntr_7322;
7211 	dd->f_put_tid           = qib_7322_put_tid;
7212 	dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
7213 	dd->f_rcvctrl           = rcvctrl_7322_mod;
7214 	dd->f_read_cntrs        = qib_read_7322cntrs;
7215 	dd->f_read_portcntrs    = qib_read_7322portcntrs;
7216 	dd->f_reset             = qib_do_7322_reset;
7217 	dd->f_init_sdma_regs    = init_sdma_7322_regs;
7218 	dd->f_sdma_busy         = qib_sdma_7322_busy;
7219 	dd->f_sdma_gethead      = qib_sdma_7322_gethead;
7220 	dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
7221 	dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
7222 	dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
7223 	dd->f_sendctrl          = sendctrl_7322_mod;
7224 	dd->f_set_armlaunch     = qib_set_7322_armlaunch;
7225 	dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
7226 	dd->f_iblink_state      = qib_7322_iblink_state;
7227 	dd->f_ibphys_portstate  = qib_7322_phys_portstate;
7228 	dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
7229 	dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
7230 	dd->f_set_ib_loopback   = qib_7322_set_loopback;
7231 	dd->f_get_ib_table      = qib_7322_get_ib_table;
7232 	dd->f_set_ib_table      = qib_7322_set_ib_table;
7233 	dd->f_set_intr_state    = qib_7322_set_intr_state;
7234 	dd->f_setextled         = qib_setup_7322_setextled;
7235 	dd->f_txchk_change      = qib_7322_txchk_change;
7236 	dd->f_update_usrhead    = qib_update_7322_usrhead;
7237 	dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
7238 	dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
7239 	dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
7240 	dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
7241 	dd->f_sdma_init_early   = qib_7322_sdma_init_early;
7242 	dd->f_writescratch      = writescratch;
7243 	dd->f_tempsense_rd	= qib_7322_tempsense_rd;
7244 #ifdef CONFIG_INFINIBAND_QIB_DCA
7245 	dd->f_notify_dca	= qib_7322_notify_dca;
7246 #endif
7247 	/*
7248 	 * Do remaining PCIe setup and save PCIe values in dd.
7249 	 * Any error printing is already done by the init code.
7250 	 * On return, we have the chip mapped, but chip registers
7251 	 * are not set up until start of qib_init_7322_variables.
7252 	 */
7253 	ret = qib_pcie_ddinit(dd, pdev, ent);
7254 	if (ret < 0)
7255 		goto bail_free;
7256 
7257 	/* initialize chip-specific variables */
7258 	ret = qib_init_7322_variables(dd);
7259 	if (ret)
7260 		goto bail_cleanup;
7261 
7262 	if (qib_mini_init || !dd->num_pports)
7263 		goto bail;
7264 
7265 	/*
7266 	 * Determine number of vectors we want; depends on port count
7267 	 * and number of configured kernel receive queues actually used.
7268 	 * Should also depend on whether sdma is enabled or not, but
7269 	 * that's such a rare testing case it's not worth worrying about.
7270 	 */
7271 	tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
7272 	for (i = 0; i < tabsize; i++)
7273 		if ((i < ARRAY_SIZE(irq_table) &&
7274 		     irq_table[i].port <= dd->num_pports) ||
7275 		    (i >= ARRAY_SIZE(irq_table) &&
7276 		     dd->rcd[i - ARRAY_SIZE(irq_table)]))
7277 			actual_cnt++;
	/* reduce by the per-port ctxts < 2 that won't use MSI-X vectors */
7279 	if (qib_krcvq01_no_msi)
7280 		actual_cnt -= dd->num_pports;
7281 
7282 	tabsize = actual_cnt;
7283 	dd->cspec->msix_entries = kzalloc(tabsize *
7284 			sizeof(struct qib_msix_entry), GFP_KERNEL);
7285 	if (!dd->cspec->msix_entries)
7286 		tabsize = 0;
7287 
7288 	if (qib_pcie_params(dd, 8, &tabsize))
7289 		qib_dev_err(dd,
7290 			"Failed to setup PCIe or interrupts; continuing anyway\n");
7291 	/* may be less than we wanted, if not enough available */
7292 	dd->cspec->num_msix_entries = tabsize;
7293 
7294 	/* setup interrupt handler */
7295 	qib_setup_7322_interrupt(dd, 1);
7296 
7297 	/* clear diagctrl register, in case diags were running and crashed */
7298 	qib_write_kreg(dd, kr_hwdiagctrl, 0);
7299 #ifdef CONFIG_INFINIBAND_QIB_DCA
7300 	if (!dca_add_requester(&pdev->dev)) {
7301 		qib_devinfo(dd->pcidev, "DCA enabled\n");
7302 		dd->flags |= QIB_DCA_ENABLED;
7303 		qib_setup_dca(dd);
7304 	}
7305 #endif
7306 	goto bail;
7307 
7308 bail_cleanup:
7309 	qib_pcie_ddcleanup(dd);
7310 bail_free:
7311 	qib_free_devdata(dd);
7312 	dd = ERR_PTR(ret);
7313 bail:
7314 	return dd;
7315 }
7316 
7317 /*
 * Set the table entry at the specified index from the specified table.
7319  * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
7320  * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
7321  * 'idx' below addresses the correct entry, while its 4 LSBs select the
7322  * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
7323  */
7324 #define DDS_ENT_AMP_LSB 14
7325 #define DDS_ENT_MAIN_LSB 9
7326 #define DDS_ENT_POST_LSB 5
7327 #define DDS_ENT_PRE_XTRA_LSB 3
7328 #define DDS_ENT_PRE_LSB 0
7329 
7330 /*
7331  * Set one entry in the TxDDS table for spec'd port
7332  * ridx picks one of the entries, while tp points
7333  * to the appropriate table entry.
7334  */
7335 static void set_txdds(struct qib_pportdata *ppd, int ridx,
7336 		      const struct txdds_ent *tp)
7337 {
7338 	struct qib_devdata *dd = ppd->dd;
7339 	u32 pack_ent;
7340 	int regidx;
7341 
7342 	/* Get correct offset in chip-space, and in source table */
7343 	regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
7344 	/*
7345 	 * We do not use qib_write_kreg_port() because it was intended
7346 	 * only for registers in the lower "port specific" pages.
	 * So do the index calculation by hand.
7348 	 */
7349 	if (ppd->hw_pidx)
7350 		regidx += (dd->palign / sizeof(u64));
7351 
7352 	pack_ent = tp->amp << DDS_ENT_AMP_LSB;
7353 	pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
7354 	pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
7355 	pack_ent |= tp->post << DDS_ENT_POST_LSB;
7356 	qib_write_kreg(dd, regidx, pack_ent);
7357 	/* Prevent back-to-back writes by hitting scratch */
7358 	qib_write_kreg(ppd->dd, kr_scratch, 0);
7359 }
7360 
7361 static const struct vendor_txdds_ent vendor_txdds[] = {
7362 	{ /* Amphenol 1m 30awg NoEq */
7363 		{ 0x41, 0x50, 0x48 }, "584470002       ",
7364 		{ 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
7365 	},
7366 	{ /* Amphenol 3m 28awg NoEq */
7367 		{ 0x41, 0x50, 0x48 }, "584470004       ",
7368 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
7369 	},
7370 	{ /* Finisar 3m OM2 Optical */
7371 		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
7372 		{  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
7373 	},
7374 	{ /* Finisar 30m OM2 Optical */
7375 		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
7376 		{  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
7377 	},
7378 	{ /* Finisar Default OM2 Optical */
7379 		{ 0x00, 0x90, 0x65 }, NULL,
7380 		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
7381 	},
7382 	{ /* Gore 1m 30awg NoEq */
7383 		{ 0x00, 0x21, 0x77 }, "QSN3300-1       ",
7384 		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
7385 	},
7386 	{ /* Gore 2m 30awg NoEq */
7387 		{ 0x00, 0x21, 0x77 }, "QSN3300-2       ",
7388 		{  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
7389 	},
7390 	{ /* Gore 1m 28awg NoEq */
7391 		{ 0x00, 0x21, 0x77 }, "QSN3800-1       ",
7392 		{  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
7393 	},
7394 	{ /* Gore 3m 28awg NoEq */
7395 		{ 0x00, 0x21, 0x77 }, "QSN3800-3       ",
7396 		{  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
7397 	},
7398 	{ /* Gore 5m 24awg Eq */
7399 		{ 0x00, 0x21, 0x77 }, "QSN7000-5       ",
7400 		{  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
7401 	},
7402 	{ /* Gore 7m 24awg Eq */
7403 		{ 0x00, 0x21, 0x77 }, "QSN7000-7       ",
7404 		{  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
7405 	},
7406 	{ /* Gore 5m 26awg Eq */
7407 		{ 0x00, 0x21, 0x77 }, "QSN7600-5       ",
7408 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
7409 	},
7410 	{ /* Gore 7m 26awg Eq */
7411 		{ 0x00, 0x21, 0x77 }, "QSN7600-7       ",
7412 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  10,  1,  8, 15 },
7413 	},
7414 	{ /* Intersil 12m 24awg Active */
7415 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7416 		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
7417 	},
7418 	{ /* Intersil 10m 28awg Active */
7419 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7420 		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
7421 	},
7422 	{ /* Intersil 7m 30awg Active */
7423 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7424 		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
7425 	},
7426 	{ /* Intersil 5m 32awg Active */
7427 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7428 		{  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
7429 	},
7430 	{ /* Intersil Default Active */
7431 		{ 0x00, 0x30, 0xB4 }, NULL,
7432 		{  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
7433 	},
7434 	{ /* Luxtera 20m Active Optical */
7435 		{ 0x00, 0x25, 0x63 }, NULL,
7436 		{  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0,  12 },
7437 	},
7438 	{ /* Molex 1M Cu loopback */
7439 		{ 0x00, 0x09, 0x3A }, "74763-0025      ",
7440 		{  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
7441 	},
7442 	{ /* Molex 2m 28awg NoEq */
7443 		{ 0x00, 0x09, 0x3A }, "74757-2201      ",
7444 		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
7445 	},
7446 };
7447 
7448 static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7449 	/* amp, pre, main, post */
7450 	{  2, 2, 15,  6 },	/* Loopback */
7451 	{  0, 0,  0,  1 },	/*  2 dB */
7452 	{  0, 0,  0,  2 },	/*  3 dB */
7453 	{  0, 0,  0,  3 },	/*  4 dB */
7454 	{  0, 0,  0,  4 },	/*  5 dB */
7455 	{  0, 0,  0,  5 },	/*  6 dB */
7456 	{  0, 0,  0,  6 },	/*  7 dB */
7457 	{  0, 0,  0,  7 },	/*  8 dB */
7458 	{  0, 0,  0,  8 },	/*  9 dB */
7459 	{  0, 0,  0,  9 },	/* 10 dB */
7460 	{  0, 0,  0, 10 },	/* 11 dB */
7461 	{  0, 0,  0, 11 },	/* 12 dB */
7462 	{  0, 0,  0, 12 },	/* 13 dB */
7463 	{  0, 0,  0, 13 },	/* 14 dB */
7464 	{  0, 0,  0, 14 },	/* 15 dB */
7465 	{  0, 0,  0, 15 },	/* 16 dB */
7466 };
7467 
7468 static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7469 	/* amp, pre, main, post */
7470 	{  2, 2, 15,  6 },	/* Loopback */
7471 	{  0, 0,  0,  8 },	/*  2 dB */
7472 	{  0, 0,  0,  8 },	/*  3 dB */
7473 	{  0, 0,  0,  9 },	/*  4 dB */
7474 	{  0, 0,  0,  9 },	/*  5 dB */
7475 	{  0, 0,  0, 10 },	/*  6 dB */
7476 	{  0, 0,  0, 10 },	/*  7 dB */
7477 	{  0, 0,  0, 11 },	/*  8 dB */
7478 	{  0, 0,  0, 11 },	/*  9 dB */
7479 	{  0, 0,  0, 12 },	/* 10 dB */
7480 	{  0, 0,  0, 12 },	/* 11 dB */
7481 	{  0, 0,  0, 13 },	/* 12 dB */
7482 	{  0, 0,  0, 13 },	/* 13 dB */
7483 	{  0, 0,  0, 14 },	/* 14 dB */
7484 	{  0, 0,  0, 14 },	/* 15 dB */
7485 	{  0, 0,  0, 15 },	/* 16 dB */
7486 };
7487 
7488 static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7489 	/* amp, pre, main, post */
7490 	{  2, 2, 15,  6 },	/* Loopback */
7491 	{  0, 1,  0,  7 },	/*  2 dB (also QMH7342) */
7492 	{  0, 1,  0,  9 },	/*  3 dB (also QMH7342) */
7493 	{  0, 1,  0, 11 },	/*  4 dB */
7494 	{  0, 1,  0, 13 },	/*  5 dB */
7495 	{  0, 1,  0, 15 },	/*  6 dB */
7496 	{  0, 1,  3, 15 },	/*  7 dB */
7497 	{  0, 1,  7, 15 },	/*  8 dB */
7498 	{  0, 1,  7, 15 },	/*  9 dB */
7499 	{  0, 1,  8, 15 },	/* 10 dB */
7500 	{  0, 1,  9, 15 },	/* 11 dB */
7501 	{  0, 1, 10, 15 },	/* 12 dB */
7502 	{  0, 2,  6, 15 },	/* 13 dB */
7503 	{  0, 2,  7, 15 },	/* 14 dB */
7504 	{  0, 2,  8, 15 },	/* 15 dB */
7505 	{  0, 2,  9, 15 },	/* 16 dB */
7506 };
7507 
7508 /*
7509  * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7510  * These are mostly used for mez cards going through connectors
7511  * and backplane traces, but can be used to add other "unusual"
7512  * table values as well.
7513  */
7514 static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7515 	/* amp, pre, main, post */
7516 	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
7517 	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
7518 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
7519 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
7520 	{  0, 0, 0,  3 },	/* QMH7342 backplane settings */
7521 	{  0, 0, 0,  4 },	/* QMH7342 backplane settings */
7522 	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
7523 	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
7524 	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
7525 	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
7526 	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
7527 	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
7528 	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
7529 	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7530 	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7531 	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7532 	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7533 	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7534 };
7535 
7536 static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7537 	/* amp, pre, main, post */
7538 	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
7539 	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
7540 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
7541 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
7542 	{  0, 0, 0,  9 },	/* QMH7342 backplane settings */
7543 	{  0, 0, 0, 10 },	/* QMH7342 backplane settings */
7544 	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
7545 	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
7546 	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
7547 	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
7548 	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
7549 	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
7550 	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
7551 	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7552 	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7553 	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7554 	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7555 	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7556 };
7557 
7558 static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7559 	/* amp, pre, main, post */
7560 	{  0, 1,  0,  4 },	/* QMH7342 backplane settings */
7561 	{  0, 1,  0,  5 },	/* QMH7342 backplane settings */
7562 	{  0, 1,  0,  6 },	/* QMH7342 backplane settings */
7563 	{  0, 1,  0,  8 },	/* QMH7342 backplane settings */
7564 	{  0, 1,  0, 10 },	/* QMH7342 backplane settings */
7565 	{  0, 1,  0, 12 },	/* QMH7342 backplane settings */
7566 	{  0, 1,  4, 15 },	/* QME7342 backplane settings 1.0 */
7567 	{  0, 1,  3, 15 },	/* QME7342 backplane settings 1.0 */
7568 	{  0, 1,  0, 12 },	/* QME7342 backplane settings 1.0 */
7569 	{  0, 1,  0, 11 },	/* QME7342 backplane settings 1.0 */
7570 	{  0, 1,  0,  9 },	/* QME7342 backplane settings 1.0 */
7571 	{  0, 1,  0, 14 },	/* QME7342 backplane settings 1.0 */
7572 	{  0, 1,  2, 15 },	/* QME7342 backplane settings 1.0 */
7573 	{  0, 1,  0, 11 },      /* QME7342 backplane settings 1.1 */
7574 	{  0, 1,  0,  7 },      /* QME7342 backplane settings 1.1 */
7575 	{  0, 1,  0,  9 },      /* QME7342 backplane settings 1.1 */
7576 	{  0, 1,  0,  6 },      /* QME7342 backplane settings 1.1 */
7577 	{  0, 1,  0,  8 },      /* QME7342 backplane settings 1.1 */
7578 };
7579 
7580 static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7581 	/* amp, pre, main, post */
7582 	{ 0, 0, 0, 0 },         /* QME7342 mfg settings */
7583 	{ 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
7584 };
7585 
7586 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7587 					       unsigned atten)
7588 {
7589 	/*
7590 	 * The attenuation table starts at 2dB for entry 1,
7591 	 * with entry 0 being the loopback entry.
7592 	 */
7593 	if (atten <= 2)
7594 		atten = 1;
7595 	else if (atten > TXDDS_TABLE_SZ)
7596 		atten = TXDDS_TABLE_SZ - 1;
7597 	else
7598 		atten--;
7599 	return txdds + atten;
7600 }
7601 
7602 /*
7603  * if override is set, the module parameter txselect has a value
7604  * for this specific port, so use it, rather than our normal mechanism.
7605  */
7606 static void find_best_ent(struct qib_pportdata *ppd,
7607 			  const struct txdds_ent **sdr_dds,
7608 			  const struct txdds_ent **ddr_dds,
7609 			  const struct txdds_ent **qdr_dds, int override)
7610 {
7611 	struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7612 	int idx;
7613 
7614 	/* Search table of known cables */
7615 	for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7616 		const struct vendor_txdds_ent *v = vendor_txdds + idx;
7617 
7618 		if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7619 		    (!v->partnum ||
7620 		     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7621 			*sdr_dds = &v->sdr;
7622 			*ddr_dds = &v->ddr;
7623 			*qdr_dds = &v->qdr;
7624 			return;
7625 		}
7626 	}
7627 
7628 	/* Active cables don't have attenuation so we only set SERDES
7629 	 * settings to account for the attenuation of the board traces. */
7630 	if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7631 		*sdr_dds = txdds_sdr + ppd->dd->board_atten;
7632 		*ddr_dds = txdds_ddr + ppd->dd->board_atten;
7633 		*qdr_dds = txdds_qdr + ppd->dd->board_atten;
7634 		return;
7635 	}
7636 
7637 	if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7638 						      qd->atten[1])) {
7639 		*sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7640 		*ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7641 		*qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7642 		return;
7643 	} else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7644 		/*
7645 		 * If we have no (or incomplete) data from the cable
7646 		 * EEPROM, or no QSFP, or override is set, use the
		 * module parameter value to index into the attenuation
7648 		 * table.
7649 		 */
7650 		idx = ppd->cpspec->no_eep;
7651 		*sdr_dds = &txdds_sdr[idx];
7652 		*ddr_dds = &txdds_ddr[idx];
7653 		*qdr_dds = &txdds_qdr[idx];
7654 	} else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7655 		/* similar to above, but index into the "extra" table. */
7656 		idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7657 		*sdr_dds = &txdds_extra_sdr[idx];
7658 		*ddr_dds = &txdds_extra_ddr[idx];
7659 		*qdr_dds = &txdds_extra_qdr[idx];
7660 	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7661 		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7662 					  TXDDS_MFG_SZ)) {
7663 		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7664 		pr_info("IB%u:%u use idx %u into txdds_mfg\n",
7665 			ppd->dd->unit, ppd->port, idx);
7666 		*sdr_dds = &txdds_extra_mfg[idx];
7667 		*ddr_dds = &txdds_extra_mfg[idx];
7668 		*qdr_dds = &txdds_extra_mfg[idx];
7669 	} else {
7670 		/* this shouldn't happen, it's range checked */
7671 		*sdr_dds = txdds_sdr + qib_long_atten;
7672 		*ddr_dds = txdds_ddr + qib_long_atten;
7673 		*qdr_dds = txdds_qdr + qib_long_atten;
7674 	}
7675 }
7676 
7677 static void init_txdds_table(struct qib_pportdata *ppd, int override)
7678 {
7679 	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7680 	struct txdds_ent *dds;
7681 	int idx;
7682 	int single_ent = 0;
7683 
7684 	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7685 
7686 	/* for mez cards or override, use the selected value for all entries */
7687 	if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7688 		single_ent = 1;
7689 
7690 	/* Fill in the first entry with the best entry found. */
7691 	set_txdds(ppd, 0, sdr_dds);
7692 	set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7693 	set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7694 	if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7695 		QIBL_LINKACTIVE)) {
7696 		dds = (struct txdds_ent *)(ppd->link_speed_active ==
7697 					   QIB_IB_QDR ?  qdr_dds :
7698 					   (ppd->link_speed_active ==
7699 					    QIB_IB_DDR ? ddr_dds : sdr_dds));
7700 		write_tx_serdes_param(ppd, dds);
7701 	}
7702 
7703 	/* Fill in the remaining entries with the default table values. */
7704 	for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7705 		set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7706 		set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7707 			  single_ent ? ddr_dds : txdds_ddr + idx);
7708 		set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7709 			  single_ent ? qdr_dds : txdds_qdr + idx);
7710 	}
7711 }
7712 
7713 #define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7714 #define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7715 #define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7716 #define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7717 #define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7718 #define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7719 #define AHB_TRANS_TRIES 10
7720 
7721 /*
 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
 * 5=subsystem, which is why most calls use "chan + (chan >> 1)"
 * for the channel argument.
7725  */
7726 static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7727 		    u32 data, u32 mask)
7728 {
7729 	u32 rd_data, wr_data, sz_mask;
7730 	u64 trans, acc, prev_acc;
7731 	u32 ret = 0xBAD0BAD;
7732 	int tries;
7733 
7734 	prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7735 	/* From this point on, make sure we return access */
7736 	acc = (quad << 1) | 1;
7737 	qib_write_kreg(dd, KR_AHB_ACC, acc);
7738 
7739 	for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7740 		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7741 		if (trans & AHB_TRANS_RDY)
7742 			break;
7743 	}
7744 	if (tries >= AHB_TRANS_TRIES) {
7745 		qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7746 		goto bail;
7747 	}
7748 
7749 	/* If mask is not all 1s, we need to read, but different SerDes
7750 	 * entities have different sizes
7751 	 */
7752 	sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7753 	wr_data = data & mask & sz_mask;
7754 	if ((~mask & sz_mask) != 0) {
7755 		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7756 		qib_write_kreg(dd, KR_AHB_TRANS, trans);
7757 
7758 		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7759 			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7760 			if (trans & AHB_TRANS_RDY)
7761 				break;
7762 		}
7763 		if (tries >= AHB_TRANS_TRIES) {
7764 			qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7765 				    AHB_TRANS_TRIES);
7766 			goto bail;
7767 		}
7768 		/* Re-read in case host split reads and read data first */
7769 		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7770 		rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7771 		wr_data |= (rd_data & ~mask & sz_mask);
7772 	}
7773 
7774 	/* If mask is not zero, we need to write. */
7775 	if (mask & sz_mask) {
7776 		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7777 		trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7778 		trans |= AHB_WR;
7779 		qib_write_kreg(dd, KR_AHB_TRANS, trans);
7780 
7781 		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7782 			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7783 			if (trans & AHB_TRANS_RDY)
7784 				break;
7785 		}
7786 		if (tries >= AHB_TRANS_TRIES) {
7787 			qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7788 				    AHB_TRANS_TRIES);
7789 			goto bail;
7790 		}
7791 	}
7792 	ret = wr_data;
7793 bail:
7794 	qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7795 	return ret;
7796 }
7797 
7798 static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7799 			     unsigned mask)
7800 {
7801 	struct qib_devdata *dd = ppd->dd;
7802 	int chan;
7803 
7804 	for (chan = 0; chan < SERDES_CHANS; ++chan) {
7805 		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7806 			data, mask);
7807 		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7808 			0, 0);
7809 	}
7810 }
7811 
7812 static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7813 {
7814 	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7815 	u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7816 
7817 	if (enable && !state) {
7818 		pr_info("IB%u:%u Turning LOS on\n",
7819 			ppd->dd->unit, ppd->port);
7820 		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7821 	} else if (!enable && state) {
7822 		pr_info("IB%u:%u Turning LOS off\n",
7823 			ppd->dd->unit, ppd->port);
7824 		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7825 	}
7826 	qib_write_kreg_port(ppd, krp_serdesctrl, data);
7827 }
7828 
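/*
 * SerDes bring-up entry point: rev1 silicon uses the original ("old")
 * sequence, later revisions use the LSI-suggested ("new") sequence.
 */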
7829 static int serdes_7322_init(struct qib_pportdata *ppd)
7830 {
7831 	int ret = 0;
7832 
7833 	if (ppd->dd->cspec->r1)
7834 		ret = serdes_7322_init_old(ppd);
7835 	else
7836 		ret = serdes_7322_init_new(ppd);
7837 	return ret;
7838 }
7839 
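/*
 * Original (rev1) bring-up sequence: load the Tx DDS table, clear any stale
 * Tx de-emphasis overrides, then patch selected SerDes registers and the
 * LoS parameters to IB-friendly defaults and enable LOS detection.
 */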
7840 static int serdes_7322_init_old(struct qib_pportdata *ppd)
7841 {
7842 	u32 le_val;
7843 
7844 	/*
7845 	 * Initialize the Tx DDS tables.  Also done every QSFP event,
7846 	 * for adapters with QSFP
7847 	 */
7848 	init_txdds_table(ppd, 0);
7849 
7850 	/* ensure no tx overrides from earlier driver loads */
7851 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
7852 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7853 		reset_tx_deemphasis_override));
7854 
7855 	/* Patch some SerDes defaults to "Better for IB" */
7856 	/* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7857 	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7858 
7859 	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7860 	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7861 	/* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7862 	ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7863 
7864 	/* May be overridden in qsfp_7322_event */
7865 	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7866 	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7867 
7868 	/* Enable LE1 adaptation for all but QME, where it is disabled */
7869 	le_val = IS_QME(ppd->dd) ? 0 : 1;
7870 	ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7871 
7872 	/* Clear cmode-override, may be set from older driver */
7873 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7874 
7875 	/* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7876 	ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7877 
7878 	/* setup LoS params; these are subsystem, so chan == 5 */
7879 	/* LoS filter threshold_count on, ch 0-3, set to 8 */
7880 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7881 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7882 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7883 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7884 
7885 	/* LoS filter threshold_count off, ch 0-3, set to 4 */
7886 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7887 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7888 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7889 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7890 
7891 	/* LoS filter select enabled */
7892 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7893 
7894 	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
7895 	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7896 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7897 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7898 
7899 	serdes_7322_los_enable(ppd, 1);
7900 
7901 	/* rxbistena: set to 0 to avoid side effects if it is switched later */
7902 	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7903 
7904 	/* Configure 4 DFE taps, and only they adapt */
7905 	ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7906 
7907 	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7908 	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7909 	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7910 
7911 	/*
7912 	 * Set receive adaptation mode.  SDR and DDR adaptation are
7913 	 * always on, and QDR is initially enabled; later disabled.
7914 	 */
7915 	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7916 	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7917 	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7918 			    ppd->dd->cspec->r1 ?
7919 			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7920 	ppd->cpspec->qdr_dfe_on = 1;
7921 
7922 	/* FLoop LOS gate: PPM filter  enabled */
7923 	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7924 
7925 	/* rx offset center enabled */
7926 	ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7927 
7928 	if (!ppd->dd->cspec->r1) {
7929 		ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7930 		ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7931 	}
7932 
7933 	/* Set the frequency loop bandwidth to 15 */
7934 	ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7935 
7936 	return 0;
7937 }
7938 
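/*
 * LSI-suggested bring-up sequence: quiesce the adaptation and clock loops,
 * reset the receiver, run RX latch calibration (polled for up to 500 msec),
 * then re-enable the loops, DFE and adaptation, and finally load the
 * Tx DDS table.
 */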
7939 static int serdes_7322_init_new(struct qib_pportdata *ppd)
7940 {
7941 	unsigned long tend;
7942 	u32 le_val, rxcaldone;
7943 	int chan, chan_done = (1 << SERDES_CHANS) - 1;
7944 
7945 	/* Clear cmode-override, may be set from older driver */
7946 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7947 
7948 	/* ensure no tx overrides from earlier driver loads */
7949 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
7950 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7951 		reset_tx_deemphasis_override));
7952 
7953 	/* START OF LSI SUGGESTED SERDES BRINGUP */
7954 	/* Reset - Calibration Setup */
7955 	/*       Stop DFE adaptation */
7956 	ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
7957 	/*       Disable LE1 */
7958 	ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
7959 	/*       Disable autoadapt for LE1 */
7960 	ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
7961 	/*       Disable LE2 */
7962 	ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
7963 	/*       Disable VGA */
7964 	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7965 	/*       Disable AFE Offset Cancel */
7966 	ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
7967 	/*       Disable Timing Loop */
7968 	ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
7969 	/*       Disable Frequency Loop */
7970 	ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
7971 	/*       Disable Baseline Wander Correction */
7972 	ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
7973 	/*       Disable RX Calibration */
7974 	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7975 	/*       Disable RX Offset Calibration */
7976 	ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
7977 	/*       Select BB CDR */
7978 	ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
7979 	/*       CDR Step Size */
7980 	ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
7981 	/*       Enable phase Calibration */
7982 	ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
7983 	/*       DFE Bandwidth [2:14-12] */
7984 	ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
7985 	/*       DFE Config (4 taps only) */
7986 	ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
7987 	/*       Gain Loop Bandwidth */
7988 	if (!ppd->dd->cspec->r1) {
7989 		ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
7990 		ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
7991 	} else {
7992 		ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
7993 	}
7994 	/*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
7995 	/*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
7996 	/*       Data Rate Select [5:7-6] (leave as default) */
7997 	/*       RX Parallel Word Width [3:10-8] (leave as default) */
7998 
7999 	/* RX RESET */
8000 	/*       Single- or Multi-channel reset */
8001 	/*       RX Analog reset */
8002 	/*       RX Digital reset */
8003 	ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
8004 	msleep(20);
8005 	/*       RX Analog reset */
8006 	ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
8007 	msleep(20);
8008 	/*       RX Digital reset */
8009 	ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
8010 	msleep(20);
8011 
8012 	/* setup LoS params; these are subsystem, so chan == 5 */
8013 	/* LoS filter threshold_count on, ch 0-3, set to 8 */
8014 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
8015 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
8016 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
8017 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
8018 
8019 	/* LoS filter threshold_count off, ch 0-3, set to 4 */
8020 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
8021 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
8022 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
8023 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
8024 
8025 	/* LoS filter select enabled */
8026 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
8027 
8028 	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
8029 	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
8030 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
8031 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
8032 
8033 	/* Turn on LOS on initial SERDES init */
8034 	serdes_7322_los_enable(ppd, 1);
8035 	/* FLoop LOS gate: PPM filter  enabled */
8036 	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
8037 
8038 	/* RX LATCH CALIBRATION */
8039 	/*       Enable Eyefinder Phase Calibration latch */
8040 	ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
8041 	/*       Enable RX Offset Calibration latch */
8042 	ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
8043 	msleep(20);
8044 	/*       Start Calibration */
8045 	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
8046 	tend = jiffies + msecs_to_jiffies(500);
8047 	while (chan_done && !time_is_before_jiffies(tend)) {
8048 		msleep(20);
8049 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
8050 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8051 					    (chan + (chan >> 1)),
8052 					    25, 0, 0);
8053 			if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
8054 			    (~chan_done & (1 << chan)) == 0)
8055 				chan_done &= ~(1 << chan);
8056 		}
8057 	}
8058 	if (chan_done) {
8059 		pr_info("Serdes %d calibration not done after 0.5 sec: 0x%x\n",
8060 			 IBSD(ppd->hw_pidx), chan_done);
8061 	} else {
8062 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
8063 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8064 					    (chan + (chan >> 1)),
8065 					    25, 0, 0);
8066 			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
8067 				pr_info("Serdes %d chan %d calibration failed\n",
8068 					IBSD(ppd->hw_pidx), chan);
8069 		}
8070 	}
8071 
8072 	/*       Turn off Calibration */
8073 	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8074 	msleep(20);
8075 
8076 	/* BRING RX UP */
8077 	/*       Set LE2 value (May be overridden in qsfp_7322_event) */
8078 	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
8079 	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
8080 	/*       Set LE2 Loop bandwidth */
8081 	ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
8082 	/*       Enable LE2 */
8083 	ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
8084 	msleep(20);
8085 	/*       Enable H0 only */
8086 	ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
8087 	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
8088 	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
8089 	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
8090 	/*       Enable VGA */
8091 	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8092 	msleep(20);
8093 	/*       Set Frequency Loop Bandwidth */
8094 	ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
8095 	/*       Enable Frequency Loop */
8096 	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
8097 	/*       Set Timing Loop Bandwidth */
8098 	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
8099 	/*       Enable Timing Loop */
8100 	ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
8101 	msleep(50);
8102 	/*       Enable DFE
8103 	 *       Set receive adaptation mode.  SDR and DDR adaptation are
8104 	 *       always on, and QDR is initially enabled; later disabled.
8105 	 */
8106 	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
8107 	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
8108 	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
8109 			    ppd->dd->cspec->r1 ?
8110 			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
8111 	ppd->cpspec->qdr_dfe_on = 1;
8112 	/*       Disable LE1  */
8113 	ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
8114 	/*       Disable auto adapt for LE1 */
8115 	ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
8116 	msleep(20);
8117 	/*       Enable AFE Offset Cancel */
8118 	ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
8119 	/*       Enable Baseline Wander Correction */
8120 	ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
8121 	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
8122 	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
8123 	/* VGA output common mode */
8124 	ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
8125 
8126 	/*
8127 	 * Initialize the Tx DDS tables.  Also done every QSFP event,
8128 	 * for adapters with QSFP
8129 	 */
8130 	init_txdds_table(ppd, 0);
8131 
8132 	return 0;
8133 }
8134 
8135 /* Start of QMH serdes parameter adjustment */
8136 
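/* Write a 6-bit manual (H1) code for one channel: register 9, bits [14:9]. */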
8137 static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8138 {
8139 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8140 		9, code << 9, 0x3f << 9);
8141 }
8142 
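/*
 * Enable or disable manual H1 mode for one channel (register 1, bits
 * [14:10]); the tapenable argument is currently unused.
 */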
8143 static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8144 	int enable, u32 tapenable)
8145 {
8146 	if (enable)
8147 		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8148 			1, 3 << 10, 0x1f << 10);
8149 	else
8150 		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8151 			1, 0, 0x1f << 10);
8152 }
8153 
8154 /* Set clock to 1, 0, 1, 0 */
8155 static void clock_man(struct qib_pportdata *ppd, int chan)
8156 {
8157 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8158 		4, 0x4000, 0x4000);
8159 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8160 		4, 0, 0x4000);
8161 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8162 		4, 0x4000, 0x4000);
8163 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8164 		4, 0, 0x4000);
8165 }
8166 
8167 /*
8168  * Write the current Tx serdes pre, post, main, and amp settings into the
8169  * serdes.  The caller must pass the settings appropriate for the current
8170  * speed, or not care whether they are correct for the current speed.
8171  */
8172 static void write_tx_serdes_param(struct qib_pportdata *ppd,
8173 				  struct txdds_ent *txdds)
8174 {
8175 	u64 deemph;
8176 
8177 	deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
8178 	/* field names for amp, main, post, pre, respectively */
8179 	deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
8180 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
8181 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
8182 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
8183 
8184 	deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8185 			   tx_override_deemphasis_select);
8186 	deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8187 		    txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8188 				       txampcntl_d2a);
8189 	deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8190 		     txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8191 				   txc0_ena);
8192 	deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8193 		     txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8194 				    txcp1_ena);
8195 	deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8196 		     txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8197 				    txcn1_ena);
8198 	qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
8199 }
8200 
8201 /*
8202  * Set the parameters for mez cards on link bounce, so they are
8203  * always exactly what was requested.  Similar logic to init_txdds
8204  * but does just the serdes.
8205  */
8206 static void adj_tx_serdes(struct qib_pportdata *ppd)
8207 {
8208 	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
8209 	struct txdds_ent *dds;
8210 
8211 	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8212 	dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8213 		qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8214 				ddr_dds : sdr_dds));
8215 	write_tx_serdes_param(ppd, dds);
8216 }
8217 
8218 /* set QDR forced value for H1, if needed */
8219 static void force_h1(struct qib_pportdata *ppd)
8220 {
8221 	int chan;
8222 
8223 	ppd->cpspec->qdr_reforce = 0;
8224 	if (!ppd->dd->cspec->r1)
8225 		return;
8226 
8227 	for (chan = 0; chan < SERDES_CHANS; chan++) {
8228 		set_man_mode_h1(ppd, chan, 1, 0);
8229 		set_man_code(ppd, chan, ppd->cpspec->h1_val);
8230 		clock_man(ppd, chan);
8231 		set_man_mode_h1(ppd, chan, 0, 0);
8232 	}
8233 }
8234 
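/*
 * The qib_r_*() helpers below bit-bang JTAG-style scan chains through the
 * SPC JTAG access register: a chain (bist_en) select field, a 2-bit opcode
 * (NOP/SHIFT/UPDATE) at bit 3, TDI driven at bit 2, TDO sampled at bit 1,
 * and a ready flag at bit 0.
 */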
8235 #define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
8236 #define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
8237 
8238 #define R_OPCODE_LSB 3
8239 #define R_OP_NOP 0
8240 #define R_OP_SHIFT 2
8241 #define R_OP_UPDATE 3
8242 #define R_TDI_LSB 2
8243 #define R_TDO_LSB 1
8244 #define R_RDY 1
8245 
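/* Take ownership of the JTAG access interface (assert SPC_JTAG_ACCESS_EN). */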
8246 static int qib_r_grab(struct qib_devdata *dd)
8247 {
8248 	u64 val = SJA_EN;
8249 
8250 	qib_write_kreg(dd, kr_r_access, val);
8251 	qib_read_kreg32(dd, kr_scratch);
8252 	return 0;
8253 }
8254 
8255 /* qib_r_wait_for_rdy() not only waits for the ready bit, it also
8256  * returns the current state of R_TDO on success, or -1 on timeout.
8257  */
8258 static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8259 {
8260 	u64 val;
8261 	int timeout;
8262 
8263 	for (timeout = 0; timeout < 100 ; ++timeout) {
8264 		val = qib_read_kreg32(dd, kr_r_access);
8265 		if (val & R_RDY)
8266 			return (val >> R_TDO_LSB) & 1;
8267 	}
8268 	return -1;
8269 }
8270 
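/*
 * Shift "len" bits through the selected scan chain.  Bits are driven from
 * inp (if non-NULL) and the bits captured from TDO are stored into outp
 * (if non-NULL).  Returns the number of bits shifted, or a negative value
 * if the interface never became ready.
 */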
8271 static int qib_r_shift(struct qib_devdata *dd, int bisten,
8272 		       int len, u8 *inp, u8 *outp)
8273 {
8274 	u64 valbase, val;
8275 	int ret, pos;
8276 
8277 	valbase = SJA_EN | (bisten << BISTEN_LSB) |
8278 		(R_OP_SHIFT << R_OPCODE_LSB);
8279 	ret = qib_r_wait_for_rdy(dd);
8280 	if (ret < 0)
8281 		goto bail;
8282 	for (pos = 0; pos < len; ++pos) {
8283 		val = valbase;
8284 		if (outp) {
8285 			outp[pos >> 3] &= ~(1 << (pos & 7));
8286 			outp[pos >> 3] |= (ret << (pos & 7));
8287 		}
8288 		if (inp) {
8289 			int tdi = inp[pos >> 3] >> (pos & 7);
8290 
8291 			val |= ((tdi & 1) << R_TDI_LSB);
8292 		}
8293 		qib_write_kreg(dd, kr_r_access, val);
8294 		qib_read_kreg32(dd, kr_scratch);
8295 		ret = qib_r_wait_for_rdy(dd);
8296 		if (ret < 0)
8297 			break;
8298 	}
8299 	/* Restore to NOP between operations. */
8300 	val =  SJA_EN | (bisten << BISTEN_LSB);
8301 	qib_write_kreg(dd, kr_r_access, val);
8302 	qib_read_kreg32(dd, kr_scratch);
8303 	ret = qib_r_wait_for_rdy(dd);
8304 
8305 	if (ret >= 0)
8306 		ret = pos;
8307 bail:
8308 	return ret;
8309 }
8310 
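/* Issue an UPDATE opcode to latch the previously shifted chain contents. */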
8311 static int qib_r_update(struct qib_devdata *dd, int bisten)
8312 {
8313 	u64 val;
8314 	int ret;
8315 
8316 	val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
8317 	ret = qib_r_wait_for_rdy(dd);
8318 	if (ret >= 0) {
8319 		qib_write_kreg(dd, kr_r_access, val);
8320 		qib_read_kreg32(dd, kr_scratch);
8321 	}
8322 	return ret;
8323 }
8324 
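/*
 * Scan-chain selects and lengths (in bits) for the port-select, "AT" and
 * ETM chains used by setup_7322_link_recovery(); the byte arrays below
 * hold the chain images that get shifted in.
 */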
8325 #define BISTEN_PORT_SEL 15
8326 #define LEN_PORT_SEL 625
8327 #define BISTEN_AT 17
8328 #define LEN_AT 156
8329 #define BISTEN_ETM 16
8330 #define LEN_ETM 632
8331 
8332 #define BIT2BYTE(x) (((x) +  BITS_PER_BYTE - 1) / BITS_PER_BYTE)
8333 
8334 /* These are common to all IB port use cases. */
8335 static u8 reset_at[BIT2BYTE(LEN_AT)] = {
8336 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8337 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8338 };
8339 static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
8340 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8341 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8342 	0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
8343 	0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
8344 	0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
8345 	0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
8346 	0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8347 	0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
8348 };
8349 static u8 at[BIT2BYTE(LEN_AT)] = {
8350 	0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
8351 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8352 };
8353 
8354 /* used for IB1 or IB2, only one in use */
8355 static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
8356 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8357 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8358 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8359 	0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
8360 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8361 	0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
8362 	0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
8363 	0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
8364 };
8365 
8366 /* used when both IB1 and IB2 are in use */
8367 static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
8368 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8369 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
8370 	0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8371 	0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
8372 	0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
8373 	0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
8374 	0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
8375 	0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
8376 };
8377 
8378 /* used when only IB1 is in use */
8379 static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
8380 	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8381 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8382 	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8383 	0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8384 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8385 	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8386 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8387 	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8388 };
8389 
8390 /* used when only IB2 is in use */
8391 static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
8392 	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
8393 	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
8394 	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8395 	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8396 	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
8397 	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8398 	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8399 	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
8400 };
8401 
8402 /* used when both IB1 and IB2 are in use */
8403 static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
8404 	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8405 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8406 	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8407 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8408 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8409 	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
8410 	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8411 	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8412 };
8413 
8414 /*
8415  * Do setup to properly handle IB link recovery; if "both" is set, we are
8416  * initializing to cover both ports; otherwise we are initializing to
8417  * cover a single port card, or the port has reached INIT and we may
8418  * need to switch coverage types.
8419  */
8420 static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8421 {
8422 	u8 *portsel, *etm;
8423 	struct qib_devdata *dd = ppd->dd;
8424 
8425 	if (!ppd->dd->cspec->r1)
8426 		return;
8427 	if (!both) {
8428 		dd->cspec->recovery_ports_initted++;
8429 		ppd->cpspec->recovery_init = 1;
8430 	}
8431 	if (!both && dd->cspec->recovery_ports_initted == 1) {
8432 		portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8433 		etm = atetm_1port;
8434 	} else {
8435 		portsel = portsel_2port;
8436 		etm = atetm_2port;
8437 	}
8438 
8439 	if (qib_r_grab(dd) < 0 ||
8440 		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8441 		qib_r_update(dd, BISTEN_ETM) < 0 ||
8442 		qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8443 		qib_r_update(dd, BISTEN_AT) < 0 ||
8444 		qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8445 			    portsel, NULL) < 0 ||
8446 		qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8447 		qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8448 		qib_r_update(dd, BISTEN_AT) < 0 ||
8449 		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8450 		qib_r_update(dd, BISTEN_ETM) < 0)
8451 		qib_dev_err(dd, "Failed IB link recovery setup\n");
8452 }
8453 
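/*
 * Single-port recovery check: briefly assert freeze mode and read the
 * active freeze mask.  If it reads back as zero, the chip is wedged and
 * the HCA is unusable until power-cycled; otherwise clear the SerDes
 * PClk-not-detect error, drop out of freeze, and take the IBC back out
 * of reset (re-disabling the link if it was administratively disabled).
 */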
8454 static void check_7322_rxe_status(struct qib_pportdata *ppd)
8455 {
8456 	struct qib_devdata *dd = ppd->dd;
8457 	u64 fmask;
8458 
8459 	if (dd->cspec->recovery_ports_initted != 1)
8460 		return; /* rest doesn't apply to dualport */
8461 	qib_write_kreg(dd, kr_control, dd->control |
8462 		       SYM_MASK(Control, FreezeMode));
8463 	(void)qib_read_kreg64(dd, kr_scratch);
8464 	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
8465 	fmask = qib_read_kreg64(dd, kr_act_fmask);
8466 	if (!fmask) {
8467 		/*
8468 		 * Require a power cycle before we'll work again; make sure
8469 		 * we get no more interrupts, and don't turn off freeze
8470 		 * mode.
8471 		 */
8472 		ppd->dd->cspec->stay_in_freeze = 1;
8473 		qib_7322_set_intr_state(ppd->dd, 0);
8474 		qib_write_kreg(dd, kr_fmask, 0ULL);
8475 		qib_dev_err(dd, "HCA unusable until powercycled\n");
8476 		return; /* eventually reset */
8477 	}
8478 
8479 	qib_write_kreg(ppd->dd, kr_hwerrclear,
8480 	    SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8481 
8482 	/* don't do the full clear_freeze(), not needed for this */
8483 	qib_write_kreg(dd, kr_control, dd->control);
8484 	qib_read_kreg32(dd, kr_scratch);
8485 	/* take IBC out of reset */
8486 	if (ppd->link_speed_supported) {
8487 		ppd->cpspec->ibcctrl_a &=
8488 			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8489 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
8490 				    ppd->cpspec->ibcctrl_a);
8491 		qib_read_kreg32(dd, kr_scratch);
8492 		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8493 			qib_set_ib_7322_lstate(ppd, 0,
8494 				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8495 	}
8496 }
8497