/*
 * Copyright (c) 2013 - 2017 Intel Corporation. All rights reserved.
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*
 * This file contains all of the code that is specific to the
 * QLogic_IB 6120 PCIe chip.
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <rdma/ib_verbs.h>

#include "qib.h"
#include "qib_6120_regs.h"

static void qib_6120_setup_setextled(struct qib_pportdata *, u32);
static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op);
static u8 qib_6120_phys_portstate(u64);
static u32 qib_6120_iblink_state(u64);

/*
 * This file contains all the chip-specific register information and
 * access functions for the Intel Intel_IB PCI-Express chip.
 *
 */

/* KREG_IDX uses machine-generated #defines */
#define KREG_IDX(regname) (QIB_6120_##regname##_OFFS / sizeof(u64))
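/*
 * For illustration: KREG_IDX(Control) expands to
 * (QIB_6120_Control_OFFS / sizeof(u64)), converting the machine-generated
 * byte offset into a u64 index into the kregbase mapping used by the
 * register access routines further down in this file.
 */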

/* Use defines to tie machine-generated names to lower-case names */
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_rcvctrl KREG_IDX(RcvCtrl)
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_partitionkey KREG_IDX(RcvPartitionKey)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_ibcstatus KREG_IDX(IBCStatus)
#define kr_ibcctrl KREG_IDX(IBCCtrl)
#define kr_sendbuffererror KREG_IDX(SendBufErr0)
#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_palign KREG_IDX(PageAlign)
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_sendpioavailaddr KREG_IDX(SendPIOAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendPIOBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendPIOBufCnt)
#define kr_sendpiosize KREG_IDX(SendPIOSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_control KREG_IDX(Control)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_revision KREG_IDX(Revision)
#define kr_portcnt KREG_IDX(PortCnt)
#define kr_serdes_cfg0 KREG_IDX(SerdesCfg0)
#define kr_serdes_cfg1 (kr_serdes_cfg0 + 1)
#define kr_serdes_stat KREG_IDX(SerdesStat)
#define kr_xgxs_cfg KREG_IDX(XGXSCfg)

/* These must only be written via qib_write_kreg_ctxt() */
#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)

#define CREG_IDX(regname) ((QIB_6120_##regname##_OFFS - \
			QIB_6120_LBIntCnt_OFFS) / sizeof(u64))
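/*
 * Counter registers are indexed relative to LBIntCnt, so for example
 * CREG_IDX(RxDataPktCnt) is
 * (QIB_6120_RxDataPktCnt_OFFS - QIB_6120_LBIntCnt_OFFS) / sizeof(u64),
 * a u64 index into the counter register block mapped at cspec->cregbase.
 */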

#define cr_badformat CREG_IDX(RxBadFormatCnt)
#define cr_erricrc CREG_IDX(RxICRCErrCnt)
#define cr_errlink CREG_IDX(RxLinkProblemCnt)
#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlErrCnt)
#define cr_err_rlen CREG_IDX(RxLenErrCnt)
#define cr_errslen CREG_IDX(TxLenErrCnt)
#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define cr_lbint CREG_IDX(LBIntCnt)
#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
#define cr_pktrcv CREG_IDX(RxDataPktCnt)
#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define cr_pktsend CREG_IDX(TxDataPktCnt)
#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
#define cr_rcvebp CREG_IDX(RxEBPCnt)
#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
#define cr_sendstall CREG_IDX(TxFlowStallCnt)
#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
#define cr_wordrcv CREG_IDX(RxDwordCnt)
#define cr_wordsend CREG_IDX(TxDwordCnt)
#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)

#define SYM_RMASK(regname, fldname) ((u64)              \
	QIB_6120_##regname##_##fldname##_RMASK)
#define SYM_MASK(regname, fldname) ((u64)               \
	QIB_6120_##regname##_##fldname##_RMASK <<       \
	 QIB_6120_##regname##_##fldname##_LSB)
#define SYM_LSB(regname, fldname) (QIB_6120_##regname##_##fldname##_LSB)

#define SYM_FIELD(value, regname, fldname) ((u64) \
	(((value) >> SYM_LSB(regname, fldname)) & \
	 SYM_RMASK(regname, fldname)))
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
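/*
 * Worked example of the helpers above (purely illustrative):
 * ERR_MASK(RcvHdrLenErr) is SYM_MASK(ErrMask, RcvHdrLenErrMask), i.e. the
 * field's RMASK shifted to its LSB position, giving an in-place bit mask,
 * while SYM_FIELD(val, regname, fldname) goes the other way and extracts
 * the field value from a register image.  The code below relies on
 * ErrMask, ErrStatus and ErrClear sharing the same bit layout, e.g.
 * qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE).
 */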

/* link training states, from IBC */
#define IB_6120_LT_STATE_DISABLED        0x00
#define IB_6120_LT_STATE_LINKUP          0x01
#define IB_6120_LT_STATE_POLLACTIVE      0x02
#define IB_6120_LT_STATE_POLLQUIET       0x03
#define IB_6120_LT_STATE_SLEEPDELAY      0x04
#define IB_6120_LT_STATE_SLEEPQUIET      0x05
#define IB_6120_LT_STATE_CFGDEBOUNCE     0x08
#define IB_6120_LT_STATE_CFGRCVFCFG      0x09
#define IB_6120_LT_STATE_CFGWAITRMT      0x0a
#define IB_6120_LT_STATE_CFGIDLE         0x0b
#define IB_6120_LT_STATE_RECOVERRETRAIN  0x0c
#define IB_6120_LT_STATE_RECOVERWAITRMT  0x0e
#define IB_6120_LT_STATE_RECOVERIDLE     0x0f

/* link state machine states from IBC */
#define IB_6120_L_STATE_DOWN             0x0
#define IB_6120_L_STATE_INIT             0x1
#define IB_6120_L_STATE_ARM              0x2
#define IB_6120_L_STATE_ACTIVE           0x3
#define IB_6120_L_STATE_ACT_DEFER        0x4

static const u8 qib_6120_physportstate[0x20] = {
	[IB_6120_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[IB_6120_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[IB_6120_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[IB_6120_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[IB_6120_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[IB_6120_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[IB_6120_LT_STATE_CFGDEBOUNCE] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_6120_LT_STATE_CFGRCVFCFG] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_6120_LT_STATE_CFGWAITRMT] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_6120_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_6120_LT_STATE_RECOVERRETRAIN] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_6120_LT_STATE_RECOVERWAITRMT] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_6120_LT_STATE_RECOVERIDLE] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};


struct qib_chip_specific {
	u64 __iomem *cregbase;
	u64 *cntrs;
	u64 *portcntrs;
	void *dummy_hdrq;   /* used after ctxt close */
	dma_addr_t dummy_hdrq_phys;
	spinlock_t kernel_tid_lock; /* no back to back kernel TID writes */
	spinlock_t user_tid_lock; /* no back to back user TID writes */
	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
	u64 hwerrmask;
	u64 errormask;
	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
	u64 gpio_mask; /* shadow the gpio mask register */
	u64 extctrl; /* shadow the gpio output enable, etc... */
	/*
	 * these 5 fields are used to establish deltas for IB symbol
	 * errors and linkrecovery errors.  They can be reported on
	 * some chips during link negotiation prior to INIT, and with
	 * DDR when faking DDR negotiations with non-IBTA switches.
	 * The chip counters are adjusted at driver unload if there is
	 * a non-zero delta.
	 */
	u64 ibdeltainprog;
	u64 ibsymdelta;
	u64 ibsymsnap;
	u64 iblnkerrdelta;
	u64 iblnkerrsnap;
	u64 ibcctrl; /* shadow for kr_ibcctrl */
	u32 lastlinkrecov; /* link recovery issue */
	u32 cntrnamelen;
	u32 portcntrnamelen;
	u32 ncntrs;
	u32 nportcntrs;
	/* used with gpio interrupts to implement IB counters */
	u32 rxfc_unsupvl_errs;
	u32 overrun_thresh_errs;
	/*
	 * these count only cases where _successive_ LocalLinkIntegrity
	 * errors were seen in the receive headers of IB standard packets
	 */
	u32 lli_errs;
	u32 lli_counter;
	u64 lli_thresh;
	u64 sword; /* total dwords sent (sample result) */
	u64 rword; /* total dwords received (sample result) */
	u64 spkts; /* total packets sent (sample result) */
	u64 rpkts; /* total packets received (sample result) */
	u64 xmit_wait; /* # of ticks no data sent (sample result) */
	struct timer_list pma_timer;
	struct qib_pportdata *ppd;
	char emsgbuf[128];
	char bitsmsgbuf[64];
	u8 pma_sample_status;
};

/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3         /* move to 0x31 */
#define QLOGIC_IB_IBCC_LINKCMD_SHIFT 18
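/*
 * Illustrative only: a link command is built by shifting one of the
 * values above into its field before writing kr_ibcctrl, e.g. moving
 * the link to ARMED while polling for a peer would combine
 * (QLOGIC_IB_IBCC_LINKCMD_ARMED << QLOGIC_IB_IBCC_LINKCMD_SHIFT) with
 * (QLOGIC_IB_IBCC_LINKINITCMD_POLL << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT).
 */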

/*
 * We could have a single register get/put routine, that takes a group type,
 * but this is somewhat clearer and cleaner.  It also gives us some error
 * checking.  64 bit register reads should always work, but are inefficient
 * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
 * so we use kreg32 wherever possible.  User register and counter register
 * reads are always 32 bit reads, so only one form of those routines.
 */

/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns -1 on errors (not distinguishable from valid contents at
 * runtime; we may add a separate error variable at some point).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
				  enum qib_ureg regno, int ctxt)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;

	if (dd->userbase)
		return readl(regno + (u64 __iomem *)
			     ((char __iomem *)dd->userbase +
			      dd->ureg_align * ctxt));
	else
		return readl(regno + (u64 __iomem *)
			     (dd->uregbase +
			      (char __iomem *)dd->kregbase +
			      dd->ureg_align * ctxt));
}
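/*
 * Typical use (illustrative): reading the per-context receive header
 * tail kept in the user register page, e.g.
 *	u32 tail = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
 * where ur_rcvhdrtail is one of the enum qib_ureg indices from qib.h.
 */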

/**
 * qib_write_ureg - write virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
				  enum qib_ureg regno, u64 value, int ctxt)
{
	u64 __iomem *ubase;

	if (dd->userbase)
		ubase = (u64 __iomem *)
			((char __iomem *) dd->userbase +
			 dd->ureg_align * ctxt);
	else
		ubase = (u64 __iomem *)
			(dd->uregbase +
			 (char __iomem *) dd->kregbase +
			 dd->ureg_align * ctxt);

	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &ubase[regno]);
}

static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
				  const u16 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readl((u32 __iomem *)&dd->kregbase[regno]);
}

static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
				  const u16 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;

	return readq(&dd->kregbase[regno]);
}

static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u16 regno, u64 value)
{
	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &dd->kregbase[regno]);
}

/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
				       const u16 regno, unsigned ctxt,
				       u64 value)
{
	qib_write_kreg(dd, regno + ctxt, value);
}

static inline void write_6120_creg(const struct qib_devdata *dd,
				   u16 regno, u64 value)
{
	if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &dd->cspec->cregbase[regno]);
}

static inline u64 read_6120_creg(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_6120_creg32(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(&dd->cspec->cregbase[regno]);
}

/* kr_control bits */
#define QLOGIC_IB_C_RESET 1U

/* kr_intstatus, kr_intclear, kr_intmask bits */
#define QLOGIC_IB_I_RCVURG_MASK ((1U << 5) - 1)
#define QLOGIC_IB_I_RCVURG_SHIFT 0
#define QLOGIC_IB_I_RCVAVAIL_MASK ((1U << 5) - 1)
#define QLOGIC_IB_I_RCVAVAIL_SHIFT 12

#define QLOGIC_IB_C_FREEZEMODE 0x00000002
#define QLOGIC_IB_C_LINKENABLE 0x00000004
#define QLOGIC_IB_I_ERROR               0x0000000080000000ULL
#define QLOGIC_IB_I_SPIOSENT            0x0000000040000000ULL
#define QLOGIC_IB_I_SPIOBUFAVAIL        0x0000000020000000ULL
#define QLOGIC_IB_I_GPIO                0x0000000010000000ULL
#define QLOGIC_IB_I_BITSEXTANT \
		((QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
		(QLOGIC_IB_I_RCVAVAIL_MASK << \
		 QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
		QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
		QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO)
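/*
 * QLOGIC_IB_I_BITSEXTANT therefore covers every interrupt-status bit the
 * driver knows about: the 5 RcvUrg bits at shift 0, the 5 RcvAvail bits
 * at shift 12, plus the Error, SPioSent, SPioBufAvail and GPIO summary
 * bits defined above (used for sanity checking, like IB_HWE_BITSEXTANT
 * further down).
 */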

/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK  0x000000000000003fULL
#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
#define QLOGIC_IB_HWE_PCIEPOISONEDTLP      0x0000000010000000ULL
#define QLOGIC_IB_HWE_PCIECPLTIMEOUT       0x0000000020000000ULL
#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH    0x0000000040000000ULL
#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM    0x0000000080000000ULL
#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM    0x0000000100000000ULL
#define QLOGIC_IB_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
#define QLOGIC_IB_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
#define QLOGIC_IB_HWE_PCIE1PLLFAILED       0x0400000000000000ULL
#define QLOGIC_IB_HWE_PCIE0PLLFAILED       0x0800000000000000ULL
#define QLOGIC_IB_HWE_SERDESPLLFAILED      0x1000000000000000ULL


/* kr_extstatus bits */
#define QLOGIC_IB_EXTS_FREQSEL 0x2
#define QLOGIC_IB_EXTS_SERDESSEL 0x4
#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST     0x0000000000004000
#define QLOGIC_IB_EXTS_MEMBIST_FOUND       0x0000000000008000

/* kr_xgxsconfig bits */
#define QLOGIC_IB_XGXS_RESET          0x5ULL

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0

/* Bits in GPIO for the added IB link interrupts */
#define GPIO_RXUVL_BIT 3
#define GPIO_OVRUN_BIT 4
#define GPIO_LLI_BIT 5
#define GPIO_ERRINTR_MASK 0x38


#define QLOGIC_IB_RT_BUFSIZE_MASK 0xe0000000ULL
#define QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid) \
	((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
#define QLOGIC_IB_RT_BUFSIZE(tid) (1 << QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid))
#define QLOGIC_IB_RT_IS_VALID(tid) \
	(((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) && \
	 ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) != QLOGIC_IB_RT_BUFSIZE_MASK)))
#define QLOGIC_IB_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
#define QLOGIC_IB_RT_ADDR_SHIFT 10
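/*
 * Worked example of the expected-TID (RT) encoding above, for
 * illustration only: bits 31:29 of a TID word (QLOGIC_IB_RT_BUFSIZE_MASK)
 * hold a buffer size code, so a code of 1 gives
 * QLOGIC_IB_RT_BUFSIZE(tid) = 1 << (1 + 10) = 2048 bytes, and
 * QLOGIC_IB_RT_IS_VALID() rejects entries whose size code is 0 or all
 * ones.  The remaining 29 bits (QLOGIC_IB_RT_ADDR_MASK) carry the
 * buffer address bits.
 */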

#define QLOGIC_IB_R_INTRAVAIL_SHIFT 16
#define QLOGIC_IB_R_TAILUPD_SHIFT 31
#define IBA6120_R_PKEY_DIS_SHIFT 30

#define PBC_6120_VL15_SEND_CTRL (1ULL << 31) /* pbc; VL15; link_buf only */

#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)

#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
	((1ULL << (SYM_LSB(regname, fldname) + (bit)))))

#define TXEMEMPARITYERR_PIOBUF \
	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
#define TXEMEMPARITYERR_PIOPBC \
	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)

#define RXEMEMPARITYERR_RCVBUF \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
#define RXEMEMPARITYERR_LOOKUPQ \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
#define RXEMEMPARITYERR_EXPTID \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
#define RXEMEMPARITYERR_EAGERTID \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
#define RXEMEMPARITYERR_FLAGBUF \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
#define RXEMEMPARITYERR_DATAINFO \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
#define RXEMEMPARITYERR_HDRINFO \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)

/* 6120 specific hardware errors... */
static const struct qib_hwerror_msgs qib_6120_hwerror_msgs[] = {
	/* generic hardware errors */
	QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
	QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),

	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
			  "TXE PIOBUF Memory Parity"),
	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
			  "TXE PIOPBC Memory Parity"),
	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
			  "TXE PIOLAUNCHFIFO Memory Parity"),

	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
			  "RXE RCVBUF Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
			  "RXE LOOKUPQ Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
			  "RXE EAGERTID Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
			  "RXE EXPTID Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
			  "RXE FLAGBUF Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
			  "RXE DATAINFO Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
			  "RXE HDRINFO Memory Parity"),

	/* chip-specific hardware errors */
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
			  "PCIe Poisoned TLP"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
			  "PCIe completion timeout"),
	/*
	 * In practice, it's unlikely that we'll see PCIe PLL, bus parity,
	 * or memory parity error failures, because most likely we won't
	 * be able to talk to the core of the chip.  Nonetheless, we might
	 * see them, if they are in parts of the PCIe core that aren't
	 * essential.
	 */
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
			  "PCIePLL1"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
			  "PCIePLL0"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
			  "PCIe XTLH core parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
			  "PCIe ADM TX core parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
			  "PCIe ADM RX core parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
			  "SerDes PLL"),
};

#define TXE_PIO_PARITY (TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC)
#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP |   \
		QLOGIC_IB_HWE_COREPLL_RFSLIP)

	/* variables for sanity checking interrupt and errors */
#define IB_HWE_BITSEXTANT \
	(HWE_MASK(RXEMemParityErr) |					\
	 HWE_MASK(TXEMemParityErr) |					\
	 (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<			\
	  QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) |			\
	 QLOGIC_IB_HWE_PCIE1PLLFAILED |					\
	 QLOGIC_IB_HWE_PCIE0PLLFAILED |					\
	 QLOGIC_IB_HWE_PCIEPOISONEDTLP |				\
	 QLOGIC_IB_HWE_PCIECPLTIMEOUT |					\
	 QLOGIC_IB_HWE_PCIEBUSPARITYXTLH |				\
	 QLOGIC_IB_HWE_PCIEBUSPARITYXADM |				\
	 QLOGIC_IB_HWE_PCIEBUSPARITYRADM |				\
	 HWE_MASK(PowerOnBISTFailed) |					\
	 QLOGIC_IB_HWE_COREPLL_FBSLIP |					\
	 QLOGIC_IB_HWE_COREPLL_RFSLIP |					\
	 QLOGIC_IB_HWE_SERDESPLLFAILED |				\
	 HWE_MASK(IBCBusToSPCParityErr) |				\
	 HWE_MASK(IBCBusFromSPCParityErr))

#define IB_E_BITSEXTANT \
	(ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) |		\
	 ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) |		\
	 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) |	\
	 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
	 ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) |		\
	 ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) |		\
	 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |		\
	 ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) |		\
	 ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) |		\
	 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendMaxPktLenErr) |	\
	 ERR_MASK(SendUnderRunErr) | ERR_MASK(SendPktLenErr) |		\
	 ERR_MASK(SendDroppedSmpPktErr) |				\
	 ERR_MASK(SendDroppedDataPktErr) |				\
	 ERR_MASK(SendPioArmLaunchErr) |				\
	 ERR_MASK(SendUnexpectedPktNumErr) |				\
	 ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(IBStatusChanged) |	\
	 ERR_MASK(InvalidAddrErr) | ERR_MASK(ResetNegated) |		\
	 ERR_MASK(HardwareErr))

#define QLOGIC_IB_E_PKTERRS ( \
		ERR_MASK(SendPktLenErr) |				\
		ERR_MASK(SendDroppedDataPktErr) |			\
		ERR_MASK(RcvVCRCErr) |					\
		ERR_MASK(RcvICRCErr) |					\
		ERR_MASK(RcvShortPktLenErr) |				\
		ERR_MASK(RcvEBPErr))

/* These are all rcv-related errors which we want to count for stats */
#define E_SUM_PKTERRS						\
	(ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) |		\
	 ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) |		\
	 ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) |	\
	 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) |	\
	 ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) |	\
	 ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))

/* These are all send-related errors which we want to count for stats */
#define E_SUM_ERRS							\
	(ERR_MASK(SendPioArmLaunchErr) |				\
	 ERR_MASK(SendUnexpectedPktNumErr) |				\
	 ERR_MASK(SendDroppedDataPktErr) |				\
	 ERR_MASK(SendDroppedSmpPktErr) |				\
	 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) |	\
	 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) |		\
	 ERR_MASK(InvalidAddrErr))

/*
 * This is similar to E_SUM_ERRS, but we can't ignore armlaunch errors,
 * and we don't ignore errors that aren't related to freeze mode and
 * cancelling buffers.  Armlaunch can't be ignored because more could
 * arrive while we are still cleaning up, and those need to be cancelled
 * as they happen.
 */
#define E_SPKT_ERRS_IGNORE \
	(ERR_MASK(SendDroppedDataPktErr) |				\
	 ERR_MASK(SendDroppedSmpPktErr) |				\
	 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) |	\
	 ERR_MASK(SendPktLenErr))

/*
 * These are errors that can occur when the link changes state while
 * a packet is being sent or received.  This doesn't cover things
 * like EBP or VCRC errors that can result from the sender seeing the
 * link change state, so that we receive a "known bad" packet.
 */
#define E_SUM_LINK_PKTERRS		\
	(ERR_MASK(SendDroppedDataPktErr) |				\
	 ERR_MASK(SendDroppedSmpPktErr) |				\
	 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) |		\
	 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) |	\
	 ERR_MASK(RcvUnexpectedCharErr))

static void qib_6120_put_tid_2(struct qib_devdata *, u64 __iomem *,
			       u32, unsigned long);

/*
 * On platforms using this chip, and not having ordered WC stores, we
 * can get TXE parity errors due to speculative reads to the PIO buffers,
 * and this, due to a chip issue can result in (many) false parity error
 * reports.  So it's a debug print on those, and an info print on systems
 * where the speculative reads don't occur.
 */
static void qib_6120_txe_recover(struct qib_devdata *dd)
{
	if (!qib_unordered_wc())
		qib_devinfo(dd->pcidev,
			    "Recovering from TXE PIO parity error\n");
}

/* enable/disable chip from delivering interrupts */
static void qib_6120_set_intr_state(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		if (dd->flags & QIB_BADINTR)
			return;
		qib_write_kreg(dd, kr_intmask, ~0ULL);
		/* force re-interrupt of any pending interrupts. */
		qib_write_kreg(dd, kr_intclear, 0ULL);
	} else
		qib_write_kreg(dd, kr_intmask, 0ULL);
}

/*
 * Try to cleanup as much as possible for anything that might have gone
 * wrong while in freeze mode, such as pio buffers being written by user
 * processes (causing armlaunch), send errors due to going into freeze mode,
 * etc., and try to avoid causing extra interrupts while doing so.
 * Forcibly update the in-memory pioavail register copies after cleanup
 * because the chip won't do it while in freeze mode (the register values
 * themselves are kept correct).
 * Make sure that we don't lose any important interrupts by using the chip
 * feature that says that writing 0 to a bit in *clear that is set in
 * *status will cause an interrupt to be generated again (if allowed by
 * the *mask value).
 * This is in chip-specific code because of all of the register accesses,
 * even though the details are similar on most chips
 */
static void qib_6120_clear_freeze(struct qib_devdata *dd)
{
	/* disable error interrupts, to avoid confusion */
	qib_write_kreg(dd, kr_errmask, 0ULL);

	/* also disable interrupts; errormask is sometimes overwritten */
	qib_6120_set_intr_state(dd, 0);

	qib_cancel_sends(dd->pport);

	/* clear the freeze, and be sure chip saw it */
	qib_write_kreg(dd, kr_control, dd->control);
	qib_read_kreg32(dd, kr_scratch);

	/* force in-memory update now we are out of freeze */
	qib_force_pio_avail_update(dd);

	/*
	 * force new interrupt if any hwerr, error or interrupt bits are
	 * still set, and clear "safe" send packet errors related to freeze
	 * and cancelling sends.  Re-enable error interrupts before possible
	 * force of re-interrupt on pending interrupts.
	 */
	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
	qib_6120_set_intr_state(dd, 1);
}

/**
 * qib_handle_6120_hwerrors - display hardware errors.
 * @dd: the qlogic_ib device
 * @msg: the output buffer
 * @msgl: the size of the output buffer
 *
 * Use the same msg buffer as regular errors (handle_6120_errors()) to
 * avoid excessive stack use.  Most hardware errors are catastrophic,
 * but for right now we'll print them and continue.
 */
static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
				     size_t msgl)
{
	u64 hwerrs;
	u32 bits, ctrl;
	int isfatal = 0;
	char *bitsmsg;

	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
	if (!hwerrs)
		return;
	if (hwerrs == ~0ULL) {
		qib_dev_err(dd,
			"Read of hardware error status failed (all bits set); ignoring\n");
		return;
	}
	qib_stats.sps_hwerrs++;

	/* Always clear the error status register, except MEMBISTFAIL,
	 * regardless of whether we continue or stop using the chip.
	 * We want that set so we know it failed, even across driver reload.
	 * We'll still ignore it in the hwerrmask.  We do this partly for
	 * diagnostics, but also for support */
	qib_write_kreg(dd, kr_hwerrclear,
		       hwerrs & ~HWE_MASK(PowerOnBISTFailed));

	hwerrs &= dd->cspec->hwerrmask;

	/*
	 * Make sure we get this much out, unless told to be quiet,
	 * or it's occurred within the last 5 seconds.
	 */
	if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID))
		qib_devinfo(dd->pcidev,
			"Hardware error: hwerr=0x%llx (cleared)\n",
			(unsigned long long) hwerrs);

	if (hwerrs & ~IB_HWE_BITSEXTANT)
		qib_dev_err(dd,
			"hwerror interrupt with unknown errors %llx set\n",
			(unsigned long long)(hwerrs & ~IB_HWE_BITSEXTANT));

	ctrl = qib_read_kreg32(dd, kr_control);
	if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
		/*
		 * Parity errors in send memory are recoverable:
		 * just cancel the send (if indicated in sendbuffererror),
		 * count the occurrence, unfreeze (if no other handled
		 * hardware error bits are set), and continue.  They can
		 * occur if a processor speculative read is done to the PIO
		 * buffer while we are sending a packet, for example.
		 */
		if (hwerrs & TXE_PIO_PARITY) {
			qib_6120_txe_recover(dd);
			hwerrs &= ~TXE_PIO_PARITY;
		}

		if (!hwerrs)
			qib_6120_clear_freeze(dd);
		else
			isfatal = 1;
	}

	*msg = '\0';

	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
		isfatal = 1;
		strlcat(msg,
			"[Memory BIST test failed, InfiniPath hardware unusable]",
			msgl);
		/* ignore from now on, so disable until driver reloaded */
		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	qib_format_hwerrors(hwerrs, qib_6120_hwerror_msgs,
			    ARRAY_SIZE(qib_6120_hwerror_msgs), msg, msgl);

	bitsmsg = dd->cspec->bitsmsgbuf;
	if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
		      QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
		bits = (u32) ((hwerrs >>
			       QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
			      QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
		snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
			 "[PCIe Mem Parity Errs %x] ", bits);
		strlcat(msg, bitsmsg, msgl);
	}

	if (hwerrs & _QIB_PLL_FAIL) {
		isfatal = 1;
		snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
			 "[PLL failed (%llx), InfiniPath hardware unusable]",
			 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
		strlcat(msg, bitsmsg, msgl);
		/* ignore from now on, so disable until driver reloaded */
		dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
		/*
		 * If it occurs, it is left masked since the external
		 * interface is unused
		 */
		dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	if (hwerrs)
		/*
		 * if any set that we aren't ignoring; only
		 * make the complaint once, in case it's stuck
		 * or recurring, and we get here multiple
		 * times.
		 */
		qib_dev_err(dd, "%s hardware error\n", msg);
	else
		*msg = 0; /* recovered from all of them */

	if (isfatal && !dd->diag_client) {
		qib_dev_err(dd,
			"Fatal Hardware Error, no longer usable, SN %.16s\n",
			dd->serial);
		/*
		 * for /sys status file and user programs to print; if no
		 * trailing brace is copied, we'll know it was truncated.
		 */
		if (dd->freezemsg)
			snprintf(dd->freezemsg, dd->freezelen,
				 "{%s}", msg);
		qib_disable_after_error(dd);
	}
}

/*
 * Decode the error status into strings, deciding whether to always
 * print it or not depending on "normal packet errors" vs everything
 * else.  Return 1 if "real" errors, otherwise 0 if only packet
 * errors, so the caller can decide what to print with the string.
 */
static int qib_decode_6120_err(struct qib_devdata *dd, char *buf, size_t blen,
			       u64 err)
{
	int iserr = 1;

	*buf = '\0';
	if (err & QLOGIC_IB_E_PKTERRS) {
		if (!(err & ~QLOGIC_IB_E_PKTERRS))
			iserr = 0;
		if ((err & ERR_MASK(RcvICRCErr)) &&
		    !(err&(ERR_MASK(RcvVCRCErr)|ERR_MASK(RcvEBPErr))))
			strlcat(buf, "CRC ", blen);
		if (!iserr)
			goto done;
	}
	if (err & ERR_MASK(RcvHdrLenErr))
		strlcat(buf, "rhdrlen ", blen);
	if (err & ERR_MASK(RcvBadTidErr))
		strlcat(buf, "rbadtid ", blen);
	if (err & ERR_MASK(RcvBadVersionErr))
		strlcat(buf, "rbadversion ", blen);
	if (err & ERR_MASK(RcvHdrErr))
		strlcat(buf, "rhdr ", blen);
	if (err & ERR_MASK(RcvLongPktLenErr))
		strlcat(buf, "rlongpktlen ", blen);
	if (err & ERR_MASK(RcvMaxPktLenErr))
		strlcat(buf, "rmaxpktlen ", blen);
	if (err & ERR_MASK(RcvMinPktLenErr))
		strlcat(buf, "rminpktlen ", blen);
	if (err & ERR_MASK(SendMinPktLenErr))
		strlcat(buf, "sminpktlen ", blen);
	if (err & ERR_MASK(RcvFormatErr))
		strlcat(buf, "rformaterr ", blen);
	if (err & ERR_MASK(RcvUnsupportedVLErr))
		strlcat(buf, "runsupvl ", blen);
	if (err & ERR_MASK(RcvUnexpectedCharErr))
		strlcat(buf, "runexpchar ", blen);
	if (err & ERR_MASK(RcvIBFlowErr))
		strlcat(buf, "ribflow ", blen);
	if (err & ERR_MASK(SendUnderRunErr))
		strlcat(buf, "sunderrun ", blen);
	if (err & ERR_MASK(SendPioArmLaunchErr))
		strlcat(buf, "spioarmlaunch ", blen);
	if (err & ERR_MASK(SendUnexpectedPktNumErr))
		strlcat(buf, "sunexperrpktnum ", blen);
	if (err & ERR_MASK(SendDroppedSmpPktErr))
		strlcat(buf, "sdroppedsmppkt ", blen);
	if (err & ERR_MASK(SendMaxPktLenErr))
		strlcat(buf, "smaxpktlen ", blen);
	if (err & ERR_MASK(SendUnsupportedVLErr))
		strlcat(buf, "sunsupVL ", blen);
	if (err & ERR_MASK(InvalidAddrErr))
		strlcat(buf, "invalidaddr ", blen);
	if (err & ERR_MASK(RcvEgrFullErr))
		strlcat(buf, "rcvegrfull ", blen);
	if (err & ERR_MASK(RcvHdrFullErr))
		strlcat(buf, "rcvhdrfull ", blen);
	if (err & ERR_MASK(IBStatusChanged))
		strlcat(buf, "ibcstatuschg ", blen);
	if (err & ERR_MASK(RcvIBLostLinkErr))
946f931551bSRalph Campbell 		strlcat(buf, "riblostlink ", blen);
947f931551bSRalph Campbell 	if (err & ERR_MASK(HardwareErr))
948f931551bSRalph Campbell 		strlcat(buf, "hardware ", blen);
949f931551bSRalph Campbell 	if (err & ERR_MASK(ResetNegated))
950f931551bSRalph Campbell 		strlcat(buf, "reset ", blen);
951f931551bSRalph Campbell done:
952f931551bSRalph Campbell 	return iserr;
953f931551bSRalph Campbell }
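
/*
 * Example call site (see handle_6120_errors() below): bits that are
 * handled separately are masked off before decoding into the
 * chip-specific message buffer:
 *
 *	qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
 */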
954f931551bSRalph Campbell 
955f931551bSRalph Campbell /*
956f931551bSRalph Campbell  * Called when we might have an error that is specific to a particular
957f931551bSRalph Campbell  * PIO buffer, and may need to cancel that buffer, so it can be re-used.
958f931551bSRalph Campbell  */
959f931551bSRalph Campbell static void qib_disarm_6120_senderrbufs(struct qib_pportdata *ppd)
960f931551bSRalph Campbell {
961f931551bSRalph Campbell 	unsigned long sbuf[2];
962f931551bSRalph Campbell 	struct qib_devdata *dd = ppd->dd;
963f931551bSRalph Campbell 
964f931551bSRalph Campbell 	/*
965f931551bSRalph Campbell 	 * It's possible that sendbuffererror could have bits set; might
966f931551bSRalph Campbell 	 * have already done this as a result of hardware error handling.
967f931551bSRalph Campbell 	 */
968f931551bSRalph Campbell 	sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
969f931551bSRalph Campbell 	sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
970f931551bSRalph Campbell 
971f931551bSRalph Campbell 	if (sbuf[0] || sbuf[1])
972f931551bSRalph Campbell 		qib_disarm_piobufs_set(dd, sbuf,
973f931551bSRalph Campbell 				       dd->piobcnt2k + dd->piobcnt4k);
974f931551bSRalph Campbell }
975f931551bSRalph Campbell 
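/*
 * Check whether the IB link has taken an error-recovery transition since
 * the last snapshot of cr_iblinkerrrecov.  If so, force the link down and
 * return 0 so the caller skips the normal ibstatus-changed handling; the
 * snapshot is re-armed once the link reports ACTIVE again.
 */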
976f931551bSRalph Campbell static int chk_6120_linkrecovery(struct qib_devdata *dd, u64 ibcs)
977f931551bSRalph Campbell {
978f931551bSRalph Campbell 	int ret = 1;
979f931551bSRalph Campbell 	u32 ibstate = qib_6120_iblink_state(ibcs);
980f931551bSRalph Campbell 	u32 linkrecov = read_6120_creg32(dd, cr_iblinkerrrecov);
981f931551bSRalph Campbell 
982f931551bSRalph Campbell 	if (linkrecov != dd->cspec->lastlinkrecov) {
983f931551bSRalph Campbell 		/* and no more until active again */
984f931551bSRalph Campbell 		dd->cspec->lastlinkrecov = 0;
985f931551bSRalph Campbell 		qib_set_linkstate(dd->pport, QIB_IB_LINKDOWN);
986f931551bSRalph Campbell 		ret = 0;
987f931551bSRalph Campbell 	}
988f931551bSRalph Campbell 	if (ibstate == IB_PORT_ACTIVE)
989f931551bSRalph Campbell 		dd->cspec->lastlinkrecov =
990f931551bSRalph Campbell 			read_6120_creg32(dd, cr_iblinkerrrecov);
991f931551bSRalph Campbell 	return ret;
992f931551bSRalph Campbell }
993f931551bSRalph Campbell 
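/*
 * Handle a chip error interrupt: decode the error summary into a message,
 * disarm any send buffers with errors, track IB link-state changes, and
 * wake anyone waiting on link state or on receive-queue overflow.
 */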
994f931551bSRalph Campbell static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
995f931551bSRalph Campbell {
996f931551bSRalph Campbell 	char *msg;
997f931551bSRalph Campbell 	u64 ignore_this_time = 0;
998f931551bSRalph Campbell 	u64 iserr = 0;
999f931551bSRalph Campbell 	struct qib_pportdata *ppd = dd->pport;
1000f931551bSRalph Campbell 	u64 mask;
1001f931551bSRalph Campbell 
1002f931551bSRalph Campbell 	/* don't report errors that are masked */
1003f931551bSRalph Campbell 	errs &= dd->cspec->errormask;
1004f931551bSRalph Campbell 	msg = dd->cspec->emsgbuf;
1005f931551bSRalph Campbell 
1006f931551bSRalph Campbell 	/* do these first, they are most important */
1007f931551bSRalph Campbell 	if (errs & ERR_MASK(HardwareErr))
1008f931551bSRalph Campbell 		qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1009f931551bSRalph Campbell 
1010f931551bSRalph Campbell 	if (errs & ~IB_E_BITSEXTANT)
1011041af0bbSMike Marciniszyn 		qib_dev_err(dd,
1012f931551bSRalph Campbell 			"error interrupt with unknown errors %llx set\n",
1013f931551bSRalph Campbell 			(unsigned long long) (errs & ~IB_E_BITSEXTANT));
10147fac3301SMike Marciniszyn 
10157fac3301SMike Marciniszyn 	if (errs & E_SUM_ERRS) {
1016f931551bSRalph Campbell 		qib_disarm_6120_senderrbufs(ppd);
1017f931551bSRalph Campbell 		if ((errs & E_SUM_LINK_PKTERRS) &&
1018f931551bSRalph Campbell 		    !(ppd->lflags & QIBL_LINKACTIVE)) {
1019f931551bSRalph Campbell 			/*
1020f931551bSRalph Campbell 			 * This can happen when trying to bring the link
1021f931551bSRalph Campbell 			 * up, but the IB link changes state at the "wrong"
1022f931551bSRalph Campbell 			 * time. The IB logic then complains that the packet
1023f931551bSRalph Campbell 			 * isn't valid.  We don't want to confuse people, so
1024f931551bSRalph Campbell 			 * we just don't print them, except at debug
1025f931551bSRalph Campbell 			 */
1026f931551bSRalph Campbell 			ignore_this_time = errs & E_SUM_LINK_PKTERRS;
1027f931551bSRalph Campbell 		}
1028f931551bSRalph Campbell 	} else if ((errs & E_SUM_LINK_PKTERRS) &&
1029f931551bSRalph Campbell 		   !(ppd->lflags & QIBL_LINKACTIVE)) {
1030f931551bSRalph Campbell 		/*
1031f931551bSRalph Campbell 		 * This can happen when SMA is trying to bring the link
1032f931551bSRalph Campbell 		 * up, but the IB link changes state at the "wrong" time.
1033f931551bSRalph Campbell 		 * The IB logic then complains that the packet isn't
1034f931551bSRalph Campbell 		 * valid.  We don't want to confuse people, so we just
1035f931551bSRalph Campbell 		 * don't print them, except at debug
1036f931551bSRalph Campbell 		 */
1037f931551bSRalph Campbell 		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
1038f931551bSRalph Campbell 	}
1039f931551bSRalph Campbell 
1040f931551bSRalph Campbell 	qib_write_kreg(dd, kr_errclear, errs);
1041f931551bSRalph Campbell 
1042f931551bSRalph Campbell 	errs &= ~ignore_this_time;
1043f931551bSRalph Campbell 	if (!errs)
1044f931551bSRalph Campbell 		goto done;
1045f931551bSRalph Campbell 
1046f931551bSRalph Campbell 	/*
1047f931551bSRalph Campbell 	 * The ones we mask off are handled specially below
1048f931551bSRalph Campbell 	 * or above.
1049f931551bSRalph Campbell 	 */
1050f931551bSRalph Campbell 	mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
1051f931551bSRalph Campbell 		ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
1052f931551bSRalph Campbell 	qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
1053f931551bSRalph Campbell 
1054f931551bSRalph Campbell 	if (errs & E_SUM_PKTERRS)
1055041af0bbSMike Marciniszyn 		qib_stats.sps_rcverrs++;
1056f931551bSRalph Campbell 	if (errs & E_SUM_ERRS)
1057f931551bSRalph Campbell 		qib_stats.sps_txerrs++;
1058f931551bSRalph Campbell 
1059f931551bSRalph Campbell 	iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS);
1060f931551bSRalph Campbell 
1061f931551bSRalph Campbell 	if (errs & ERR_MASK(IBStatusChanged)) {
1062f931551bSRalph Campbell 		u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
1063f931551bSRalph Campbell 		u32 ibstate = qib_6120_iblink_state(ibcs);
1064f931551bSRalph Campbell 		int handle = 1;
1065f931551bSRalph Campbell 
1066f931551bSRalph Campbell 		if (ibstate != IB_PORT_INIT && dd->cspec->lastlinkrecov)
1067f931551bSRalph Campbell 			handle = chk_6120_linkrecovery(dd, ibcs);
1068f931551bSRalph Campbell 		/*
1069f931551bSRalph Campbell 		 * Since going into a recovery state causes the link state
1070f931551bSRalph Campbell 		 * to go down and since recovery is transitory, it is better
1071f931551bSRalph Campbell 		 * if we "miss" ever seeing the link training state go into
1072f931551bSRalph Campbell 		 * recovery (i.e., ignore this transition for link state
1073f931551bSRalph Campbell 		 * special handling purposes) without updating lastibcstat.
1074f931551bSRalph Campbell 		 */
1075f931551bSRalph Campbell 		if (handle && qib_6120_phys_portstate(ibcs) ==
1076f931551bSRalph Campbell 					    IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
1077f931551bSRalph Campbell 			handle = 0;
1078f931551bSRalph Campbell 		if (handle)
1079f931551bSRalph Campbell 			qib_handle_e_ibstatuschanged(ppd, ibcs);
1080f931551bSRalph Campbell 	}
1081f931551bSRalph Campbell 
1082f931551bSRalph Campbell 	if (errs & ERR_MASK(ResetNegated)) {
1083f931551bSRalph Campbell 		qib_dev_err(dd,
1084f931551bSRalph Campbell 			"Got reset, requires re-init (unload and reload driver)\n");
1085f931551bSRalph Campbell 		dd->flags &= ~QIB_INITTED;  /* needs re-init */
10867fac3301SMike Marciniszyn 		/* mark as having had error */
10877fac3301SMike Marciniszyn 		*dd->devstatusp |= QIB_STATUS_HWERROR;
1088f931551bSRalph Campbell 		*dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
1089f931551bSRalph Campbell 	}
1090f931551bSRalph Campbell 
1091f931551bSRalph Campbell 	if (*msg && iserr)
1092f931551bSRalph Campbell 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1093f931551bSRalph Campbell 
1094f931551bSRalph Campbell 	if (ppd->state_wanted & ppd->lflags)
1095f931551bSRalph Campbell 		wake_up_interruptible(&ppd->state_wait);
1096f931551bSRalph Campbell 
1097f931551bSRalph Campbell 	/*
1098f931551bSRalph Campbell 	 * If there were hdrq or egrfull errors, wake up any processes
1099f931551bSRalph Campbell 	 * waiting in poll.  We used to try to check which contexts had
1100f931551bSRalph Campbell 	 * the overflow, but given the cost of that and the chip reads
1101f931551bSRalph Campbell 	 * to support it, it's better to just wake everybody up if we
1102f931551bSRalph Campbell 	 * get an overflow; waiters can poll again if it's not them.
1103f931551bSRalph Campbell 	 */
1104f931551bSRalph Campbell 	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1105f931551bSRalph Campbell 		qib_handle_urcv(dd, ~0U);
1106f931551bSRalph Campbell 		if (errs & ERR_MASK(RcvEgrFullErr))
1107f931551bSRalph Campbell 			qib_stats.sps_buffull++;
1108f931551bSRalph Campbell 		else
1109f931551bSRalph Campbell 			qib_stats.sps_hdrfull++;
1110f931551bSRalph Campbell 	}
1111f931551bSRalph Campbell done:
1112f931551bSRalph Campbell 	return;
1113f931551bSRalph Campbell }
1114f931551bSRalph Campbell 
1115f931551bSRalph Campbell /**
1116f931551bSRalph Campbell  * qib_6120_init_hwerrors - enable hardware errors
1117f931551bSRalph Campbell  * @dd: the qlogic_ib device
1118f931551bSRalph Campbell  *
1119f931551bSRalph Campbell  * now that we have finished initializing everything that might reasonably
1120f931551bSRalph Campbell  * cause a hardware error, and cleared those error bits as they occurred,
1121f931551bSRalph Campbell  * we can enable hardware errors in the mask (potentially enabling
1122f931551bSRalph Campbell  * freeze mode), and enable hardware errors as errors (along with
1123f931551bSRalph Campbell  * everything else) in errormask
1124f931551bSRalph Campbell  */
1125f931551bSRalph Campbell static void qib_6120_init_hwerrors(struct qib_devdata *dd)
1126f931551bSRalph Campbell {
1127f931551bSRalph Campbell 	u64 val;
1128f931551bSRalph Campbell 	u64 extsval;
1129f931551bSRalph Campbell 
1130f931551bSRalph Campbell 	extsval = qib_read_kreg64(dd, kr_extstatus);
1131f931551bSRalph Campbell 
1132f931551bSRalph Campbell 	if (!(extsval & QLOGIC_IB_EXTS_MEMBIST_ENDTEST))
1133f931551bSRalph Campbell 		qib_dev_err(dd, "MemBIST did not complete!\n");
1134f931551bSRalph Campbell 
1135f931551bSRalph Campbell 	/* init so all hwerrors interrupt, and enter freeze, adjust below */
1136f931551bSRalph Campbell 	val = ~0ULL;
1137f931551bSRalph Campbell 	if (dd->minrev < 2) {
1138f931551bSRalph Campbell 		/*
1139f931551bSRalph Campbell 		 * Avoid problem with internal interface bus parity
1140f931551bSRalph Campbell 		 * checking. Fixed in Rev2.
1141f931551bSRalph Campbell 		 */
1142f931551bSRalph Campbell 		val &= ~QLOGIC_IB_HWE_PCIEBUSPARITYRADM;
1143f931551bSRalph Campbell 	}
1144f931551bSRalph Campbell 	/* avoid some Intel CPUs' speculative-read freeze mode issue */
1145f931551bSRalph Campbell 	val &= ~TXEMEMPARITYERR_PIOBUF;
1146f931551bSRalph Campbell 
1147f931551bSRalph Campbell 	dd->cspec->hwerrmask = val;
1148f931551bSRalph Campbell 
1149f931551bSRalph Campbell 	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
1150f931551bSRalph Campbell 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1151f931551bSRalph Campbell 
1152f931551bSRalph Campbell 	/* clear all */
1153f931551bSRalph Campbell 	qib_write_kreg(dd, kr_errclear, ~0ULL);
1154f931551bSRalph Campbell 	/* enable errors that are masked, at least this first time. */
1155f931551bSRalph Campbell 	qib_write_kreg(dd, kr_errmask, ~0ULL);
1156f931551bSRalph Campbell 	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
1157f931551bSRalph Campbell 	/* clear any interrupts up to this point (ints still not enabled) */
1158f931551bSRalph Campbell 	qib_write_kreg(dd, kr_intclear, ~0ULL);
1159f931551bSRalph Campbell 
1160f931551bSRalph Campbell 	qib_write_kreg(dd, kr_rcvbthqp,
1161f931551bSRalph Campbell 		       dd->qpn_mask << (QIB_6120_RcvBTHQP_BTHQP_Mask_LSB - 1) |
1162f931551bSRalph Campbell 		       QIB_KD_QP);
1163f931551bSRalph Campbell }
1164f931551bSRalph Campbell 
1165f931551bSRalph Campbell /*
1166f931551bSRalph Campbell  * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
1167f931551bSRalph Campbell  * on chips that are count-based, rather than trigger-based.  There is no
1168f931551bSRalph Campbell  * reference counting, but that's also fine, given the intended use.
1169f931551bSRalph Campbell  * Only chip-specific because it's all register accesses
1170f931551bSRalph Campbell  */
1171f931551bSRalph Campbell static void qib_set_6120_armlaunch(struct qib_devdata *dd, u32 enable)
1172f931551bSRalph Campbell {
1173f931551bSRalph Campbell 	if (enable) {
1174f931551bSRalph Campbell 		qib_write_kreg(dd, kr_errclear,
1175f931551bSRalph Campbell 			       ERR_MASK(SendPioArmLaunchErr));
1176f931551bSRalph Campbell 		dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
1177f931551bSRalph Campbell 	} else
1178f931551bSRalph Campbell 		dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
1179f931551bSRalph Campbell 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1180f931551bSRalph Campbell }
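
/*
 * Hypothetical usage sketch (not a call site in this file): a PIO
 * bandwidth test would bracket its back-to-back PIO sends with
 *
 *	qib_set_6120_armlaunch(dd, 0);
 *	... issue PIO-only sends ...
 *	qib_set_6120_armlaunch(dd, 1);
 *
 * so the expected armlaunch errors stay masked while the test runs.
 */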
1181f931551bSRalph Campbell 
1182f931551bSRalph Campbell /*
1183f931551bSRalph Campbell  * Formerly took parameter <which> in pre-shifted,
1184f931551bSRalph Campbell  * pre-merged form with LinkCmd and LinkInitCmd
1185f931551bSRalph Campbell  * together, and assuming the zero was NOP.
1186f931551bSRalph Campbell  */
1187f931551bSRalph Campbell static void qib_set_ib_6120_lstate(struct qib_pportdata *ppd, u16 linkcmd,
1188f931551bSRalph Campbell 				   u16 linitcmd)
1189f931551bSRalph Campbell {
1190f931551bSRalph Campbell 	u64 mod_wd;
1191f931551bSRalph Campbell 	struct qib_devdata *dd = ppd->dd;
1192f931551bSRalph Campbell 	unsigned long flags;
1193f931551bSRalph Campbell 
1194f931551bSRalph Campbell 	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
1195f931551bSRalph Campbell 		/*
1196f931551bSRalph Campbell 		 * If we are told to disable, note that so link-recovery
1197f931551bSRalph Campbell 		 * code does not attempt to bring us back up.
1198f931551bSRalph Campbell 		 */
1199f931551bSRalph Campbell 		spin_lock_irqsave(&ppd->lflags_lock, flags);
1200f931551bSRalph Campbell 		ppd->lflags |= QIBL_IB_LINK_DISABLED;
1201f931551bSRalph Campbell 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1202f931551bSRalph Campbell 	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
1203f931551bSRalph Campbell 		/*
1204f931551bSRalph Campbell 		 * Any other linkinitcmd will lead to LINKDOWN and then
1205f931551bSRalph Campbell 		 * to INIT (if all is well), so clear flag to let
1206f931551bSRalph Campbell 		 * link-recovery code attempt to bring us back up.
1207f931551bSRalph Campbell 		 */
1208f931551bSRalph Campbell 		spin_lock_irqsave(&ppd->lflags_lock, flags);
1209f931551bSRalph Campbell 		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
1210f931551bSRalph Campbell 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1211f931551bSRalph Campbell 	}
1212f931551bSRalph Campbell 
1213f931551bSRalph Campbell 	mod_wd = (linkcmd << QLOGIC_IB_IBCC_LINKCMD_SHIFT) |
1214f931551bSRalph Campbell 		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
1215f931551bSRalph Campbell 
1216f931551bSRalph Campbell 	qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl | mod_wd);
1217f931551bSRalph Campbell 	/* write to chip to prevent back-to-back writes of control reg */
1218f931551bSRalph Campbell 	qib_write_kreg(dd, kr_scratch, 0);
1219f931551bSRalph Campbell }
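
/*
 * Example (used by qib_6120_quiet_serdes() below): taking the link to the
 * DISABLED state, with no LinkCmd, is simply
 *
 *	qib_set_ib_6120_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
 */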
1220f931551bSRalph Campbell 
1221f931551bSRalph Campbell /**
1222f931551bSRalph Campbell  * qib_6120_bringup_serdes - bring up the serdes
1223f931551bSRalph Campbell  * @ppd: the qlogic_ib device
1224f931551bSRalph Campbell  */
1225f931551bSRalph Campbell static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
122671f964c3SLee Jones {
1227f931551bSRalph Campbell 	struct qib_devdata *dd = ppd->dd;
1228f931551bSRalph Campbell 	u64 val, config1, prev_val, hwstat, ibc;
1229f931551bSRalph Campbell 
1230f931551bSRalph Campbell 	/* Put IBC in reset, sends disabled */
1231f931551bSRalph Campbell 	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1232f931551bSRalph Campbell 	qib_write_kreg(dd, kr_control, 0ULL);
1233f931551bSRalph Campbell 
1234f931551bSRalph Campbell 	dd->cspec->ibdeltainprog = 1;
1235f931551bSRalph Campbell 	dd->cspec->ibsymsnap = read_6120_creg32(dd, cr_ibsymbolerr);
1236f931551bSRalph Campbell 	dd->cspec->iblnkerrsnap = read_6120_creg32(dd, cr_iblinkerrrecov);
1237f931551bSRalph Campbell 
1238f931551bSRalph Campbell 	/* flowcontrolwatermark is in units of KBytes */
1239f931551bSRalph Campbell 	ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
1240f931551bSRalph Campbell 	/*
1241f931551bSRalph Campbell 	 * How often flowctrl sent.  More or less in usecs; balance against
1242f931551bSRalph Campbell 	 * watermark value, so that in theory senders always get a flow
1243f931551bSRalph Campbell 	 * control update in time to not let the IB link go idle.
1244f931551bSRalph Campbell 	 */
1245f931551bSRalph Campbell 	ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
1246f931551bSRalph Campbell 	/* max error tolerance */
1247f931551bSRalph Campbell 	dd->cspec->lli_thresh = 0xf;
1248f931551bSRalph Campbell 	ibc |= (u64) dd->cspec->lli_thresh << SYM_LSB(IBCCtrl, PhyerrThreshold);
1249f931551bSRalph Campbell 	/* use "real" buffer space for... */
1250f931551bSRalph Campbell 	ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
1251f931551bSRalph Campbell 	/* ...IB credit flow control. */
1252f931551bSRalph Campbell 	ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
1253f931551bSRalph Campbell 	/*
1254f931551bSRalph Campbell 	 * set initial max size pkt IBC will send, including ICRC; it's the
1255f931551bSRalph Campbell 	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
1256f931551bSRalph Campbell 	 */
1257f931551bSRalph Campbell 	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
1258f931551bSRalph Campbell 	dd->cspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
1259f931551bSRalph Campbell 
1260f931551bSRalph Campbell 	/* initially come up waiting for TS1, without sending anything. */
1261f931551bSRalph Campbell 	val = dd->cspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
1262f931551bSRalph Campbell 		QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
1263f931551bSRalph Campbell 	qib_write_kreg(dd, kr_ibcctrl, val);
1264f931551bSRalph Campbell 
1265f931551bSRalph Campbell 	val = qib_read_kreg64(dd, kr_serdes_cfg0);
1266f931551bSRalph Campbell 	config1 = qib_read_kreg64(dd, kr_serdes_cfg1);
1267f931551bSRalph Campbell 
1268f931551bSRalph Campbell 	/*
1269f931551bSRalph Campbell 	 * Force reset on, also set rxdetect enable.  Must do before reading
1270f931551bSRalph Campbell 	 * serdesstatus at least for simulation, or some of the bits in
1271f931551bSRalph Campbell 	 * serdes status will come back as undefined and cause simulation
1272f931551bSRalph Campbell 	 * failures
1273f931551bSRalph Campbell 	 */
1274f931551bSRalph Campbell 	val |= SYM_MASK(SerdesCfg0, ResetPLL) |
1275f931551bSRalph Campbell 		SYM_MASK(SerdesCfg0, RxDetEnX) |
1276f931551bSRalph Campbell 		(SYM_MASK(SerdesCfg0, L1PwrDnA) |
1277f931551bSRalph Campbell 		 SYM_MASK(SerdesCfg0, L1PwrDnB) |
1278f931551bSRalph Campbell 		 SYM_MASK(SerdesCfg0, L1PwrDnC) |
1279f931551bSRalph Campbell 		 SYM_MASK(SerdesCfg0, L1PwrDnD));
1280f931551bSRalph Campbell 	qib_write_kreg(dd, kr_serdes_cfg0, val);
1281f931551bSRalph Campbell 	/* be sure chip saw it */
1282f931551bSRalph Campbell 	qib_read_kreg64(dd, kr_scratch);
1283f931551bSRalph Campbell 	udelay(5);              /* need pll reset set at least for a bit */
1284f931551bSRalph Campbell 	/*
1285f931551bSRalph Campbell 	 * after PLL is reset, set the per-lane Resets and TxIdle and
1286f931551bSRalph Campbell 	 * clear the PLL reset and rxdetect (to get falling edge).
1287f931551bSRalph Campbell 	 * Leave L1PWR bits set (permanently)
1288f931551bSRalph Campbell 	 */
1289f931551bSRalph Campbell 	val &= ~(SYM_MASK(SerdesCfg0, RxDetEnX) |
1290f931551bSRalph Campbell 		 SYM_MASK(SerdesCfg0, ResetPLL) |
1291f931551bSRalph Campbell 		 (SYM_MASK(SerdesCfg0, L1PwrDnA) |
1292f931551bSRalph Campbell 		  SYM_MASK(SerdesCfg0, L1PwrDnB) |
1293f931551bSRalph Campbell 		  SYM_MASK(SerdesCfg0, L1PwrDnC) |
1294f931551bSRalph Campbell 		  SYM_MASK(SerdesCfg0, L1PwrDnD)));
1295f931551bSRalph Campbell 	val |= (SYM_MASK(SerdesCfg0, ResetA) |
1296f931551bSRalph Campbell 		SYM_MASK(SerdesCfg0, ResetB) |
1297f931551bSRalph Campbell 		SYM_MASK(SerdesCfg0, ResetC) |
1298f931551bSRalph Campbell 		SYM_MASK(SerdesCfg0, ResetD)) |
1299f931551bSRalph Campbell 		SYM_MASK(SerdesCfg0, TxIdeEnX);
1300f931551bSRalph Campbell 	qib_write_kreg(dd, kr_serdes_cfg0, val);
1301f931551bSRalph Campbell 	/* be sure chip saw it */
1302f931551bSRalph Campbell 	(void) qib_read_kreg64(dd, kr_scratch);
1303f931551bSRalph Campbell 	/* need PLL reset clear for at least 11 usec before lane
1304f931551bSRalph Campbell 	 * resets cleared; give it a few more to be sure */
1305f931551bSRalph Campbell 	udelay(15);
1306f931551bSRalph Campbell 	val &= ~((SYM_MASK(SerdesCfg0, ResetA) |
1307f931551bSRalph Campbell 		  SYM_MASK(SerdesCfg0, ResetB) |
1308f931551bSRalph Campbell 		  SYM_MASK(SerdesCfg0, ResetC) |
1309f931551bSRalph Campbell 		  SYM_MASK(SerdesCfg0, ResetD)) |
1310f931551bSRalph Campbell 		 SYM_MASK(SerdesCfg0, TxIdeEnX));
1311f931551bSRalph Campbell 
1312f931551bSRalph Campbell 	qib_write_kreg(dd, kr_serdes_cfg0, val);
1313f931551bSRalph Campbell 	/* be sure chip saw it */
1314f931551bSRalph Campbell 	(void) qib_read_kreg64(dd, kr_scratch);
1315f931551bSRalph Campbell 
1316f931551bSRalph Campbell 	val = qib_read_kreg64(dd, kr_xgxs_cfg);
1317f931551bSRalph Campbell 	prev_val = val;
1318f931551bSRalph Campbell 	if (val & QLOGIC_IB_XGXS_RESET)
1319f931551bSRalph Campbell 		val &= ~QLOGIC_IB_XGXS_RESET;
1320f931551bSRalph Campbell 	if (SYM_FIELD(val, XGXSCfg, polarity_inv) != ppd->rx_pol_inv) {
1321f931551bSRalph Campbell 		/* need to compensate for Tx inversion in partner */
1322f931551bSRalph Campbell 		val &= ~SYM_MASK(XGXSCfg, polarity_inv);
1323f931551bSRalph Campbell 		val |= (u64)ppd->rx_pol_inv << SYM_LSB(XGXSCfg, polarity_inv);
1324f931551bSRalph Campbell 	}
1325f931551bSRalph Campbell 	if (val != prev_val)
1326f931551bSRalph Campbell 		qib_write_kreg(dd, kr_xgxs_cfg, val);
1327f931551bSRalph Campbell 
1328f931551bSRalph Campbell 	val = qib_read_kreg64(dd, kr_serdes_cfg0);
1329f931551bSRalph Campbell 
1330f931551bSRalph Campbell 	/* clear current and de-emphasis bits */
1331f931551bSRalph Campbell 	config1 &= ~0x0ffffffff00ULL;
1332f931551bSRalph Campbell 	/* set current to 20ma */
1333f931551bSRalph Campbell 	config1 |= 0x00000000000ULL;
1334f931551bSRalph Campbell 	/* set de-emphasis to -5.68dB */
1335f931551bSRalph Campbell 	config1 |= 0x0cccc000000ULL;
1336f931551bSRalph Campbell 	qib_write_kreg(dd, kr_serdes_cfg1, config1);
1337f931551bSRalph Campbell 
1338f931551bSRalph Campbell 	/* base and port guid same for single port */
1339f931551bSRalph Campbell 	ppd->guid = dd->base_guid;
1340f931551bSRalph Campbell 
1341f931551bSRalph Campbell 	/*
1342f931551bSRalph Campbell 	 * the process of setting and un-resetting the serdes normally
1343f931551bSRalph Campbell 	 * causes a serdes PLL error, so check for that and clear it
1344f931551bSRalph Campbell 	 * here.  Also clear the hwerr bit in errstatus, but not others.
1345f931551bSRalph Campbell 	 */
1346f931551bSRalph Campbell 	hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
1347f931551bSRalph Campbell 	if (hwstat) {
1348f931551bSRalph Campbell 		/* should just have PLL; clear all that are set, in any case */
1349f931551bSRalph Campbell 		qib_write_kreg(dd, kr_hwerrclear, hwstat);
1350f931551bSRalph Campbell 		qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
1351f931551bSRalph Campbell 	}
1352f931551bSRalph Campbell 
1353f931551bSRalph Campbell 	dd->control |= QLOGIC_IB_C_LINKENABLE;
1354f931551bSRalph Campbell 	dd->control &= ~QLOGIC_IB_C_FREEZEMODE;
1355f931551bSRalph Campbell 	qib_write_kreg(dd, kr_control, dd->control);
1356f931551bSRalph Campbell 
1357f931551bSRalph Campbell 	return 0;
1358f931551bSRalph Campbell }
1359f931551bSRalph Campbell 
1360f931551bSRalph Campbell /**
1361f931551bSRalph Campbell  * qib_6120_quiet_serdes - set serdes to txidle
1362f931551bSRalph Campbell  * @ppd: physical port of the qlogic_ib device
1363f931551bSRalph Campbell  * Called when driver is being unloaded
1364f931551bSRalph Campbell  */
1365f931551bSRalph Campbell static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
1366f931551bSRalph Campbell {
1367f931551bSRalph Campbell 	struct qib_devdata *dd = ppd->dd;
1368f931551bSRalph Campbell 	u64 val;
1369f931551bSRalph Campbell 
1370f931551bSRalph Campbell 	qib_set_ib_6120_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1371f931551bSRalph Campbell 
1372f931551bSRalph Campbell 	/* disable IBC */
1373f931551bSRalph Campbell 	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1374f931551bSRalph Campbell 	qib_write_kreg(dd, kr_control,
1375f931551bSRalph Campbell 		       dd->control | QLOGIC_IB_C_FREEZEMODE);
1376f931551bSRalph Campbell 
1377f931551bSRalph Campbell 	if (dd->cspec->ibsymdelta || dd->cspec->iblnkerrdelta ||
1378f931551bSRalph Campbell 	    dd->cspec->ibdeltainprog) {
1379f931551bSRalph Campbell 		u64 diagc;
1380f931551bSRalph Campbell 
1381f931551bSRalph Campbell 		/* enable counter writes */
1382f931551bSRalph Campbell 		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
1383f931551bSRalph Campbell 		qib_write_kreg(dd, kr_hwdiagctrl,
1384f931551bSRalph Campbell 			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
1385f931551bSRalph Campbell 
1386f931551bSRalph Campbell 		if (dd->cspec->ibsymdelta || dd->cspec->ibdeltainprog) {
1387f931551bSRalph Campbell 			val = read_6120_creg32(dd, cr_ibsymbolerr);
1388f931551bSRalph Campbell 			if (dd->cspec->ibdeltainprog)
1389f931551bSRalph Campbell 				val -= val - dd->cspec->ibsymsnap;
1390f931551bSRalph Campbell 			val -= dd->cspec->ibsymdelta;
1391f931551bSRalph Campbell 			write_6120_creg(dd, cr_ibsymbolerr, val);
1392f931551bSRalph Campbell 		}
1393f931551bSRalph Campbell 		if (dd->cspec->iblnkerrdelta || dd->cspec->ibdeltainprog) {
1394f931551bSRalph Campbell 			val = read_6120_creg32(dd, cr_iblinkerrrecov);
1395f931551bSRalph Campbell 			if (dd->cspec->ibdeltainprog)
1396f931551bSRalph Campbell 				val -= val - dd->cspec->iblnkerrsnap;
1397f931551bSRalph Campbell 			val -= dd->cspec->iblnkerrdelta;
1398f931551bSRalph Campbell 			write_6120_creg(dd, cr_iblinkerrrecov, val);
1399f931551bSRalph Campbell 		}
1400f931551bSRalph Campbell 
1401f931551bSRalph Campbell 		/* and disable counter writes */
1402f931551bSRalph Campbell 		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
1403f931551bSRalph Campbell 	}
1404f931551bSRalph Campbell 
1405f931551bSRalph Campbell 	val = qib_read_kreg64(dd, kr_serdes_cfg0);
1406f931551bSRalph Campbell 	val |= SYM_MASK(SerdesCfg0, TxIdeEnX);
1407f931551bSRalph Campbell 	qib_write_kreg(dd, kr_serdes_cfg0, val);
1408f931551bSRalph Campbell }
1409f931551bSRalph Campbell 
1410f931551bSRalph Campbell /**
1411f931551bSRalph Campbell  * qib_6120_setup_setextled - set the state of the two external LEDs
1412f931551bSRalph Campbell  * @ppd: the qlogic_ib device
1413f931551bSRalph Campbell  * @on: whether the link is up or not
1414f931551bSRalph Campbell  *
141571f964c3SLee Jones  * The exact combo of LEDs, if @on is true, is determined by looking
1416f931551bSRalph Campbell  * at the ibcstatus.
1417f931551bSRalph Campbell  * These LEDs indicate the physical and logical state of IB link.
1418f931551bSRalph Campbell  * For this chip (at least with recommended board pinouts), LED1
1419f931551bSRalph Campbell  * is Yellow (logical state) and LED2 is Green (physical state).
1420f931551bSRalph Campbell  *
1421f931551bSRalph Campbell  * Note:  We try to match the Mellanox HCA LED behavior as best
1422f931551bSRalph Campbell  * we can.  Green indicates physical link state is OK (something is
1423f931551bSRalph Campbell  * plugged in, and we can train).
1424f931551bSRalph Campbell  * Amber indicates the link is logically up (ACTIVE).
1425f931551bSRalph Campbell  * Mellanox further blinks the amber LED to indicate data packet
1426f931551bSRalph Campbell  * activity, but we have no hardware support for that, so it would
1427f931551bSRalph Campbell  * require waking up every 10-20 msecs and checking the counters
1428f931551bSRalph Campbell  * on the chip, and then turning the LED off if appropriate.  That's
1429f931551bSRalph Campbell  * visible overhead, so not something we will do.
1430f931551bSRalph Campbell  *
1431f931551bSRalph Campbell  */
1432f931551bSRalph Campbell static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on)
1433f931551bSRalph Campbell {
1434f931551bSRalph Campbell 	u64 extctl, val, lst, ltst;
1435f931551bSRalph Campbell 	unsigned long flags;
1436f931551bSRalph Campbell 	struct qib_devdata *dd = ppd->dd;
1437f931551bSRalph Campbell 
1438f931551bSRalph Campbell 	/*
1439f931551bSRalph Campbell 	 * The diags use the LED to indicate diag info, so we leave
1440f931551bSRalph Campbell 	 * the external LED alone when the diags are running.
1441f931551bSRalph Campbell 	 */
1442f931551bSRalph Campbell 	if (dd->diag_client)
1443f931551bSRalph Campbell 		return;
1444f931551bSRalph Campbell 
1445f931551bSRalph Campbell 	/* Allow override of LED display, e.g. for locating system in rack */
1446f931551bSRalph Campbell 	if (ppd->led_override) {
1447f931551bSRalph Campbell 		ltst = (ppd->led_override & QIB_LED_PHYS) ?
1448f931551bSRalph Campbell 			IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED,
1449f931551bSRalph Campbell 		lst = (ppd->led_override & QIB_LED_LOG) ?
1450f931551bSRalph Campbell 			IB_PORT_ACTIVE : IB_PORT_DOWN;
1451f931551bSRalph Campbell 	} else if (on) {
1452f931551bSRalph Campbell 		val = qib_read_kreg64(dd, kr_ibcstatus);
1453f931551bSRalph Campbell 		ltst = qib_6120_phys_portstate(val);
1454f931551bSRalph Campbell 		lst = qib_6120_iblink_state(val);
1455f931551bSRalph Campbell 	} else {
1456f931551bSRalph Campbell 		ltst = 0;
1457f931551bSRalph Campbell 		lst = 0;
1458f931551bSRalph Campbell 	}
1459f931551bSRalph Campbell 
1460f931551bSRalph Campbell 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
1461f931551bSRalph Campbell 	extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
1462f931551bSRalph Campbell 				 SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
1463f931551bSRalph Campbell 
1464f931551bSRalph Campbell 	if (ltst == IB_PHYSPORTSTATE_LINKUP)
1465f931551bSRalph Campbell 		extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
1466f931551bSRalph Campbell 	if (lst == IB_PORT_ACTIVE)
1467f931551bSRalph Campbell 		extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
1468f931551bSRalph Campbell 	dd->cspec->extctrl = extctl;
1469f931551bSRalph Campbell 	qib_write_kreg(dd, kr_extctrl, extctl);
1470f931551bSRalph Campbell 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
1471f931551bSRalph Campbell }
1472f931551bSRalph Campbell 
1473f931551bSRalph Campbell /**
1474f931551bSRalph Campbell  * qib_6120_setup_cleanup - clean up any per-chip chip-specific stuff
1475f931551bSRalph Campbell  * @dd: the qlogic_ib device
1476f931551bSRalph Campbell  *
1477f931551bSRalph Campbell  * This is called during driver unload.
1478f931551bSRalph Campbell */
1479f931551bSRalph Campbell static void qib_6120_setup_cleanup(struct qib_devdata *dd)
1480f931551bSRalph Campbell {
1481f931551bSRalph Campbell 	qib_free_irq(dd);
1482f931551bSRalph Campbell 	kfree(dd->cspec->cntrs);
1483f931551bSRalph Campbell 	kfree(dd->cspec->portcntrs);
1484c4bc6156SMichael J. Ruhl 	if (dd->cspec->dummy_hdrq) {
1485f931551bSRalph Campbell 		dma_free_coherent(&dd->pcidev->dev,
1486f931551bSRalph Campbell 				  ALIGN(dd->rcvhdrcnt *
1487f931551bSRalph Campbell 					dd->rcvhdrentsize *
1488f931551bSRalph Campbell 					sizeof(u32), PAGE_SIZE),
1489f931551bSRalph Campbell 				  dd->cspec->dummy_hdrq,
1490f931551bSRalph Campbell 				  dd->cspec->dummy_hdrq_phys);
1491f931551bSRalph Campbell 		dd->cspec->dummy_hdrq = NULL;
1492f931551bSRalph Campbell 	}
1493f931551bSRalph Campbell }
1494f931551bSRalph Campbell 
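/*
 * Enable or disable the "PIO buffer available" interrupt by toggling the
 * PIOIntBufAvail bit in SendCtrl, under sendctrl_lock; the scratch write
 * flushes the sendctrl change to the chip.
 */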
1495f931551bSRalph Campbell static void qib_wantpiobuf_6120_intr(struct qib_devdata *dd, u32 needint)
1496f931551bSRalph Campbell {
1497f931551bSRalph Campbell 	unsigned long flags;
1498f931551bSRalph Campbell 
1499f931551bSRalph Campbell 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
1500f931551bSRalph Campbell 	if (needint)
1501f931551bSRalph Campbell 		dd->sendctrl |= SYM_MASK(SendCtrl, PIOIntBufAvail);
1502f931551bSRalph Campbell 	else
1503f931551bSRalph Campbell 		dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOIntBufAvail);
1504f931551bSRalph Campbell 	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
1505f931551bSRalph Campbell 	qib_write_kreg(dd, kr_scratch, 0ULL);
1506f931551bSRalph Campbell 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
1507f931551bSRalph Campbell }
1508f931551bSRalph Campbell 
1509f931551bSRalph Campbell /*
1510f931551bSRalph Campbell  * handle errors and unusual events first, separate function
1511f931551bSRalph Campbell  * to improve cache hits for fast path interrupt handling
1512f931551bSRalph Campbell  */
1513f931551bSRalph Campbell static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat)
1514f931551bSRalph Campbell {
1515f931551bSRalph Campbell 	if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
1516f931551bSRalph Campbell 		qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n",
1517f931551bSRalph Campbell 			    istat & ~QLOGIC_IB_I_BITSEXTANT);
1518f931551bSRalph Campbell 
1519f931551bSRalph Campbell 	if (istat & QLOGIC_IB_I_ERROR) {
1520f931551bSRalph Campbell 		u64 estat = 0;
1521f931551bSRalph Campbell 
1522f931551bSRalph Campbell 		qib_stats.sps_errints++;
1523f931551bSRalph Campbell 		estat = qib_read_kreg64(dd, kr_errstatus);
1524f931551bSRalph Campbell 		if (!estat)
1525f931551bSRalph Campbell 			qib_devinfo(dd->pcidev,
1526f931551bSRalph Campbell 				"error interrupt (%Lx), but no error bits set!\n",
1527f931551bSRalph Campbell 				istat);
15287fac3301SMike Marciniszyn 		handle_6120_errors(dd, estat);
15297fac3301SMike Marciniszyn 	}
15307fac3301SMike Marciniszyn 
1531f931551bSRalph Campbell 	if (istat & QLOGIC_IB_I_GPIO) {
1532f931551bSRalph Campbell 		u32 gpiostatus;
1533f931551bSRalph Campbell 		u32 to_clear = 0;
1534f931551bSRalph Campbell 
1535f931551bSRalph Campbell 		/*
1536f931551bSRalph Campbell 		 * GPIO_3..5 on IBA6120 Rev2 chips indicate
1537f931551bSRalph Campbell 		 * errors that we need to count.
1538f931551bSRalph Campbell 		 */
1539f931551bSRalph Campbell 		gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
1540f931551bSRalph Campbell 		/* First the error-counter case. */
1541f931551bSRalph Campbell 		if (gpiostatus & GPIO_ERRINTR_MASK) {
1542f931551bSRalph Campbell 			/* want to clear the bits we see asserted. */
1543f931551bSRalph Campbell 			to_clear |= (gpiostatus & GPIO_ERRINTR_MASK);
1544f931551bSRalph Campbell 
1545f931551bSRalph Campbell 			/*
1546f931551bSRalph Campbell 			 * Count appropriately, clear bits out of our copy,
1547f931551bSRalph Campbell 			 * as they have been "handled".
1548f931551bSRalph Campbell 			 */
1549f931551bSRalph Campbell 			if (gpiostatus & (1 << GPIO_RXUVL_BIT))
1550f931551bSRalph Campbell 				dd->cspec->rxfc_unsupvl_errs++;
1551f931551bSRalph Campbell 			if (gpiostatus & (1 << GPIO_OVRUN_BIT))
1552f931551bSRalph Campbell 				dd->cspec->overrun_thresh_errs++;
1553f931551bSRalph Campbell 			if (gpiostatus & (1 << GPIO_LLI_BIT))
1554f931551bSRalph Campbell 				dd->cspec->lli_errs++;
1555f931551bSRalph Campbell 			gpiostatus &= ~GPIO_ERRINTR_MASK;
1556f931551bSRalph Campbell 		}
1557f931551bSRalph Campbell 		if (gpiostatus) {
1558f931551bSRalph Campbell 			/*
1559f931551bSRalph Campbell 			 * Some unexpected bits remain. If they could have
1560f931551bSRalph Campbell 			 * caused the interrupt, complain and clear.
1561f931551bSRalph Campbell 			 * To avoid repetition of this condition, also clear
1562f931551bSRalph Campbell 			 * the mask. It is almost certainly due to error.
1563f931551bSRalph Campbell 			 */
1564f931551bSRalph Campbell 			const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
1565f931551bSRalph Campbell 
1566f931551bSRalph Campbell 			/*
1567f931551bSRalph Campbell 			 * Also check that the chip reflects our shadow,
1568f931551bSRalph Campbell 			 * and report issues.  If they caused the interrupt,
1569f931551bSRalph Campbell 			 * we will suppress it by refreshing from the shadow.
1570f931551bSRalph Campbell 			 */
1571f931551bSRalph Campbell 			if (mask & gpiostatus) {
1572f931551bSRalph Campbell 				to_clear |= (gpiostatus & mask);
1573f931551bSRalph Campbell 				dd->cspec->gpio_mask &= ~(gpiostatus & mask);
1574f931551bSRalph Campbell 				qib_write_kreg(dd, kr_gpio_mask,
1575f931551bSRalph Campbell 					       dd->cspec->gpio_mask);
1576f931551bSRalph Campbell 			}
1577f931551bSRalph Campbell 		}
1578f931551bSRalph Campbell 		if (to_clear)
1579f931551bSRalph Campbell 			qib_write_kreg(dd, kr_gpio_clear, (u64) to_clear);
1580f931551bSRalph Campbell 	}
1581f931551bSRalph Campbell }
1582f931551bSRalph Campbell 
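/*
 * Main interrupt handler for the 6120: read and clear IntStatus, hand any
 * error/GPIO bits to unlikely_6120_intr(), run qib_kreceive() for the
 * kernel receive contexts (tracking LLI CRC errors), wake user contexts
 * with urgent or available data, and notify the PIO-buffer-available path.
 */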
1583f931551bSRalph Campbell static irqreturn_t qib_6120intr(int irq, void *data)
1584f931551bSRalph Campbell {
1585f931551bSRalph Campbell 	struct qib_devdata *dd = data;
1586f931551bSRalph Campbell 	irqreturn_t ret;
1587f931551bSRalph Campbell 	u32 istat, ctxtrbits, rmask, crcs = 0;
1588f931551bSRalph Campbell 	unsigned i;
1589f931551bSRalph Campbell 
1590f931551bSRalph Campbell 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
1591f931551bSRalph Campbell 		/*
1592f931551bSRalph Campbell 		 * This return value is not great, but we do not want the
1593f931551bSRalph Campbell 		 * interrupt core code to remove our interrupt handler
1594f931551bSRalph Campbell 		 * because we don't appear to be handling an interrupt
1595f931551bSRalph Campbell 		 * during a chip reset.
1596f931551bSRalph Campbell 		 */
1597f931551bSRalph Campbell 		ret = IRQ_HANDLED;
1598f931551bSRalph Campbell 		goto bail;
1599f931551bSRalph Campbell 	}
1600f931551bSRalph Campbell 
1601f931551bSRalph Campbell 	istat = qib_read_kreg32(dd, kr_intstatus);
1602f931551bSRalph Campbell 
1603f931551bSRalph Campbell 	if (unlikely(!istat)) {
1604f931551bSRalph Campbell 		ret = IRQ_NONE; /* not our interrupt, or already handled */
1605f931551bSRalph Campbell 		goto bail;
1606f931551bSRalph Campbell 	}
1607f931551bSRalph Campbell 	if (unlikely(istat == -1)) {
1608f931551bSRalph Campbell 		qib_bad_intrstatus(dd);
1609f931551bSRalph Campbell 		/* don't know if it was our interrupt or not */
1610f931551bSRalph Campbell 		ret = IRQ_NONE;
1611f931551bSRalph Campbell 		goto bail;
1612f931551bSRalph Campbell 	}
1613f931551bSRalph Campbell 
1614f931551bSRalph Campbell 	this_cpu_inc(*dd->int_counter);
1615f931551bSRalph Campbell 
1616f931551bSRalph Campbell 	if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
16171ed88dd7SMike Marciniszyn 			      QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
1618f931551bSRalph Campbell 		unlikely_6120_intr(dd, istat);
1619f931551bSRalph Campbell 
1620f931551bSRalph Campbell 	/*
1621f931551bSRalph Campbell 	 * Clear the interrupt bits we found set, relatively early, so we
1622f931551bSRalph Campbell 	 * "know" the chip will have seen this by the time we process
1623f931551bSRalph Campbell 	 * the queue, and will re-interrupt if necessary.  The processor
1624f931551bSRalph Campbell 	 * itself won't take the interrupt again until we return.
1625f931551bSRalph Campbell 	 */
1626f931551bSRalph Campbell 	qib_write_kreg(dd, kr_intclear, istat);
1627f931551bSRalph Campbell 
1628f931551bSRalph Campbell 	/*
1629f931551bSRalph Campbell 	 * Handle kernel receive queues before checking for pio buffers
1630f931551bSRalph Campbell 	 * available since receives can overflow; piobuf waiters can afford
1631f931551bSRalph Campbell 	 * a few extra cycles, since they were waiting anyway.
1632f931551bSRalph Campbell 	 */
1633f931551bSRalph Campbell 	ctxtrbits = istat &
1634f931551bSRalph Campbell 		((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1635f931551bSRalph Campbell 		 (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
1636f931551bSRalph Campbell 	if (ctxtrbits) {
1637f931551bSRalph Campbell 		rmask = (1U << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1638f931551bSRalph Campbell 			(1U << QLOGIC_IB_I_RCVURG_SHIFT);
1639f931551bSRalph Campbell 		for (i = 0; i < dd->first_user_ctxt; i++) {
1640f931551bSRalph Campbell 			if (ctxtrbits & rmask) {
1641f931551bSRalph Campbell 				ctxtrbits &= ~rmask;
1642f931551bSRalph Campbell 				crcs += qib_kreceive(dd->rcd[i],
1643f931551bSRalph Campbell 						     &dd->cspec->lli_counter,
1644f931551bSRalph Campbell 						     NULL);
1645f931551bSRalph Campbell 			}
1646f931551bSRalph Campbell 			rmask <<= 1;
1647f931551bSRalph Campbell 		}
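		/*
		 * crcs is the CRC (local link integrity) error count
		 * accumulated by qib_kreceive() above; fold it into the
		 * running counter and, once that exceeds lli_thresh
		 * (set in qib_6120_bringup_serdes()), count one LLI
		 * error and restart from zero.
		 */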
1648f931551bSRalph Campbell 		if (crcs) {
1649f931551bSRalph Campbell 			u32 cntr = dd->cspec->lli_counter;
1650f931551bSRalph Campbell 
1651f931551bSRalph Campbell 			cntr += crcs;
1652f931551bSRalph Campbell 			if (cntr) {
1653da12c1f6SMike Marciniszyn 				if (cntr > dd->cspec->lli_thresh) {
1654f931551bSRalph Campbell 					dd->cspec->lli_counter = 0;
1655f931551bSRalph Campbell 					dd->cspec->lli_errs++;
1656f931551bSRalph Campbell 				} else
1657f931551bSRalph Campbell 					dd->cspec->lli_counter += cntr;
1658f931551bSRalph Campbell 			}
1659f931551bSRalph Campbell 		}
1660f931551bSRalph Campbell 
1661f931551bSRalph Campbell 
1662f931551bSRalph Campbell 		if (ctxtrbits) {
1663f931551bSRalph Campbell 			ctxtrbits =
1664f931551bSRalph Campbell 				(ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1665f931551bSRalph Campbell 				(ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
1666f931551bSRalph Campbell 			qib_handle_urcv(dd, ctxtrbits);
1667f931551bSRalph Campbell 		}
1668f931551bSRalph Campbell 	}
1669f931551bSRalph Campbell 
1670f931551bSRalph Campbell 	if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
1671f931551bSRalph Campbell 		qib_ib_piobufavail(dd);
1672f931551bSRalph Campbell 
1673f931551bSRalph Campbell 	ret = IRQ_HANDLED;
1674f931551bSRalph Campbell bail:
1675f931551bSRalph Campbell 	return ret;
1676f931551bSRalph Campbell }
1677f931551bSRalph Campbell 
1678f931551bSRalph Campbell /*
1679f931551bSRalph Campbell  * Set up our chip-specific interrupt handler
1680f931551bSRalph Campbell  * The interrupt type has already been setup, so
1681f931551bSRalph Campbell  * we just need to do the registration and error checking.
1682f931551bSRalph Campbell  */
1683f931551bSRalph Campbell static void qib_setup_6120_interrupt(struct qib_devdata *dd)
1684f931551bSRalph Campbell {
1685f931551bSRalph Campbell 	int ret;
1686f931551bSRalph Campbell 
1687f931551bSRalph Campbell 	/*
1688c4bc6156SMichael J. Ruhl 	 * If the chip supports added error indication via GPIO pins,
1689c4bc6156SMichael J. Ruhl 	 * enable interrupts on those bits so the interrupt routine
1690f931551bSRalph Campbell 	 * can count the events. Also set flag so interrupt routine
1691f931551bSRalph Campbell 	 * can know they are expected.
1692f931551bSRalph Campbell 	 */
1693f931551bSRalph Campbell 	if (SYM_FIELD(dd->revision, Revision_R,
1694f931551bSRalph Campbell 		      ChipRevMinor) > 1) {
1695f931551bSRalph Campbell 		/* Rev2+ reports extra errors via internal GPIO pins */
1696f931551bSRalph Campbell 		dd->cspec->gpio_mask |= GPIO_ERRINTR_MASK;
1697f931551bSRalph Campbell 		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
1698f931551bSRalph Campbell 	}
1699f931551bSRalph Campbell 
1700f931551bSRalph Campbell 	ret = pci_request_irq(dd->pcidev, 0, qib_6120intr, NULL, dd,
1701f931551bSRalph Campbell 			      QIB_DRV_NAME);
1702f931551bSRalph Campbell 	if (ret)
1703c4bc6156SMichael J. Ruhl 		qib_dev_err(dd,
1704c4bc6156SMichael J. Ruhl 			    "Couldn't setup interrupt (irq=%d): %d\n",
1705f931551bSRalph Campbell 			    pci_irq_vector(dd->pcidev, 0), ret);
17067fac3301SMike Marciniszyn }
17077fac3301SMike Marciniszyn 
1708c4bc6156SMichael J. Ruhl /**
1709f931551bSRalph Campbell  * pe_boardname - fill in the board name
1710f931551bSRalph Campbell  * @dd: the qlogic_ib device
1711f931551bSRalph Campbell  *
1712f931551bSRalph Campbell  * info is based on the board revision register
1713f931551bSRalph Campbell  */
1714f931551bSRalph Campbell static void pe_boardname(struct qib_devdata *dd)
1715f931551bSRalph Campbell {
1716f931551bSRalph Campbell 	u32 boardid;
1717f931551bSRalph Campbell 
1718f931551bSRalph Campbell 	boardid = SYM_FIELD(dd->revision, Revision,
17193b716933SKamenee Arumugam 			    BoardID);
1720f931551bSRalph Campbell 
1721f931551bSRalph Campbell 	switch (boardid) {
1722f931551bSRalph Campbell 	case 2:
1723f931551bSRalph Campbell 		dd->boardname = "InfiniPath_QLE7140";
1724f931551bSRalph Campbell 		break;
1725f931551bSRalph Campbell 	default:
17263b716933SKamenee Arumugam 		qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid);
1727f931551bSRalph Campbell 		dd->boardname = "Unknown_InfiniPath_6120";
1728f931551bSRalph Campbell 		break;
1729f931551bSRalph Campbell 	}
17303b716933SKamenee Arumugam 
1731f931551bSRalph Campbell 	if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
1732f931551bSRalph Campbell 		qib_dev_err(dd,
1733f931551bSRalph Campbell 			    "Unsupported InfiniPath hardware revision %u.%u!\n",
1734f931551bSRalph Campbell 			    dd->majrev, dd->minrev);
17357fac3301SMike Marciniszyn 
17367fac3301SMike Marciniszyn 	snprintf(dd->boardversion, sizeof(dd->boardversion),
17377fac3301SMike Marciniszyn 		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
1738f931551bSRalph Campbell 		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
1739f931551bSRalph Campbell 		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
1740f931551bSRalph Campbell 		 dd->majrev, dd->minrev,
1741f931551bSRalph Campbell 		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
17423b716933SKamenee Arumugam }
1743f931551bSRalph Campbell 
17443b716933SKamenee Arumugam /*
1745f931551bSRalph Campbell  * This routine sleeps, so it can only be called from user context, not
1746f931551bSRalph Campbell  * from interrupt context.  If we need interrupt context, we can split
1747f931551bSRalph Campbell  * it into two routines.
1748f931551bSRalph Campbell  */
1749f931551bSRalph Campbell static int qib_6120_setup_reset(struct qib_devdata *dd)
1750f931551bSRalph Campbell {
1751f931551bSRalph Campbell 	u64 val;
1752f931551bSRalph Campbell 	int i;
1753f931551bSRalph Campbell 	int ret;
1754f931551bSRalph Campbell 	u16 cmdval;
1755f931551bSRalph Campbell 	u8 int_line, clinesz;
1756f931551bSRalph Campbell 
1757f931551bSRalph Campbell 	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
1758f931551bSRalph Campbell 
1759f931551bSRalph Campbell 	/* Use ERROR so it shows up in logs, etc. */
1760f931551bSRalph Campbell 	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
1761f931551bSRalph Campbell 
1762f931551bSRalph Campbell 	/* no interrupts till re-initted */
1763f931551bSRalph Campbell 	qib_6120_set_intr_state(dd, 0);
1764f931551bSRalph Campbell 
1765f931551bSRalph Campbell 	dd->cspec->ibdeltainprog = 0;
1766f931551bSRalph Campbell 	dd->cspec->ibsymdelta = 0;
1767f931551bSRalph Campbell 	dd->cspec->iblnkerrdelta = 0;
1768f931551bSRalph Campbell 
1769f931551bSRalph Campbell 	/*
1770f931551bSRalph Campbell 	 * Keep chip from being accessed until we are ready.  Use
1771f931551bSRalph Campbell 	 * writeq() directly, to allow the write even though QIB_PRESENT
1772f931551bSRalph Campbell 	 * isn't set.
1773f931551bSRalph Campbell 	 */
1774f931551bSRalph Campbell 	dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
1775e9c54999SLucas De Marchi 	/* so we check interrupts work again */
1776f931551bSRalph Campbell 	dd->z_int_counter = qib_int_counter(dd);
1777f931551bSRalph Campbell 	val = dd->control | QLOGIC_IB_C_RESET;
17781ed88dd7SMike Marciniszyn 	writeq(val, &dd->kregbase[kr_control]);
17791ed88dd7SMike Marciniszyn 	mb(); /* prevent compiler re-ordering around actual reset */
1780f931551bSRalph Campbell 
1781f931551bSRalph Campbell 	for (i = 1; i <= 5; i++) {
1782f931551bSRalph Campbell 		/*
1783f931551bSRalph Campbell 		 * Allow MBIST, etc. to complete; longer on each retry.
1784f931551bSRalph Campbell 		 * We sometimes get machine checks from bus timeout if no
1785f931551bSRalph Campbell 		 * response, so for now, make it *really* long.
1786f931551bSRalph Campbell 		 */
1787f931551bSRalph Campbell 		msleep(1000 + (1 + i) * 2000);
1788f931551bSRalph Campbell 
1789f931551bSRalph Campbell 		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
1790f931551bSRalph Campbell 
1791f931551bSRalph Campbell 		/*
1792f931551bSRalph Campbell 		 * Use readq directly, so we don't need to mark it as PRESENT
1793f931551bSRalph Campbell 		 * until we get a successful indication that all is well.
1794f931551bSRalph Campbell 		 */
1795f931551bSRalph Campbell 		val = readq(&dd->kregbase[kr_revision]);
1796f931551bSRalph Campbell 		if (val == dd->revision) {
1797f931551bSRalph Campbell 			dd->flags |= QIB_PRESENT; /* it's back */
1798f931551bSRalph Campbell 			ret = qib_reinit_intr(dd);
1799f931551bSRalph Campbell 			goto bail;
1800f931551bSRalph Campbell 		}
1801f931551bSRalph Campbell 	}
1802f931551bSRalph Campbell 	ret = 0; /* failed */
1803f931551bSRalph Campbell 
1804f931551bSRalph Campbell bail:
1805f931551bSRalph Campbell 	if (ret) {
1806f931551bSRalph Campbell 		if (qib_pcie_params(dd, dd->lbus_width, NULL))
1807f931551bSRalph Campbell 			qib_dev_err(dd,
1808f931551bSRalph Campbell 				"Reset failed to setup PCIe or interrupts; continuing anyway\n");
1809581d01aaSMichael J. Ruhl 		/* clear the reset error, init error/hwerror mask */
18107fac3301SMike Marciniszyn 		qib_6120_init_hwerrors(dd);
18117fac3301SMike Marciniszyn 		/* for Rev2 error interrupts; nop for rev 1 */
1812f931551bSRalph Campbell 		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
1815f931551bSRalph Campbell 	}
1816f931551bSRalph Campbell 	return ret;
1817f931551bSRalph Campbell }
1818f931551bSRalph Campbell 
1819f931551bSRalph Campbell /**
1820f931551bSRalph Campbell  * qib_6120_put_tid - write a TID in chip
1821f931551bSRalph Campbell  * @dd: the qlogic_ib device
1822f931551bSRalph Campbell  * @tidptr: pointer to the expected TID (in chip) to update
1823f931551bSRalph Campbell  * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
1824f931551bSRalph Campbell  * for expected
1825f931551bSRalph Campbell  * @pa: physical address of in memory buffer; tidinvalid if freeing
182671f964c3SLee Jones  *
1827f931551bSRalph Campbell  * This exists as a separate routine to allow for special locking etc.
1828f931551bSRalph Campbell  * It's used for both the full cleanup on exit, as well as the normal
1829f931551bSRalph Campbell  * setup and teardown.
1830f931551bSRalph Campbell  */
1831f931551bSRalph Campbell static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
1832f931551bSRalph Campbell 			     u32 type, unsigned long pa)
1833f931551bSRalph Campbell {
1834f931551bSRalph Campbell 	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
1835f931551bSRalph Campbell 	unsigned long flags;
1836f931551bSRalph Campbell 	int tidx;
1837f931551bSRalph Campbell 	spinlock_t *tidlockp; /* select appropriate spinlock */
1838f931551bSRalph Campbell 
1839f931551bSRalph Campbell 	if (!dd->kregbase)
1840f931551bSRalph Campbell 		return;
1841f931551bSRalph Campbell 
1842f931551bSRalph Campbell 	if (pa != dd->tidinvalid) {
1843f931551bSRalph Campbell 		if (pa & ((1U << 11) - 1)) {
1844f931551bSRalph Campbell 			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
1845f931551bSRalph Campbell 				    pa);
1846f931551bSRalph Campbell 			return;
1847f931551bSRalph Campbell 		}
1848f931551bSRalph Campbell 		pa >>= 11;
1849f931551bSRalph Campbell 		if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
1850f931551bSRalph Campbell 			qib_dev_err(dd,
1851f931551bSRalph Campbell 				"Physical page address 0x%lx larger than supported\n",
1852f931551bSRalph Campbell 				pa);
18537fac3301SMike Marciniszyn 			return;
18547fac3301SMike Marciniszyn 		}
18557fac3301SMike Marciniszyn 
1856f931551bSRalph Campbell 		if (type == RCVHQ_RCV_TYPE_EAGER)
1857f931551bSRalph Campbell 			pa |= dd->tidtemplate;
1858f931551bSRalph Campbell 		else /* for now, always full 4KB page */
1859f931551bSRalph Campbell 			pa |= 2 << 29;
1860f931551bSRalph Campbell 	}
1861f931551bSRalph Campbell 
1862f931551bSRalph Campbell 	/*
1863f931551bSRalph Campbell 	 * Avoid chip issue by writing the scratch register
1864f931551bSRalph Campbell 	 * before and after the TID, and with an io write barrier.
1865f931551bSRalph Campbell 	 * We use a spinlock around the writes, so they can't intermix
1866f931551bSRalph Campbell 	 * with other TID (eager or expected) writes (the chip problem
1867f931551bSRalph Campbell 	 * is triggered by back to back TID writes). Unfortunately, this
1868f931551bSRalph Campbell 	 * call can be done from interrupt level for the ctxt 0 eager TIDs,
1869f931551bSRalph Campbell 	 * so we have to use irqsave locks.
1870f931551bSRalph Campbell 	 */
1871f931551bSRalph Campbell 	/*
1872f931551bSRalph Campbell 	 * Assumes tidptr always > egrtidbase
1873f931551bSRalph Campbell 	 * if type == RCVHQ_RCV_TYPE_EAGER.
1874f931551bSRalph Campbell 	 */
1875f931551bSRalph Campbell 	tidx = tidptr - dd->egrtidbase;
1876f931551bSRalph Campbell 
1877f931551bSRalph Campbell 	tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->rcvhdrcnt)
1878f931551bSRalph Campbell 		? &dd->cspec->kernel_tid_lock : &dd->cspec->user_tid_lock;
1879f931551bSRalph Campbell 	spin_lock_irqsave(tidlockp, flags);
1880f931551bSRalph Campbell 	qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
1881f931551bSRalph Campbell 	writel(pa, tidp32);
1882f931551bSRalph Campbell 	qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
1883f931551bSRalph Campbell 	spin_unlock_irqrestore(tidlockp, flags);
1884f931551bSRalph Campbell }
1885f931551bSRalph Campbell 
1886f931551bSRalph Campbell /**
1887f931551bSRalph Campbell  * qib_6120_put_tid_2 - write a TID in chip, Revision 2 or higher
1888f931551bSRalph Campbell  * @dd: the qlogic_ib device
1889f931551bSRalph Campbell  * @tidptr: pointer to the expected TID (in chip) to update
1890f931551bSRalph Campbell  * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
1891f931551bSRalph Campbell  * for expected
1892f931551bSRalph Campbell  * @pa: physical address of in memory buffer; tidinvalid if freeing
189371f964c3SLee Jones  *
1894f931551bSRalph Campbell  * This exists as a separate routine to allow for selection of the
1895f931551bSRalph Campbell  * appropriate "flavor". The static calls in cleanup just use the
1896f931551bSRalph Campbell  * revision-agnostic form, as they are not performance critical.
1897f931551bSRalph Campbell  */
1898f931551bSRalph Campbell static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
1899f931551bSRalph Campbell 			       u32 type, unsigned long pa)
1900f931551bSRalph Campbell {
1901f931551bSRalph Campbell 	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
1902f931551bSRalph Campbell 
1903f931551bSRalph Campbell 	if (!dd->kregbase)
1904f931551bSRalph Campbell 		return;
1905f931551bSRalph Campbell 
1906f931551bSRalph Campbell 	if (pa != dd->tidinvalid) {
1907f931551bSRalph Campbell 		if (pa & ((1U << 11) - 1)) {
1908f931551bSRalph Campbell 			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
1909f931551bSRalph Campbell 				    pa);
1910f931551bSRalph Campbell 			return;
1911f931551bSRalph Campbell 		}
1912f931551bSRalph Campbell 		pa >>= 11;
1913f931551bSRalph Campbell 		if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
1914f931551bSRalph Campbell 			qib_dev_err(dd,
1915f931551bSRalph Campbell 				"Physical page address 0x%lx larger than supported\n",
1916f931551bSRalph Campbell 				pa);
19177fac3301SMike Marciniszyn 			return;
19187fac3301SMike Marciniszyn 		}
19197fac3301SMike Marciniszyn 
1920f931551bSRalph Campbell 		if (type == RCVHQ_RCV_TYPE_EAGER)
1921f931551bSRalph Campbell 			pa |= dd->tidtemplate;
1922f931551bSRalph Campbell 		else /* for now, always full 4KB page */
1923f931551bSRalph Campbell 			pa |= 2 << 29;
1924f931551bSRalph Campbell 	}
1925f931551bSRalph Campbell 	writel(pa, tidp32);
1926f931551bSRalph Campbell }
1927f931551bSRalph Campbell 
1928f931551bSRalph Campbell 
1929f931551bSRalph Campbell /**
1930f931551bSRalph Campbell  * qib_6120_clear_tids - clear all TID entries for a context, expected and eager
1931f931551bSRalph Campbell  * @dd: the qlogic_ib device
1932f931551bSRalph Campbell  * @rcd: the context
1933f931551bSRalph Campbell  *
1934f931551bSRalph Campbell  * clear all TID entries for a context, expected and eager.
193571f964c3SLee Jones  * Used from qib_close().  On this chip, TIDs are only 32 bits,
1936f931551bSRalph Campbell  * not 64, but they are still on 64 bit boundaries, so tidbase
1937f931551bSRalph Campbell  * is declared as u64 * for the pointer math, even though we write 32 bits.
1938f931551bSRalph Campbell  */
1939f931551bSRalph Campbell static void qib_6120_clear_tids(struct qib_devdata *dd,
1940f931551bSRalph Campbell 				struct qib_ctxtdata *rcd)
1941f931551bSRalph Campbell {
1942f931551bSRalph Campbell 	u64 __iomem *tidbase;
1943f931551bSRalph Campbell 	unsigned long tidinv;
1944f931551bSRalph Campbell 	u32 ctxt;
1945f931551bSRalph Campbell 	int i;
1946f931551bSRalph Campbell 
1947f931551bSRalph Campbell 	if (!dd->kregbase || !rcd)
1948f931551bSRalph Campbell 		return;
1949f931551bSRalph Campbell 
1950f931551bSRalph Campbell 	ctxt = rcd->ctxt;
1951f931551bSRalph Campbell 
1952f931551bSRalph Campbell 	tidinv = dd->tidinvalid;
1953f931551bSRalph Campbell 	tidbase = (u64 __iomem *)
1954f931551bSRalph Campbell 		((char __iomem *)(dd->kregbase) +
1955f931551bSRalph Campbell 		 dd->rcvtidbase +
1956f931551bSRalph Campbell 		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
1957f931551bSRalph Campbell 
1958f931551bSRalph Campbell 	for (i = 0; i < dd->rcvtidcnt; i++)
1959f931551bSRalph Campbell 		/* use func pointer because could be one of two funcs */
1960f931551bSRalph Campbell 		dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
1961f931551bSRalph Campbell 				  tidinv);
1962f931551bSRalph Campbell 
1963f931551bSRalph Campbell 	tidbase = (u64 __iomem *)
1964f931551bSRalph Campbell 		((char __iomem *)(dd->kregbase) +
1965f931551bSRalph Campbell 		 dd->rcvegrbase +
1966f931551bSRalph Campbell 		 rcd->rcvegr_tid_base * sizeof(*tidbase));
1967f931551bSRalph Campbell 
1968f931551bSRalph Campbell 	for (i = 0; i < rcd->rcvegrcnt; i++)
1969f931551bSRalph Campbell 		/* use func pointer because could be one of two funcs */
1970f931551bSRalph Campbell 		dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
1971f931551bSRalph Campbell 				  tidinv);
1972f931551bSRalph Campbell }
1973f931551bSRalph Campbell 
1974f931551bSRalph Campbell /**
1975f931551bSRalph Campbell  * qib_6120_tidtemplate - setup constants for TID updates
1976f931551bSRalph Campbell  * @dd: the qlogic_ib device
1977f931551bSRalph Campbell  *
1978f931551bSRalph Campbell  * We set up values that we use a lot, to avoid recalculating them each time
1979f931551bSRalph Campbell  */
1980f931551bSRalph Campbell static void qib_6120_tidtemplate(struct qib_devdata *dd)
1981f931551bSRalph Campbell {
1982f931551bSRalph Campbell 	u32 egrsize = dd->rcvegrbufsize;
1983f931551bSRalph Campbell 
1984f931551bSRalph Campbell 	/*
1985f931551bSRalph Campbell 	 * For now, we always allocate 4KB buffers (at init) so we can
1986f931551bSRalph Campbell 	 * receive max size packets.  We may want a module parameter to
1987f931551bSRalph Campbell 	 * specify 2KB or 4KB and/or make it per ctxt instead of per device
1988f931551bSRalph Campbell 	 * for those who want to reduce memory footprint.  Note that the
1989f931551bSRalph Campbell 	 * rcvhdrentsize size must be large enough to hold the largest
1990f931551bSRalph Campbell 	 * IB header (currently 96 bytes) that we expect to handle (plus of
1991f931551bSRalph Campbell 	 * course the 2 dwords of RHF).
1992f931551bSRalph Campbell 	 */
1993f931551bSRalph Campbell 	if (egrsize == 2048)
1994f931551bSRalph Campbell 		dd->tidtemplate = 1U << 29;
1995f931551bSRalph Campbell 	else if (egrsize == 4096)
1996f931551bSRalph Campbell 		dd->tidtemplate = 2U << 29;
1997f931551bSRalph Campbell 	dd->tidinvalid = 0;
1998f931551bSRalph Campbell }
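
/*
 * Worked example (assuming the TID layout implied above): the 32-bit TID
 * word written by qib_6120_put_tid() carries the buffer-size code from
 * tidtemplate in bits 30:29 (1 = 2KB, 2 = 4KB) and the 2KB-aligned
 * physical address, shifted right by 11, in the low bits.  For a 4KB
 * eager buffer at physical address 0x1234800:
 *	pa >> 11    = 0x2469
 *	tidtemplate = 2 << 29 = 0x40000000
 *	TID word    = 0x40002469
 */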
1999f931551bSRalph Campbell 
2000f931551bSRalph Campbell int __attribute__((weak)) qib_unordered_wc(void)
2001f931551bSRalph Campbell {
2002f931551bSRalph Campbell 	return 0;
2003f931551bSRalph Campbell }
2004f931551bSRalph Campbell 
2005f931551bSRalph Campbell /**
2006f931551bSRalph Campbell  * qib_6120_get_base_info - set chip-specific flags for user code
2007f931551bSRalph Campbell  * @rcd: the qlogic_ib ctxt
2008f931551bSRalph Campbell  * @kinfo: qib_base_info pointer
2009f931551bSRalph Campbell  *
2010f931551bSRalph Campbell  * We set the PCIE flag because the lower bandwidth on PCIe vs
201171f964c3SLee Jones  * HyperTransport can affect some user packet algorithms.
2012f931551bSRalph Campbell  */
2013f931551bSRalph Campbell static int qib_6120_get_base_info(struct qib_ctxtdata *rcd,
2014f931551bSRalph Campbell 				  struct qib_base_info *kinfo)
2015f931551bSRalph Campbell {
2016f931551bSRalph Campbell 	if (qib_unordered_wc())
2017f931551bSRalph Campbell 		kinfo->spi_runtime_flags |= QIB_RUNTIME_FORCE_WC_ORDER;
2018f931551bSRalph Campbell 
2019f931551bSRalph Campbell 	kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
2020f931551bSRalph Campbell 		QIB_RUNTIME_FORCE_PIOAVAIL | QIB_RUNTIME_PIO_REGSWAPPED;
2021f931551bSRalph Campbell 	return 0;
2022f931551bSRalph Campbell }
2023f931551bSRalph Campbell 
2024f931551bSRalph Campbell 
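/*
 * Return a pointer just past the two-dword receive header flags (RHF)
 * that rhf_addr points at; the message header starts there.
 */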
2025f931551bSRalph Campbell static struct qib_message_header *
2026f931551bSRalph Campbell qib_6120_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
2027f931551bSRalph Campbell {
2028f931551bSRalph Campbell 	return (struct qib_message_header *)
2029f931551bSRalph Campbell 		&rhf_addr[sizeof(u64) / sizeof(u32)];
2030f931551bSRalph Campbell }
2031f931551bSRalph Campbell 
2032f931551bSRalph Campbell static void qib_6120_config_ctxts(struct qib_devdata *dd)
2033f931551bSRalph Campbell {
2034f931551bSRalph Campbell 	dd->ctxtcnt = qib_read_kreg32(dd, kr_portcnt);
2035f931551bSRalph Campbell 	if (qib_n_krcv_queues > 1) {
2036f931551bSRalph Campbell 		dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
2037f931551bSRalph Campbell 		if (dd->first_user_ctxt > dd->ctxtcnt)
2038f931551bSRalph Campbell 			dd->first_user_ctxt = dd->ctxtcnt;
2039f931551bSRalph Campbell 		dd->qpn_mask = dd->first_user_ctxt <= 2 ? 2 : 6;
2040f931551bSRalph Campbell 	} else
2041f931551bSRalph Campbell 		dd->first_user_ctxt = dd->num_pports;
2042f931551bSRalph Campbell 	dd->n_krcv_queues = dd->first_user_ctxt;
2043f931551bSRalph Campbell }
2044f931551bSRalph Campbell 
2045f931551bSRalph Campbell static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
2046f931551bSRalph Campbell 				    u32 updegr, u32 egrhd, u32 npkts)
2047f931551bSRalph Campbell {
2048f931551bSRalph Campbell 	if (updegr)
204919ede2e4SMike Marciniszyn 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
2050f931551bSRalph Campbell 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
2051f931551bSRalph Campbell }
2052f931551bSRalph Campbell 
2053eddfb675SRam Vepa static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
2054f931551bSRalph Campbell {
2055f931551bSRalph Campbell 	u32 head, tail;
2056f931551bSRalph Campbell 
2057f931551bSRalph Campbell 	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
2058f931551bSRalph Campbell 	if (rcd->rcvhdrtail_kvaddr)
2059f931551bSRalph Campbell 		tail = qib_get_rcvhdrtail(rcd);
2060f931551bSRalph Campbell 	else
2061f931551bSRalph Campbell 		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
2062f931551bSRalph Campbell 	return head == tail;
2063f931551bSRalph Campbell }
2064f931551bSRalph Campbell 
2065f931551bSRalph Campbell /*
2066f931551bSRalph Campbell  * Used when we close any ctxt, for DMA already in flight
2067f931551bSRalph Campbell  * at close.  Can't be done until we know hdrq size, so not
2068f931551bSRalph Campbell  * early in chip init.
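 * Allocated with GFP_ATOMIC because this can be called from
 * rcvctrl_6120_mod() while rcvmod_lock is held.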
2069f931551bSRalph Campbell  */
2070f931551bSRalph Campbell static void alloc_dummy_hdrq(struct qib_devdata *dd)
2071f931551bSRalph Campbell {
2072f931551bSRalph Campbell 	dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
2073f931551bSRalph Campbell 					dd->rcd[0]->rcvhdrq_size,
2074f931551bSRalph Campbell 					&dd->cspec->dummy_hdrq_phys,
2075f931551bSRalph Campbell 					GFP_ATOMIC);
2076f931551bSRalph Campbell 	if (!dd->cspec->dummy_hdrq) {
2077f931551bSRalph Campbell 		qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
2078*2fce26a1SChristoph Hellwig 		/* fallback to just 0'ing */
2079f931551bSRalph Campbell 		dd->cspec->dummy_hdrq_phys = 0UL;
2080f931551bSRalph Campbell 	}
2081f931551bSRalph Campbell }
2082f931551bSRalph Campbell 
2083f931551bSRalph Campbell /*
2084f931551bSRalph Campbell  * Modify the RCVCTRL register in a chip-specific way. This
2085f931551bSRalph Campbell  * is a function because bit positions and (future) register
2086f931551bSRalph Campbell  * locations are chip-specific, but the needed operations are
2087f931551bSRalph Campbell  * generic. <op> is a bit-mask because we often want to
2088f931551bSRalph Campbell  * do multiple modifications.
2089f931551bSRalph Campbell  */
2090f931551bSRalph Campbell static void rcvctrl_6120_mod(struct qib_pportdata *ppd, unsigned int op,
2091f931551bSRalph Campbell 			     int ctxt)
2092f931551bSRalph Campbell {
2093f931551bSRalph Campbell 	struct qib_devdata *dd = ppd->dd;
2094f931551bSRalph Campbell 	u64 mask, val;
2095f931551bSRalph Campbell 	unsigned long flags;
2096f931551bSRalph Campbell 
2097f931551bSRalph Campbell 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2098f931551bSRalph Campbell 
2099f931551bSRalph Campbell 	if (op & QIB_RCVCTRL_TAILUPD_ENB)
2100f931551bSRalph Campbell 		dd->rcvctrl |= (1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
2101f931551bSRalph Campbell 	if (op & QIB_RCVCTRL_TAILUPD_DIS)
2102f931551bSRalph Campbell 		dd->rcvctrl &= ~(1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
2103f931551bSRalph Campbell 	if (op & QIB_RCVCTRL_PKEY_ENB)
2104f931551bSRalph Campbell 		dd->rcvctrl &= ~(1ULL << IBA6120_R_PKEY_DIS_SHIFT);
2105f931551bSRalph Campbell 	if (op & QIB_RCVCTRL_PKEY_DIS)
2106f931551bSRalph Campbell 		dd->rcvctrl |= (1ULL << IBA6120_R_PKEY_DIS_SHIFT);
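	/* ctxt < 0 means apply the operation to every context */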
2107f931551bSRalph Campbell 	if (ctxt < 0)
2108f931551bSRalph Campbell 		mask = (1ULL << dd->ctxtcnt) - 1;
2109f931551bSRalph Campbell 	else
2110f931551bSRalph Campbell 		mask = (1ULL << ctxt);
2111f931551bSRalph Campbell 	if (op & QIB_RCVCTRL_CTXT_ENB) {
2112f931551bSRalph Campbell 		/* always done for specific ctxt */
2113f931551bSRalph Campbell 		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
2114f931551bSRalph Campbell 		if (!(dd->flags & QIB_NODMA_RTAIL))
2115f931551bSRalph Campbell 			dd->rcvctrl |= 1ULL << QLOGIC_IB_R_TAILUPD_SHIFT;
2116f931551bSRalph Campbell 		/* Write these registers before the context is enabled. */
2117f931551bSRalph Campbell 		qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
2118f931551bSRalph Campbell 			dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
2119f931551bSRalph Campbell 		qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
2120f931551bSRalph Campbell 			dd->rcd[ctxt]->rcvhdrq_phys);
2121f931551bSRalph Campbell 
2122f931551bSRalph Campbell 		if (ctxt == 0 && !dd->cspec->dummy_hdrq)
2123f931551bSRalph Campbell 			alloc_dummy_hdrq(dd);
2124f931551bSRalph Campbell 	}
2125f931551bSRalph Campbell 	if (op & QIB_RCVCTRL_CTXT_DIS)
2126f931551bSRalph Campbell 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
2127f931551bSRalph Campbell 	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
2128f931551bSRalph Campbell 		dd->rcvctrl |= (mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
2129f931551bSRalph Campbell 	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
2130f931551bSRalph Campbell 		dd->rcvctrl &= ~(mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
2131f931551bSRalph Campbell 	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
2132f931551bSRalph Campbell 	if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
2133f931551bSRalph Campbell 		/* arm rcv interrupt */
2134f931551bSRalph Campbell 		val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
2135f931551bSRalph Campbell 			dd->rhdrhead_intr_off;
2136f931551bSRalph Campbell 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2137f931551bSRalph Campbell 	}
2138f931551bSRalph Campbell 	if (op & QIB_RCVCTRL_CTXT_ENB) {
2139f931551bSRalph Campbell 		/*
2140f931551bSRalph Campbell 		 * Init the context registers also; if we were
2141f931551bSRalph Campbell 		 * disabled, tail and head should both be zero
2142f931551bSRalph Campbell 		 * already from the enable, but since we don't
2143f931551bSRalph Campbell 		 * know, we have to do it explicitly.
2144f931551bSRalph Campbell 		 */
2145f931551bSRalph Campbell 		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
214625985edcSLucas De Marchi 		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
2147f931551bSRalph Campbell 
2148f931551bSRalph Campbell 		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
2149f931551bSRalph Campbell 		dd->rcd[ctxt]->head = val;
2150f931551bSRalph Campbell 		/* If kctxt, interrupt on next receive. */
2151f931551bSRalph Campbell 		if (ctxt < dd->first_user_ctxt)
2152f931551bSRalph Campbell 			val |= dd->rhdrhead_intr_off;
2153f931551bSRalph Campbell 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2154f931551bSRalph Campbell 	}
2155f931551bSRalph Campbell 	if (op & QIB_RCVCTRL_CTXT_DIS) {
2156f931551bSRalph Campbell 		/*
2157f931551bSRalph Campbell 		 * Be paranoid, and never write 0's to these, just use an
2158f931551bSRalph Campbell 		 * unused page.  Of course,
2159f931551bSRalph Campbell 		 * rcvhdraddr points to a large chunk of memory, so this
2160f931551bSRalph Campbell 		 * could still trash things, but at least it won't trash
2161f931551bSRalph Campbell 		 * page 0, and by disabling the ctxt, it should stop "soon",
2162f931551bSRalph Campbell 		 * even if a packet or two is already in flight after we
2163f931551bSRalph Campbell 		 * disabled the ctxt.  Only 6120 has this issue.
2164f931551bSRalph Campbell 		 */
2165f931551bSRalph Campbell 		if (ctxt >= 0) {
2166f931551bSRalph Campbell 			qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
2167f931551bSRalph Campbell 					    dd->cspec->dummy_hdrq_phys);
2168f931551bSRalph Campbell 			qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
2169f931551bSRalph Campbell 					    dd->cspec->dummy_hdrq_phys);
2170f931551bSRalph Campbell 		} else {
2171f931551bSRalph Campbell 			unsigned i;
2172f931551bSRalph Campbell 
2173f931551bSRalph Campbell 			for (i = 0; i < dd->cfgctxts; i++) {
2174f931551bSRalph Campbell 				qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
2175f931551bSRalph Campbell 					    i, dd->cspec->dummy_hdrq_phys);
2176f931551bSRalph Campbell 				qib_write_kreg_ctxt(dd, kr_rcvhdraddr,
2177f931551bSRalph Campbell 					    i, dd->cspec->dummy_hdrq_phys);
2178f931551bSRalph Campbell 			}
2179f931551bSRalph Campbell 		}
2180f931551bSRalph Campbell 	}
2181f931551bSRalph Campbell 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2182f931551bSRalph Campbell }
2183f931551bSRalph Campbell 
2184f931551bSRalph Campbell /*
2185f931551bSRalph Campbell  * Modify the SENDCTRL register in a chip-specific way. This
2186f931551bSRalph Campbell  * is a function because there may be multiple such registers with
2187f931551bSRalph Campbell  * slightly different layouts. Only the operations actually used
2188f931551bSRalph Campbell  * are implemented so far.
2189f931551bSRalph Campbell  * The chip requires that there be no back-to-back sendctrl writes,
2190f931551bSRalph Campbell  * so write the scratch register after each sendctrl write.
2191f931551bSRalph Campbell  */
2192f931551bSRalph Campbell static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op)
2193f931551bSRalph Campbell {
2194f931551bSRalph Campbell 	struct qib_devdata *dd = ppd->dd;
2195f931551bSRalph Campbell 	u64 tmp_dd_sendctrl;
2196f931551bSRalph Campbell 	unsigned long flags;
2197f931551bSRalph Campbell 
2198f931551bSRalph Campbell 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
2199f931551bSRalph Campbell 
2200f931551bSRalph Campbell 	/* First the ones that are "sticky", saved in shadow */
2201f931551bSRalph Campbell 	if (op & QIB_SENDCTRL_CLEAR)
2202f931551bSRalph Campbell 		dd->sendctrl = 0;
2203f931551bSRalph Campbell 	if (op & QIB_SENDCTRL_SEND_DIS)
2204f931551bSRalph Campbell 		dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOEnable);
2205f931551bSRalph Campbell 	else if (op & QIB_SENDCTRL_SEND_ENB)
2206f931551bSRalph Campbell 		dd->sendctrl |= SYM_MASK(SendCtrl, PIOEnable);
2207f931551bSRalph Campbell 	if (op & QIB_SENDCTRL_AVAIL_DIS)
2208f931551bSRalph Campbell 		dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
2209f931551bSRalph Campbell 	else if (op & QIB_SENDCTRL_AVAIL_ENB)
2210f931551bSRalph Campbell 		dd->sendctrl |= SYM_MASK(SendCtrl, PIOBufAvailUpd);
2211f931551bSRalph Campbell 
2212f931551bSRalph Campbell 	if (op & QIB_SENDCTRL_DISARM_ALL) {
2213f931551bSRalph Campbell 		u32 i, last;
2214f931551bSRalph Campbell 
2215f931551bSRalph Campbell 		tmp_dd_sendctrl = dd->sendctrl;
2216f931551bSRalph Campbell 		/*
2217f931551bSRalph Campbell 		 * disarm any that are not yet launched, disabling sends
2218f931551bSRalph Campbell 		 * and updates until done.
2219f931551bSRalph Campbell 		 */
2220f931551bSRalph Campbell 		last = dd->piobcnt2k + dd->piobcnt4k;
2221f931551bSRalph Campbell 		tmp_dd_sendctrl &=
2222f931551bSRalph Campbell 			~(SYM_MASK(SendCtrl, PIOEnable) |
2223f931551bSRalph Campbell 			  SYM_MASK(SendCtrl, PIOBufAvailUpd));
2224f931551bSRalph Campbell 		for (i = 0; i < last; i++) {
2225f931551bSRalph Campbell 			qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl |
2226f931551bSRalph Campbell 				       SYM_MASK(SendCtrl, Disarm) | i);
2227f931551bSRalph Campbell 			qib_write_kreg(dd, kr_scratch, 0);
2228f931551bSRalph Campbell 		}
2229f931551bSRalph Campbell 	}
2230f931551bSRalph Campbell 
2231f931551bSRalph Campbell 	tmp_dd_sendctrl = dd->sendctrl;
2232f931551bSRalph Campbell 
2233f931551bSRalph Campbell 	if (op & QIB_SENDCTRL_FLUSH)
2234f931551bSRalph Campbell 		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
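	/* for a single-buffer disarm, the low bits of op carry the buffer number */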
2235f931551bSRalph Campbell 	if (op & QIB_SENDCTRL_DISARM)
2236f931551bSRalph Campbell 		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
2237f931551bSRalph Campbell 			((op & QIB_6120_SendCtrl_DisarmPIOBuf_RMASK) <<
2238f931551bSRalph Campbell 			 SYM_LSB(SendCtrl, DisarmPIOBuf));
2239f931551bSRalph Campbell 	if (op & QIB_SENDCTRL_AVAIL_BLIP)
2240f931551bSRalph Campbell 		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
2241f931551bSRalph Campbell 
2242f931551bSRalph Campbell 	qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
2243f931551bSRalph Campbell 	qib_write_kreg(dd, kr_scratch, 0);
2244f931551bSRalph Campbell 
2245f931551bSRalph Campbell 	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
2246f931551bSRalph Campbell 		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2247f931551bSRalph Campbell 		qib_write_kreg(dd, kr_scratch, 0);
2248f931551bSRalph Campbell 	}
2249f931551bSRalph Campbell 
2250f931551bSRalph Campbell 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2251f931551bSRalph Campbell 
2252f931551bSRalph Campbell 	if (op & QIB_SENDCTRL_FLUSH) {
2253f931551bSRalph Campbell 		u32 v;
2254f931551bSRalph Campbell 		/*
2255f931551bSRalph Campbell 		 * ensure writes have hit chip, then do a few
2256f931551bSRalph Campbell 		 * more reads, to allow DMA of pioavail registers
2257f931551bSRalph Campbell 		 * to occur, so in-memory copy is in sync with
2258f931551bSRalph Campbell 		 * the chip.  Not always safe to sleep.
2259f931551bSRalph Campbell 		 */
2260f931551bSRalph Campbell 		v = qib_read_kreg32(dd, kr_scratch);
2261f931551bSRalph Campbell 		qib_write_kreg(dd, kr_scratch, v);
2262f931551bSRalph Campbell 		v = qib_read_kreg32(dd, kr_scratch);
2263f931551bSRalph Campbell 		qib_write_kreg(dd, kr_scratch, v);
2264f931551bSRalph Campbell 		qib_read_kreg32(dd, kr_scratch);
2265f931551bSRalph Campbell 	}
2266f931551bSRalph Campbell }
2267f931551bSRalph Campbell 
2268f931551bSRalph Campbell /**
2269f931551bSRalph Campbell  * qib_portcntr_6120 - read a per-port counter
2270f931551bSRalph Campbell  * @ppd: the qlogic_ib device
2271f931551bSRalph Campbell  * @reg: the counter to snapshot
2272f931551bSRalph Campbell  */
227371f964c3SLee Jones static u64 qib_portcntr_6120(struct qib_pportdata *ppd, u32 reg)
227471f964c3SLee Jones {
2275f931551bSRalph Campbell 	u64 ret = 0ULL;
2276f931551bSRalph Campbell 	struct qib_devdata *dd = ppd->dd;
2277f931551bSRalph Campbell 	u16 creg;
2278f931551bSRalph Campbell 	/* 0xffff for unimplemented or synthesized counters */
2279f931551bSRalph Campbell 	static const u16 xlator[] = {
2280f931551bSRalph Campbell 		[QIBPORTCNTR_PKTSEND] = cr_pktsend,
2281f931551bSRalph Campbell 		[QIBPORTCNTR_WORDSEND] = cr_wordsend,
2282f931551bSRalph Campbell 		[QIBPORTCNTR_PSXMITDATA] = 0xffff,
2283f931551bSRalph Campbell 		[QIBPORTCNTR_PSXMITPKTS] = 0xffff,
2284f931551bSRalph Campbell 		[QIBPORTCNTR_PSXMITWAIT] = 0xffff,
2285f931551bSRalph Campbell 		[QIBPORTCNTR_SENDSTALL] = cr_sendstall,
2286f931551bSRalph Campbell 		[QIBPORTCNTR_PKTRCV] = cr_pktrcv,
2287f931551bSRalph Campbell 		[QIBPORTCNTR_PSRCVDATA] = 0xffff,
2288f931551bSRalph Campbell 		[QIBPORTCNTR_PSRCVPKTS] = 0xffff,
2289f931551bSRalph Campbell 		[QIBPORTCNTR_RCVEBP] = cr_rcvebp,
2290f931551bSRalph Campbell 		[QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
2291f931551bSRalph Campbell 		[QIBPORTCNTR_WORDRCV] = cr_wordrcv,
2292f931551bSRalph Campbell 		[QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
2293f931551bSRalph Campbell 		[QIBPORTCNTR_RXLOCALPHYERR] = 0xffff,
2294f931551bSRalph Campbell 		[QIBPORTCNTR_RXVLERR] = 0xffff,
2295f931551bSRalph Campbell 		[QIBPORTCNTR_ERRICRC] = cr_erricrc,
2296f931551bSRalph Campbell 		[QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
2297f931551bSRalph Campbell 		[QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
2298f931551bSRalph Campbell 		[QIBPORTCNTR_BADFORMAT] = cr_badformat,
2299f931551bSRalph Campbell 		[QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
2300f931551bSRalph Campbell 		[QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
2301f931551bSRalph Campbell 		[QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
2302f931551bSRalph Campbell 		[QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
2303f931551bSRalph Campbell 		[QIBPORTCNTR_EXCESSBUFOVFL] = 0xffff,
2304f931551bSRalph Campbell 		[QIBPORTCNTR_ERRLINK] = cr_errlink,
2305f931551bSRalph Campbell 		[QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
2306f931551bSRalph Campbell 		[QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
2307f931551bSRalph Campbell 		[QIBPORTCNTR_LLI] = 0xffff,
2308f931551bSRalph Campbell 		[QIBPORTCNTR_PSINTERVAL] = 0xffff,
2309f931551bSRalph Campbell 		[QIBPORTCNTR_PSSTART] = 0xffff,
2310f931551bSRalph Campbell 		[QIBPORTCNTR_PSSTAT] = 0xffff,
2311f931551bSRalph Campbell 		[QIBPORTCNTR_VL15PKTDROP] = 0xffff,
2312f931551bSRalph Campbell 		[QIBPORTCNTR_ERRPKEY] = cr_errpkey,
2313f931551bSRalph Campbell 		[QIBPORTCNTR_KHDROVFL] = 0xffff,
2314f931551bSRalph Campbell 	};
2315f931551bSRalph Campbell 
2316f931551bSRalph Campbell 	if (reg >= ARRAY_SIZE(xlator)) {
2317f931551bSRalph Campbell 		qib_devinfo(ppd->dd->pcidev,
2318f931551bSRalph Campbell 			 "Unimplemented portcounter %u\n", reg);
2319f931551bSRalph Campbell 		goto done;
2320f931551bSRalph Campbell 	}
2321f931551bSRalph Campbell 	creg = xlator[reg];
2322f931551bSRalph Campbell 
2323f931551bSRalph Campbell 	/* handle counter requests not implemented as chip counters */
2324f931551bSRalph Campbell 	if (reg == QIBPORTCNTR_LLI)
2325f931551bSRalph Campbell 		ret = dd->cspec->lli_errs;
2326f931551bSRalph Campbell 	else if (reg == QIBPORTCNTR_EXCESSBUFOVFL)
2327f931551bSRalph Campbell 		ret = dd->cspec->overrun_thresh_errs;
2328f931551bSRalph Campbell 	else if (reg == QIBPORTCNTR_KHDROVFL) {
2329f931551bSRalph Campbell 		int i;
2330f931551bSRalph Campbell 
2331f931551bSRalph Campbell 		/* sum over all kernel contexts */
2332f931551bSRalph Campbell 		for (i = 0; i < dd->first_user_ctxt; i++)
2333f931551bSRalph Campbell 			ret += read_6120_creg32(dd, cr_portovfl + i);
2334f931551bSRalph Campbell 	} else if (reg == QIBPORTCNTR_PSSTAT)
2335f931551bSRalph Campbell 		ret = dd->cspec->pma_sample_status;
2336f931551bSRalph Campbell 	if (creg == 0xffff)
2337f931551bSRalph Campbell 		goto done;
2338f931551bSRalph Campbell 
2339f931551bSRalph Campbell 	/*
2340f931551bSRalph Campbell 	 * only fast incrementing counters are 64bit; use 32 bit reads to
2341f931551bSRalph Campbell 	 * avoid two independent reads when on opteron
2342f931551bSRalph Campbell 	 */
2343f931551bSRalph Campbell 	if (creg == cr_wordsend || creg == cr_wordrcv ||
2344f931551bSRalph Campbell 	    creg == cr_pktsend || creg == cr_pktrcv)
2345f931551bSRalph Campbell 		ret = read_6120_creg(dd, creg);
2346f931551bSRalph Campbell 	else
2347f931551bSRalph Campbell 		ret = read_6120_creg32(dd, creg);
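	/*
	 * While a link-state adjustment is in progress (ibdeltainprog),
	 * report the value snapshotted when it started, and always
	 * subtract the accumulated delta, so errors caused by intentional
	 * link bounces are hidden from the count (see qib_6120_set_ib_cfg()).
	 */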
2348f931551bSRalph Campbell 	if (creg == cr_ibsymbolerr) {
2349f931551bSRalph Campbell 		if (dd->cspec->ibdeltainprog)
2350f931551bSRalph Campbell 			ret -= ret - dd->cspec->ibsymsnap;
2351f931551bSRalph Campbell 		ret -= dd->cspec->ibsymdelta;
2352f931551bSRalph Campbell 	} else if (creg == cr_iblinkerrrecov) {
2353f931551bSRalph Campbell 		if (dd->cspec->ibdeltainprog)
2354f931551bSRalph Campbell 			ret -= ret - dd->cspec->iblnkerrsnap;
2355f931551bSRalph Campbell 		ret -= dd->cspec->iblnkerrdelta;
2356f931551bSRalph Campbell 	}
2357f931551bSRalph Campbell 	if (reg == QIBPORTCNTR_RXDROPPKT) /* add special cased count */
2358f931551bSRalph Campbell 		ret += dd->cspec->rxfc_unsupvl_errs;
2359f931551bSRalph Campbell 
2360f931551bSRalph Campbell done:
2361f931551bSRalph Campbell 	return ret;
2362f931551bSRalph Campbell }
2363f931551bSRalph Campbell 
2364f931551bSRalph Campbell /*
2365f931551bSRalph Campbell  * Device counter names (not port-specific), one line per stat,
2366f931551bSRalph Campbell  * single string.  Used by utilities like ipathstats to print the stats
2367f931551bSRalph Campbell  * in a way which works for different versions of drivers, without changing
2368f931551bSRalph Campbell  * the utility.  Names need to be 12 chars or less (w/o newline), for proper
2369f931551bSRalph Campbell  * display by utility.
2370f931551bSRalph Campbell  * Non-error counters are first.
2371f931551bSRalph Campbell  * Start of "error" counters is indicated by a leading "E " on the first
2372f931551bSRalph Campbell  * "error" counter, and doesn't count in label length.
2373f931551bSRalph Campbell  * The EgrOvfl list needs to be last so we truncate them at the configured
2374f931551bSRalph Campbell  * context count for the device.
2375f931551bSRalph Campbell  * cntr6120indices contains the corresponding register indices.
2376f931551bSRalph Campbell  */
2377f931551bSRalph Campbell static const char cntr6120names[] =
2378f931551bSRalph Campbell 	"Interrupts\n"
2379f931551bSRalph Campbell 	"HostBusStall\n"
2380f931551bSRalph Campbell 	"E RxTIDFull\n"
2381f931551bSRalph Campbell 	"RxTIDInvalid\n"
2382f931551bSRalph Campbell 	"Ctxt0EgrOvfl\n"
2383f931551bSRalph Campbell 	"Ctxt1EgrOvfl\n"
2384f931551bSRalph Campbell 	"Ctxt2EgrOvfl\n"
2385f931551bSRalph Campbell 	"Ctxt3EgrOvfl\n"
2386f931551bSRalph Campbell 	"Ctxt4EgrOvfl\n";
2387f931551bSRalph Campbell 
2388f931551bSRalph Campbell static const size_t cntr6120indices[] = {
2389f931551bSRalph Campbell 	cr_lbint,
2390f931551bSRalph Campbell 	cr_lbflowstall,
2391f931551bSRalph Campbell 	cr_errtidfull,
2392f931551bSRalph Campbell 	cr_errtidvalid,
2393f931551bSRalph Campbell 	cr_portovfl + 0,
2394f931551bSRalph Campbell 	cr_portovfl + 1,
2395f931551bSRalph Campbell 	cr_portovfl + 2,
2396f931551bSRalph Campbell 	cr_portovfl + 3,
2397f931551bSRalph Campbell 	cr_portovfl + 4,
2398f931551bSRalph Campbell };
2399f931551bSRalph Campbell 
2400f931551bSRalph Campbell /*
2401f931551bSRalph Campbell  * same as cntr6120names and cntr6120indices, but for port-specific counters.
2402f931551bSRalph Campbell  * portcntr6120indices is somewhat complicated by some registers needing
2403f931551bSRalph Campbell  * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
2404f931551bSRalph Campbell  */
2405f931551bSRalph Campbell static const char portcntr6120names[] =
2406f931551bSRalph Campbell 	"TxPkt\n"
2407f931551bSRalph Campbell 	"TxFlowPkt\n"
2408f931551bSRalph Campbell 	"TxWords\n"
2409f931551bSRalph Campbell 	"RxPkt\n"
2410f931551bSRalph Campbell 	"RxFlowPkt\n"
2411f931551bSRalph Campbell 	"RxWords\n"
2412f931551bSRalph Campbell 	"TxFlowStall\n"
2413f931551bSRalph Campbell 	"E IBStatusChng\n"
2414f931551bSRalph Campbell 	"IBLinkDown\n"
2415f931551bSRalph Campbell 	"IBLnkRecov\n"
2416f931551bSRalph Campbell 	"IBRxLinkErr\n"
2417f931551bSRalph Campbell 	"IBSymbolErr\n"
2418f931551bSRalph Campbell 	"RxLLIErr\n"
2419f931551bSRalph Campbell 	"RxBadFormat\n"
2420f931551bSRalph Campbell 	"RxBadLen\n"
2421f931551bSRalph Campbell 	"RxBufOvrfl\n"
2422f931551bSRalph Campbell 	"RxEBP\n"
2423f931551bSRalph Campbell 	"RxFlowCtlErr\n"
2424f931551bSRalph Campbell 	"RxICRCerr\n"
2425f931551bSRalph Campbell 	"RxLPCRCerr\n"
2426f931551bSRalph Campbell 	"RxVCRCerr\n"
2427f931551bSRalph Campbell 	"RxInvalLen\n"
2428f931551bSRalph Campbell 	"RxInvalPKey\n"
2429f931551bSRalph Campbell 	"RxPktDropped\n"
2430f931551bSRalph Campbell 	"TxBadLength\n"
2431f931551bSRalph Campbell 	"TxDropped\n"
2432f931551bSRalph Campbell 	"TxInvalLen\n"
2433f931551bSRalph Campbell 	"TxUnderrun\n"
2434f931551bSRalph Campbell 	"TxUnsupVL\n"
2435f931551bSRalph Campbell 	;
2436f931551bSRalph Campbell 
2437f931551bSRalph Campbell #define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
2438f931551bSRalph Campbell static const size_t portcntr6120indices[] = {
2439f931551bSRalph Campbell 	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
2440f931551bSRalph Campbell 	cr_pktsendflow,
2441f931551bSRalph Campbell 	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
2442f931551bSRalph Campbell 	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
2443f931551bSRalph Campbell 	cr_pktrcvflowctrl,
2444f931551bSRalph Campbell 	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
2445f931551bSRalph Campbell 	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
2446f931551bSRalph Campbell 	cr_ibstatuschange,
2447f931551bSRalph Campbell 	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
2448f931551bSRalph Campbell 	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
2449f931551bSRalph Campbell 	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
2450f931551bSRalph Campbell 	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
2451f931551bSRalph Campbell 	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
2452f931551bSRalph Campbell 	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
2453f931551bSRalph Campbell 	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
2454f931551bSRalph Campbell 	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
2455f931551bSRalph Campbell 	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
2456f931551bSRalph Campbell 	cr_rcvflowctrl_err,
2457f931551bSRalph Campbell 	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
2458f931551bSRalph Campbell 	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
2459f931551bSRalph Campbell 	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
2460f931551bSRalph Campbell 	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
2461f931551bSRalph Campbell 	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
2462f931551bSRalph Campbell 	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
2463f931551bSRalph Campbell 	cr_invalidslen,
2464f931551bSRalph Campbell 	cr_senddropped,
2465f931551bSRalph Campbell 	cr_errslen,
2466f931551bSRalph Campbell 	cr_sendunderrun,
2467f931551bSRalph Campbell 	cr_txunsupvl,
2468f931551bSRalph Campbell };
2469f931551bSRalph Campbell 
2470f931551bSRalph Campbell /* do all the setup to make the counter reads efficient later */
2471f931551bSRalph Campbell static void init_6120_cntrnames(struct qib_devdata *dd)
2472f931551bSRalph Campbell {
2473f931551bSRalph Campbell 	int i, j = 0;
2474f931551bSRalph Campbell 	char *s;
2475f931551bSRalph Campbell 
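	/*
	 * Count the device counter names; once the per-context EgrOvfl
	 * names start, count only as many as there are configured
	 * contexts, so the name list can be truncated to match.
	 */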
2476f931551bSRalph Campbell 	for (i = 0, s = (char *)cntr6120names; s && j <= dd->cfgctxts;
2477f931551bSRalph Campbell 	     i++) {
2478f931551bSRalph Campbell 		/* we always have at least one counter before the egrovfl */
2479f931551bSRalph Campbell 		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
2480f931551bSRalph Campbell 			j = 1;
2481f931551bSRalph Campbell 		s = strchr(s + 1, '\n');
2482f931551bSRalph Campbell 		if (s && j)
2483f931551bSRalph Campbell 			j++;
2484f931551bSRalph Campbell 	}
2485f931551bSRalph Campbell 	dd->cspec->ncntrs = i;
2486f931551bSRalph Campbell 	if (!s)
2487f931551bSRalph Campbell 		/* full list; size is without terminating null */
2488f931551bSRalph Campbell 		dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1;
2489f931551bSRalph Campbell 	else
2490f931551bSRalph Campbell 		dd->cspec->cntrnamelen = 1 + s - cntr6120names;
2491f931551bSRalph Campbell 	dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
2492f931551bSRalph Campbell 					 GFP_KERNEL);
2493f931551bSRalph Campbell 
24946da2ec56SKees Cook 	for (i = 0, s = (char *)portcntr6120names; s; i++)
24956da2ec56SKees Cook 		s = strchr(s + 1, '\n');
2496f931551bSRalph Campbell 	dd->cspec->nportcntrs = i - 1;
2497f931551bSRalph Campbell 	dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
2498f931551bSRalph Campbell 	dd->cspec->portcntrs = kmalloc_array(dd->cspec->nportcntrs,
2499f931551bSRalph Campbell 					     sizeof(u64),
2500f931551bSRalph Campbell 					     GFP_KERNEL);
25016da2ec56SKees Cook }
25026da2ec56SKees Cook 
25036da2ec56SKees Cook static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
2504f931551bSRalph Campbell 			      u64 **cntrp)
2505f931551bSRalph Campbell {
2506f931551bSRalph Campbell 	u32 ret;
2507f931551bSRalph Campbell 
2508f931551bSRalph Campbell 	if (namep) {
2509f931551bSRalph Campbell 		ret = dd->cspec->cntrnamelen;
2510f931551bSRalph Campbell 		if (pos >= ret)
2511f931551bSRalph Campbell 			ret = 0; /* final read after getting everything */
2512f931551bSRalph Campbell 		else
2513f931551bSRalph Campbell 			*namep = (char *)cntr6120names;
2514f931551bSRalph Campbell 	} else {
2515f931551bSRalph Campbell 		u64 *cntr = dd->cspec->cntrs;
2516f931551bSRalph Campbell 		int i;
2517f931551bSRalph Campbell 
2518f931551bSRalph Campbell 		ret = dd->cspec->ncntrs * sizeof(u64);
2519f931551bSRalph Campbell 		if (!cntr || pos >= ret) {
2520f931551bSRalph Campbell 			/* everything read, or couldn't get memory */
2521f931551bSRalph Campbell 			ret = 0;
2522f931551bSRalph Campbell 			goto done;
2523f931551bSRalph Campbell 		}
2524f931551bSRalph Campbell 		if (pos >= ret) {
2525f931551bSRalph Campbell 			ret = 0; /* final read after getting everything */
2526f931551bSRalph Campbell 			goto done;
2527f931551bSRalph Campbell 		}
2528f931551bSRalph Campbell 		*cntrp = cntr;
2529f931551bSRalph Campbell 		for (i = 0; i < dd->cspec->ncntrs; i++)
2530f931551bSRalph Campbell 			*cntr++ = read_6120_creg32(dd, cntr6120indices[i]);
2531f931551bSRalph Campbell 	}
2532f931551bSRalph Campbell done:
2533f931551bSRalph Campbell 	return ret;
2534f931551bSRalph Campbell }
2535f931551bSRalph Campbell 
2536f931551bSRalph Campbell static u32 qib_read_6120portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
2537f931551bSRalph Campbell 				  char **namep, u64 **cntrp)
2538f931551bSRalph Campbell {
2539f931551bSRalph Campbell 	u32 ret;
2540f931551bSRalph Campbell 
2541f931551bSRalph Campbell 	if (namep) {
2542f931551bSRalph Campbell 		ret = dd->cspec->portcntrnamelen;
2543f931551bSRalph Campbell 		if (pos >= ret)
2544f931551bSRalph Campbell 			ret = 0; /* final read after getting everything */
2545f931551bSRalph Campbell 		else
2546f931551bSRalph Campbell 			*namep = (char *)portcntr6120names;
2547f931551bSRalph Campbell 	} else {
2548f931551bSRalph Campbell 		u64 *cntr = dd->cspec->portcntrs;
2549f931551bSRalph Campbell 		struct qib_pportdata *ppd = &dd->pport[port];
2550f931551bSRalph Campbell 		int i;
2551f931551bSRalph Campbell 
2552f931551bSRalph Campbell 		ret = dd->cspec->nportcntrs * sizeof(u64);
2553f931551bSRalph Campbell 		if (!cntr || pos >= ret) {
2554f931551bSRalph Campbell 			/* everything read, or couldn't get memory */
2555f931551bSRalph Campbell 			ret = 0;
2556f931551bSRalph Campbell 			goto done;
2557f931551bSRalph Campbell 		}
2558f931551bSRalph Campbell 		*cntrp = cntr;
2559f931551bSRalph Campbell 		for (i = 0; i < dd->cspec->nportcntrs; i++) {
2560f931551bSRalph Campbell 			if (portcntr6120indices[i] & _PORT_VIRT_FLAG)
2561f931551bSRalph Campbell 				*cntr++ = qib_portcntr_6120(ppd,
2562f931551bSRalph Campbell 					portcntr6120indices[i] &
2563f931551bSRalph Campbell 					~_PORT_VIRT_FLAG);
2564f931551bSRalph Campbell 			else
2565f931551bSRalph Campbell 				*cntr++ = read_6120_creg32(dd,
2566f931551bSRalph Campbell 					   portcntr6120indices[i]);
2567f931551bSRalph Campbell 		}
2568f931551bSRalph Campbell 	}
2569f931551bSRalph Campbell done:
2570f931551bSRalph Campbell 	return ret;
2571f931551bSRalph Campbell }
2572f931551bSRalph Campbell 
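/*
 * Verify that the chip's error mask still matches our shadow copy; if it
 * doesn't, restore it, and if hardware errors or freeze mode are pending,
 * clear the error/interrupt registers to force a re-interrupt.  Called
 * periodically from the faststats timer below.
 */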
2573f931551bSRalph Campbell static void qib_chk_6120_errormask(struct qib_devdata *dd)
2574f931551bSRalph Campbell {
2575f931551bSRalph Campbell 	static u32 fixed;
2576f931551bSRalph Campbell 	u32 ctrl;
2577f931551bSRalph Campbell 	unsigned long errormask;
2578f931551bSRalph Campbell 	unsigned long hwerrs;
2579f931551bSRalph Campbell 
2580f931551bSRalph Campbell 	if (!dd->cspec->errormask || !(dd->flags & QIB_INITTED))
2581f931551bSRalph Campbell 		return;
2582f931551bSRalph Campbell 
2583f931551bSRalph Campbell 	errormask = qib_read_kreg64(dd, kr_errmask);
2584f931551bSRalph Campbell 
2585f931551bSRalph Campbell 	if (errormask == dd->cspec->errormask)
2586f931551bSRalph Campbell 		return;
2587f931551bSRalph Campbell 	fixed++;
2588f931551bSRalph Campbell 
2589f931551bSRalph Campbell 	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2590f931551bSRalph Campbell 	ctrl = qib_read_kreg32(dd, kr_control);
2591f931551bSRalph Campbell 
2592f931551bSRalph Campbell 	qib_write_kreg(dd, kr_errmask,
2593f931551bSRalph Campbell 		dd->cspec->errormask);
2594f931551bSRalph Campbell 
2595f931551bSRalph Campbell 	if ((hwerrs & dd->cspec->hwerrmask) ||
2596f931551bSRalph Campbell 	    (ctrl & QLOGIC_IB_C_FREEZEMODE)) {
2597f931551bSRalph Campbell 		qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2598f931551bSRalph Campbell 		qib_write_kreg(dd, kr_errclear, 0ULL);
2599f931551bSRalph Campbell 		/* force re-interrupt of pending events, just in case */
2600f931551bSRalph Campbell 		qib_write_kreg(dd, kr_intclear, 0ULL);
2601f931551bSRalph Campbell 		qib_devinfo(dd->pcidev,
2602f931551bSRalph Campbell 			 "errormask fixed(%u) %lx->%lx, ctrl %x hwerr %lx\n",
2603f931551bSRalph Campbell 			 fixed, errormask, (unsigned long)dd->cspec->errormask,
2604f931551bSRalph Campbell 			 ctrl, hwerrs);
2605f931551bSRalph Campbell 	}
2606f931551bSRalph Campbell }
2607f931551bSRalph Campbell 
2608f931551bSRalph Campbell /**
2609f931551bSRalph Campbell  * qib_get_6120_faststats - get word counters from chip before they overflow
2610f931551bSRalph Campbell  * @t: contains a pointer to the qlogic_ib device qib_devdata
2611f931551bSRalph Campbell  *
2612ae360f41SLeon Romanovsky  * This needs more work; in particular, a decision on whether we really
261371f964c3SLee Jones  * need traffic_wds maintained the way it is.
2614f931551bSRalph Campbell  * Called from add_timer.
2615f931551bSRalph Campbell  */
2616f931551bSRalph Campbell static void qib_get_6120_faststats(struct timer_list *t)
2617f931551bSRalph Campbell {
2618f931551bSRalph Campbell 	struct qib_devdata *dd = from_timer(dd, t, stats_timer);
26194037c92fSKees Cook 	struct qib_pportdata *ppd = dd->pport;
2620f931551bSRalph Campbell 	unsigned long flags;
26214037c92fSKees Cook 	u64 traffic_wds;
2622f931551bSRalph Campbell 
2623f931551bSRalph Campbell 	/*
2624f931551bSRalph Campbell 	 * don't access the chip while running diags, or memory diags can
2625f931551bSRalph Campbell 	 * fail
2626f931551bSRalph Campbell 	 */
2627f931551bSRalph Campbell 	if (!(dd->flags & QIB_INITTED) || dd->diag_client)
2628f931551bSRalph Campbell 		/* but re-arm the timer, for diags case; won't hurt other */
2629f931551bSRalph Campbell 		goto done;
2630f931551bSRalph Campbell 
2631f931551bSRalph Campbell 	/*
2632f931551bSRalph Campbell 	 * We now try to maintain an activity timer, based on traffic
2633f931551bSRalph Campbell 	 * exceeding a threshold, so we need to check the word-counts
2634f931551bSRalph Campbell 	 * even if they are 64-bit.
2635f931551bSRalph Campbell 	 */
2636f931551bSRalph Campbell 	traffic_wds = qib_portcntr_6120(ppd, cr_wordsend) +
2637f931551bSRalph Campbell 		qib_portcntr_6120(ppd, cr_wordrcv);
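	/* fold only the delta since the last sample into the running total */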
2638f931551bSRalph Campbell 	spin_lock_irqsave(&dd->eep_st_lock, flags);
2639f931551bSRalph Campbell 	traffic_wds -= dd->traffic_wds;
2640f931551bSRalph Campbell 	dd->traffic_wds += traffic_wds;
2641f931551bSRalph Campbell 	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
2642f931551bSRalph Campbell 
2643f931551bSRalph Campbell 	qib_chk_6120_errormask(dd);
2644f931551bSRalph Campbell done:
2645f931551bSRalph Campbell 	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
2646f931551bSRalph Campbell }
2647f931551bSRalph Campbell 
2648f931551bSRalph Campbell /* no interrupt fallback for these chips */
2649f931551bSRalph Campbell static int qib_6120_nointr_fallback(struct qib_devdata *dd)
2650f931551bSRalph Campbell {
2651f931551bSRalph Campbell 	return 0;
2652f931551bSRalph Campbell }
2653f931551bSRalph Campbell 
2654f931551bSRalph Campbell /*
2655f931551bSRalph Campbell  * reset the XGXS (between serdes and IBC).  Slightly less intrusive
2656f931551bSRalph Campbell  * than resetting the IBC or external link state, and useful in some
2657f931551bSRalph Campbell  * cases to cause some retraining.  To do this right, we reset IBC
2658f931551bSRalph Campbell  * as well.
2659f931551bSRalph Campbell  */
2660f931551bSRalph Campbell static void qib_6120_xgxs_reset(struct qib_pportdata *ppd)
2661f931551bSRalph Campbell {
2662f931551bSRalph Campbell 	u64 val, prev_val;
2663f931551bSRalph Campbell 	struct qib_devdata *dd = ppd->dd;
2664f931551bSRalph Campbell 
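	/* disable the link, pulse the XGXS reset bit, then restore both */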
2665f931551bSRalph Campbell 	prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
2666f931551bSRalph Campbell 	val = prev_val | QLOGIC_IB_XGXS_RESET;
2667f931551bSRalph Campbell 	prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
2668f931551bSRalph Campbell 	qib_write_kreg(dd, kr_control,
2669f931551bSRalph Campbell 		       dd->control & ~QLOGIC_IB_C_LINKENABLE);
2670f931551bSRalph Campbell 	qib_write_kreg(dd, kr_xgxs_cfg, val);
2671f931551bSRalph Campbell 	qib_read_kreg32(dd, kr_scratch);
2672f931551bSRalph Campbell 	qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
2673f931551bSRalph Campbell 	qib_write_kreg(dd, kr_control, dd->control);
2674f931551bSRalph Campbell }
2675f931551bSRalph Campbell 
2676f931551bSRalph Campbell static int qib_6120_get_ib_cfg(struct qib_pportdata *ppd, int which)
2677f931551bSRalph Campbell {
2678f931551bSRalph Campbell 	int ret;
2679f931551bSRalph Campbell 
2680f931551bSRalph Campbell 	switch (which) {
2681f931551bSRalph Campbell 	case QIB_IB_CFG_LWID:
2682f931551bSRalph Campbell 		ret = ppd->link_width_active;
2683f931551bSRalph Campbell 		break;
2684f931551bSRalph Campbell 
2685f931551bSRalph Campbell 	case QIB_IB_CFG_SPD:
2686f931551bSRalph Campbell 		ret = ppd->link_speed_active;
2687f931551bSRalph Campbell 		break;
2688f931551bSRalph Campbell 
2689f931551bSRalph Campbell 	case QIB_IB_CFG_LWID_ENB:
2690f931551bSRalph Campbell 		ret = ppd->link_width_enabled;
2691f931551bSRalph Campbell 		break;
2692f931551bSRalph Campbell 
2693f931551bSRalph Campbell 	case QIB_IB_CFG_SPD_ENB:
2694f931551bSRalph Campbell 		ret = ppd->link_speed_enabled;
2695f931551bSRalph Campbell 		break;
2696f931551bSRalph Campbell 
2697f931551bSRalph Campbell 	case QIB_IB_CFG_OP_VLS:
2698f931551bSRalph Campbell 		ret = ppd->vls_operational;
2699f931551bSRalph Campbell 		break;
2700f931551bSRalph Campbell 
2701f931551bSRalph Campbell 	case QIB_IB_CFG_VL_HIGH_CAP:
2702f931551bSRalph Campbell 		ret = 0;
2703f931551bSRalph Campbell 		break;
2704f931551bSRalph Campbell 
2705f931551bSRalph Campbell 	case QIB_IB_CFG_VL_LOW_CAP:
2706f931551bSRalph Campbell 		ret = 0;
2707f931551bSRalph Campbell 		break;
2708f931551bSRalph Campbell 
2709f931551bSRalph Campbell 	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
2710f931551bSRalph Campbell 		ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
2711f931551bSRalph Campbell 				OverrunThreshold);
2712f931551bSRalph Campbell 		break;
2713f931551bSRalph Campbell 
2714f931551bSRalph Campbell 	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
2715f931551bSRalph Campbell 		ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
2716f931551bSRalph Campbell 				PhyerrThreshold);
2717f931551bSRalph Campbell 		break;
2718f931551bSRalph Campbell 
2719f931551bSRalph Campbell 	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
2720f931551bSRalph Campbell 		/* will only take effect when the link state changes */
2721f931551bSRalph Campbell 		ret = (ppd->dd->cspec->ibcctrl &
2722f931551bSRalph Campbell 		       SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
2723f931551bSRalph Campbell 			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
2724f931551bSRalph Campbell 		break;
2725f931551bSRalph Campbell 
2726f931551bSRalph Campbell 	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
2727f931551bSRalph Campbell 		ret = 0; /* no heartbeat on this chip */
2728f931551bSRalph Campbell 		break;
2729f931551bSRalph Campbell 
2730f931551bSRalph Campbell 	case QIB_IB_CFG_PMA_TICKS:
2731f931551bSRalph Campbell 		ret = 250; /* 1 usec. */
2732f931551bSRalph Campbell 		break;
2733f931551bSRalph Campbell 
2734f931551bSRalph Campbell 	default:
2735f931551bSRalph Campbell 		ret =  -EINVAL;
2736f931551bSRalph Campbell 		break;
2737f931551bSRalph Campbell 	}
2738f931551bSRalph Campbell 	return ret;
2739f931551bSRalph Campbell }
2740f931551bSRalph Campbell 
2741f931551bSRalph Campbell /*
2742f931551bSRalph Campbell  * We assume range checking is already done, if needed.
2743f931551bSRalph Campbell  */
2744f931551bSRalph Campbell static int qib_6120_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
2745f931551bSRalph Campbell {
2746f931551bSRalph Campbell 	struct qib_devdata *dd = ppd->dd;
2747f931551bSRalph Campbell 	int ret = 0;
2748f931551bSRalph Campbell 	u64 val64;
2749f931551bSRalph Campbell 	u16 lcmd, licmd;
2750f931551bSRalph Campbell 
2751f931551bSRalph Campbell 	switch (which) {
2752f931551bSRalph Campbell 	case QIB_IB_CFG_LWID_ENB:
2753f931551bSRalph Campbell 		ppd->link_width_enabled = val;
2754f931551bSRalph Campbell 		break;
2755f931551bSRalph Campbell 
2756f931551bSRalph Campbell 	case QIB_IB_CFG_SPD_ENB:
2757f931551bSRalph Campbell 		ppd->link_speed_enabled = val;
2758f931551bSRalph Campbell 		break;
2759f931551bSRalph Campbell 
2760f931551bSRalph Campbell 	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
2761f931551bSRalph Campbell 		val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
2762f931551bSRalph Campbell 				  OverrunThreshold);
2763f931551bSRalph Campbell 		if (val64 != val) {
2764f931551bSRalph Campbell 			dd->cspec->ibcctrl &=
2765f931551bSRalph Campbell 				~SYM_MASK(IBCCtrl, OverrunThreshold);
2766f931551bSRalph Campbell 			dd->cspec->ibcctrl |= (u64) val <<
2767f931551bSRalph Campbell 				SYM_LSB(IBCCtrl, OverrunThreshold);
2768f931551bSRalph Campbell 			qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
2769f931551bSRalph Campbell 			qib_write_kreg(dd, kr_scratch, 0);
2770f931551bSRalph Campbell 		}
2771f931551bSRalph Campbell 		break;
2772f931551bSRalph Campbell 
2773f931551bSRalph Campbell 	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
2774f931551bSRalph Campbell 		val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
2775f931551bSRalph Campbell 				  PhyerrThreshold);
2776f931551bSRalph Campbell 		if (val64 != val) {
2777f931551bSRalph Campbell 			dd->cspec->ibcctrl &=
2778f931551bSRalph Campbell 				~SYM_MASK(IBCCtrl, PhyerrThreshold);
2779f931551bSRalph Campbell 			dd->cspec->ibcctrl |= (u64) val <<
2780f931551bSRalph Campbell 				SYM_LSB(IBCCtrl, PhyerrThreshold);
2781f931551bSRalph Campbell 			qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
2782f931551bSRalph Campbell 			qib_write_kreg(dd, kr_scratch, 0);
2783f931551bSRalph Campbell 		}
2784f931551bSRalph Campbell 		break;
2785f931551bSRalph Campbell 
2786f931551bSRalph Campbell 	case QIB_IB_CFG_PKEYS: /* update pkeys */
2787f931551bSRalph Campbell 		val64 = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
2788f931551bSRalph Campbell 			((u64) ppd->pkeys[2] << 32) |
2789f931551bSRalph Campbell 			((u64) ppd->pkeys[3] << 48);
2790f931551bSRalph Campbell 		qib_write_kreg(dd, kr_partitionkey, val64);
2791f931551bSRalph Campbell 		break;
2792f931551bSRalph Campbell 
2793f931551bSRalph Campbell 	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
2794f931551bSRalph Campbell 		/* will only take effect when the link state changes */
2795f931551bSRalph Campbell 		if (val == IB_LINKINITCMD_POLL)
2796f931551bSRalph Campbell 			dd->cspec->ibcctrl &=
2797f931551bSRalph Campbell 				~SYM_MASK(IBCCtrl, LinkDownDefaultState);
2798f931551bSRalph Campbell 		else /* SLEEP */
2799f931551bSRalph Campbell 			dd->cspec->ibcctrl |=
2800f931551bSRalph Campbell 				SYM_MASK(IBCCtrl, LinkDownDefaultState);
2801f931551bSRalph Campbell 		qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
2802f931551bSRalph Campbell 		qib_write_kreg(dd, kr_scratch, 0);
2803f931551bSRalph Campbell 		break;
2804f931551bSRalph Campbell 
2805f931551bSRalph Campbell 	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
2806f931551bSRalph Campbell 		/*
2807f931551bSRalph Campbell 		 * Update our housekeeping variables, and set IBC max
2808f931551bSRalph Campbell 		 * size, same as init code; max IBC is max we allow in
2809f931551bSRalph Campbell 		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
2810f931551bSRalph Campbell 		 * Set even if it's unchanged, print debug message only
2811f931551bSRalph Campbell 		 * on changes.
2812f931551bSRalph Campbell 		 */
2813f931551bSRalph Campbell 		val = (ppd->ibmaxlen >> 2) + 1;
2814f931551bSRalph Campbell 		dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
2815f931551bSRalph Campbell 		dd->cspec->ibcctrl |= (u64)val <<
2816f931551bSRalph Campbell 			SYM_LSB(IBCCtrl, MaxPktLen);
2817f931551bSRalph Campbell 		qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
2818f931551bSRalph Campbell 		qib_write_kreg(dd, kr_scratch, 0);
2819f931551bSRalph Campbell 		break;
2820f931551bSRalph Campbell 
2821f931551bSRalph Campbell 	case QIB_IB_CFG_LSTATE: /* set the IB link state */
2822f931551bSRalph Campbell 		switch (val & 0xffff0000) {
2823f931551bSRalph Campbell 		case IB_LINKCMD_DOWN:
2824f931551bSRalph Campbell 			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
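			/*
			 * Snapshot the symbol and link-recovery error counters
			 * so qib_portcntr_6120() can hide errors caused by
			 * taking the link down on purpose.
			 */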
2825f931551bSRalph Campbell 			if (!dd->cspec->ibdeltainprog) {
2826f931551bSRalph Campbell 				dd->cspec->ibdeltainprog = 1;
2827f931551bSRalph Campbell 				dd->cspec->ibsymsnap =
2828f931551bSRalph Campbell 					read_6120_creg32(dd, cr_ibsymbolerr);
2829f931551bSRalph Campbell 				dd->cspec->iblnkerrsnap =
2830f931551bSRalph Campbell 					read_6120_creg32(dd, cr_iblinkerrrecov);
2831f931551bSRalph Campbell 			}
2832f931551bSRalph Campbell 			break;
2833f931551bSRalph Campbell 
2834f931551bSRalph Campbell 		case IB_LINKCMD_ARMED:
2835f931551bSRalph Campbell 			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
2836f931551bSRalph Campbell 			break;
2837f931551bSRalph Campbell 
2838f931551bSRalph Campbell 		case IB_LINKCMD_ACTIVE:
2839f931551bSRalph Campbell 			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
2840f931551bSRalph Campbell 			break;
2841f931551bSRalph Campbell 
2842f931551bSRalph Campbell 		default:
2843f931551bSRalph Campbell 			ret = -EINVAL;
2844f931551bSRalph Campbell 			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
2845f931551bSRalph Campbell 			goto bail;
2846f931551bSRalph Campbell 		}
2847f931551bSRalph Campbell 		switch (val & 0xffff) {
2848f931551bSRalph Campbell 		case IB_LINKINITCMD_NOP:
2849f931551bSRalph Campbell 			licmd = 0;
2850f931551bSRalph Campbell 			break;
2851f931551bSRalph Campbell 
2852f931551bSRalph Campbell 		case IB_LINKINITCMD_POLL:
2853f931551bSRalph Campbell 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
2854f931551bSRalph Campbell 			break;
2855f931551bSRalph Campbell 
2856f931551bSRalph Campbell 		case IB_LINKINITCMD_SLEEP:
2857f931551bSRalph Campbell 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
2858f931551bSRalph Campbell 			break;
2859f931551bSRalph Campbell 
2860f931551bSRalph Campbell 		case IB_LINKINITCMD_DISABLE:
2861f931551bSRalph Campbell 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
2862f931551bSRalph Campbell 			break;
2863f931551bSRalph Campbell 
2864f931551bSRalph Campbell 		default:
2865f931551bSRalph Campbell 			ret = -EINVAL;
2866f931551bSRalph Campbell 			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
2867f931551bSRalph Campbell 				    val & 0xffff);
2868f931551bSRalph Campbell 			goto bail;
2869f931551bSRalph Campbell 		}
2870f931551bSRalph Campbell 		qib_set_ib_6120_lstate(ppd, lcmd, licmd);
2871f931551bSRalph Campbell 		goto bail;
2872f931551bSRalph Campbell 
2873f931551bSRalph Campbell 	case QIB_IB_CFG_HRTBT:
2874f931551bSRalph Campbell 		ret = -EINVAL;
2875f931551bSRalph Campbell 		break;
2876f931551bSRalph Campbell 
2877f931551bSRalph Campbell 	default:
2878f931551bSRalph Campbell 		ret = -EINVAL;
2879f931551bSRalph Campbell 	}
2880f931551bSRalph Campbell bail:
2881f931551bSRalph Campbell 	return ret;
2882f931551bSRalph Campbell }
2883f931551bSRalph Campbell 
2884f931551bSRalph Campbell static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
2885f931551bSRalph Campbell {
2886f931551bSRalph Campbell 	int ret = 0;
2887f931551bSRalph Campbell 
2888f931551bSRalph Campbell 	if (!strncmp(what, "ibc", 3)) {
2889f931551bSRalph Campbell 		ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
2890da12c1f6SMike Marciniszyn 		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
2891f931551bSRalph Campbell 			 ppd->dd->unit, ppd->port);
2892f931551bSRalph Campbell 	} else if (!strncmp(what, "off", 3)) {
2893f931551bSRalph Campbell 		ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
2894f931551bSRalph Campbell 		qib_devinfo(ppd->dd->pcidev,
2895f931551bSRalph Campbell 			"Disabling IB%u:%u IBC loopback (normal)\n",
2896f931551bSRalph Campbell 			ppd->dd->unit, ppd->port);
28977fac3301SMike Marciniszyn 	} else
28987fac3301SMike Marciniszyn 		ret = -EINVAL;
28997fac3301SMike Marciniszyn 	if (!ret) {
2900f931551bSRalph Campbell 		qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->dd->cspec->ibcctrl);
2901f931551bSRalph Campbell 		qib_write_kreg(ppd->dd, kr_scratch, 0);
2902f931551bSRalph Campbell 	}
2903f931551bSRalph Campbell 	return ret;
2904f931551bSRalph Campbell }
2905f931551bSRalph Campbell 
2906f931551bSRalph Campbell static void pma_6120_timer(struct timer_list *t)
2907f931551bSRalph Campbell {
2908f931551bSRalph Campbell 	struct qib_chip_specific *cs = from_timer(cs, t, pma_timer);
29094037c92fSKees Cook 	struct qib_pportdata *ppd = cs->ppd;
2910f931551bSRalph Campbell 	struct qib_ibport *ibp = &ppd->ibport_data;
29114037c92fSKees Cook 	unsigned long flags;
29124037c92fSKees Cook 
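	/*
	 * Two-phase sample window: on the first expiry (STARTED) snapshot
	 * the counters and rearm for the sample interval; on the second
	 * (RUNNING) compute the deltas and mark the sample DONE.
	 */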
2913f931551bSRalph Campbell 	spin_lock_irqsave(&ibp->rvp.lock, flags);
2914f931551bSRalph Campbell 	if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
2915f931551bSRalph Campbell 		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
2916f24a6d48SHarish Chegondi 		qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
2917f931551bSRalph Campbell 				      &cs->spkts, &cs->rpkts, &cs->xmit_wait);
2918f931551bSRalph Campbell 		mod_timer(&cs->pma_timer,
2919f931551bSRalph Campbell 		      jiffies + usecs_to_jiffies(ibp->rvp.pma_sample_interval));
2920f931551bSRalph Campbell 	} else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
2921f931551bSRalph Campbell 		u64 ta, tb, tc, td, te;
2922f24a6d48SHarish Chegondi 
2923f931551bSRalph Campbell 		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
2924f931551bSRalph Campbell 		qib_snapshot_counters(ppd, &ta, &tb, &tc, &td, &te);
2925f931551bSRalph Campbell 
2926f931551bSRalph Campbell 		cs->sword = ta - cs->sword;
2927f931551bSRalph Campbell 		cs->rword = tb - cs->rword;
2928f931551bSRalph Campbell 		cs->spkts = tc - cs->spkts;
2929f931551bSRalph Campbell 		cs->rpkts = td - cs->rpkts;
2930f931551bSRalph Campbell 		cs->xmit_wait = te - cs->xmit_wait;
2931f931551bSRalph Campbell 	}
2932f931551bSRalph Campbell 	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
2933f931551bSRalph Campbell }
2934f931551bSRalph Campbell 
2935f24a6d48SHarish Chegondi /*
2936f931551bSRalph Campbell  * Note that the caller has the ibp->rvp.lock held.
2937f931551bSRalph Campbell  */
2938f931551bSRalph Campbell static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv,
2939f24a6d48SHarish Chegondi 				     u32 start)
2940f931551bSRalph Campbell {
2941f931551bSRalph Campbell 	struct qib_chip_specific *cs = ppd->dd->cspec;
2942f931551bSRalph Campbell 
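	/*
	 * start && intv: arm the timer so sampling begins after 'start' usecs.
	 * intv only: snapshot the counters now and sample for 'intv' usecs.
	 * otherwise: mark the sample done and zero the sample counters.
	 */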
2943f931551bSRalph Campbell 	if (start && intv) {
2944f931551bSRalph Campbell 		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
2945f931551bSRalph Campbell 		mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(start));
2946f931551bSRalph Campbell 	} else if (intv) {
2947f931551bSRalph Campbell 		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
2948f931551bSRalph Campbell 		qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
2949f931551bSRalph Campbell 				      &cs->spkts, &cs->rpkts, &cs->xmit_wait);
2950f931551bSRalph Campbell 		mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(intv));
2951f931551bSRalph Campbell 	} else {
2952f931551bSRalph Campbell 		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
2953f931551bSRalph Campbell 		cs->sword = 0;
2954f931551bSRalph Campbell 		cs->rword = 0;
2955f931551bSRalph Campbell 		cs->spkts = 0;
2956f931551bSRalph Campbell 		cs->rpkts = 0;
2957f931551bSRalph Campbell 		cs->xmit_wait = 0;
2958f931551bSRalph Campbell 	}
2959f931551bSRalph Campbell }
2960f931551bSRalph Campbell 
2961f931551bSRalph Campbell static u32 qib_6120_iblink_state(u64 ibcs)
2962f931551bSRalph Campbell {
2963f931551bSRalph Campbell 	u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
2964f931551bSRalph Campbell 
2965f931551bSRalph Campbell 	switch (state) {
2966f931551bSRalph Campbell 	case IB_6120_L_STATE_INIT:
2967f931551bSRalph Campbell 		state = IB_PORT_INIT;
2968f931551bSRalph Campbell 		break;
2969f931551bSRalph Campbell 	case IB_6120_L_STATE_ARM:
2970f931551bSRalph Campbell 		state = IB_PORT_ARMED;
2971f931551bSRalph Campbell 		break;
2972f931551bSRalph Campbell 	case IB_6120_L_STATE_ACTIVE:
2973f931551bSRalph Campbell 	case IB_6120_L_STATE_ACT_DEFER:
2974f931551bSRalph Campbell 		state = IB_PORT_ACTIVE;
2975f931551bSRalph Campbell 		break;
2976f931551bSRalph Campbell 	default:
2977f931551bSRalph Campbell 		fallthrough;
2978f931551bSRalph Campbell 	case IB_6120_L_STATE_DOWN:
2979df561f66SGustavo A. R. Silva 		state = IB_PORT_DOWN;
2980df561f66SGustavo A. R. Silva 		break;
2981f931551bSRalph Campbell 	}
2982f931551bSRalph Campbell 	return state;
2983f931551bSRalph Campbell }
2984f931551bSRalph Campbell 
2985f931551bSRalph Campbell /* returns the IBTA port state, rather than the IBC link training state */
2986f931551bSRalph Campbell static u8 qib_6120_phys_portstate(u64 ibcs)
2987f931551bSRalph Campbell {
2988f931551bSRalph Campbell 	u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
2989f931551bSRalph Campbell 	return qib_6120_physportstate[state];
2990f931551bSRalph Campbell }
2991f931551bSRalph Campbell 
2992f931551bSRalph Campbell static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
2993f931551bSRalph Campbell {
2994f931551bSRalph Campbell 	unsigned long flags;
2995f931551bSRalph Campbell 
2996f931551bSRalph Campbell 	spin_lock_irqsave(&ppd->lflags_lock, flags);
2997f931551bSRalph Campbell 	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
2998f931551bSRalph Campbell 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2999f931551bSRalph Campbell 
3000f931551bSRalph Campbell 	if (ibup) {
3001f931551bSRalph Campbell 		if (ppd->dd->cspec->ibdeltainprog) {
3002f931551bSRalph Campbell 			ppd->dd->cspec->ibdeltainprog = 0;
3003f931551bSRalph Campbell 			ppd->dd->cspec->ibsymdelta +=
3004f931551bSRalph Campbell 				read_6120_creg32(ppd->dd, cr_ibsymbolerr) -
3005f931551bSRalph Campbell 					ppd->dd->cspec->ibsymsnap;
3006f931551bSRalph Campbell 			ppd->dd->cspec->iblnkerrdelta +=
3007f931551bSRalph Campbell 				read_6120_creg32(ppd->dd, cr_iblinkerrrecov) -
3008f931551bSRalph Campbell 					ppd->dd->cspec->iblnkerrsnap;
3009f931551bSRalph Campbell 		}
3010f931551bSRalph Campbell 		qib_hol_init(ppd);
3011f931551bSRalph Campbell 	} else {
3012f931551bSRalph Campbell 		ppd->dd->cspec->lli_counter = 0;
3013f931551bSRalph Campbell 		if (!ppd->dd->cspec->ibdeltainprog) {
3014f931551bSRalph Campbell 			ppd->dd->cspec->ibdeltainprog = 1;
3015f931551bSRalph Campbell 			ppd->dd->cspec->ibsymsnap =
3016f931551bSRalph Campbell 				read_6120_creg32(ppd->dd, cr_ibsymbolerr);
3017f931551bSRalph Campbell 			ppd->dd->cspec->iblnkerrsnap =
3018f931551bSRalph Campbell 				read_6120_creg32(ppd->dd, cr_iblinkerrrecov);
3019f931551bSRalph Campbell 		}
3020f931551bSRalph Campbell 		qib_hol_down(ppd);
3021f931551bSRalph Campbell 	}
3022f931551bSRalph Campbell 
3023f931551bSRalph Campbell 	qib_6120_setup_setextled(ppd, ibup);
3024f931551bSRalph Campbell 
3025f931551bSRalph Campbell 	return 0;
3026f931551bSRalph Campbell }
3027f931551bSRalph Campbell 
3028f931551bSRalph Campbell /* Does a read/modify/write of the appropriate registers to
3029f931551bSRalph Campbell  * set the output and direction bits selected by mask.
3030f931551bSRalph Campbell  * These are in their canonical positions (e.g. the lsb of
3031f931551bSRalph Campbell  * dir will end up in D48 of extctrl on existing chips).
3032f931551bSRalph Campbell  * Returns the contents of the GP inputs.
30334d2a3c16SQinghua Jin  */
3034f931551bSRalph Campbell static int gpio_6120_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
3035f931551bSRalph Campbell {
3036f931551bSRalph Campbell 	u64 read_val, new_out;
3037f931551bSRalph Campbell 	unsigned long flags;
3038f931551bSRalph Campbell 
3039f931551bSRalph Campbell 	if (mask) {
3040f931551bSRalph Campbell 		/* some bits being written, lock access to GPIO */
3041f931551bSRalph Campbell 		dir &= mask;
3042f931551bSRalph Campbell 		out &= mask;
3043f931551bSRalph Campbell 		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
3044f931551bSRalph Campbell 		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
3045f931551bSRalph Campbell 		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
3046f931551bSRalph Campbell 		new_out = (dd->cspec->gpio_out & ~mask) | out;
3047f931551bSRalph Campbell 
3048f931551bSRalph Campbell 		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
3049f931551bSRalph Campbell 		qib_write_kreg(dd, kr_gpio_out, new_out);
3050f931551bSRalph Campbell 		dd->cspec->gpio_out = new_out;
3051f931551bSRalph Campbell 		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
3052f931551bSRalph Campbell 	}
3053f931551bSRalph Campbell 	/*
3054f931551bSRalph Campbell 	 * It is unlikely that a read at this time would get valid
3055f931551bSRalph Campbell 	 * data on a pin whose direction line was set in the same
3056f931551bSRalph Campbell 	 * call to this function. We include the read here because
3057f931551bSRalph Campbell 	 * that allows us to potentially combine a change on one pin with
3058f931551bSRalph Campbell 	 * a read on another, and because the old code did something like
3059f931551bSRalph Campbell 	 * this.
3060f931551bSRalph Campbell 	 */
3061f931551bSRalph Campbell 	read_val = qib_read_kreg64(dd, kr_extstatus);
3062f931551bSRalph Campbell 	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
3063f931551bSRalph Campbell }
3064f931551bSRalph Campbell 
3065f931551bSRalph Campbell /*
3066f931551bSRalph Campbell  * Read fundamental info we need to use the chip.  These are
3067f931551bSRalph Campbell  * the registers that describe chip capabilities, and are
3068f931551bSRalph Campbell  * saved in shadow registers.
3069f931551bSRalph Campbell  */
3070f931551bSRalph Campbell static void get_6120_chip_params(struct qib_devdata *dd)
3071f931551bSRalph Campbell {
3072f931551bSRalph Campbell 	u64 val;
3073f931551bSRalph Campbell 	u32 piobufs;
3074f931551bSRalph Campbell 	int mtu;
3075f931551bSRalph Campbell 
3076f931551bSRalph Campbell 	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
3077f931551bSRalph Campbell 
3078f931551bSRalph Campbell 	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
3079f931551bSRalph Campbell 	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
3080f931551bSRalph Campbell 	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
3081f931551bSRalph Campbell 	dd->palign = qib_read_kreg32(dd, kr_palign);
3082f931551bSRalph Campbell 	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
3083f931551bSRalph Campbell 	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
3084f931551bSRalph Campbell 
3085f931551bSRalph Campbell 	dd->rcvhdrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3086f931551bSRalph Campbell 
3087f931551bSRalph Campbell 	val = qib_read_kreg64(dd, kr_sendpiosize);
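	/* low 32 bits are the 2K PIO buffer size, high 32 bits the 4K size */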
3088f931551bSRalph Campbell 	dd->piosize2k = val & ~0U;
3089f931551bSRalph Campbell 	dd->piosize4k = val >> 32;
3090f931551bSRalph Campbell 
3091f931551bSRalph Campbell 	mtu = ib_mtu_enum_to_int(qib_ibmtu);
3092f931551bSRalph Campbell 	if (mtu == -1)
3093f931551bSRalph Campbell 		mtu = QIB_DEFAULT_MTU;
3094f931551bSRalph Campbell 	dd->pport->ibmtu = (u32)mtu;
3095f931551bSRalph Campbell 
3096f931551bSRalph Campbell 	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
3097f931551bSRalph Campbell 	dd->piobcnt2k = val & ~0U;
3098f931551bSRalph Campbell 	dd->piobcnt4k = val >> 32;
3099f931551bSRalph Campbell 	dd->last_pio = dd->piobcnt4k + dd->piobcnt2k - 1;
3100f931551bSRalph Campbell 	/* these may be adjusted in init_chip_wc_pat() */
3101f931551bSRalph Campbell 	dd->pio2kbase = (u32 __iomem *)
3102bb77a077SMike Marciniszyn 		(((char __iomem *)dd->kregbase) + dd->pio2k_bufbase);
3103f931551bSRalph Campbell 	if (dd->piobcnt4k) {
3104f931551bSRalph Campbell 		dd->pio4kbase = (u32 __iomem *)
3105f931551bSRalph Campbell 			(((char __iomem *) dd->kregbase) +
3106f931551bSRalph Campbell 			 (dd->piobufbase >> 32));
3107f931551bSRalph Campbell 		/*
3108f931551bSRalph Campbell 		 * 4K buffers take 2 pages; we use roundup just to be
3109f931551bSRalph Campbell 		 * paranoid; we calculate it once here, rather than on
3110f931551bSRalph Campbell 		 * every buffer allocation.
3111f931551bSRalph Campbell 		 */
3112f931551bSRalph Campbell 		dd->align4k = ALIGN(dd->piosize4k, dd->palign);
3113f931551bSRalph Campbell 	}
3114f931551bSRalph Campbell 
3115f931551bSRalph Campbell 	piobufs = dd->piobcnt4k + dd->piobcnt2k;
3116f931551bSRalph Campbell 
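	/*
	 * Each 64-bit pioavail register shadows 32 send buffers (two bits
	 * per buffer), so round the buffer count up to a multiple of 32.
	 */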
3117f931551bSRalph Campbell 	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
3118f931551bSRalph Campbell 		(sizeof(u64) * BITS_PER_BYTE / 2);
3119f931551bSRalph Campbell }
3120f931551bSRalph Campbell 
3121f931551bSRalph Campbell /*
3122f931551bSRalph Campbell  * The chip base addresses in cspec and cpspec have to be set
3123f931551bSRalph Campbell  * after possible init_chip_wc_pat(), rather than in
3124f931551bSRalph Campbell  * get_6120_chip_params(), so this is split out as a separate function.
3125f931551bSRalph Campbell  */
3126f931551bSRalph Campbell static void set_6120_baseaddrs(struct qib_devdata *dd)
3127f931551bSRalph Campbell {
3128f931551bSRalph Campbell 	u32 cregbase;
3129f931551bSRalph Campbell 
3130f931551bSRalph Campbell 	cregbase = qib_read_kreg32(dd, kr_counterregbase);
3131f931551bSRalph Campbell 	dd->cspec->cregbase = (u64 __iomem *)
3132da12c1f6SMike Marciniszyn 		((char __iomem *) dd->kregbase + cregbase);
3133f931551bSRalph Campbell 
3134f931551bSRalph Campbell 	dd->egrtidbase = (u64 __iomem *)
3135f931551bSRalph Campbell 		((char __iomem *) dd->kregbase + dd->rcvegrbase);
3136f931551bSRalph Campbell }
3137f931551bSRalph Campbell 
3138f931551bSRalph Campbell /*
3139f931551bSRalph Campbell  * Write the final few registers that depend on some of the
3140f931551bSRalph Campbell  * init setup.  Done late in init, just before bringing up
3141f931551bSRalph Campbell  * the serdes.
3142f931551bSRalph Campbell  */
3143f931551bSRalph Campbell static int qib_late_6120_initreg(struct qib_devdata *dd)
3144f931551bSRalph Campbell {
3145f931551bSRalph Campbell 	int ret = 0;
3146f931551bSRalph Campbell 	u64 val;
3147f931551bSRalph Campbell 
3148f931551bSRalph Campbell 	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
3149f931551bSRalph Campbell 	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
3150f931551bSRalph Campbell 	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
3151f931551bSRalph Campbell 	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
3152f931551bSRalph Campbell 	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
3153f931551bSRalph Campbell 	if (val != dd->pioavailregs_phys) {
3154f931551bSRalph Campbell 		qib_dev_err(dd,
3155f931551bSRalph Campbell 			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
3156f931551bSRalph Campbell 			(unsigned long) dd->pioavailregs_phys,
31577fac3301SMike Marciniszyn 			(unsigned long long) val);
31587fac3301SMike Marciniszyn 		ret = -EINVAL;
3159f931551bSRalph Campbell 	}
3160f931551bSRalph Campbell 	return ret;
3161f931551bSRalph Campbell }
3162f931551bSRalph Campbell 
3163f931551bSRalph Campbell static int init_6120_variables(struct qib_devdata *dd)
3164f931551bSRalph Campbell {
3165f931551bSRalph Campbell 	int ret = 0;
3166f931551bSRalph Campbell 	struct qib_pportdata *ppd;
3167f931551bSRalph Campbell 	u32 sbufs;
3168f931551bSRalph Campbell 
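	/*
	 * pportdata and chip-specific data were allocated contiguously
	 * right after the devdata (see qib_init_iba6120_funcs()), so the
	 * pointers are simple offsets from dd.
	 */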
3169f931551bSRalph Campbell 	ppd = (struct qib_pportdata *)(dd + 1);
3170f931551bSRalph Campbell 	dd->pport = ppd;
3171f931551bSRalph Campbell 	dd->num_pports = 1;
3172f931551bSRalph Campbell 
3173f931551bSRalph Campbell 	dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);
3174f931551bSRalph Campbell 	dd->cspec->ppd = ppd;
3175f931551bSRalph Campbell 	ppd->cpspec = NULL; /* not used in this chip */
3176f931551bSRalph Campbell 
31774037c92fSKees Cook 	spin_lock_init(&dd->cspec->kernel_tid_lock);
3178f931551bSRalph Campbell 	spin_lock_init(&dd->cspec->user_tid_lock);
3179f931551bSRalph Campbell 	spin_lock_init(&dd->cspec->rcvmod_lock);
3180f931551bSRalph Campbell 	spin_lock_init(&dd->cspec->gpio_lock);
3181f931551bSRalph Campbell 
3182f931551bSRalph Campbell 	/* we haven't yet set QIB_PRESENT, so use readq() directly */
3183f931551bSRalph Campbell 	dd->revision = readq(&dd->kregbase[kr_revision]);
3184f931551bSRalph Campbell 
3185f931551bSRalph Campbell 	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
3186f931551bSRalph Campbell 		qib_dev_err(dd,
3187f931551bSRalph Campbell 			"Revision register read failure, giving up initialization\n");
3188f931551bSRalph Campbell 		ret = -ENODEV;
31897fac3301SMike Marciniszyn 		goto bail;
31907fac3301SMike Marciniszyn 	}
3191f931551bSRalph Campbell 	dd->flags |= QIB_PRESENT;  /* now register routines work */
3192f931551bSRalph Campbell 
3193f931551bSRalph Campbell 	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
3194f931551bSRalph Campbell 				    ChipRevMajor);
3195f931551bSRalph Campbell 	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
3196f931551bSRalph Campbell 				    ChipRevMinor);
3197f931551bSRalph Campbell 
3198f931551bSRalph Campbell 	get_6120_chip_params(dd);
3199f931551bSRalph Campbell 	pe_boardname(dd); /* fill in boardname */
3200f931551bSRalph Campbell 
3201f931551bSRalph Campbell 	/*
3202f931551bSRalph Campbell 	 * GPIO bits for TWSI data and clock,
3203f931551bSRalph Campbell 	 * used for serial EEPROM.
3204f931551bSRalph Campbell 	 */
3205f931551bSRalph Campbell 	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
3206f931551bSRalph Campbell 	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
3207f931551bSRalph Campbell 	dd->twsi_eeprom_dev = QIB_TWSI_NO_DEV;
3208f931551bSRalph Campbell 
3209f931551bSRalph Campbell 	if (qib_unordered_wc())
3210f931551bSRalph Campbell 		dd->flags |= QIB_PIO_FLUSH_WC;
3211f931551bSRalph Campbell 
3212f931551bSRalph Campbell 	ret = qib_init_pportdata(ppd, dd, 0, 1);
3213f931551bSRalph Campbell 	if (ret)
3214f931551bSRalph Campbell 		goto bail;
32157d7632adSMike Marciniszyn 	ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
32167d7632adSMike Marciniszyn 	ppd->link_speed_supported = QIB_IB_SDR;
32177d7632adSMike Marciniszyn 	ppd->link_width_enabled = IB_WIDTH_4X;
3218f931551bSRalph Campbell 	ppd->link_speed_enabled = ppd->link_speed_supported;
3219f931551bSRalph Campbell 	/* these can't change for this chip, so set once */
3220f931551bSRalph Campbell 	ppd->link_width_active = ppd->link_width_enabled;
3221f931551bSRalph Campbell 	ppd->link_speed_active = ppd->link_speed_enabled;
3222f931551bSRalph Campbell 	ppd->vls_supported = IB_VL_VL0;
3223f931551bSRalph Campbell 	ppd->vls_operational = ppd->vls_supported;
3224f931551bSRalph Campbell 
3225f931551bSRalph Campbell 	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
3226f931551bSRalph Campbell 	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
3227f931551bSRalph Campbell 	dd->rhf_offset = 0;
3228f931551bSRalph Campbell 
3229f931551bSRalph Campbell 	/* we always allocate at least 2048 bytes for eager buffers */
3230f931551bSRalph Campbell 	ret = ib_mtu_enum_to_int(qib_ibmtu);
3231f931551bSRalph Campbell 	dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
3232f931551bSRalph Campbell 	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
3233f931551bSRalph Campbell 
3234f931551bSRalph Campbell 	qib_6120_tidtemplate(dd);
32359e1c0e43SMike Marciniszyn 
3236f931551bSRalph Campbell 	/*
3237f931551bSRalph Campbell 	 * We can request a receive interrupt for 1 or
3238f931551bSRalph Campbell 	 * more packets from current offset.  For now, we set this
3239f931551bSRalph Campbell 	 * up for a single packet.
3240f931551bSRalph Campbell 	 */
3241f931551bSRalph Campbell 	dd->rhdrhead_intr_off = 1ULL << 32;
3242f931551bSRalph Campbell 
3243f931551bSRalph Campbell 	/* setup the stats timer; the add_timer is done at end of init */
3244f931551bSRalph Campbell 	timer_setup(&dd->stats_timer, qib_get_6120_faststats, 0);
3245f931551bSRalph Campbell 	timer_setup(&dd->cspec->pma_timer, pma_6120_timer, 0);
3246f931551bSRalph Campbell 
32474037c92fSKees Cook 	dd->ureg_align = qib_read_kreg32(dd, kr_palign);
32484037c92fSKees Cook 
3249f931551bSRalph Campbell 	dd->piosize2kmax_dwords = dd->piosize2k >> 2;
3250f931551bSRalph Campbell 	qib_6120_config_ctxts(dd);
3251f931551bSRalph Campbell 	qib_set_ctxtcnt(dd);
3252f931551bSRalph Campbell 
3253f931551bSRalph Campbell 	ret = init_chip_wc_pat(dd, 0);
3254f931551bSRalph Campbell 	if (ret)
3255f931551bSRalph Campbell 		goto bail;
3256f931551bSRalph Campbell 	set_6120_baseaddrs(dd); /* set chip access pointers now */
3257f931551bSRalph Campbell 
3258f931551bSRalph Campbell 	ret = 0;
3259f931551bSRalph Campbell 	if (qib_mini_init)
3260f931551bSRalph Campbell 		goto bail;
3261f931551bSRalph Campbell 
3262f931551bSRalph Campbell 	qib_num_cfg_vls = 1; /* if any 6120's, only one VL */
3263f931551bSRalph Campbell 
3264f931551bSRalph Campbell 	ret = qib_create_ctxts(dd);
3265f931551bSRalph Campbell 	init_6120_cntrnames(dd);
3266f931551bSRalph Campbell 
3267f931551bSRalph Campbell 	/* the kernel gets all the 4KB buffers; if there are none, reserve 16 */
3268f931551bSRalph Campbell 	sbufs = dd->piobcnt4k ?  dd->piobcnt4k : 16;
3269f931551bSRalph Campbell 
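	/*
	 * Reserve sbufs buffers at the top for the kernel; the remainder
	 * is divided evenly among the user contexts.
	 */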
3270f931551bSRalph Campbell 	dd->lastctxt_piobuf = dd->piobcnt2k + dd->piobcnt4k - sbufs;
3271f931551bSRalph Campbell 	dd->pbufsctxt = dd->lastctxt_piobuf /
3272f931551bSRalph Campbell 		(dd->cfgctxts - dd->first_user_ctxt);
3273f931551bSRalph Campbell 
3274f931551bSRalph Campbell 	if (ret)
3275f931551bSRalph Campbell 		goto bail;
3276f931551bSRalph Campbell bail:
3277f931551bSRalph Campbell 	return ret;
3278f931551bSRalph Campbell }
3279f931551bSRalph Campbell 
3280f931551bSRalph Campbell /*
3281f931551bSRalph Campbell  * For this chip, we want to use the same buffer every time
3282f931551bSRalph Campbell  * when we are trying to bring the link up (they are always VL15
3283f931551bSRalph Campbell  * packets).  At that link state the packet should always go out immediately
3284f931551bSRalph Campbell  * (or at least be discarded at the tx interface if the link is down).
3285f931551bSRalph Campbell  * If it doesn't, and the buffer isn't available, that means some other
3286f931551bSRalph Campbell  * sender has gotten ahead of us, and is preventing our packet from going
3287f931551bSRalph Campbell  * out.  In that case, we flush all packets, and try again.  If that still
3288f931551bSRalph Campbell  * fails, we fail the request, and hope things work the next time around.
3289f931551bSRalph Campbell  *
3290f931551bSRalph Campbell  * We don't need very complicated heuristics on whether the packet had
3291f931551bSRalph Campbell  * time to go out or not, since even at SDR 1X, it goes out in very short
3292f931551bSRalph Campbell  * time periods, covered by the chip reads done here and as part of the
3293f931551bSRalph Campbell  * flush.
3294f931551bSRalph Campbell  */
3295f931551bSRalph Campbell static u32 __iomem *get_6120_link_buf(struct qib_pportdata *ppd, u32 *bnum)
3296f931551bSRalph Campbell {
3297f931551bSRalph Campbell 	u32 __iomem *buf;
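	/* VL15 link packets always use the last (highest numbered) PIO buffer */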
3298f931551bSRalph Campbell 	u32 lbuf = ppd->dd->piobcnt2k + ppd->dd->piobcnt4k - 1;
3299f931551bSRalph Campbell 
3300f931551bSRalph Campbell 	/*
3301f931551bSRalph Campbell 	 * always blip to get avail list updated, since it's almost
3302f931551bSRalph Campbell 	 * always needed, and is fairly cheap.
3303f931551bSRalph Campbell 	 */
3304f931551bSRalph Campbell 	sendctrl_6120_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
3305f931551bSRalph Campbell 	qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
3306f931551bSRalph Campbell 	buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
3307f931551bSRalph Campbell 	if (buf)
3308f931551bSRalph Campbell 		goto done;
3309f931551bSRalph Campbell 
3310f931551bSRalph Campbell 	sendctrl_6120_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
3311f931551bSRalph Campbell 			  QIB_SENDCTRL_AVAIL_BLIP);
3312f931551bSRalph Campbell 	ppd->dd->upd_pio_shadow  = 1; /* update our idea of what's busy */
3313f931551bSRalph Campbell 	qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
3314f931551bSRalph Campbell 	buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
3315f931551bSRalph Campbell done:
3316f931551bSRalph Campbell 	return buf;
3317f931551bSRalph Campbell }
3318f931551bSRalph Campbell 
3319f931551bSRalph Campbell static u32 __iomem *qib_6120_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
3320f931551bSRalph Campbell 					u32 *pbufnum)
3321f931551bSRalph Campbell {
3322f931551bSRalph Campbell 	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
3323f931551bSRalph Campbell 	struct qib_devdata *dd = ppd->dd;
3324f931551bSRalph Campbell 	u32 __iomem *buf;
3325f931551bSRalph Campbell 
3326f931551bSRalph Campbell 	if (((pbc >> 32) & PBC_6120_VL15_SEND_CTRL) &&
3327f931551bSRalph Campbell 		!(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
3328f931551bSRalph Campbell 		buf = get_6120_link_buf(ppd, pbufnum);
3329f931551bSRalph Campbell 	else {
3330f931551bSRalph Campbell 
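		/* packets too large for a 2K buffer must come from the 4K pool */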
3331f931551bSRalph Campbell 		if ((plen + 1) > dd->piosize2kmax_dwords)
3332f931551bSRalph Campbell 			first = dd->piobcnt2k;
3333f931551bSRalph Campbell 		else
3334f931551bSRalph Campbell 			first = 0;
3335f931551bSRalph Campbell 		/* try 4k if all 2k busy, so same last for both sizes */
3336f931551bSRalph Campbell 		last = dd->piobcnt2k + dd->piobcnt4k - 1;
3337f931551bSRalph Campbell 		buf = qib_getsendbuf_range(dd, pbufnum, first, last);
3338f931551bSRalph Campbell 	}
3339f931551bSRalph Campbell 	return buf;
3340f931551bSRalph Campbell }
3341f931551bSRalph Campbell 
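/*
 * The 6120 has no send DMA engine; these stubs satisfy the chip-specific
 * function table and report SDMA as unsupported and idle.
 */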
3342f931551bSRalph Campbell static int init_sdma_6120_regs(struct qib_pportdata *ppd)
3343f931551bSRalph Campbell {
3344f931551bSRalph Campbell 	return -ENODEV;
3345f931551bSRalph Campbell }
3346f931551bSRalph Campbell 
3347f931551bSRalph Campbell static u16 qib_sdma_6120_gethead(struct qib_pportdata *ppd)
3348f931551bSRalph Campbell {
3349f931551bSRalph Campbell 	return 0;
3350f931551bSRalph Campbell }
3351f931551bSRalph Campbell 
3352f931551bSRalph Campbell static int qib_sdma_6120_busy(struct qib_pportdata *ppd)
3353f931551bSRalph Campbell {
3354f931551bSRalph Campbell 	return 0;
3355f931551bSRalph Campbell }
3356f931551bSRalph Campbell 
3357f931551bSRalph Campbell static void qib_sdma_update_6120_tail(struct qib_pportdata *ppd, u16 tail)
3358f931551bSRalph Campbell {
3359f931551bSRalph Campbell }
3360f931551bSRalph Campbell 
3361f931551bSRalph Campbell static void qib_6120_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
3362f931551bSRalph Campbell {
3363f931551bSRalph Campbell }
3364f931551bSRalph Campbell 
3365f931551bSRalph Campbell static void qib_sdma_set_6120_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
3366f931551bSRalph Campbell {
3367f931551bSRalph Campbell }
3368f931551bSRalph Campbell 
3369f931551bSRalph Campbell /*
3370f931551bSRalph Campbell  * The PBC doesn't need a VL15 indicator, but we need it for link_buf.
3371f931551bSRalph Campbell  * The chip ignores the bit if it is set.
3372f931551bSRalph Campbell  */
3373f931551bSRalph Campbell static u32 qib_6120_setpbc_control(struct qib_pportdata *ppd, u32 plen,
3374f931551bSRalph Campbell 				   u8 srate, u8 vl)
3375f931551bSRalph Campbell {
3376f931551bSRalph Campbell 	return vl == 15 ? PBC_6120_VL15_SEND_CTRL : 0;
3377f931551bSRalph Campbell }
3378f931551bSRalph Campbell 
3379f931551bSRalph Campbell static void qib_6120_initvl15_bufs(struct qib_devdata *dd)
3380f931551bSRalph Campbell {
3381f931551bSRalph Campbell }
3382f931551bSRalph Campbell 
3383f931551bSRalph Campbell static void qib_6120_init_ctxt(struct qib_ctxtdata *rcd)
3384f931551bSRalph Campbell {
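	/*
	 * Each context gets an equal slice of eager TIDs; its TID base
	 * follows directly from the context number.
	 */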
3385f931551bSRalph Campbell 	rcd->rcvegrcnt = rcd->dd->rcvhdrcnt;
3386f931551bSRalph Campbell 	rcd->rcvegr_tid_base = rcd->ctxt * rcd->rcvegrcnt;
3387f931551bSRalph Campbell }
3388f931551bSRalph Campbell 
3389f931551bSRalph Campbell static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start,
3390f931551bSRalph Campbell 	u32 len, u32 avail, struct qib_ctxtdata *rcd)
3391f931551bSRalph Campbell {
3392f931551bSRalph Campbell }
3393f931551bSRalph Campbell 
3394f931551bSRalph Campbell static void writescratch(struct qib_devdata *dd, u32 val)
3395f931551bSRalph Campbell {
3396f931551bSRalph Campbell 	(void) qib_write_kreg(dd, kr_scratch, val);
3397f931551bSRalph Campbell }
3398f931551bSRalph Campbell 
3399f931551bSRalph Campbell static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum)
3400f931551bSRalph Campbell {
3401f931551bSRalph Campbell 	return -ENXIO;
3402f931551bSRalph Campbell }
3403f931551bSRalph Campbell 
3404f931551bSRalph Campbell #ifdef CONFIG_INFINIBAND_QIB_DCA
3405f931551bSRalph Campbell static int qib_6120_notify_dca(struct qib_devdata *dd, unsigned long event)
3406f931551bSRalph Campbell {
34078469ba39SMike Marciniszyn 	return 0;
34088469ba39SMike Marciniszyn }
34098469ba39SMike Marciniszyn #endif
34108469ba39SMike Marciniszyn 
34118469ba39SMike Marciniszyn /* Dummy function, as 6120 boards never disable EEPROM Write */
34128469ba39SMike Marciniszyn static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen)
34138469ba39SMike Marciniszyn {
3414f931551bSRalph Campbell 	return 1;
3415f931551bSRalph Campbell }
3416f931551bSRalph Campbell 
3417f931551bSRalph Campbell /**
3418f931551bSRalph Campbell  * qib_init_iba6120_funcs - set up the chip-specific function pointers
3419f931551bSRalph Campbell  * @pdev: pci_dev of the qlogic_ib device
3420f931551bSRalph Campbell  * @ent: pci_device_id matching this chip
3421f931551bSRalph Campbell  *
3422f931551bSRalph Campbell  * This is global, and is called directly at init to set up the
3423f931551bSRalph Campbell  * chip-specific function pointers for later use.
3424f931551bSRalph Campbell  *
3425f931551bSRalph Campbell  * It also allocates/partially-inits the qib_devdata struct for
3426f931551bSRalph Campbell  * this device.
3427f931551bSRalph Campbell  */
3428f931551bSRalph Campbell struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
3429f931551bSRalph Campbell 					   const struct pci_device_id *ent)
3430f931551bSRalph Campbell {
3431f931551bSRalph Campbell 	struct qib_devdata *dd;
3432f931551bSRalph Campbell 	int ret;
3433f931551bSRalph Campbell 
3434f931551bSRalph Campbell 	dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) +
3435f931551bSRalph Campbell 			       sizeof(struct qib_chip_specific));
3436f931551bSRalph Campbell 	if (IS_ERR(dd))
3437f931551bSRalph Campbell 		goto bail;
3438f931551bSRalph Campbell 
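	/*
	 * Per-chip function table; the qib core calls into this chip's
	 * code only through these dd->f_* hooks.
	 */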
3439f931551bSRalph Campbell 	dd->f_bringup_serdes    = qib_6120_bringup_serdes;
3440f931551bSRalph Campbell 	dd->f_cleanup           = qib_6120_setup_cleanup;
3441f931551bSRalph Campbell 	dd->f_clear_tids        = qib_6120_clear_tids;
3442f931551bSRalph Campbell 	dd->f_free_irq          = qib_free_irq;
3443f931551bSRalph Campbell 	dd->f_get_base_info     = qib_6120_get_base_info;
3444f931551bSRalph Campbell 	dd->f_get_msgheader     = qib_6120_get_msgheader;
3445c4bc6156SMichael J. Ruhl 	dd->f_getsendbuf        = qib_6120_getsendbuf;
3446f931551bSRalph Campbell 	dd->f_gpio_mod          = gpio_6120_mod;
3447f931551bSRalph Campbell 	dd->f_eeprom_wen	= qib_6120_eeprom_wen;
3448f931551bSRalph Campbell 	dd->f_hdrqempty         = qib_6120_hdrqempty;
3449f931551bSRalph Campbell 	dd->f_ib_updown         = qib_6120_ib_updown;
3450f931551bSRalph Campbell 	dd->f_init_ctxt         = qib_6120_init_ctxt;
3451f931551bSRalph Campbell 	dd->f_initvl15_bufs     = qib_6120_initvl15_bufs;
3452f931551bSRalph Campbell 	dd->f_intr_fallback     = qib_6120_nointr_fallback;
3453f931551bSRalph Campbell 	dd->f_late_initreg      = qib_late_6120_initreg;
3454f931551bSRalph Campbell 	dd->f_setpbc_control    = qib_6120_setpbc_control;
3455f931551bSRalph Campbell 	dd->f_portcntr          = qib_portcntr_6120;
3456f931551bSRalph Campbell 	dd->f_put_tid           = (dd->minrev >= 2) ?
3457f931551bSRalph Campbell 				      qib_6120_put_tid_2 :
3458f931551bSRalph Campbell 				      qib_6120_put_tid;
3459f931551bSRalph Campbell 	dd->f_quiet_serdes      = qib_6120_quiet_serdes;
3460f931551bSRalph Campbell 	dd->f_rcvctrl           = rcvctrl_6120_mod;
3461f931551bSRalph Campbell 	dd->f_read_cntrs        = qib_read_6120cntrs;
3462f931551bSRalph Campbell 	dd->f_read_portcntrs    = qib_read_6120portcntrs;
3463f931551bSRalph Campbell 	dd->f_reset             = qib_6120_setup_reset;
3464f931551bSRalph Campbell 	dd->f_init_sdma_regs    = init_sdma_6120_regs;
3465f931551bSRalph Campbell 	dd->f_sdma_busy         = qib_sdma_6120_busy;
3466f931551bSRalph Campbell 	dd->f_sdma_gethead      = qib_sdma_6120_gethead;
3467f931551bSRalph Campbell 	dd->f_sdma_sendctrl     = qib_6120_sdma_sendctrl;
3468f931551bSRalph Campbell 	dd->f_sdma_set_desc_cnt = qib_sdma_set_6120_desc_cnt;
3469f931551bSRalph Campbell 	dd->f_sdma_update_tail  = qib_sdma_update_6120_tail;
3470f931551bSRalph Campbell 	dd->f_sendctrl          = sendctrl_6120_mod;
3471f931551bSRalph Campbell 	dd->f_set_armlaunch     = qib_set_6120_armlaunch;
3472f931551bSRalph Campbell 	dd->f_set_cntr_sample   = qib_set_cntr_6120_sample;
3473f931551bSRalph Campbell 	dd->f_iblink_state      = qib_6120_iblink_state;
3474f931551bSRalph Campbell 	dd->f_ibphys_portstate  = qib_6120_phys_portstate;
3475f931551bSRalph Campbell 	dd->f_get_ib_cfg        = qib_6120_get_ib_cfg;
3476f931551bSRalph Campbell 	dd->f_set_ib_cfg        = qib_6120_set_ib_cfg;
3477f931551bSRalph Campbell 	dd->f_set_ib_loopback   = qib_6120_set_loopback;
3478f931551bSRalph Campbell 	dd->f_set_intr_state    = qib_6120_set_intr_state;
3479f931551bSRalph Campbell 	dd->f_setextled         = qib_6120_setup_setextled;
3480f931551bSRalph Campbell 	dd->f_txchk_change      = qib_6120_txchk_change;
3481f931551bSRalph Campbell 	dd->f_update_usrhead    = qib_update_6120_usrhead;
3482f931551bSRalph Campbell 	dd->f_wantpiobuf_intr   = qib_wantpiobuf_6120_intr;
3483f931551bSRalph Campbell 	dd->f_xgxs_reset        = qib_6120_xgxs_reset;
3484f931551bSRalph Campbell 	dd->f_writescratch      = writescratch;
3485f931551bSRalph Campbell 	dd->f_tempsense_rd	= qib_6120_tempsense_rd;
3486f931551bSRalph Campbell #ifdef CONFIG_INFINIBAND_QIB_DCA
3487f931551bSRalph Campbell 	dd->f_notify_dca = qib_6120_notify_dca;
3488f931551bSRalph Campbell #endif
34898469ba39SMike Marciniszyn 	/*
34908469ba39SMike Marciniszyn 	 * Do remaining pcie setup and save pcie values in dd.
34918469ba39SMike Marciniszyn 	 * Any error printing is already done by the init code.
3492f931551bSRalph Campbell 	 * On return, we have the chip mapped and accessible,
3493f931551bSRalph Campbell 	 * but chip registers are not set up until start of
3494f931551bSRalph Campbell 	 * init_6120_variables.
3495f931551bSRalph Campbell 	 */
3496f931551bSRalph Campbell 	ret = qib_pcie_ddinit(dd, pdev, ent);
3497f931551bSRalph Campbell 	if (ret < 0)
3498f931551bSRalph Campbell 		goto bail_free;
3499f931551bSRalph Campbell 
3500f931551bSRalph Campbell 	/* initialize chip-specific variables */
3501f931551bSRalph Campbell 	ret = init_6120_variables(dd);
3502f931551bSRalph Campbell 	if (ret)
3503f931551bSRalph Campbell 		goto bail_cleanup;
3504f931551bSRalph Campbell 
3505f931551bSRalph Campbell 	if (qib_mini_init)
3506f931551bSRalph Campbell 		goto bail;
3507f931551bSRalph Campbell 
3508f931551bSRalph Campbell 	if (qib_pcie_params(dd, 8, NULL))
3509f931551bSRalph Campbell 		qib_dev_err(dd,
3510f931551bSRalph Campbell 			"Failed to setup PCIe or interrupts; continuing anyway\n");
3511581d01aaSMichael J. Ruhl 	/* clear diagctrl register, in case diags were running and crashed */
35127fac3301SMike Marciniszyn 	qib_write_kreg(dd, kr_hwdiagctrl, 0);
35137fac3301SMike Marciniszyn 
3514f931551bSRalph Campbell 	if (qib_read_kreg64(dd, kr_hwerrstatus) &
3515f931551bSRalph Campbell 	    QLOGIC_IB_HWE_SERDESPLLFAILED)
3516f931551bSRalph Campbell 		qib_write_kreg(dd, kr_hwerrclear,
3517f931551bSRalph Campbell 			       QLOGIC_IB_HWE_SERDESPLLFAILED);
3518f931551bSRalph Campbell 
3519f931551bSRalph Campbell 	/* setup interrupt handler (interrupt type handled above) */
3520f931551bSRalph Campbell 	qib_setup_6120_interrupt(dd);
3521f931551bSRalph Campbell 	/* Note that qpn_mask is set by qib_6120_config_ctxts() first */
3522f931551bSRalph Campbell 	qib_6120_init_hwerrors(dd);
3523f931551bSRalph Campbell 
3524f931551bSRalph Campbell 	goto bail;
3525f931551bSRalph Campbell 
3526f931551bSRalph Campbell bail_cleanup:
3527f931551bSRalph Campbell 	qib_pcie_ddcleanup(dd);
3528f931551bSRalph Campbell bail_free:
3529f931551bSRalph Campbell 	qib_free_devdata(dd);
3530f931551bSRalph Campbell 	dd = ERR_PTR(ret);
3531f931551bSRalph Campbell bail:
3532f931551bSRalph Campbell 	return dd;
3533f931551bSRalph Campbell }
3534f931551bSRalph Campbell