1 /*
2  * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/etherdevice.h>
33 #include "common.h"
34 #include "regs.h"
35 #include "sge_defs.h"
36 #include "firmware_exports.h"
37 
38 static void t3_port_intr_clear(struct adapter *adapter, int idx);
39 
40 /**
41  *	t3_wait_op_done_val - wait until an operation is completed
42  *	@adapter: the adapter performing the operation
43  *	@reg: the register to check for completion
44  *	@mask: a single-bit field within @reg that indicates completion
45  *	@polarity: the value of the field when the operation is completed
46  *	@attempts: number of check iterations
47  *	@delay: delay in usecs between iterations
48  *	@valp: where to store the value of the register at completion time
49  *
50  *	Wait until an operation is completed by checking a bit in a register
51  *	up to @attempts times.  If @valp is not NULL the value of the register
52  *	at the time it indicated completion is stored there.  Returns 0 if the
53  *	operation completes and -EAGAIN otherwise.
54  */
55 
56 int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 			int polarity, int attempts, int delay, u32 *valp)
58 {
59 	while (1) {
60 		u32 val = t3_read_reg(adapter, reg);
61 
62 		if (!!(val & mask) == polarity) {
63 			if (valp)
64 				*valp = val;
65 			return 0;
66 		}
67 		if (--attempts == 0)
68 			return -EAGAIN;
69 		if (delay)
70 			udelay(delay);
71 	}
72 }
73 
74 /**
75  *	t3_write_regs - write a bunch of registers
76  *	@adapter: the adapter to program
77  *	@p: an array of register address/register value pairs
78  *	@n: the number of address/value pairs
79  *	@offset: register address offset
80  *
81  *	Takes an array of register address/register value pairs and writes each
82  *	value to the corresponding register.  Register addresses are adjusted
83  *	by the supplied offset.
84  */
85 void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
86 		   int n, unsigned int offset)
87 {
88 	while (n--) {
89 		t3_write_reg(adapter, p->reg_addr + offset, p->val);
90 		p++;
91 	}
92 }
93 
94 /**
95  *	t3_set_reg_field - set a register field to a value
96  *	@adapter: the adapter to program
97  *	@addr: the register address
98  *	@mask: specifies the portion of the register to modify
99  *	@val: the new value for the register field
100  *
101  *	Sets a register field specified by the supplied mask to the
102  *	given value.
103  */
104 void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
105 		      u32 val)
106 {
107 	u32 v = t3_read_reg(adapter, addr) & ~mask;
108 
109 	t3_write_reg(adapter, addr, v | val);
110 	t3_read_reg(adapter, addr);	/* flush */
111 }
112 
113 /**
114  *	t3_read_indirect - read indirectly addressed registers
115  *	@adap: the adapter
116  *	@addr_reg: register holding the indirect address
117  *	@data_reg: register holding the value of the indirect register
118  *	@vals: where the read register values are stored
119  *	@start_idx: index of first indirect register to read
120  *	@nregs: how many indirect registers to read
121  *
122  *	Reads registers that are accessed indirectly through an address/data
123  *	register pair.
124  */
125 static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
126 			     unsigned int data_reg, u32 *vals,
127 			     unsigned int nregs, unsigned int start_idx)
128 {
129 	while (nregs--) {
130 		t3_write_reg(adap, addr_reg, start_idx);
131 		*vals++ = t3_read_reg(adap, data_reg);
132 		start_idx++;
133 	}
134 }
135 
136 /**
137  *	t3_mc7_bd_read - read from MC7 through backdoor accesses
138  *	@mc7: identifies MC7 to read from
139  *	@start: index of first 64-bit word to read
140  *	@n: number of 64-bit words to read
141  *	@buf: where to store the read result
142  *
143  *	Read n 64-bit words from MC7 starting at word start, using backdoor
144  *	accesses.
145  */
146 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
147 		   u64 *buf)
148 {
149 	static const int shift[] = { 0, 0, 16, 24 };
150 	static const int step[] = { 0, 32, 16, 8 };
151 
152 	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
153 	struct adapter *adap = mc7->adapter;
154 
155 	if (start >= size64 || start + n > size64)
156 		return -EINVAL;
157 
158 	start *= (8 << mc7->width);
159 	while (n--) {
160 		int i;
161 		u64 val64 = 0;
162 
163 		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
164 			int attempts = 10;
165 			u32 val;
166 
167 			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
168 			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
169 			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
170 			while ((val & F_BUSY) && attempts--)
171 				val = t3_read_reg(adap,
172 						  mc7->offset + A_MC7_BD_OP);
173 			if (val & F_BUSY)
174 				return -EIO;
175 
176 			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
177 			if (mc7->width == 0) {
178 				val64 = t3_read_reg(adap,
179 						    mc7->offset +
180 						    A_MC7_BD_DATA0);
181 				val64 |= (u64) val << 32;
182 			} else {
183 				if (mc7->width > 1)
184 					val >>= shift[mc7->width];
185 				val64 |= (u64) val << (step[mc7->width] * i);
186 			}
187 			start += 8;
188 		}
189 		*buf++ = val64;
190 	}
191 	return 0;
192 }
193 
194 /*
195  * Initialize MI1.
196  */
197 static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
198 {
199 	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
200 	u32 val = F_PREEN | V_CLKDIV(clkdiv);
201 
202 	t3_write_reg(adap, A_MI1_CFG, val);
203 }
204 
205 #define MDIO_ATTEMPTS 20
206 
207 /*
208  * MI1 read/write operations for clause 22 PHYs.
209  */
210 static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
211 		       u16 reg_addr)
212 {
213 	struct port_info *pi = netdev_priv(dev);
214 	struct adapter *adapter = pi->adapter;
215 	int ret;
216 	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
217 
218 	mutex_lock(&adapter->mdio_lock);
219 	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
220 	t3_write_reg(adapter, A_MI1_ADDR, addr);
221 	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
222 	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
223 	if (!ret)
224 		ret = t3_read_reg(adapter, A_MI1_DATA);
225 	mutex_unlock(&adapter->mdio_lock);
226 	return ret;
227 }
228 
229 static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
230 			u16 reg_addr, u16 val)
231 {
232 	struct port_info *pi = netdev_priv(dev);
233 	struct adapter *adapter = pi->adapter;
234 	int ret;
235 	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
236 
237 	mutex_lock(&adapter->mdio_lock);
238 	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
239 	t3_write_reg(adapter, A_MI1_ADDR, addr);
240 	t3_write_reg(adapter, A_MI1_DATA, val);
241 	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
242 	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
243 	mutex_unlock(&adapter->mdio_lock);
244 	return ret;
245 }
246 
247 static const struct mdio_ops mi1_mdio_ops = {
248 	.read = t3_mi1_read,
249 	.write = t3_mi1_write,
250 	.mode_support = MDIO_SUPPORTS_C22
251 };
252 
253 /*
254  * Performs the address cycle for clause 45 PHYs.
255  * Must be called with the MDIO_LOCK held.
256  */
257 static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
258 		       int reg_addr)
259 {
260 	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
261 
262 	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
263 	t3_write_reg(adapter, A_MI1_ADDR, addr);
264 	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
265 	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
266 	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
267 			       MDIO_ATTEMPTS, 10);
268 }
269 
270 /*
271  * MI1 read/write operations for indirect-addressed PHYs.
272  */
273 static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
274 			u16 reg_addr)
275 {
276 	struct port_info *pi = netdev_priv(dev);
277 	struct adapter *adapter = pi->adapter;
278 	int ret;
279 
280 	mutex_lock(&adapter->mdio_lock);
281 	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
282 	if (!ret) {
283 		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
284 		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
285 				      MDIO_ATTEMPTS, 10);
286 		if (!ret)
287 			ret = t3_read_reg(adapter, A_MI1_DATA);
288 	}
289 	mutex_unlock(&adapter->mdio_lock);
290 	return ret;
291 }
292 
293 static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
294 			 u16 reg_addr, u16 val)
295 {
296 	struct port_info *pi = netdev_priv(dev);
297 	struct adapter *adapter = pi->adapter;
298 	int ret;
299 
300 	mutex_lock(&adapter->mdio_lock);
301 	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
302 	if (!ret) {
303 		t3_write_reg(adapter, A_MI1_DATA, val);
304 		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
305 		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
306 				      MDIO_ATTEMPTS, 10);
307 	}
308 	mutex_unlock(&adapter->mdio_lock);
309 	return ret;
310 }
311 
312 static const struct mdio_ops mi1_mdio_ext_ops = {
313 	.read = mi1_ext_read,
314 	.write = mi1_ext_write,
315 	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
316 };
317 
318 /**
319  *	t3_mdio_change_bits - modify the value of a PHY register
320  *	@phy: the PHY to operate on
321  *	@mmd: the device address
322  *	@reg: the register address
323  *	@clear: what part of the register value to mask off
324  *	@set: what part of the register value to set
325  *
326  *	Changes the value of a PHY register by applying a mask to its current
327  *	value and ORing the result with a new value.
328  */
329 int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
330 			unsigned int set)
331 {
332 	int ret;
333 	unsigned int val;
334 
335 	ret = t3_mdio_read(phy, mmd, reg, &val);
336 	if (!ret) {
337 		val &= ~clear;
338 		ret = t3_mdio_write(phy, mmd, reg, val | set);
339 	}
340 	return ret;
341 }
342 
343 /**
344  *	t3_phy_reset - reset a PHY block
345  *	@phy: the PHY to operate on
346  *	@mmd: the device address of the PHY block to reset
347  *	@wait: how long to wait for the reset to complete in 1ms increments
348  *
349  *	Resets a PHY block and optionally waits for the reset to complete.
350  *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
351  *	for 10G PHYs.
352  */
353 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
354 {
355 	int err;
356 	unsigned int ctl;
357 
358 	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
359 				  MDIO_CTRL1_RESET);
360 	if (err || !wait)
361 		return err;
362 
363 	do {
364 		err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
365 		if (err)
366 			return err;
367 		ctl &= MDIO_CTRL1_RESET;
368 		if (ctl)
369 			msleep(1);
370 	} while (ctl && --wait);
371 
372 	return ctl ? -1 : 0;
373 }
374 
375 /**
376  *	t3_phy_advertise - set the PHY advertisement registers for autoneg
377  *	@phy: the PHY to operate on
378  *	@advert: bitmap of capabilities the PHY should advertise
379  *
380  *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
381  *	requested capabilities.
382  */
383 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
384 {
385 	int err;
386 	unsigned int val = 0;
387 
388 	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
389 	if (err)
390 		return err;
391 
392 	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
393 	if (advert & ADVERTISED_1000baseT_Half)
394 		val |= ADVERTISE_1000HALF;
395 	if (advert & ADVERTISED_1000baseT_Full)
396 		val |= ADVERTISE_1000FULL;
397 
398 	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
399 	if (err)
400 		return err;
401 
402 	val = 1;
403 	if (advert & ADVERTISED_10baseT_Half)
404 		val |= ADVERTISE_10HALF;
405 	if (advert & ADVERTISED_10baseT_Full)
406 		val |= ADVERTISE_10FULL;
407 	if (advert & ADVERTISED_100baseT_Half)
408 		val |= ADVERTISE_100HALF;
409 	if (advert & ADVERTISED_100baseT_Full)
410 		val |= ADVERTISE_100FULL;
411 	if (advert & ADVERTISED_Pause)
412 		val |= ADVERTISE_PAUSE_CAP;
413 	if (advert & ADVERTISED_Asym_Pause)
414 		val |= ADVERTISE_PAUSE_ASYM;
415 	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
416 }
417 
418 /**
419  *	t3_phy_advertise_fiber - set fiber PHY advertisement register
420  *	@phy: the PHY to operate on
421  *	@advert: bitmap of capabilities the PHY should advertise
422  *
423  *	Sets a fiber PHY's advertisement register to advertise the
424  *	requested capabilities.
425  */
426 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
427 {
428 	unsigned int val = 0;
429 
430 	if (advert & ADVERTISED_1000baseT_Half)
431 		val |= ADVERTISE_1000XHALF;
432 	if (advert & ADVERTISED_1000baseT_Full)
433 		val |= ADVERTISE_1000XFULL;
434 	if (advert & ADVERTISED_Pause)
435 		val |= ADVERTISE_1000XPAUSE;
436 	if (advert & ADVERTISED_Asym_Pause)
437 		val |= ADVERTISE_1000XPSE_ASYM;
438 	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
439 }
440 
441 /**
442  *	t3_set_phy_speed_duplex - force PHY speed and duplex
443  *	@phy: the PHY to operate on
444  *	@speed: requested PHY speed
445  *	@duplex: requested PHY duplex
446  *
447  *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
448  *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
449  */
450 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
451 {
452 	int err;
453 	unsigned int ctl;
454 
455 	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
456 	if (err)
457 		return err;
458 
459 	if (speed >= 0) {
460 		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
461 		if (speed == SPEED_100)
462 			ctl |= BMCR_SPEED100;
463 		else if (speed == SPEED_1000)
464 			ctl |= BMCR_SPEED1000;
465 	}
466 	if (duplex >= 0) {
467 		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
468 		if (duplex == DUPLEX_FULL)
469 			ctl |= BMCR_FULLDPLX;
470 	}
471 	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
472 		ctl |= BMCR_ANENABLE;
473 	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
474 }
475 
476 int t3_phy_lasi_intr_enable(struct cphy *phy)
477 {
478 	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
479 			     MDIO_PMA_LASI_LSALARM);
480 }
481 
482 int t3_phy_lasi_intr_disable(struct cphy *phy)
483 {
484 	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
485 }
486 
487 int t3_phy_lasi_intr_clear(struct cphy *phy)
488 {
489 	u32 val;
490 
491 	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
492 }
493 
494 int t3_phy_lasi_intr_handler(struct cphy *phy)
495 {
496 	unsigned int status;
497 	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
498 			       &status);
499 
500 	if (err)
501 		return err;
502 	return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
503 }
504 
505 static const struct adapter_info t3_adap_info[] = {
506 	{1, 1, 0,
507 	 F_GPIO2_OEN | F_GPIO4_OEN |
508 	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
509 	 &mi1_mdio_ops, "Chelsio PE9000"},
510 	{1, 1, 0,
511 	 F_GPIO2_OEN | F_GPIO4_OEN |
512 	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
513 	 &mi1_mdio_ops, "Chelsio T302"},
514 	{1, 0, 0,
515 	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
516 	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
517 	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
518 	 &mi1_mdio_ext_ops, "Chelsio T310"},
519 	{1, 1, 0,
520 	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
521 	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
522 	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
523 	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
524 	 &mi1_mdio_ext_ops, "Chelsio T320"},
525 	{},
526 	{},
527 	{1, 0, 0,
528 	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
529 	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
530 	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
531 	 &mi1_mdio_ext_ops, "Chelsio T310" },
532 	{1, 0, 0,
533 	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
534 	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
535 	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
536 	 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
537 };
538 
539 /*
540  * Return the adapter_info structure with a given index.  Out-of-range indices
541  * return NULL.
542  */
543 const struct adapter_info *t3_get_adapter_info(unsigned int id)
544 {
545 	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
546 }
547 
548 struct port_type_info {
549 	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
550 			int phy_addr, const struct mdio_ops *ops);
551 };
552 
553 static const struct port_type_info port_types[] = {
554 	{ NULL },
555 	{ t3_ael1002_phy_prep },
556 	{ t3_vsc8211_phy_prep },
557 	{ NULL},
558 	{ t3_xaui_direct_phy_prep },
559 	{ t3_ael2005_phy_prep },
560 	{ t3_qt2045_phy_prep },
561 	{ t3_ael1006_phy_prep },
562 	{ NULL },
563 	{ t3_aq100x_phy_prep },
564 	{ t3_ael2020_phy_prep },
565 };
566 
567 #define VPD_ENTRY(name, len) \
568 	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
569 
570 /*
571  * Partial EEPROM Vital Product Data structure.  Includes only the ID and
572  * VPD-R sections.
573  */
574 struct t3_vpd {
575 	u8 id_tag;
576 	u8 id_len[2];
577 	u8 id_data[16];
578 	u8 vpdr_tag;
579 	u8 vpdr_len[2];
580 	VPD_ENTRY(pn, 16);	/* part number */
581 	VPD_ENTRY(ec, 16);	/* EC level */
582 	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
583 	VPD_ENTRY(na, 12);	/* MAC address base */
584 	VPD_ENTRY(cclk, 6);	/* core clock */
585 	VPD_ENTRY(mclk, 6);	/* mem clock */
586 	VPD_ENTRY(uclk, 6);	/* uP clk */
587 	VPD_ENTRY(mdc, 6);	/* MDIO clk */
588 	VPD_ENTRY(mt, 2);	/* mem timing */
589 	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
590 	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
591 	VPD_ENTRY(port0, 2);	/* PHY0 complex */
592 	VPD_ENTRY(port1, 2);	/* PHY1 complex */
593 	VPD_ENTRY(port2, 2);	/* PHY2 complex */
594 	VPD_ENTRY(port3, 2);	/* PHY3 complex */
595 	VPD_ENTRY(rv, 1);	/* csum */
596 	u32 pad;		/* for multiple-of-4 sizing and alignment */
597 };
598 
599 #define EEPROM_MAX_POLL   40
600 #define EEPROM_STAT_ADDR  0x4000
601 #define VPD_BASE          0xc00
602 
603 /**
604  *	t3_seeprom_read - read a VPD EEPROM location
605  *	@adapter: adapter to read
606  *	@addr: EEPROM address
607  *	@data: where to store the read data
608  *
609  *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
610  *	VPD ROM capability.  A zero is written to the flag bit when the
611  *	address is written to the control register.  The hardware device will
612  *	set the flag to 1 when 4 bytes have been read into the data register.
613  */
614 int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
615 {
616 	u16 val;
617 	int attempts = EEPROM_MAX_POLL;
618 	u32 v;
619 	unsigned int base = adapter->params.pci.vpd_cap_addr;
620 
621 	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
622 		return -EINVAL;
623 
624 	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
625 	do {
626 		udelay(10);
627 		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
628 	} while (!(val & PCI_VPD_ADDR_F) && --attempts);
629 
630 	if (!(val & PCI_VPD_ADDR_F)) {
631 		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
632 		return -EIO;
633 	}
634 	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
635 	*data = cpu_to_le32(v);
636 	return 0;
637 }
638 
639 /**
640  *	t3_seeprom_write - write a VPD EEPROM location
641  *	@adapter: adapter to write
642  *	@addr: EEPROM address
643  *	@data: value to write
644  *
645  *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
646  *	VPD ROM capability.
647  */
648 int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
649 {
650 	u16 val;
651 	int attempts = EEPROM_MAX_POLL;
652 	unsigned int base = adapter->params.pci.vpd_cap_addr;
653 
654 	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
655 		return -EINVAL;
656 
657 	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
658 			       le32_to_cpu(data));
659 	pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
660 			      addr | PCI_VPD_ADDR_F);
661 	do {
662 		msleep(1);
663 		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
664 	} while ((val & PCI_VPD_ADDR_F) && --attempts);
665 
666 	if (val & PCI_VPD_ADDR_F) {
667 		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
668 		return -EIO;
669 	}
670 	return 0;
671 }
672 
673 /**
674  *	t3_seeprom_wp - enable/disable EEPROM write protection
675  *	@adapter: the adapter
676  *	@enable: 1 to enable write protection, 0 to disable it
677  *
678  *	Enables or disables write protection on the serial EEPROM.
679  */
680 int t3_seeprom_wp(struct adapter *adapter, int enable)
681 {
682 	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
683 }
684 
685 static int vpdstrtouint(char *s, u8 len, unsigned int base, unsigned int *val)
686 {
687 	char tok[256];
688 
689 	memcpy(tok, s, len);
690 	tok[len] = 0;
691 	return kstrtouint(strim(tok), base, val);
692 }
693 
694 static int vpdstrtou16(char *s, u8 len, unsigned int base, u16 *val)
695 {
696 	char tok[256];
697 
698 	memcpy(tok, s, len);
699 	tok[len] = 0;
700 	return kstrtou16(strim(tok), base, val);
701 }
702 
703 /**
704  *	get_vpd_params - read VPD parameters from VPD EEPROM
705  *	@adapter: adapter to read
706  *	@p: where to store the parameters
707  *
708  *	Reads card parameters stored in VPD EEPROM.
709  */
710 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
711 {
712 	int i, addr, ret;
713 	struct t3_vpd vpd;
714 
715 	/*
716 	 * Card information is normally at VPD_BASE but some early cards had
717 	 * it at 0.
718 	 */
719 	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
720 	if (ret)
721 		return ret;
722 	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
723 
724 	for (i = 0; i < sizeof(vpd); i += 4) {
725 		ret = t3_seeprom_read(adapter, addr + i,
726 				      (__le32 *)((u8 *)&vpd + i));
727 		if (ret)
728 			return ret;
729 	}
730 
731 	ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
732 	if (ret)
733 		return ret;
734 	ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk);
735 	if (ret)
736 		return ret;
737 	ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk);
738 	if (ret)
739 		return ret;
740 	ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc);
741 	if (ret)
742 		return ret;
743 	ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing);
744 	if (ret)
745 		return ret;
746 	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
747 
748 	/* Old eeproms didn't have port information */
749 	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
750 		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
751 		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
752 	} else {
753 		p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
754 		p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
755 		ret = vpdstrtou16(vpd.xaui0cfg_data, vpd.xaui0cfg_len, 16,
756 				  &p->xauicfg[0]);
757 		if (ret)
758 			return ret;
759 		ret = vpdstrtou16(vpd.xaui1cfg_data, vpd.xaui1cfg_len, 16,
760 				  &p->xauicfg[1]);
761 		if (ret)
762 			return ret;
763 	}
764 
765 	ret = hex2bin(p->eth_base, vpd.na_data, 6);
766 	if (ret < 0)
767 		return -EINVAL;
768 	return 0;
769 }
770 
771 /* serial flash and firmware constants */
772 enum {
773 	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
774 	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
775 	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */
776 
777 	/* flash command opcodes */
778 	SF_PROG_PAGE = 2,	/* program page */
779 	SF_WR_DISABLE = 4,	/* disable writes */
780 	SF_RD_STATUS = 5,	/* read status register */
781 	SF_WR_ENABLE = 6,	/* enable writes */
782 	SF_RD_DATA_FAST = 0xb,	/* read flash */
783 	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
784 
785 	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
786 	FW_VERS_ADDR = 0x7fffc,    /* flash address holding FW version */
787 	FW_MIN_SIZE = 8            /* at least version and csum */
788 };
789 
790 /**
791  *	sf1_read - read data from the serial flash
792  *	@adapter: the adapter
793  *	@byte_cnt: number of bytes to read
794  *	@cont: whether another operation will be chained
795  *	@valp: where to store the read data
796  *
797  *	Reads up to 4 bytes of data from the serial flash.  The location of
798  *	the read needs to be specified prior to calling this by issuing the
799  *	appropriate commands to the serial flash.
800  */
801 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
802 		    u32 *valp)
803 {
804 	int ret;
805 
806 	if (!byte_cnt || byte_cnt > 4)
807 		return -EINVAL;
808 	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
809 		return -EBUSY;
810 	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
811 	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
812 	if (!ret)
813 		*valp = t3_read_reg(adapter, A_SF_DATA);
814 	return ret;
815 }
816 
817 /**
818  *	sf1_write - write data to the serial flash
819  *	@adapter: the adapter
820  *	@byte_cnt: number of bytes to write
821  *	@cont: whether another operation will be chained
822  *	@val: value to write
823  *
824  *	Writes up to 4 bytes of data to the serial flash.  The location of
825  *	the write needs to be specified prior to calling this by issuing the
826  *	appropriate commands to the serial flash.
827  */
828 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
829 		     u32 val)
830 {
831 	if (!byte_cnt || byte_cnt > 4)
832 		return -EINVAL;
833 	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
834 		return -EBUSY;
835 	t3_write_reg(adapter, A_SF_DATA, val);
836 	t3_write_reg(adapter, A_SF_OP,
837 		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
838 	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
839 }
840 
841 /**
842  *	flash_wait_op - wait for a flash operation to complete
843  *	@adapter: the adapter
844  *	@attempts: max number of polls of the status register
845  *	@delay: delay between polls in ms
846  *
847  *	Wait for a flash operation to complete by polling the status register.
848  */
849 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
850 {
851 	int ret;
852 	u32 status;
853 
854 	while (1) {
855 		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
856 		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
857 			return ret;
858 		if (!(status & 1))
859 			return 0;
860 		if (--attempts == 0)
861 			return -EAGAIN;
862 		if (delay)
863 			msleep(delay);
864 	}
865 }
866 
867 /**
868  *	t3_read_flash - read words from serial flash
869  *	@adapter: the adapter
870  *	@addr: the start address for the read
871  *	@nwords: how many 32-bit words to read
872  *	@data: where to store the read data
873  *	@byte_oriented: whether to store data as bytes or as words
874  *
875  *	Read the specified number of 32-bit words from the serial flash.
876  *	If @byte_oriented is set the read data is stored as a byte array
877  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
878  *	natural endianness.
879  */
880 static int t3_read_flash(struct adapter *adapter, unsigned int addr,
881 			 unsigned int nwords, u32 *data, int byte_oriented)
882 {
883 	int ret;
884 
885 	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
886 		return -EINVAL;
887 
888 	addr = swab32(addr) | SF_RD_DATA_FAST;
889 
890 	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
891 	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
892 		return ret;
893 
894 	for (; nwords; nwords--, data++) {
895 		ret = sf1_read(adapter, 4, nwords > 1, data);
896 		if (ret)
897 			return ret;
898 		if (byte_oriented)
899 			*data = htonl(*data);
900 	}
901 	return 0;
902 }
903 
904 /**
905  *	t3_write_flash - write up to a page of data to the serial flash
906  *	@adapter: the adapter
907  *	@addr: the start address to write
908  *	@n: length of data to write
909  *	@data: the data to write
910  *
911  *	Writes up to a page of data (256 bytes) to the serial flash starting
912  *	at the given address.
913  */
914 static int t3_write_flash(struct adapter *adapter, unsigned int addr,
915 			  unsigned int n, const u8 *data)
916 {
917 	int ret;
918 	u32 buf[64];
919 	unsigned int i, c, left, val, offset = addr & 0xff;
920 
921 	if (addr + n > SF_SIZE || offset + n > 256)
922 		return -EINVAL;
923 
924 	val = swab32(addr) | SF_PROG_PAGE;
925 
926 	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
927 	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
928 		return ret;
929 
930 	for (left = n; left; left -= c) {
931 		c = min(left, 4U);
932 		for (val = 0, i = 0; i < c; ++i)
933 			val = (val << 8) + *data++;
934 
935 		ret = sf1_write(adapter, c, c != left, val);
936 		if (ret)
937 			return ret;
938 	}
939 	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
940 		return ret;
941 
942 	/* Read the page to verify the write succeeded */
943 	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
944 	if (ret)
945 		return ret;
946 
947 	if (memcmp(data - n, (u8 *) buf + offset, n))
948 		return -EIO;
949 	return 0;
950 }
951 
952 /**
953  *	t3_get_tp_version - read the tp sram version
954  *	@adapter: the adapter
955  *	@vers: where to place the version
956  *
957  *	Reads the protocol sram version from sram.
958  */
959 int t3_get_tp_version(struct adapter *adapter, u32 *vers)
960 {
961 	int ret;
962 
963 	/* Get version loaded in SRAM */
964 	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
965 	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
966 			      1, 1, 5, 1);
967 	if (ret)
968 		return ret;
969 
970 	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
971 
972 	return 0;
973 }
974 
975 /**
976  *	t3_check_tpsram_version - read the tp sram version
977  *	@adapter: the adapter
978  *
979  *	Reads the protocol sram version from flash.
980  */
981 int t3_check_tpsram_version(struct adapter *adapter)
982 {
983 	int ret;
984 	u32 vers;
985 	unsigned int major, minor;
986 
987 	if (adapter->params.rev == T3_REV_A)
988 		return 0;
989 
990 
991 	ret = t3_get_tp_version(adapter, &vers);
992 	if (ret)
993 		return ret;
994 
995 	major = G_TP_VERSION_MAJOR(vers);
996 	minor = G_TP_VERSION_MINOR(vers);
997 
998 	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
999 		return 0;
1000 	else {
1001 		CH_ERR(adapter, "found wrong TP version (%u.%u), "
1002 		       "driver compiled for version %d.%d\n", major, minor,
1003 		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
1004 	}
1005 	return -EINVAL;
1006 }
1007 
1008 /**
1009  *	t3_check_tpsram - check if provided protocol SRAM
1010  *			  is compatible with this driver
1011  *	@adapter: the adapter
1012  *	@tp_sram: the firmware image to write
1013  *	@size: image size
1014  *
1015  *	Checks if an adapter's tp sram is compatible with the driver.
1016  *	Returns 0 if the versions are compatible, a negative error otherwise.
1017  */
1018 int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
1019 		    unsigned int size)
1020 {
1021 	u32 csum;
1022 	unsigned int i;
1023 	const __be32 *p = (const __be32 *)tp_sram;
1024 
1025 	/* Verify checksum */
1026 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1027 		csum += ntohl(p[i]);
1028 	if (csum != 0xffffffff) {
1029 		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
1030 		       csum);
1031 		return -EINVAL;
1032 	}
1033 
1034 	return 0;
1035 }
1036 
1037 enum fw_version_type {
1038 	FW_VERSION_N3,
1039 	FW_VERSION_T3
1040 };
1041 
1042 /**
1043  *	t3_get_fw_version - read the firmware version
1044  *	@adapter: the adapter
1045  *	@vers: where to place the version
1046  *
1047  *	Reads the FW version from flash.
1048  */
1049 int t3_get_fw_version(struct adapter *adapter, u32 *vers)
1050 {
1051 	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1052 }
1053 
1054 /**
1055  *	t3_check_fw_version - check if the FW is compatible with this driver
1056  *	@adapter: the adapter
1057  *
1058  *	Checks if an adapter's FW is compatible with the driver.  Returns 0
1059  *	if the versions are compatible, a negative error otherwise.
1060  */
1061 int t3_check_fw_version(struct adapter *adapter)
1062 {
1063 	int ret;
1064 	u32 vers;
1065 	unsigned int type, major, minor;
1066 
1067 	ret = t3_get_fw_version(adapter, &vers);
1068 	if (ret)
1069 		return ret;
1070 
1071 	type = G_FW_VERSION_TYPE(vers);
1072 	major = G_FW_VERSION_MAJOR(vers);
1073 	minor = G_FW_VERSION_MINOR(vers);
1074 
1075 	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1076 	    minor == FW_VERSION_MINOR)
1077 		return 0;
1078 	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1079 		CH_WARN(adapter, "found old FW minor version(%u.%u), "
1080 		        "driver compiled for version %u.%u\n", major, minor,
1081 			FW_VERSION_MAJOR, FW_VERSION_MINOR);
1082 	else {
1083 		CH_WARN(adapter, "found newer FW version(%u.%u), "
1084 		        "driver compiled for version %u.%u\n", major, minor,
1085 			FW_VERSION_MAJOR, FW_VERSION_MINOR);
1086 		return 0;
1087 	}
1088 	return -EINVAL;
1089 }
1090 
1091 /**
1092  *	t3_flash_erase_sectors - erase a range of flash sectors
1093  *	@adapter: the adapter
1094  *	@start: the first sector to erase
1095  *	@end: the last sector to erase
1096  *
1097  *	Erases the sectors in the given range.
1098  */
1099 static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1100 {
1101 	while (start <= end) {
1102 		int ret;
1103 
1104 		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1105 		    (ret = sf1_write(adapter, 4, 0,
1106 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
1107 		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
1108 			return ret;
1109 		start++;
1110 	}
1111 	return 0;
1112 }
1113 
1114 /**
1115  *	t3_load_fw - download firmware
1116  *	@adapter: the adapter
1117  *	@fw_data: the firmware image to write
1118  *	@size: image size
1119  *
1120  *	Write the supplied firmware image to the card's serial flash.
1121  *	The FW image has the following sections: @size - 8 bytes of code and
1122  *	data, followed by 4 bytes of FW version, followed by the 32-bit
1123  *	1's complement checksum of the whole image.
1124  */
1125 int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1126 {
1127 	u32 csum;
1128 	unsigned int i;
1129 	const __be32 *p = (const __be32 *)fw_data;
1130 	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1131 
1132 	if ((size & 3) || size < FW_MIN_SIZE)
1133 		return -EINVAL;
1134 	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1135 		return -EFBIG;
1136 
1137 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1138 		csum += ntohl(p[i]);
1139 	if (csum != 0xffffffff) {
1140 		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1141 		       csum);
1142 		return -EINVAL;
1143 	}
1144 
1145 	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1146 	if (ret)
1147 		goto out;
1148 
1149 	size -= 8;		/* trim off version and checksum */
1150 	for (addr = FW_FLASH_BOOT_ADDR; size;) {
1151 		unsigned int chunk_size = min(size, 256U);
1152 
1153 		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1154 		if (ret)
1155 			goto out;
1156 
1157 		addr += chunk_size;
1158 		fw_data += chunk_size;
1159 		size -= chunk_size;
1160 	}
1161 
1162 	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1163 out:
1164 	if (ret)
1165 		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1166 	return ret;
1167 }
1168 
1169 #define CIM_CTL_BASE 0x2000
1170 
1171 /**
1172  *      t3_cim_ctl_blk_read - read a block from CIM control region
1173  *
1174  *      @adap: the adapter
1175  *      @addr: the start address within the CIM control region
1176  *      @n: number of words to read
1177  *      @valp: where to store the result
1178  *
1179  *      Reads a block of 4-byte words from the CIM control region.
1180  */
1181 int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1182 			unsigned int n, unsigned int *valp)
1183 {
1184 	int ret = 0;
1185 
1186 	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1187 		return -EBUSY;
1188 
1189 	for ( ; !ret && n--; addr += 4) {
1190 		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1191 		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1192 				      0, 5, 2);
1193 		if (!ret)
1194 			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1195 	}
1196 	return ret;
1197 }
1198 
1199 static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
1200 			       u32 *rx_hash_high, u32 *rx_hash_low)
1201 {
1202 	/* stop Rx unicast traffic */
1203 	t3_mac_disable_exact_filters(mac);
1204 
1205 	/* stop broadcast, multicast, promiscuous mode traffic */
1206 	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
1207 	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1208 			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1209 			 F_DISBCAST);
1210 
1211 	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
1212 	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);
1213 
1214 	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
1215 	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);
1216 
1217 	/* Leave time to drain max RX fifo */
1218 	msleep(1);
1219 }
1220 
1221 static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
1222 			       u32 rx_hash_high, u32 rx_hash_low)
1223 {
1224 	t3_mac_enable_exact_filters(mac);
1225 	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1226 			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1227 			 rx_cfg);
1228 	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
1229 	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
1230 }
1231 
1232 /**
1233  *	t3_link_changed - handle interface link changes
1234  *	@adapter: the adapter
1235  *	@port_id: the port index that changed link state
1236  *
1237  *	Called when a port's link settings change to propagate the new values
1238  *	to the associated PHY and MAC.  After performing the common tasks it
1239  *	invokes an OS-specific handler.
1240  */
1241 void t3_link_changed(struct adapter *adapter, int port_id)
1242 {
1243 	int link_ok, speed, duplex, fc;
1244 	struct port_info *pi = adap2pinfo(adapter, port_id);
1245 	struct cphy *phy = &pi->phy;
1246 	struct cmac *mac = &pi->mac;
1247 	struct link_config *lc = &pi->link_config;
1248 
1249 	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1250 
1251 	if (!lc->link_ok && link_ok) {
1252 		u32 rx_cfg, rx_hash_high, rx_hash_low;
1253 		u32 status;
1254 
1255 		t3_xgm_intr_enable(adapter, port_id);
1256 		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1257 		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1258 		t3_mac_enable(mac, MAC_DIRECTION_RX);
1259 
1260 		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1261 		if (status & F_LINKFAULTCHANGE) {
1262 			mac->stats.link_faults++;
1263 			pi->link_fault = 1;
1264 		}
1265 		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1266 	}
1267 
1268 	if (lc->requested_fc & PAUSE_AUTONEG)
1269 		fc &= lc->requested_fc;
1270 	else
1271 		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1272 
1273 	if (link_ok == lc->link_ok && speed == lc->speed &&
1274 	    duplex == lc->duplex && fc == lc->fc)
1275 		return;                            /* nothing changed */
1276 
1277 	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1278 	    uses_xaui(adapter)) {
1279 		if (link_ok)
1280 			t3b_pcs_reset(mac);
1281 		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1282 			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
1283 	}
1284 	lc->link_ok = link_ok;
1285 	lc->speed = speed < 0 ? SPEED_INVALID : speed;
1286 	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1287 
1288 	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1289 		/* Set MAC speed, duplex, and flow control to match PHY. */
1290 		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1291 		lc->fc = fc;
1292 	}
1293 
1294 	t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
1295 			   speed, duplex, fc);
1296 }
1297 
1298 void t3_link_fault(struct adapter *adapter, int port_id)
1299 {
1300 	struct port_info *pi = adap2pinfo(adapter, port_id);
1301 	struct cmac *mac = &pi->mac;
1302 	struct cphy *phy = &pi->phy;
1303 	struct link_config *lc = &pi->link_config;
1304 	int link_ok, speed, duplex, fc, link_fault;
1305 	u32 rx_cfg, rx_hash_high, rx_hash_low;
1306 
1307 	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1308 
1309 	if (adapter->params.rev > 0 && uses_xaui(adapter))
1310 		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
1311 
1312 	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1313 	t3_mac_enable(mac, MAC_DIRECTION_RX);
1314 
1315 	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1316 
1317 	link_fault = t3_read_reg(adapter,
1318 				 A_XGM_INT_STATUS + mac->offset);
1319 	link_fault &= F_LINKFAULTCHANGE;
1320 
1321 	link_ok = lc->link_ok;
1322 	speed = lc->speed;
1323 	duplex = lc->duplex;
1324 	fc = lc->fc;
1325 
1326 	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1327 
1328 	if (link_fault) {
1329 		lc->link_ok = 0;
1330 		lc->speed = SPEED_INVALID;
1331 		lc->duplex = DUPLEX_INVALID;
1332 
1333 		t3_os_link_fault(adapter, port_id, 0);
1334 
1335 		/* Account link faults only when the phy reports a link up */
1336 		if (link_ok)
1337 			mac->stats.link_faults++;
1338 	} else {
1339 		if (link_ok)
1340 			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1341 				     F_TXACTENABLE | F_RXEN);
1342 
1343 		pi->link_fault = 0;
1344 		lc->link_ok = (unsigned char)link_ok;
1345 		lc->speed = speed < 0 ? SPEED_INVALID : speed;
1346 		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1347 		t3_os_link_fault(adapter, port_id, link_ok);
1348 	}
1349 }
1350 
1351 /**
1352  *	t3_link_start - apply link configuration to MAC/PHY
1353  *	@phy: the PHY to setup
1354  *	@mac: the MAC to setup
1355  *	@lc: the requested link configuration
1356  *
1357  *	Set up a port's MAC and PHY according to a desired link configuration.
1358  *	- If the PHY can auto-negotiate first decide what to advertise, then
1359  *	  enable/disable auto-negotiation as desired, and reset.
1360  *	- If the PHY does not auto-negotiate just reset it.
1361  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1362  *	  otherwise do it later based on the outcome of auto-negotiation.
1363  */
1364 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1365 {
1366 	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1367 
1368 	lc->link_ok = 0;
1369 	if (lc->supported & SUPPORTED_Autoneg) {
1370 		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1371 		if (fc) {
1372 			lc->advertising |= ADVERTISED_Asym_Pause;
1373 			if (fc & PAUSE_RX)
1374 				lc->advertising |= ADVERTISED_Pause;
1375 		}
1376 		phy->ops->advertise(phy, lc->advertising);
1377 
1378 		if (lc->autoneg == AUTONEG_DISABLE) {
1379 			lc->speed = lc->requested_speed;
1380 			lc->duplex = lc->requested_duplex;
1381 			lc->fc = (unsigned char)fc;
1382 			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1383 						   fc);
1384 			/* Also disables autoneg */
1385 			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1386 		} else
1387 			phy->ops->autoneg_enable(phy);
1388 	} else {
1389 		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1390 		lc->fc = (unsigned char)fc;
1391 		phy->ops->reset(phy, 0);
1392 	}
1393 	return 0;
1394 }
1395 
1396 /**
1397  *	t3_set_vlan_accel - control HW VLAN extraction
1398  *	@adapter: the adapter
1399  *	@ports: bitmap of adapter ports to operate on
1400  *	@on: enable (1) or disable (0) HW VLAN extraction
1401  *
1402  *	Enables or disables HW extraction of VLAN tags for the given port.
1403  */
1404 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1405 {
1406 	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1407 			 ports << S_VLANEXTRACTIONENABLE,
1408 			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1409 }
1410 
1411 struct intr_info {
1412 	unsigned int mask;	/* bits to check in interrupt status */
1413 	const char *msg;	/* message to print or NULL */
1414 	short stat_idx;		/* stat counter to increment or -1 */
1415 	unsigned short fatal;	/* whether the condition reported is fatal */
1416 };
1417 
1418 /**
1419  *	t3_handle_intr_status - table driven interrupt handler
1420  *	@adapter: the adapter that generated the interrupt
1421  *	@reg: the interrupt status register to process
1422  *	@mask: a mask to apply to the interrupt status
1423  *	@acts: table of interrupt actions
1424  *	@stats: statistics counters tracking interrupt occurrences
1425  *
1426  *	A table driven interrupt handler that applies a set of masks to an
1427  *	interrupt status word and performs the corresponding actions if the
1428  *	interrupts described by the mask have occurred.  The actions include
1429  *	optionally printing a warning or alert message, and optionally
1430  *	incrementing a stat counter.  The table is terminated by an entry
1431  *	specifying mask 0.  Returns the number of fatal interrupt conditions.
1432  */
1433 static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1434 				 unsigned int mask,
1435 				 const struct intr_info *acts,
1436 				 unsigned long *stats)
1437 {
1438 	int fatal = 0;
1439 	unsigned int status = t3_read_reg(adapter, reg) & mask;
1440 
1441 	for (; acts->mask; ++acts) {
1442 		if (!(status & acts->mask))
1443 			continue;
1444 		if (acts->fatal) {
1445 			fatal++;
1446 			CH_ALERT(adapter, "%s (0x%x)\n",
1447 				 acts->msg, status & acts->mask);
1448 			status &= ~acts->mask;
1449 		} else if (acts->msg)
1450 			CH_WARN(adapter, "%s (0x%x)\n",
1451 				acts->msg, status & acts->mask);
1452 		if (acts->stat_idx >= 0)
1453 			stats[acts->stat_idx]++;
1454 	}
1455 	if (status)		/* clear processed interrupts */
1456 		t3_write_reg(adapter, reg, status);
1457 	return fatal;
1458 }
1459 
1460 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1461 		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1462 		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1463 		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1464 		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1465 		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1466 		       F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
1467 		       F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
1468 		       F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
1469 		       F_LOPIODRBDROPERR)
1470 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1471 		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1472 		       F_NFASRCHFAIL)
1473 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1474 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1475 		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1476 		       F_TXFIFO_UNDERRUN)
1477 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1478 			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1479 			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1480 			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1481 			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1482 			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1483 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1484 			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1485 			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1486 			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1487 			F_TXPARERR | V_BISTERR(M_BISTERR))
1488 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1489 			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1490 			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1491 #define ULPTX_INTR_MASK 0xfc
1492 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1493 			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1494 			 F_ZERO_SWITCH_ERROR)
1495 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1496 		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1497 		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1498 	 	       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1499 		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1500 		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1501 		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1502 		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1503 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1504 			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1505 			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1506 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1507 			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1508 			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1509 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1510 		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1511 		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1512 		       V_MCAPARERRENB(M_MCAPARERRENB))
1513 #define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
1514 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1515 		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1516 		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1517 		      F_MPS0 | F_CPL_SWITCH)
1518 /*
1519  * Interrupt handler for the PCIX1 module.
1520  */
1521 static void pci_intr_handler(struct adapter *adapter)
1522 {
1523 	static const struct intr_info pcix1_intr_info[] = {
1524 		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1525 		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
1526 		{F_RCVTARABT, "PCI received target abort", -1, 1},
1527 		{F_RCVMSTABT, "PCI received master abort", -1, 1},
1528 		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
1529 		{F_DETPARERR, "PCI detected parity error", -1, 1},
1530 		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1531 		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1532 		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
1533 		 1},
1534 		{F_DETCORECCERR, "PCI correctable ECC error",
1535 		 STAT_PCI_CORR_ECC, 0},
1536 		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1537 		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1538 		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1539 		 1},
1540 		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1541 		 1},
1542 		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1543 		 1},
1544 		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1545 		 "error", -1, 1},
1546 		{0}
1547 	};
1548 
1549 	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1550 				  pcix1_intr_info, adapter->irq_stats))
1551 		t3_fatal_err(adapter);
1552 }
1553 
1554 /*
1555  * Interrupt handler for the PCIE module.
1556  */
1557 static void pcie_intr_handler(struct adapter *adapter)
1558 {
1559 	static const struct intr_info pcie_intr_info[] = {
1560 		{F_PEXERR, "PCI PEX error", -1, 1},
1561 		{F_UNXSPLCPLERRR,
1562 		 "PCI unexpected split completion DMA read error", -1, 1},
1563 		{F_UNXSPLCPLERRC,
1564 		 "PCI unexpected split completion DMA command error", -1, 1},
1565 		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1566 		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1567 		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1568 		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1569 		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1570 		 "PCI MSI-X table/PBA parity error", -1, 1},
1571 		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
1572 		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
1573 		{F_RXPARERR, "PCI Rx parity error", -1, 1},
1574 		{F_TXPARERR, "PCI Tx parity error", -1, 1},
1575 		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1576 		{0}
1577 	};
1578 
1579 	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1580 		CH_ALERT(adapter, "PEX error code 0x%x\n",
1581 			 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1582 
1583 	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1584 				  pcie_intr_info, adapter->irq_stats))
1585 		t3_fatal_err(adapter);
1586 }
1587 
1588 /*
1589  * TP interrupt handler.
1590  */
1591 static void tp_intr_handler(struct adapter *adapter)
1592 {
1593 	static const struct intr_info tp_intr_info[] = {
1594 		{0xffffff, "TP parity error", -1, 1},
1595 		{0x1000000, "TP out of Rx pages", -1, 1},
1596 		{0x2000000, "TP out of Tx pages", -1, 1},
1597 		{0}
1598 	};
1599 
1600 	static const struct intr_info tp_intr_info_t3c[] = {
1601 		{0x1fffffff, "TP parity error", -1, 1},
1602 		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1603 		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1604 		{0}
1605 	};
1606 
1607 	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1608 				  adapter->params.rev < T3_REV_C ?
1609 				  tp_intr_info : tp_intr_info_t3c, NULL))
1610 		t3_fatal_err(adapter);
1611 }
1612 
1613 /*
1614  * CIM interrupt handler.
1615  */
1616 static void cim_intr_handler(struct adapter *adapter)
1617 {
1618 	static const struct intr_info cim_intr_info[] = {
1619 		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1620 		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1621 		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1622 		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1623 		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1624 		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1625 		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1626 		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1627 		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1628 		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1629 		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1630 		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1631 		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
1632 		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
1633 		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
1634 		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
1635 		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
1636 		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
1637 		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
1638 		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
1639 		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
1640 		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
1641 		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
1642 		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
1643 		{0}
1644 	};
1645 
1646 	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1647 				  cim_intr_info, NULL))
1648 		t3_fatal_err(adapter);
1649 }
1650 
1651 /*
1652  * ULP RX interrupt handler.
1653  */
1654 static void ulprx_intr_handler(struct adapter *adapter)
1655 {
1656 	static const struct intr_info ulprx_intr_info[] = {
1657 		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
1658 		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
1659 		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
1660 		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
1661 		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
1662 		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
1663 		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
1664 		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
1665 		{0}
1666 	};
1667 
1668 	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1669 				  ulprx_intr_info, NULL))
1670 		t3_fatal_err(adapter);
1671 }
1672 
1673 /*
1674  * ULP TX interrupt handler.
1675  */
1676 static void ulptx_intr_handler(struct adapter *adapter)
1677 {
1678 	static const struct intr_info ulptx_intr_info[] = {
1679 		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1680 		 STAT_ULP_CH0_PBL_OOB, 0},
1681 		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1682 		 STAT_ULP_CH1_PBL_OOB, 0},
1683 		{0xfc, "ULP TX parity error", -1, 1},
1684 		{0}
1685 	};
1686 
1687 	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1688 				  ulptx_intr_info, adapter->irq_stats))
1689 		t3_fatal_err(adapter);
1690 }
1691 
1692 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1693 	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1694 	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1695 	F_ICSPI1_TX_FRAMING_ERROR)
1696 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1697 	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1698 	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1699 	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1700 
1701 /*
1702  * PM TX interrupt handler.
1703  */
1704 static void pmtx_intr_handler(struct adapter *adapter)
1705 {
1706 	static const struct intr_info pmtx_intr_info[] = {
1707 		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1708 		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1709 		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1710 		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1711 		 "PMTX ispi parity error", -1, 1},
1712 		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1713 		 "PMTX ospi parity error", -1, 1},
1714 		{0}
1715 	};
1716 
1717 	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1718 				  pmtx_intr_info, NULL))
1719 		t3_fatal_err(adapter);
1720 }
1721 
1722 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1723 	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1724 	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1725 	F_IESPI1_TX_FRAMING_ERROR)
1726 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1727 	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1728 	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1729 	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1730 
1731 /*
1732  * PM RX interrupt handler.
1733  */
1734 static void pmrx_intr_handler(struct adapter *adapter)
1735 {
1736 	static const struct intr_info pmrx_intr_info[] = {
1737 		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1738 		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1739 		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1740 		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1741 		 "PMRX ispi parity error", -1, 1},
1742 		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1743 		 "PMRX ospi parity error", -1, 1},
1744 		{0}
1745 	};
1746 
1747 	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1748 				  pmrx_intr_info, NULL))
1749 		t3_fatal_err(adapter);
1750 }
1751 
1752 /*
1753  * CPL switch interrupt handler.
1754  */
1755 static void cplsw_intr_handler(struct adapter *adapter)
1756 {
1757 	static const struct intr_info cplsw_intr_info[] = {
1758 		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1759 		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1760 		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1761 		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1762 		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1763 		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1764 		{0}
1765 	};
1766 
1767 	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1768 				  cplsw_intr_info, NULL))
1769 		t3_fatal_err(adapter);
1770 }
1771 
1772 /*
1773  * MPS interrupt handler.
1774  */
1775 static void mps_intr_handler(struct adapter *adapter)
1776 {
1777 	static const struct intr_info mps_intr_info[] = {
1778 		{0x1ff, "MPS parity error", -1, 1},
1779 		{0}
1780 	};
1781 
1782 	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1783 				  mps_intr_info, NULL))
1784 		t3_fatal_err(adapter);
1785 }
1786 
1787 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1788 
1789 /*
1790  * MC7 interrupt handler.
1791  */
1792 static void mc7_intr_handler(struct mc7 *mc7)
1793 {
1794 	struct adapter *adapter = mc7->adapter;
1795 	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1796 
1797 	if (cause & F_CE) {
1798 		mc7->stats.corr_err++;
1799 		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1800 			"data 0x%x 0x%x 0x%x\n", mc7->name,
1801 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1802 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1803 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1804 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1805 	}
1806 
1807 	if (cause & F_UE) {
1808 		mc7->stats.uncorr_err++;
1809 		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1810 			 "data 0x%x 0x%x 0x%x\n", mc7->name,
1811 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1812 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1813 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1814 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1815 	}
1816 
1817 	if (G_PE(cause)) {
1818 		mc7->stats.parity_err++;
1819 		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1820 			 mc7->name, G_PE(cause));
1821 	}
1822 
1823 	if (cause & F_AE) {
1824 		u32 addr = 0;
1825 
1826 		if (adapter->params.rev > 0)
1827 			addr = t3_read_reg(adapter,
1828 					   mc7->offset + A_MC7_ERR_ADDR);
1829 		mc7->stats.addr_err++;
1830 		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1831 			 mc7->name, addr);
1832 	}
1833 
1834 	if (cause & MC7_INTR_FATAL)
1835 		t3_fatal_err(adapter);
1836 
1837 	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1838 }
1839 
1840 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1841 			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1842 /*
1843  * XGMAC interrupt handler.
1844  */
1845 static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1846 {
1847 	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1848 	/*
1849 	 * We mask out interrupt causes for which we're not taking interrupts.
1850 	 * This allows us to use polling logic to monitor some of the other
1851 	 * conditions when taking interrupts would impose too much load on the
1852 	 * system.
1853 	 */
1854 	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
1855 		    ~F_RXFIFO_OVERFLOW;
1856 
1857 	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1858 		mac->stats.tx_fifo_parity_err++;
1859 		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1860 	}
1861 	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1862 		mac->stats.rx_fifo_parity_err++;
1863 		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1864 	}
1865 	if (cause & F_TXFIFO_UNDERRUN)
1866 		mac->stats.tx_fifo_urun++;
1867 	if (cause & F_RXFIFO_OVERFLOW)
1868 		mac->stats.rx_fifo_ovfl++;
1869 	if (cause & V_SERDES_LOS(M_SERDES_LOS))
1870 		mac->stats.serdes_signal_loss++;
1871 	if (cause & F_XAUIPCSCTCERR)
1872 		mac->stats.xaui_pcs_ctc_err++;
1873 	if (cause & F_XAUIPCSALIGNCHANGE)
1874 		mac->stats.xaui_pcs_align_change++;
1875 	if (cause & F_XGM_INT) {
1876 		t3_set_reg_field(adap,
1877 				 A_XGM_INT_ENABLE + mac->offset,
1878 				 F_XGM_INT, 0);
1879 		mac->stats.link_faults++;
1880 
1881 		t3_os_link_fault_handler(adap, idx);
1882 	}
1883 
1884 	if (cause & XGM_INTR_FATAL)
1885 		t3_fatal_err(adap);
1886 
1887 	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1888 	return cause != 0;
1889 }
1890 
1891 /*
1892  * Interrupt handler for PHY events.
1893  */
1894 int t3_phy_intr_handler(struct adapter *adapter)
1895 {
1896 	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1897 
1898 	for_each_port(adapter, i) {
1899 		struct port_info *p = adap2pinfo(adapter, i);
1900 
1901 		if (!(p->phy.caps & SUPPORTED_IRQ))
1902 			continue;
1903 
1904 		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1905 			int phy_cause = p->phy.ops->intr_handler(&p->phy);
1906 
1907 			if (phy_cause & cphy_cause_link_change)
1908 				t3_link_changed(adapter, i);
1909 			if (phy_cause & cphy_cause_fifo_error)
1910 				p->phy.fifo_errors++;
1911 			if (phy_cause & cphy_cause_module_change)
1912 				t3_os_phymod_changed(adapter, i);
1913 		}
1914 	}
1915 
1916 	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1917 	return 0;
1918 }
1919 
1920 /*
1921  * T3 slow path (non-data) interrupt handler.
1922  */
1923 int t3_slow_intr_handler(struct adapter *adapter)
1924 {
1925 	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1926 
1927 	cause &= adapter->slow_intr_mask;
1928 	if (!cause)
1929 		return 0;
1930 	if (cause & F_PCIM0) {
1931 		if (is_pcie(adapter))
1932 			pcie_intr_handler(adapter);
1933 		else
1934 			pci_intr_handler(adapter);
1935 	}
1936 	if (cause & F_SGE3)
1937 		t3_sge_err_intr_handler(adapter);
1938 	if (cause & F_MC7_PMRX)
1939 		mc7_intr_handler(&adapter->pmrx);
1940 	if (cause & F_MC7_PMTX)
1941 		mc7_intr_handler(&adapter->pmtx);
1942 	if (cause & F_MC7_CM)
1943 		mc7_intr_handler(&adapter->cm);
1944 	if (cause & F_CIM)
1945 		cim_intr_handler(adapter);
1946 	if (cause & F_TP1)
1947 		tp_intr_handler(adapter);
1948 	if (cause & F_ULP2_RX)
1949 		ulprx_intr_handler(adapter);
1950 	if (cause & F_ULP2_TX)
1951 		ulptx_intr_handler(adapter);
1952 	if (cause & F_PM1_RX)
1953 		pmrx_intr_handler(adapter);
1954 	if (cause & F_PM1_TX)
1955 		pmtx_intr_handler(adapter);
1956 	if (cause & F_CPL_SWITCH)
1957 		cplsw_intr_handler(adapter);
1958 	if (cause & F_MPS0)
1959 		mps_intr_handler(adapter);
1960 	if (cause & F_MC5A)
1961 		t3_mc5_intr_handler(&adapter->mc5);
1962 	if (cause & F_XGMAC0_0)
1963 		mac_intr_handler(adapter, 0);
1964 	if (cause & F_XGMAC0_1)
1965 		mac_intr_handler(adapter, 1);
1966 	if (cause & F_T3DBG)
1967 		t3_os_ext_intr_handler(adapter);
1968 
1969 	/* Clear the interrupts just processed. */
1970 	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1971 	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
1972 	return 1;
1973 }
1974 
1975 static unsigned int calc_gpio_intr(struct adapter *adap)
1976 {
1977 	unsigned int i, gpi_intr = 0;
1978 
1979 	for_each_port(adap, i)
1980 		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1981 		    adapter_info(adap)->gpio_intr[i])
1982 			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1983 	return gpi_intr;
1984 }
1985 
1986 /**
1987  *	t3_intr_enable - enable interrupts
1988  *	@adapter: the adapter whose interrupts should be enabled
1989  *
1990  *	Enable interrupts by setting the interrupt enable registers of the
1991  *	various HW modules and then enabling the top-level interrupt
1992  *	concentrator.
1993  */
1994 void t3_intr_enable(struct adapter *adapter)
1995 {
1996 	static const struct addr_val_pair intr_en_avp[] = {
1997 		{A_SG_INT_ENABLE, SGE_INTR_MASK},
1998 		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
1999 		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2000 		 MC7_INTR_MASK},
2001 		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2002 		 MC7_INTR_MASK},
2003 		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
2004 		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
2005 		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
2006 		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
2007 		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
2008 		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
2009 	};
2010 
2011 	adapter->slow_intr_mask = PL_INTR_MASK;
2012 
2013 	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
2014 	t3_write_reg(adapter, A_TP_INT_ENABLE,
2015 		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
2016 
2017 	if (adapter->params.rev > 0) {
2018 		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
2019 			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
2020 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
2021 			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
2022 			     F_PBL_BOUND_ERR_CH1);
2023 	} else {
2024 		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
2025 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
2026 	}
2027 
2028 	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
2029 
2030 	if (is_pcie(adapter))
2031 		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
2032 	else
2033 		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
2034 	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
2035 	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
2036 }
2037 
2038 /**
2039  *	t3_intr_disable - disable a card's interrupts
2040  *	@adapter: the adapter whose interrupts should be disabled
2041  *
2042  *	Disable interrupts.  We only disable the top-level interrupt
2043  *	concentrator and the SGE data interrupts.
2044  */
2045 void t3_intr_disable(struct adapter *adapter)
2046 {
2047 	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2048 	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
2049 	adapter->slow_intr_mask = 0;
2050 }
2051 
2052 /**
2053  *	t3_intr_clear - clear all interrupts
2054  *	@adapter: the adapter whose interrupts should be cleared
2055  *
2056  *	Clears all interrupts.
2057  */
2058 void t3_intr_clear(struct adapter *adapter)
2059 {
2060 	static const unsigned int cause_reg_addr[] = {
2061 		A_SG_INT_CAUSE,
2062 		A_SG_RSPQ_FL_STATUS,
2063 		A_PCIX_INT_CAUSE,
2064 		A_MC7_INT_CAUSE,
2065 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2066 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2067 		A_CIM_HOST_INT_CAUSE,
2068 		A_TP_INT_CAUSE,
2069 		A_MC5_DB_INT_CAUSE,
2070 		A_ULPRX_INT_CAUSE,
2071 		A_ULPTX_INT_CAUSE,
2072 		A_CPL_INTR_CAUSE,
2073 		A_PM1_TX_INT_CAUSE,
2074 		A_PM1_RX_INT_CAUSE,
2075 		A_MPS_INT_CAUSE,
2076 		A_T3DBG_INT_CAUSE,
2077 	};
2078 	unsigned int i;
2079 
2080 	/* Clear PHY and MAC interrupts for each port. */
2081 	for_each_port(adapter, i)
2082 	    t3_port_intr_clear(adapter, i);
2083 
2084 	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2085 		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2086 
2087 	if (is_pcie(adapter))
2088 		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2089 	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2090 	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
2091 }
2092 
2093 void t3_xgm_intr_enable(struct adapter *adapter, int idx)
2094 {
2095 	struct port_info *pi = adap2pinfo(adapter, idx);
2096 
2097 	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2098 		     XGM_EXTRA_INTR_MASK);
2099 }
2100 
2101 void t3_xgm_intr_disable(struct adapter *adapter, int idx)
2102 {
2103 	struct port_info *pi = adap2pinfo(adapter, idx);
2104 
2105 	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2106 		     0x7ff);
2107 }
2108 
2109 /**
2110  *	t3_port_intr_enable - enable port-specific interrupts
2111  *	@adapter: associated adapter
2112  *	@idx: index of port whose interrupts should be enabled
2113  *
2114  *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
2115  *	adapter port.
2116  */
2117 void t3_port_intr_enable(struct adapter *adapter, int idx)
2118 {
2119 	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2120 
2121 	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
2122 	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2123 	phy->ops->intr_enable(phy);
2124 }
2125 
2126 /**
2127  *	t3_port_intr_disable - disable port-specific interrupts
2128  *	@adapter: associated adapter
2129  *	@idx: index of port whose interrupts should be disabled
2130  *
2131  *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
2132  *	adapter port.
2133  */
2134 void t3_port_intr_disable(struct adapter *adapter, int idx)
2135 {
2136 	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2137 
2138 	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
2139 	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2140 	phy->ops->intr_disable(phy);
2141 }
2142 
2143 /**
2144  *	t3_port_intr_clear - clear port-specific interrupts
2145  *	@adapter: associated adapter
2146  *	@idx: index of port whose interrupts to clear
2147  *
2148  *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
2149  *	adapter port.
2150  */
2151 static void t3_port_intr_clear(struct adapter *adapter, int idx)
2152 {
2153 	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2154 
2155 	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
2156 	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
2157 	phy->ops->intr_clear(phy);
2158 }
2159 
2160 #define SG_CONTEXT_CMD_ATTEMPTS 100
2161 
2162 /**
2163  * 	t3_sge_write_context - write an SGE context
2164  * 	@adapter: the adapter
2165  * 	@id: the context id
2166  * 	@type: the context type
2167  *
2168  * 	Program an SGE context with the values already loaded in the
2169  * 	CONTEXT_DATA? registers.
2170  */
2171 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
2172 				unsigned int type)
2173 {
2174 	if (type == F_RESPONSEQ) {
2175 		/*
2176 		 * Can't write the Response Queue Context bits for
2177 		 * Interrupt Armed or the Reserve bits after the chip
2178 		 * has been initialized out of reset.  Writing to these
2179 		 * bits can confuse the hardware.
2180 		 */
2181 		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2182 		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2183 		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2184 		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2185 	} else {
2186 		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2187 		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2188 		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2189 		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2190 	}
2191 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2192 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2193 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2194 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2195 }
2196 
2197 /**
2198  *	clear_sge_ctxt - completely clear an SGE context
2199  *	@adap: the adapter
2200  *	@id: the context id
2201  *	@type: the context type
2202  *
2203  *	Completely clear an SGE context.  Used predominantly at post-reset
2204  *	initialization.  Note in particular that we don't skip writing to any
2205  *	"sensitive bits" in the contexts the way that t3_sge_write_context()
2206  *	does ...
2207  */
2208 static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2209 			  unsigned int type)
2210 {
2211 	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2212 	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2213 	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2214 	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2215 	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2216 	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2217 	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2218 	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2219 	t3_write_reg(adap, A_SG_CONTEXT_CMD,
2220 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2221 	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2222 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2223 }
2224 
2225 /**
2226  *	t3_sge_init_ecntxt - initialize an SGE egress context
2227  *	@adapter: the adapter to configure
2228  *	@id: the context id
2229  *	@gts_enable: whether to enable GTS for the context
2230  *	@type: the egress context type
2231  *	@respq: associated response queue
2232  *	@base_addr: base address of queue
2233  *	@size: number of queue entries
2234  *	@token: uP token
2235  *	@gen: initial generation value for the context
2236  *	@cidx: consumer pointer
2237  *
2238  *	Initialize an SGE egress context and make it ready for use.  If the
2239  *	platform allows concurrent context operations, the caller is
2240  *	responsible for appropriate locking.
2241  */
2242 int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2243 		       enum sge_context_type type, int respq, u64 base_addr,
2244 		       unsigned int size, unsigned int token, int gen,
2245 		       unsigned int cidx)
2246 {
2247 	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2248 
2249 	if (base_addr & 0xfff)	/* must be 4K aligned */
2250 		return -EINVAL;
2251 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2252 		return -EBUSY;
2253 
2254 	base_addr >>= 12;
2255 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2256 		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2257 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2258 		     V_EC_BASE_LO(base_addr & 0xffff));
2259 	base_addr >>= 16;
2260 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2261 	base_addr >>= 32;
2262 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2263 		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2264 		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2265 		     F_EC_VALID);
2266 	return t3_sge_write_context(adapter, id, F_EGRESS);
2267 }
2268 
2269 /**
2270  *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2271  *	@adapter: the adapter to configure
2272  *	@id: the context id
2273  *	@gts_enable: whether to enable GTS for the context
2274  *	@base_addr: base address of queue
2275  *	@size: number of queue entries
2276  *	@bsize: size of each buffer for this queue
2277  *	@cong_thres: threshold to signal congestion to upstream producers
2278  *	@gen: initial generation value for the context
2279  *	@cidx: consumer pointer
2280  *
2281  *	Initialize an SGE free list context and make it ready for use.  The
2282  *	caller is responsible for ensuring only one context operation occurs
2283  *	at a time.
2284  */
2285 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2286 			int gts_enable, u64 base_addr, unsigned int size,
2287 			unsigned int bsize, unsigned int cong_thres, int gen,
2288 			unsigned int cidx)
2289 {
2290 	if (base_addr & 0xfff)	/* must be 4K aligned */
2291 		return -EINVAL;
2292 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2293 		return -EBUSY;
2294 
2295 	base_addr >>= 12;
2296 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2297 	base_addr >>= 32;
2298 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2299 		     V_FL_BASE_HI((u32) base_addr) |
2300 		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2301 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2302 		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2303 		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2304 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2305 		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2306 		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2307 	return t3_sge_write_context(adapter, id, F_FREELIST);
2308 }
2309 
2310 /**
2311  *	t3_sge_init_rspcntxt - initialize an SGE response queue context
2312  *	@adapter: the adapter to configure
2313  *	@id: the context id
2314  *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2315  *	@base_addr: base address of queue
2316  *	@size: number of queue entries
2317  *	@fl_thres: threshold for selecting the normal or jumbo free list
2318  *	@gen: initial generation value for the context
2319  *	@cidx: consumer pointer
2320  *
2321  *	Initialize an SGE response queue context and make it ready for use.
2322  *	The caller is responsible for ensuring only one context operation
2323  *	occurs at a time.
2324  */
2325 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2326 			 int irq_vec_idx, u64 base_addr, unsigned int size,
2327 			 unsigned int fl_thres, int gen, unsigned int cidx)
2328 {
2329 	unsigned int intr = 0;
2330 
2331 	if (base_addr & 0xfff)	/* must be 4K aligned */
2332 		return -EINVAL;
2333 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2334 		return -EBUSY;
2335 
2336 	base_addr >>= 12;
2337 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2338 		     V_CQ_INDEX(cidx));
2339 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2340 	base_addr >>= 32;
2341 	if (irq_vec_idx >= 0)
2342 		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2343 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2344 		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2345 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2346 	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2347 }
2348 
2349 /**
2350  *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
2351  *	@adapter: the adapter to configure
2352  *	@id: the context id
2353  *	@base_addr: base address of queue
2354  *	@size: number of queue entries
2355  *	@rspq: response queue for async notifications
2356  *	@ovfl_mode: CQ overflow mode
2357  *	@credits: completion queue credits
2358  *	@credit_thres: the credit threshold
2359  *
2360  *	Initialize an SGE completion queue context and make it ready for use.
2361  *	The caller is responsible for ensuring only one context operation
2362  *	occurs at a time.
2363  */
2364 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2365 			unsigned int size, int rspq, int ovfl_mode,
2366 			unsigned int credits, unsigned int credit_thres)
2367 {
2368 	if (base_addr & 0xfff)	/* must be 4K aligned */
2369 		return -EINVAL;
2370 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2371 		return -EBUSY;
2372 
2373 	base_addr >>= 12;
2374 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2375 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2376 	base_addr >>= 32;
2377 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2378 		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2379 		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2380 		     V_CQ_ERR(ovfl_mode));
2381 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2382 		     V_CQ_CREDIT_THRES(credit_thres));
2383 	return t3_sge_write_context(adapter, id, F_CQ);
2384 }
2385 
2386 /**
2387  *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
2388  *	@adapter: the adapter
2389  *	@id: the egress context id
2390  *	@enable: enable (1) or disable (0) the context
2391  *
2392  *	Enable or disable an SGE egress context.  The caller is responsible for
2393  *	ensuring only one context operation occurs at a time.
2394  */
2395 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2396 {
2397 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2398 		return -EBUSY;
2399 
2400 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2401 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2402 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2403 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2404 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2405 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2406 		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2407 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2408 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2409 }
2410 
2411 /**
2412  *	t3_sge_disable_fl - disable an SGE free-buffer list
2413  *	@adapter: the adapter
2414  *	@id: the free list context id
2415  *
2416  *	Disable an SGE free-buffer list.  The caller is responsible for
2417  *	ensuring only one context operation occurs at a time.
2418  */
2419 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2420 {
2421 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2422 		return -EBUSY;
2423 
2424 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2425 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2426 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2427 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2428 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2429 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2430 		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2431 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2432 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2433 }
2434 
2435 /**
2436  *	t3_sge_disable_rspcntxt - disable an SGE response queue
2437  *	@adapter: the adapter
2438  *	@id: the response queue context id
2439  *
2440  *	Disable an SGE response queue.  The caller is responsible for
2441  *	ensuring only one context operation occurs at a time.
2442  */
2443 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2444 {
2445 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2446 		return -EBUSY;
2447 
2448 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2449 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2450 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2451 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2452 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2453 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2454 		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2455 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2456 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2457 }
2458 
2459 /**
2460  *	t3_sge_disable_cqcntxt - disable an SGE completion queue
2461  *	@adapter: the adapter
2462  *	@id: the completion queue context id
2463  *
2464  *	Disable an SGE completion queue.  The caller is responsible for
2465  *	ensuring only one context operation occurs at a time.
2466  */
2467 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2468 {
2469 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2470 		return -EBUSY;
2471 
2472 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2473 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2474 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2475 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2476 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2477 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2478 		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2479 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2480 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2481 }
2482 
2483 /**
2484  *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
2485  *	@adapter: the adapter
2486  *	@id: the context id
2487  *	@op: the operation to perform
2488  *	@credits: credit value to write
2489  *
2490  *	Perform the selected operation on an SGE completion queue context.
2491  *	The caller is responsible for ensuring only one context operation
2492  *	occurs at a time.
2493  */
2494 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2495 		      unsigned int credits)
2496 {
2497 	u32 val;
2498 
2499 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2500 		return -EBUSY;
2501 
2502 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2503 	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2504 		     V_CONTEXT(id) | F_CQ);
2505 	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2506 				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2507 		return -EIO;
2508 
2509 	if (op >= 2 && op < 7) {
2510 		if (adapter->params.rev > 0)
2511 			return G_CQ_INDEX(val);
2512 
2513 		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2514 			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2515 		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2516 				    F_CONTEXT_CMD_BUSY, 0,
2517 				    SG_CONTEXT_CMD_ATTEMPTS, 1))
2518 			return -EIO;
2519 		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2520 	}
2521 	return 0;
2522 }
2523 
2524 /**
2525  *	t3_config_rss - configure Rx packet steering
2526  *	@adapter: the adapter
2527  *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
2528  *	@cpus: values for the CPU lookup table (0xff terminated)
2529  *	@rspq: values for the response queue lookup table (0xffff terminated)
2530  *
2531  *	Programs the receive packet steering logic.  @cpus and @rspq provide
2532  *	the values for the CPU and response queue lookup tables.  If they
2533  *	provide fewer values than the size of the tables the supplied values
2534  *	are used repeatedly until the tables are fully populated.
2535  */
2536 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2537 		   const u8 * cpus, const u16 *rspq)
2538 {
2539 	int i, j, cpu_idx = 0, q_idx = 0;
2540 
2541 	if (cpus)
2542 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2543 			u32 val = i << 16;
2544 
2545 			for (j = 0; j < 2; ++j) {
2546 				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2547 				if (cpus[cpu_idx] == 0xff)
2548 					cpu_idx = 0;
2549 			}
2550 			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2551 		}
2552 
2553 	if (rspq)
2554 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2555 			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2556 				     (i << 16) | rspq[q_idx++]);
2557 			if (rspq[q_idx] == 0xffff)
2558 				q_idx = 0;
2559 		}
2560 
2561 	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2562 }
2563 
2564 /**
2565  *	t3_tp_set_offload_mode - put TP in NIC/offload mode
2566  *	@adap: the adapter
2567  *	@enable: 1 to select offload mode, 0 for regular NIC
2568  *
2569  *	Switches TP to NIC/offload mode.
2570  */
2571 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2572 {
2573 	if (is_offload(adap) || !enable)
2574 		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2575 				 V_NICMODE(!enable));
2576 }
2577 
2578 /**
2579  *	pm_num_pages - calculate the number of pages of the payload memory
2580  *	@mem_size: the size of the payload memory
2581  *	@pg_size: the size of each payload memory page
2582  *
2583  *	Calculate the number of pages, each of the given size, that fit in a
2584  *	memory of the specified size, respecting the HW requirement that the
2585  *	number of pages must be a multiple of 24.
2586  */
2587 static inline unsigned int pm_num_pages(unsigned int mem_size,
2588 					unsigned int pg_size)
2589 {
2590 	unsigned int n = mem_size / pg_size;
2591 
2592 	return n - n % 24;
2593 }
2594 
2595 #define mem_region(adap, start, size, reg) \
2596 	t3_write_reg((adap), A_ ## reg, (start)); \
2597 	start += size
2598 
2599 /**
2600  *	partition_mem - partition memory and configure TP memory settings
2601  *	@adap: the adapter
2602  *	@p: the TP parameters
2603  *
2604  *	Partitions context and payload memory and configures TP's memory
2605  *	registers.
2606  */
2607 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2608 {
2609 	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2610 	unsigned int timers = 0, timers_shift = 22;
2611 
2612 	if (adap->params.rev > 0) {
2613 		if (tids <= 16 * 1024) {
2614 			timers = 1;
2615 			timers_shift = 16;
2616 		} else if (tids <= 64 * 1024) {
2617 			timers = 2;
2618 			timers_shift = 18;
2619 		} else if (tids <= 256 * 1024) {
2620 			timers = 3;
2621 			timers_shift = 20;
2622 		}
2623 	}
2624 
2625 	t3_write_reg(adap, A_TP_PMM_SIZE,
2626 		     p->chan_rx_size | (p->chan_tx_size >> 16));
2627 
2628 	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2629 	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2630 	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2631 	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2632 			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2633 
2634 	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2635 	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2636 	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2637 
2638 	pstructs = p->rx_num_pgs + p->tx_num_pgs;
2639 	/* Add a bit of headroom and make multiple of 24 */
2640 	pstructs += 48;
2641 	pstructs -= pstructs % 24;
2642 	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2643 
2644 	m = tids * TCB_SIZE;
2645 	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2646 	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2647 	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2648 	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2649 	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2650 	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2651 	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2652 	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2653 
2654 	m = (m + 4095) & ~0xfff;
2655 	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2656 	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2657 
2658 	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2659 	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2660 	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2661 	if (tids < m)
2662 		adap->params.mc5.nservers += m - tids;
2663 }
2664 
2665 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2666 				  u32 val)
2667 {
2668 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2669 	t3_write_reg(adap, A_TP_PIO_DATA, val);
2670 }
2671 
2672 static void tp_config(struct adapter *adap, const struct tp_params *p)
2673 {
2674 	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2675 		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2676 		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2677 	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2678 		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2679 		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2680 	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2681 		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2682 		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2683 		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2684 	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2685 			 F_IPV6ENABLE | F_NICMODE);
2686 	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2687 	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2688 	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2689 			 adap->params.rev > 0 ? F_ENABLEESND :
2690 			 F_T3A_ENABLEESND);
2691 
2692 	t3_set_reg_field(adap, A_TP_PC_CONFIG,
2693 			 F_ENABLEEPCMDAFULL,
2694 			 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2695 			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2696 	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2697 			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2698 			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2699 	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2700 	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2701 
2702 	if (adap->params.rev > 0) {
2703 		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2704 		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2705 				 F_TXPACEAUTO);
2706 		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2707 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2708 	} else
2709 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2710 
2711 	if (adap->params.rev == T3_REV_C)
2712 		t3_set_reg_field(adap, A_TP_PC_CONFIG,
2713 				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2714 				 V_TABLELATENCYDELTA(4));
2715 
2716 	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2717 	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2718 	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2719 	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2720 }
2721 
2722 /* Desired TP timer resolution in usec */
2723 #define TP_TMR_RES 50
2724 
2725 /* TCP timer values in ms */
2726 #define TP_DACK_TIMER 50
2727 #define TP_RTO_MIN    250
2728 
2729 /**
2730  *	tp_set_timers - set TP timing parameters
2731  *	@adap: the adapter to set
2732  *	@core_clk: the core clock frequency in Hz
2733  *
2734  *	Set TP's timing parameters, such as the various timer resolutions and
2735  *	the TCP timer values.
2736  */
2737 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2738 {
2739 	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2740 	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
2741 	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
2742 	unsigned int tps = core_clk >> tre;
2743 
2744 	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2745 		     V_DELAYEDACKRESOLUTION(dack_re) |
2746 		     V_TIMESTAMPRESOLUTION(tstamp_re));
2747 	t3_write_reg(adap, A_TP_DACK_TIMER,
2748 		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2749 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2750 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2751 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2752 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2753 	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2754 		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2755 		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2756 		     V_KEEPALIVEMAX(9));
2757 
2758 #define SECONDS * tps
2759 
2760 	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2761 	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2762 	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2763 	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2764 	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2765 	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2766 	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2767 	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2768 	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2769 
2770 #undef SECONDS
2771 }
2772 
2773 /**
2774  *	t3_tp_set_coalescing_size - set receive coalescing size
2775  *	@adap: the adapter
2776  *	@size: the receive coalescing size
2777  *	@psh: whether a set PSH bit should deliver coalesced data
2778  *
2779  *	Set the receive coalescing size and PSH bit handling.
2780  */
2781 static int t3_tp_set_coalescing_size(struct adapter *adap,
2782 				     unsigned int size, int psh)
2783 {
2784 	u32 val;
2785 
2786 	if (size > MAX_RX_COALESCING_LEN)
2787 		return -EINVAL;
2788 
2789 	val = t3_read_reg(adap, A_TP_PARA_REG3);
2790 	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2791 
2792 	if (size) {
2793 		val |= F_RXCOALESCEENABLE;
2794 		if (psh)
2795 			val |= F_RXCOALESCEPSHEN;
2796 		size = min(MAX_RX_COALESCING_LEN, size);
2797 		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2798 			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2799 	}
2800 	t3_write_reg(adap, A_TP_PARA_REG3, val);
2801 	return 0;
2802 }
2803 
2804 /**
2805  *	t3_tp_set_max_rxsize - set the max receive size
2806  *	@adap: the adapter
2807  *	@size: the max receive size
2808  *
2809  *	Set TP's max receive size.  This is the limit that applies when
2810  *	receive coalescing is disabled.
2811  */
2812 static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2813 {
2814 	t3_write_reg(adap, A_TP_PARA_REG7,
2815 		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2816 }
2817 
2818 static void init_mtus(unsigned short mtus[])
2819 {
2820 	/*
2821 	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
2822 	 * it can accommodate max size TCP/IP headers when SACK and timestamps
2823 	 * are enabled and still have at least 8 bytes of payload.
2824 	 */
2825 	mtus[0] = 88;
2826 	mtus[1] = 88;
2827 	mtus[2] = 256;
2828 	mtus[3] = 512;
2829 	mtus[4] = 576;
2830 	mtus[5] = 1024;
2831 	mtus[6] = 1280;
2832 	mtus[7] = 1492;
2833 	mtus[8] = 1500;
2834 	mtus[9] = 2002;
2835 	mtus[10] = 2048;
2836 	mtus[11] = 4096;
2837 	mtus[12] = 4352;
2838 	mtus[13] = 8192;
2839 	mtus[14] = 9000;
2840 	mtus[15] = 9600;
2841 }
2842 
2843 /*
2844  * Initial congestion control parameters.
2845  */
2846 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2847 {
2848 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2849 	a[9] = 2;
2850 	a[10] = 3;
2851 	a[11] = 4;
2852 	a[12] = 5;
2853 	a[13] = 6;
2854 	a[14] = 7;
2855 	a[15] = 8;
2856 	a[16] = 9;
2857 	a[17] = 10;
2858 	a[18] = 14;
2859 	a[19] = 17;
2860 	a[20] = 21;
2861 	a[21] = 25;
2862 	a[22] = 30;
2863 	a[23] = 35;
2864 	a[24] = 45;
2865 	a[25] = 60;
2866 	a[26] = 80;
2867 	a[27] = 100;
2868 	a[28] = 200;
2869 	a[29] = 300;
2870 	a[30] = 400;
2871 	a[31] = 500;
2872 
2873 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2874 	b[9] = b[10] = 1;
2875 	b[11] = b[12] = 2;
2876 	b[13] = b[14] = b[15] = b[16] = 3;
2877 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2878 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2879 	b[28] = b[29] = 6;
2880 	b[30] = b[31] = 7;
2881 }
2882 
2883 /* The minimum additive increment value for the congestion control table */
2884 #define CC_MIN_INCR 2U
2885 
2886 /**
2887  *	t3_load_mtus - write the MTU and congestion control HW tables
2888  *	@adap: the adapter
2889  *	@mtus: the unrestricted values for the MTU table
2890  *	@alpha: the values for the congestion control alpha parameter
2891  *	@beta: the values for the congestion control beta parameter
2892  *	@mtu_cap: the maximum permitted effective MTU
2893  *
2894  *	Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2895  *	Update the high-speed congestion control table with the supplied alpha,
2896  * 	beta, and MTUs.
2897  */
2898 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2899 		  unsigned short alpha[NCCTRL_WIN],
2900 		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2901 {
2902 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
2903 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2904 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2905 		28672, 40960, 57344, 81920, 114688, 163840, 229376
2906 	};
2907 
2908 	unsigned int i, w;
2909 
2910 	for (i = 0; i < NMTUS; ++i) {
2911 		unsigned int mtu = min(mtus[i], mtu_cap);
2912 		unsigned int log2 = fls(mtu);
2913 
2914 		if (!(mtu & ((1 << log2) >> 2)))	/* round */
2915 			log2--;
2916 		t3_write_reg(adap, A_TP_MTU_TABLE,
2917 			     (i << 24) | (log2 << 16) | mtu);
2918 
2919 		for (w = 0; w < NCCTRL_WIN; ++w) {
2920 			unsigned int inc;
2921 
2922 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2923 				  CC_MIN_INCR);
2924 
2925 			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2926 				     (w << 16) | (beta[w] << 13) | inc);
2927 		}
2928 	}
2929 }
2930 
2931 /**
2932  *	t3_tp_get_mib_stats - read TP's MIB counters
2933  *	@adap: the adapter
2934  *	@tps: holds the returned counter values
2935  *
2936  *	Returns the values of TP's MIB counters.
2937  */
2938 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2939 {
2940 	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2941 			 sizeof(*tps) / sizeof(u32), 0);
2942 }
2943 
2944 #define ulp_region(adap, name, start, len) \
2945 	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2946 	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2947 		     (start) + (len) - 1); \
2948 	start += len
2949 
2950 #define ulptx_region(adap, name, start, len) \
2951 	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2952 	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2953 		     (start) + (len) - 1)
2954 
2955 static void ulp_config(struct adapter *adap, const struct tp_params *p)
2956 {
2957 	unsigned int m = p->chan_rx_size;
2958 
2959 	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2960 	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2961 	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2962 	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2963 	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2964 	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2965 	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2966 	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2967 }
2968 
2969 /**
2970  *	t3_set_proto_sram - set the contents of the protocol sram
2971  *	@adap: the adapter
2972  *	@data: the protocol image
2973  *
2974  *	Write the contents of the protocol SRAM.
2975  */
2976 int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2977 {
2978 	int i;
2979 	const __be32 *buf = (const __be32 *)data;
2980 
2981 	for (i = 0; i < PROTO_SRAM_LINES; i++) {
2982 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2983 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2984 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2985 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2986 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2987 
2988 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2989 		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2990 			return -EIO;
2991 	}
2992 	t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2993 
2994 	return 0;
2995 }
2996 
2997 void t3_config_trace_filter(struct adapter *adapter,
2998 			    const struct trace_params *tp, int filter_index,
2999 			    int invert, int enable)
3000 {
3001 	u32 addr, key[4], mask[4];
3002 
3003 	key[0] = tp->sport | (tp->sip << 16);
3004 	key[1] = (tp->sip >> 16) | (tp->dport << 16);
3005 	key[2] = tp->dip;
3006 	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3007 
3008 	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3009 	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3010 	mask[2] = tp->dip_mask;
3011 	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
3012 
3013 	if (invert)
3014 		key[3] |= (1 << 29);
3015 	if (enable)
3016 		key[3] |= (1 << 28);
3017 
3018 	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3019 	tp_wr_indirect(adapter, addr++, key[0]);
3020 	tp_wr_indirect(adapter, addr++, mask[0]);
3021 	tp_wr_indirect(adapter, addr++, key[1]);
3022 	tp_wr_indirect(adapter, addr++, mask[1]);
3023 	tp_wr_indirect(adapter, addr++, key[2]);
3024 	tp_wr_indirect(adapter, addr++, mask[2]);
3025 	tp_wr_indirect(adapter, addr++, key[3]);
3026 	tp_wr_indirect(adapter, addr, mask[3]);
3027 	t3_read_reg(adapter, A_TP_PIO_DATA);
3028 }
3029 
3030 /**
3031  *	t3_config_sched - configure a HW traffic scheduler
3032  *	@adap: the adapter
3033  *	@kbps: target rate in Kbps
3034  *	@sched: the scheduler index
3035  *
3036  *	Configure a HW scheduler for the target rate
3037  */
3038 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
3039 {
3040 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3041 	unsigned int clk = adap->params.vpd.cclk * 1000;
3042 	unsigned int selected_cpt = 0, selected_bpt = 0;
3043 
3044 	if (kbps > 0) {
3045 		kbps *= 125;	/* -> bytes */
3046 		for (cpt = 1; cpt <= 255; cpt++) {
3047 			tps = clk / cpt;
3048 			bpt = (kbps + tps / 2) / tps;
3049 			if (bpt > 0 && bpt <= 255) {
3050 				v = bpt * tps;
3051 				delta = v >= kbps ? v - kbps : kbps - v;
3052 				if (delta <= mindelta) {
3053 					mindelta = delta;
3054 					selected_cpt = cpt;
3055 					selected_bpt = bpt;
3056 				}
3057 			} else if (selected_cpt)
3058 				break;
3059 		}
3060 		if (!selected_cpt)
3061 			return -EINVAL;
3062 	}
3063 	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3064 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3065 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3066 	if (sched & 1)
3067 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3068 	else
3069 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3070 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3071 	return 0;
3072 }
3073 
3074 static int tp_init(struct adapter *adap, const struct tp_params *p)
3075 {
3076 	int busy = 0;
3077 
3078 	tp_config(adap, p);
3079 	t3_set_vlan_accel(adap, 3, 0);
3080 
3081 	if (is_offload(adap)) {
3082 		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3083 		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3084 		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3085 				       0, 1000, 5);
3086 		if (busy)
3087 			CH_ERR(adap, "TP initialization timed out\n");
3088 	}
3089 
3090 	if (!busy)
3091 		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3092 	return busy;
3093 }
3094 
3095 /*
3096  * Perform the bits of HW initialization that are dependent on the Tx
3097  * channels being used.
3098  */
3099 static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
3100 {
3101 	int i;
3102 
3103 	if (chan_map != 3) {                                 /* one channel */
3104 		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3105 		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3106 		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3107 			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3108 					      F_TPTXPORT1EN | F_PORT1ACTIVE));
3109 		t3_write_reg(adap, A_PM1_TX_CFG,
3110 			     chan_map == 1 ? 0xffffffff : 0);
3111 	} else {                                             /* two channels */
3112 		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3113 		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3114 		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3115 			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3116 		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3117 			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3118 			     F_ENFORCEPKT);
3119 		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3120 		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3121 		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3122 			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3123 		for (i = 0; i < 16; i++)
3124 			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3125 				     (i << 16) | 0x1010);
3126 	}
3127 }
3128 
3129 static int calibrate_xgm(struct adapter *adapter)
3130 {
3131 	if (uses_xaui(adapter)) {
3132 		unsigned int v, i;
3133 
3134 		for (i = 0; i < 5; ++i) {
3135 			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3136 			t3_read_reg(adapter, A_XGM_XAUI_IMP);
3137 			msleep(1);
3138 			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3139 			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3140 				t3_write_reg(adapter, A_XGM_XAUI_IMP,
3141 					     V_XAUIIMP(G_CALIMP(v) >> 2));
3142 				return 0;
3143 			}
3144 		}
3145 		CH_ERR(adapter, "MAC calibration failed\n");
3146 		return -1;
3147 	} else {
3148 		t3_write_reg(adapter, A_XGM_RGMII_IMP,
3149 			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3150 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3151 				 F_XGM_IMPSETUPDATE);
3152 	}
3153 	return 0;
3154 }
3155 
3156 static void calibrate_xgm_t3b(struct adapter *adapter)
3157 {
3158 	if (!uses_xaui(adapter)) {
3159 		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3160 			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3161 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3162 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3163 				 F_XGM_IMPSETUPDATE);
3164 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3165 				 0);
3166 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3167 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3168 	}
3169 }
3170 
3171 struct mc7_timing_params {
3172 	unsigned char ActToPreDly;
3173 	unsigned char ActToRdWrDly;
3174 	unsigned char PreCyc;
3175 	unsigned char RefCyc[5];
3176 	unsigned char BkCyc;
3177 	unsigned char WrToRdDly;
3178 	unsigned char RdToWrDly;
3179 };
3180 
3181 /*
3182  * Write a value to a register and check that the write completed.  These
3183  * writes normally complete in a cycle or two, so one read should suffice.
3184  * The very first read exists to flush the posted write to the device.
3185  */
3186 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3187 {
3188 	t3_write_reg(adapter, addr, val);
3189 	t3_read_reg(adapter, addr);	/* flush */
3190 	if (!(t3_read_reg(adapter, addr) & F_BUSY))
3191 		return 0;
3192 	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3193 	return -EIO;
3194 }
3195 
3196 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3197 {
3198 	static const unsigned int mc7_mode[] = {
3199 		0x632, 0x642, 0x652, 0x432, 0x442
3200 	};
3201 	static const struct mc7_timing_params mc7_timings[] = {
3202 		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3203 		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3204 		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3205 		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3206 		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3207 	};
3208 
3209 	u32 val;
3210 	unsigned int width, density, slow, attempts;
3211 	struct adapter *adapter = mc7->adapter;
3212 	const struct mc7_timing_params *p = &mc7_timings[mem_type];
3213 
3214 	if (!mc7->size)
3215 		return 0;
3216 
3217 	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3218 	slow = val & F_SLOW;
3219 	width = G_WIDTH(val);
3220 	density = G_DEN(val);
3221 
3222 	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3223 	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
3224 	msleep(1);
3225 
3226 	if (!slow) {
3227 		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3228 		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3229 		msleep(1);
3230 		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3231 		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3232 			CH_ERR(adapter, "%s MC7 calibration timed out\n",
3233 			       mc7->name);
3234 			goto out_fail;
3235 		}
3236 	}
3237 
3238 	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3239 		     V_ACTTOPREDLY(p->ActToPreDly) |
3240 		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3241 		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3242 		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3243 
3244 	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3245 		     val | F_CLKEN | F_TERM150);
3246 	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
3247 
3248 	if (!slow)
3249 		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3250 				 F_DLLENB);
3251 	udelay(1);
3252 
3253 	val = slow ? 3 : 6;
3254 	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3255 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3256 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3257 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3258 		goto out_fail;
3259 
3260 	if (!slow) {
3261 		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3262 		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3263 		udelay(5);
3264 	}
3265 
3266 	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3267 	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3268 	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3269 	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3270 		       mc7_mode[mem_type]) ||
3271 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3272 	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3273 		goto out_fail;
3274 
3275 	/* clock value is in KHz */
3276 	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
3277 	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */
3278 
3279 	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3280 		     F_PERREFEN | V_PREREFDIV(mc7_clock));
3281 	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */
3282 
3283 	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3284 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3285 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3286 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3287 		     (mc7->size << width) - 1);
3288 	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3289 	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */
3290 
3291 	attempts = 50;
3292 	do {
3293 		msleep(250);
3294 		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3295 	} while ((val & F_BUSY) && --attempts);
3296 	if (val & F_BUSY) {
3297 		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3298 		goto out_fail;
3299 	}
3300 
3301 	/* Enable normal memory accesses. */
3302 	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3303 	return 0;
3304 
3305 out_fail:
3306 	return -1;
3307 }
3308 
3309 static void config_pcie(struct adapter *adap)
3310 {
3311 	static const u16 ack_lat[4][6] = {
3312 		{237, 416, 559, 1071, 2095, 4143},
3313 		{128, 217, 289, 545, 1057, 2081},
3314 		{73, 118, 154, 282, 538, 1050},
3315 		{67, 107, 86, 150, 278, 534}
3316 	};
3317 	static const u16 rpl_tmr[4][6] = {
3318 		{711, 1248, 1677, 3213, 6285, 12429},
3319 		{384, 651, 867, 1635, 3171, 6243},
3320 		{219, 354, 462, 846, 1614, 3150},
3321 		{201, 321, 258, 450, 834, 1602}
3322 	};
3323 
3324 	u16 val, devid;
3325 	unsigned int log2_width, pldsize;
3326 	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3327 
3328 	pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, &val);
3329 	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3330 
3331 	pci_read_config_word(adap->pdev, 0x2, &devid);
3332 	if (devid == 0x37) {
3333 		pcie_capability_write_word(adap->pdev, PCI_EXP_DEVCTL,
3334 					   val & ~PCI_EXP_DEVCTL_READRQ &
3335 					   ~PCI_EXP_DEVCTL_PAYLOAD);
3336 		pldsize = 0;
3337 	}
3338 
3339 	pcie_capability_read_word(adap->pdev, PCI_EXP_LNKCTL, &val);
3340 
3341 	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3342 	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3343 	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3344 	log2_width = fls(adap->params.pci.width) - 1;
3345 	acklat = ack_lat[log2_width][pldsize];
3346 	if (val & PCI_EXP_LNKCTL_ASPM_L0S)	/* check LOsEnable */
3347 		acklat += fst_trn_tx * 4;
3348 	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3349 
3350 	if (adap->params.rev == 0)
3351 		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3352 				 V_T3A_ACKLAT(M_T3A_ACKLAT),
3353 				 V_T3A_ACKLAT(acklat));
3354 	else
3355 		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3356 				 V_ACKLAT(acklat));
3357 
3358 	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3359 			 V_REPLAYLMT(rpllmt));
3360 
3361 	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3362 	t3_set_reg_field(adap, A_PCIE_CFG, 0,
3363 			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3364 			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3365 }
3366 
3367 /*
3368  * Initialize and configure T3 HW modules.  This performs the
3369  * initialization steps that need to be done once after a card is reset.
3370  * MAC and PHY initialization is handled separarely whenever a port is enabled.
3371  *
3372  * fw_params are passed to FW and their value is platform dependent.  Only the
3373  * top 8 bits are available for use, the rest must be 0.
3374  */
3375 int t3_init_hw(struct adapter *adapter, u32 fw_params)
3376 {
3377 	int err = -EIO, attempts, i;
3378 	const struct vpd_params *vpd = &adapter->params.vpd;
3379 
3380 	if (adapter->params.rev > 0)
3381 		calibrate_xgm_t3b(adapter);
3382 	else if (calibrate_xgm(adapter))
3383 		goto out_err;
3384 
3385 	if (vpd->mclk) {
3386 		partition_mem(adapter, &adapter->params.tp);
3387 
3388 		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3389 		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3390 		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3391 		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3392 				adapter->params.mc5.nfilters,
3393 				adapter->params.mc5.nroutes))
3394 			goto out_err;
3395 
3396 		for (i = 0; i < 32; i++)
3397 			if (clear_sge_ctxt(adapter, i, F_CQ))
3398 				goto out_err;
3399 	}
3400 
3401 	if (tp_init(adapter, &adapter->params.tp))
3402 		goto out_err;
3403 
3404 	t3_tp_set_coalescing_size(adapter,
3405 				  min(adapter->params.sge.max_pkt_size,
3406 				      MAX_RX_COALESCING_LEN), 1);
3407 	t3_tp_set_max_rxsize(adapter,
3408 			     min(adapter->params.sge.max_pkt_size, 16384U));
3409 	ulp_config(adapter, &adapter->params.tp);
3410 
3411 	if (is_pcie(adapter))
3412 		config_pcie(adapter);
3413 	else
3414 		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3415 				 F_DMASTOPEN | F_CLIDECEN);
3416 
3417 	if (adapter->params.rev == T3_REV_C)
3418 		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3419 				 F_CFG_CQE_SOP_MASK);
3420 
3421 	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3422 	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3423 	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3424 	chan_init_hw(adapter, adapter->params.chan_map);
3425 	t3_sge_init(adapter, &adapter->params.sge);
3426 	t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
3427 
3428 	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3429 
3430 	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3431 	t3_write_reg(adapter, A_CIM_BOOT_CFG,
3432 		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3433 	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */
3434 
3435 	attempts = 100;
3436 	do {			/* wait for uP to initialize */
3437 		msleep(20);
3438 	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3439 	if (!attempts) {
3440 		CH_ERR(adapter, "uP initialization timed out\n");
3441 		goto out_err;
3442 	}
3443 
3444 	err = 0;
3445 out_err:
3446 	return err;
3447 }
3448 
3449 /**
3450  *	get_pci_mode - determine a card's PCI mode
3451  *	@adapter: the adapter
3452  *	@p: where to store the PCI settings
3453  *
3454  *	Determines a card's PCI mode and associated parameters, such as speed
3455  *	and width.
3456  */
3457 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3458 {
3459 	static unsigned short speed_map[] = { 33, 66, 100, 133 };
3460 	u32 pci_mode;
3461 
3462 	if (pci_is_pcie(adapter->pdev)) {
3463 		u16 val;
3464 
3465 		p->variant = PCI_VARIANT_PCIE;
3466 		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
3467 		p->width = (val >> 4) & 0x3f;
3468 		return;
3469 	}
3470 
3471 	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3472 	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3473 	p->width = (pci_mode & F_64BIT) ? 64 : 32;
3474 	pci_mode = G_PCIXINITPAT(pci_mode);
3475 	if (pci_mode == 0)
3476 		p->variant = PCI_VARIANT_PCI;
3477 	else if (pci_mode < 4)
3478 		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3479 	else if (pci_mode < 8)
3480 		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3481 	else
3482 		p->variant = PCI_VARIANT_PCIX_266_MODE2;
3483 }
3484 
3485 /**
3486  *	init_link_config - initialize a link's SW state
3487  *	@lc: structure holding the link state
3488  *	@caps: information about the current card
3489  *
3490  *	Initializes the SW state maintained for each link, including the link's
3491  *	capabilities and default speed/duplex/flow-control/autonegotiation
3492  *	settings.
3493  */
3494 static void init_link_config(struct link_config *lc, unsigned int caps)
3495 {
3496 	lc->supported = caps;
3497 	lc->requested_speed = lc->speed = SPEED_INVALID;
3498 	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3499 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3500 	if (lc->supported & SUPPORTED_Autoneg) {
3501 		lc->advertising = lc->supported;
3502 		lc->autoneg = AUTONEG_ENABLE;
3503 		lc->requested_fc |= PAUSE_AUTONEG;
3504 	} else {
3505 		lc->advertising = 0;
3506 		lc->autoneg = AUTONEG_DISABLE;
3507 	}
3508 }
3509 
3510 /**
3511  *	mc7_calc_size - calculate MC7 memory size
3512  *	@cfg: the MC7 configuration
3513  *
3514  *	Calculates the size of an MC7 memory in bytes from the value of its
3515  *	configuration register.
3516  */
3517 static unsigned int mc7_calc_size(u32 cfg)
3518 {
3519 	unsigned int width = G_WIDTH(cfg);
3520 	unsigned int banks = !!(cfg & F_BKS) + 1;
3521 	unsigned int org = !!(cfg & F_ORG) + 1;
3522 	unsigned int density = G_DEN(cfg);
3523 	unsigned int MBs = ((256 << density) * banks) / (org << width);
3524 
3525 	return MBs << 20;
3526 }
3527 
3528 static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3529 		     unsigned int base_addr, const char *name)
3530 {
3531 	u32 cfg;
3532 
3533 	mc7->adapter = adapter;
3534 	mc7->name = name;
3535 	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3536 	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3537 	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3538 	mc7->width = G_WIDTH(cfg);
3539 }
3540 
3541 static void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3542 {
3543 	u16 devid;
3544 
3545 	mac->adapter = adapter;
3546 	pci_read_config_word(adapter->pdev, 0x2, &devid);
3547 
3548 	if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
3549 		index = 0;
3550 	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3551 	mac->nucast = 1;
3552 
3553 	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3554 		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3555 			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3556 		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3557 				 F_ENRGMII, 0);
3558 	}
3559 }
3560 
3561 static void early_hw_init(struct adapter *adapter,
3562 			  const struct adapter_info *ai)
3563 {
3564 	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3565 
3566 	mi1_init(adapter, ai);
3567 	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
3568 		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3569 	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3570 		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3571 	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3572 	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3573 
3574 	if (adapter->params.rev == 0 || !uses_xaui(adapter))
3575 		val |= F_ENRGMII;
3576 
3577 	/* Enable MAC clocks so we can access the registers */
3578 	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3579 	t3_read_reg(adapter, A_XGM_PORT_CFG);
3580 
3581 	val |= F_CLKDIVRESET_;
3582 	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3583 	t3_read_reg(adapter, A_XGM_PORT_CFG);
3584 	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3585 	t3_read_reg(adapter, A_XGM_PORT_CFG);
3586 }
3587 
3588 /*
3589  * Reset the adapter.
3590  * Older PCIe cards lose their config space during reset, PCI-X
3591  * ones don't.
3592  */
3593 int t3_reset_adapter(struct adapter *adapter)
3594 {
3595 	int i, save_and_restore_pcie =
3596 	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3597 	uint16_t devid = 0;
3598 
3599 	if (save_and_restore_pcie)
3600 		pci_save_state(adapter->pdev);
3601 	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3602 
3603 	/*
3604 	 * Delay. Give Some time to device to reset fully.
3605 	 * XXX The delay time should be modified.
3606 	 */
3607 	for (i = 0; i < 10; i++) {
3608 		msleep(50);
3609 		pci_read_config_word(adapter->pdev, 0x00, &devid);
3610 		if (devid == 0x1425)
3611 			break;
3612 	}
3613 
3614 	if (devid != 0x1425)
3615 		return -1;
3616 
3617 	if (save_and_restore_pcie)
3618 		pci_restore_state(adapter->pdev);
3619 	return 0;
3620 }
3621 
3622 static int init_parity(struct adapter *adap)
3623 {
3624 	int i, err, addr;
3625 
3626 	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3627 		return -EBUSY;
3628 
3629 	for (err = i = 0; !err && i < 16; i++)
3630 		err = clear_sge_ctxt(adap, i, F_EGRESS);
3631 	for (i = 0xfff0; !err && i <= 0xffff; i++)
3632 		err = clear_sge_ctxt(adap, i, F_EGRESS);
3633 	for (i = 0; !err && i < SGE_QSETS; i++)
3634 		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3635 	if (err)
3636 		return err;
3637 
3638 	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3639 	for (i = 0; i < 4; i++)
3640 		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3641 			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3642 				     F_IBQDBGWR | V_IBQDBGQID(i) |
3643 				     V_IBQDBGADDR(addr));
3644 			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3645 					      F_IBQDBGBUSY, 0, 2, 1);
3646 			if (err)
3647 				return err;
3648 		}
3649 	return 0;
3650 }
3651 
3652 /*
3653  * Initialize adapter SW state for the various HW modules, set initial values
3654  * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3655  * interface.
3656  */
3657 int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3658 		    int reset)
3659 {
3660 	int ret;
3661 	unsigned int i, j = -1;
3662 
3663 	get_pci_mode(adapter, &adapter->params.pci);
3664 
3665 	adapter->params.info = ai;
3666 	adapter->params.nports = ai->nports0 + ai->nports1;
3667 	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
3668 	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3669 	/*
3670 	 * We used to only run the "adapter check task" once a second if
3671 	 * we had PHYs which didn't support interrupts (we would check
3672 	 * their link status once a second).  Now we check other conditions
3673 	 * in that routine which could potentially impose a very high
3674 	 * interrupt load on the system.  As such, we now always scan the
3675 	 * adapter state once a second ...
3676 	 */
3677 	adapter->params.linkpoll_period = 10;
3678 	adapter->params.stats_update_period = is_10G(adapter) ?
3679 	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3680 	adapter->params.pci.vpd_cap_addr =
3681 	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3682 	ret = get_vpd_params(adapter, &adapter->params.vpd);
3683 	if (ret < 0)
3684 		return ret;
3685 
3686 	if (reset && t3_reset_adapter(adapter))
3687 		return -1;
3688 
3689 	t3_sge_prep(adapter, &adapter->params.sge);
3690 
3691 	if (adapter->params.vpd.mclk) {
3692 		struct tp_params *p = &adapter->params.tp;
3693 
3694 		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3695 		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3696 		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3697 
3698 		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3699 		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3700 		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3701 		p->cm_size = t3_mc7_size(&adapter->cm);
3702 		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
3703 		p->chan_tx_size = p->pmtx_size / p->nchan;
3704 		p->rx_pg_size = 64 * 1024;
3705 		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3706 		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3707 		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3708 		p->ntimer_qs = p->cm_size >= (128 << 20) ||
3709 		    adapter->params.rev > 0 ? 12 : 6;
3710 	}
3711 
3712 	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3713 				  t3_mc7_size(&adapter->pmtx) &&
3714 				  t3_mc7_size(&adapter->cm);
3715 
3716 	if (is_offload(adapter)) {
3717 		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3718 		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3719 		    DEFAULT_NFILTERS : 0;
3720 		adapter->params.mc5.nroutes = 0;
3721 		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3722 
3723 		init_mtus(adapter->params.mtus);
3724 		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3725 	}
3726 
3727 	early_hw_init(adapter, ai);
3728 	ret = init_parity(adapter);
3729 	if (ret)
3730 		return ret;
3731 
3732 	for_each_port(adapter, i) {
3733 		u8 hw_addr[6];
3734 		const struct port_type_info *pti;
3735 		struct port_info *p = adap2pinfo(adapter, i);
3736 
3737 		while (!adapter->params.vpd.port_type[++j])
3738 			;
3739 
3740 		pti = &port_types[adapter->params.vpd.port_type[j]];
3741 		if (!pti->phy_prep) {
3742 			CH_ALERT(adapter, "Invalid port type index %d\n",
3743 				 adapter->params.vpd.port_type[j]);
3744 			return -EINVAL;
3745 		}
3746 
3747 		p->phy.mdio.dev = adapter->port[i];
3748 		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3749 				    ai->mdio_ops);
3750 		if (ret)
3751 			return ret;
3752 		mac_prep(&p->mac, adapter, j);
3753 
3754 		/*
3755 		 * The VPD EEPROM stores the base Ethernet address for the
3756 		 * card.  A port's address is derived from the base by adding
3757 		 * the port's index to the base's low octet.
3758 		 */
3759 		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3760 		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3761 
3762 		eth_hw_addr_set(adapter->port[i], hw_addr);
3763 		init_link_config(&p->link_config, p->phy.caps);
3764 		p->phy.ops->power_down(&p->phy, 1);
3765 
3766 		/*
3767 		 * If the PHY doesn't support interrupts for link status
3768 		 * changes, schedule a scan of the adapter links at least
3769 		 * once a second.
3770 		 */
3771 		if (!(p->phy.caps & SUPPORTED_IRQ) &&
3772 		    adapter->params.linkpoll_period > 10)
3773 			adapter->params.linkpoll_period = 10;
3774 	}
3775 
3776 	return 0;
3777 }
3778 
3779 void t3_led_ready(struct adapter *adapter)
3780 {
3781 	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3782 			 F_GPIO0_OUT_VAL);
3783 }
3784 
3785 int t3_replay_prep_adapter(struct adapter *adapter)
3786 {
3787 	const struct adapter_info *ai = adapter->params.info;
3788 	unsigned int i, j = -1;
3789 	int ret;
3790 
3791 	early_hw_init(adapter, ai);
3792 	ret = init_parity(adapter);
3793 	if (ret)
3794 		return ret;
3795 
3796 	for_each_port(adapter, i) {
3797 		const struct port_type_info *pti;
3798 		struct port_info *p = adap2pinfo(adapter, i);
3799 
3800 		while (!adapter->params.vpd.port_type[++j])
3801 			;
3802 
3803 		pti = &port_types[adapter->params.vpd.port_type[j]];
3804 		ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
3805 		if (ret)
3806 			return ret;
3807 		p->phy.ops->power_down(&p->phy, 1);
3808 	}
3809 
3810 	return 0;
3811 }
3812 
3813