1 /*
2  * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include "common.h"
33 #include "regs.h"
34 #include "sge_defs.h"
35 #include "firmware_exports.h"
36 
37 static void t3_port_intr_clear(struct adapter *adapter, int idx);
38 
39 /**
40  *	t3_wait_op_done_val - wait until an operation is completed
41  *	@adapter: the adapter performing the operation
42  *	@reg: the register to check for completion
43  *	@mask: a single-bit field within @reg that indicates completion
44  *	@polarity: the value of the field when the operation is completed
45  *	@attempts: number of check iterations
46  *	@delay: delay in usecs between iterations
47  *	@valp: where to store the value of the register at completion time
48  *
49  *	Wait until an operation is completed by checking a bit in a register
50  *	up to @attempts times.  If @valp is not NULL the value of the register
51  *	at the time it indicated completion is stored there.  Returns 0 if the
52  *	operation completes and -EAGAIN otherwise.
53  */
54 
55 int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
56 			int polarity, int attempts, int delay, u32 *valp)
57 {
58 	while (1) {
59 		u32 val = t3_read_reg(adapter, reg);
60 
61 		if (!!(val & mask) == polarity) {
62 			if (valp)
63 				*valp = val;
64 			return 0;
65 		}
66 		if (--attempts == 0)
67 			return -EAGAIN;
68 		if (delay)
69 			udelay(delay);
70 	}
71 }
72 
73 /**
74  *	t3_write_regs - write a bunch of registers
75  *	@adapter: the adapter to program
76  *	@p: an array of register address/register value pairs
77  *	@n: the number of address/value pairs
78  *	@offset: register address offset
79  *
80  *	Takes an array of register address/register value pairs and writes each
81  *	value to the corresponding register.  Register addresses are adjusted
82  *	by the supplied offset.
83  */
84 void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
85 		   int n, unsigned int offset)
86 {
87 	while (n--) {
88 		t3_write_reg(adapter, p->reg_addr + offset, p->val);
89 		p++;
90 	}
91 }
92 
93 /**
94  *	t3_set_reg_field - set a register field to a value
95  *	@adapter: the adapter to program
96  *	@addr: the register address
97  *	@mask: specifies the portion of the register to modify
98  *	@val: the new value for the register field
99  *
100  *	Sets a register field specified by the supplied mask to the
101  *	given value.
102  */
103 void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
104 		      u32 val)
105 {
106 	u32 v = t3_read_reg(adapter, addr) & ~mask;
107 
108 	t3_write_reg(adapter, addr, v | val);
109 	t3_read_reg(adapter, addr);	/* flush */
110 }
111 
112 /**
113  *	t3_read_indirect - read indirectly addressed registers
114  *	@adap: the adapter
115  *	@addr_reg: register holding the indirect address
116  *	@data_reg: register holding the value of the indirect register
117  *	@vals: where the read register values are stored
118  *	@start_idx: index of first indirect register to read
119  *	@nregs: how many indirect registers to read
120  *
121  *	Reads registers that are accessed indirectly through an address/data
122  *	register pair.
123  */
124 static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
125 			     unsigned int data_reg, u32 *vals,
126 			     unsigned int nregs, unsigned int start_idx)
127 {
128 	while (nregs--) {
129 		t3_write_reg(adap, addr_reg, start_idx);
130 		*vals++ = t3_read_reg(adap, data_reg);
131 		start_idx++;
132 	}
133 }
134 
135 /**
136  *	t3_mc7_bd_read - read from MC7 through backdoor accesses
137  *	@mc7: identifies MC7 to read from
138  *	@start: index of first 64-bit word to read
139  *	@n: number of 64-bit words to read
140  *	@buf: where to store the read result
141  *
142  *	Read n 64-bit words from MC7 starting at word start, using backdoor
143  *	accesses.
144  */
145 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
146 		   u64 *buf)
147 {
148 	static const int shift[] = { 0, 0, 16, 24 };
149 	static const int step[] = { 0, 32, 16, 8 };
150 
151 	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
152 	struct adapter *adap = mc7->adapter;
153 
154 	if (start >= size64 || start + n > size64)
155 		return -EINVAL;
156 
157 	start *= (8 << mc7->width);
158 	while (n--) {
159 		int i;
160 		u64 val64 = 0;
161 
162 		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
163 			int attempts = 10;
164 			u32 val;
165 
166 			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
167 			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
168 			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
169 			while ((val & F_BUSY) && attempts--)
170 				val = t3_read_reg(adap,
171 						  mc7->offset + A_MC7_BD_OP);
172 			if (val & F_BUSY)
173 				return -EIO;
174 
175 			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
176 			if (mc7->width == 0) {
177 				val64 = t3_read_reg(adap,
178 						    mc7->offset +
179 						    A_MC7_BD_DATA0);
180 				val64 |= (u64) val << 32;
181 			} else {
182 				if (mc7->width > 1)
183 					val >>= shift[mc7->width];
184 				val64 |= (u64) val << (step[mc7->width] * i);
185 			}
186 			start += 8;
187 		}
188 		*buf++ = val64;
189 	}
190 	return 0;
191 }
192 
193 /*
194  * Initialize MI1.
195  */
196 static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
197 {
198 	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
199 	u32 val = F_PREEN | V_CLKDIV(clkdiv);
200 
201 	t3_write_reg(adap, A_MI1_CFG, val);
202 }
203 
204 #define MDIO_ATTEMPTS 20
205 
206 /*
207  * MI1 read/write operations for clause 22 PHYs.
208  */
209 static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
210 		       u16 reg_addr)
211 {
212 	struct port_info *pi = netdev_priv(dev);
213 	struct adapter *adapter = pi->adapter;
214 	int ret;
215 	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
216 
217 	mutex_lock(&adapter->mdio_lock);
218 	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
219 	t3_write_reg(adapter, A_MI1_ADDR, addr);
220 	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
221 	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
222 	if (!ret)
223 		ret = t3_read_reg(adapter, A_MI1_DATA);
224 	mutex_unlock(&adapter->mdio_lock);
225 	return ret;
226 }
227 
228 static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
229 			u16 reg_addr, u16 val)
230 {
231 	struct port_info *pi = netdev_priv(dev);
232 	struct adapter *adapter = pi->adapter;
233 	int ret;
234 	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
235 
236 	mutex_lock(&adapter->mdio_lock);
237 	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
238 	t3_write_reg(adapter, A_MI1_ADDR, addr);
239 	t3_write_reg(adapter, A_MI1_DATA, val);
240 	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
241 	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
242 	mutex_unlock(&adapter->mdio_lock);
243 	return ret;
244 }
245 
246 static const struct mdio_ops mi1_mdio_ops = {
247 	.read = t3_mi1_read,
248 	.write = t3_mi1_write,
249 	.mode_support = MDIO_SUPPORTS_C22
250 };
251 
252 /*
253  * Performs the address cycle for clause 45 PHYs.
254  * Must be called with the MDIO_LOCK held.
255  */
256 static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
257 		       int reg_addr)
258 {
259 	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
260 
261 	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
262 	t3_write_reg(adapter, A_MI1_ADDR, addr);
263 	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
264 	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
265 	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
266 			       MDIO_ATTEMPTS, 10);
267 }
268 
269 /*
270  * MI1 read/write operations for indirect-addressed PHYs.
271  */
272 static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
273 			u16 reg_addr)
274 {
275 	struct port_info *pi = netdev_priv(dev);
276 	struct adapter *adapter = pi->adapter;
277 	int ret;
278 
279 	mutex_lock(&adapter->mdio_lock);
280 	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
281 	if (!ret) {
282 		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
283 		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
284 				      MDIO_ATTEMPTS, 10);
285 		if (!ret)
286 			ret = t3_read_reg(adapter, A_MI1_DATA);
287 	}
288 	mutex_unlock(&adapter->mdio_lock);
289 	return ret;
290 }
291 
292 static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
293 			 u16 reg_addr, u16 val)
294 {
295 	struct port_info *pi = netdev_priv(dev);
296 	struct adapter *adapter = pi->adapter;
297 	int ret;
298 
299 	mutex_lock(&adapter->mdio_lock);
300 	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
301 	if (!ret) {
302 		t3_write_reg(adapter, A_MI1_DATA, val);
303 		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
304 		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
305 				      MDIO_ATTEMPTS, 10);
306 	}
307 	mutex_unlock(&adapter->mdio_lock);
308 	return ret;
309 }
310 
311 static const struct mdio_ops mi1_mdio_ext_ops = {
312 	.read = mi1_ext_read,
313 	.write = mi1_ext_write,
314 	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
315 };
316 
317 /**
318  *	t3_mdio_change_bits - modify the value of a PHY register
319  *	@phy: the PHY to operate on
320  *	@mmd: the device address
321  *	@reg: the register address
322  *	@clear: what part of the register value to mask off
323  *	@set: what part of the register value to set
324  *
325  *	Changes the value of a PHY register by applying a mask to its current
326  *	value and ORing the result with a new value.
327  */
328 int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
329 			unsigned int set)
330 {
331 	int ret;
332 	unsigned int val;
333 
334 	ret = t3_mdio_read(phy, mmd, reg, &val);
335 	if (!ret) {
336 		val &= ~clear;
337 		ret = t3_mdio_write(phy, mmd, reg, val | set);
338 	}
339 	return ret;
340 }
341 
342 /**
343  *	t3_phy_reset - reset a PHY block
344  *	@phy: the PHY to operate on
345  *	@mmd: the device address of the PHY block to reset
346  *	@wait: how long to wait for the reset to complete in 1ms increments
347  *
348  *	Resets a PHY block and optionally waits for the reset to complete.
349  *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
350  *	for 10G PHYs.
351  */
352 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
353 {
354 	int err;
355 	unsigned int ctl;
356 
357 	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
358 				  MDIO_CTRL1_RESET);
359 	if (err || !wait)
360 		return err;
361 
362 	do {
363 		err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
364 		if (err)
365 			return err;
366 		ctl &= MDIO_CTRL1_RESET;
367 		if (ctl)
368 			msleep(1);
369 	} while (ctl && --wait);
370 
371 	return ctl ? -1 : 0;
372 }
373 
374 /**
375  *	t3_phy_advertise - set the PHY advertisement registers for autoneg
376  *	@phy: the PHY to operate on
377  *	@advert: bitmap of capabilities the PHY should advertise
378  *
379  *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
380  *	requested capabilities.
381  */
382 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
383 {
384 	int err;
385 	unsigned int val = 0;
386 
387 	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
388 	if (err)
389 		return err;
390 
391 	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
392 	if (advert & ADVERTISED_1000baseT_Half)
393 		val |= ADVERTISE_1000HALF;
394 	if (advert & ADVERTISED_1000baseT_Full)
395 		val |= ADVERTISE_1000FULL;
396 
397 	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
398 	if (err)
399 		return err;
400 
	val = 1;		/* IEEE 802.3 CSMA selector (ADVERTISE_CSMA) */
402 	if (advert & ADVERTISED_10baseT_Half)
403 		val |= ADVERTISE_10HALF;
404 	if (advert & ADVERTISED_10baseT_Full)
405 		val |= ADVERTISE_10FULL;
406 	if (advert & ADVERTISED_100baseT_Half)
407 		val |= ADVERTISE_100HALF;
408 	if (advert & ADVERTISED_100baseT_Full)
409 		val |= ADVERTISE_100FULL;
410 	if (advert & ADVERTISED_Pause)
411 		val |= ADVERTISE_PAUSE_CAP;
412 	if (advert & ADVERTISED_Asym_Pause)
413 		val |= ADVERTISE_PAUSE_ASYM;
414 	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
415 }
416 
417 /**
418  *	t3_phy_advertise_fiber - set fiber PHY advertisement register
419  *	@phy: the PHY to operate on
420  *	@advert: bitmap of capabilities the PHY should advertise
421  *
422  *	Sets a fiber PHY's advertisement register to advertise the
423  *	requested capabilities.
424  */
425 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
426 {
427 	unsigned int val = 0;
428 
429 	if (advert & ADVERTISED_1000baseT_Half)
430 		val |= ADVERTISE_1000XHALF;
431 	if (advert & ADVERTISED_1000baseT_Full)
432 		val |= ADVERTISE_1000XFULL;
433 	if (advert & ADVERTISED_Pause)
434 		val |= ADVERTISE_1000XPAUSE;
435 	if (advert & ADVERTISED_Asym_Pause)
436 		val |= ADVERTISE_1000XPSE_ASYM;
437 	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
438 }
439 
440 /**
441  *	t3_set_phy_speed_duplex - force PHY speed and duplex
442  *	@phy: the PHY to operate on
443  *	@speed: requested PHY speed
444  *	@duplex: requested PHY duplex
445  *
446  *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
447  *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
448  */
449 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
450 {
451 	int err;
452 	unsigned int ctl;
453 
454 	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
455 	if (err)
456 		return err;
457 
458 	if (speed >= 0) {
459 		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
460 		if (speed == SPEED_100)
461 			ctl |= BMCR_SPEED100;
462 		else if (speed == SPEED_1000)
463 			ctl |= BMCR_SPEED1000;
464 	}
465 	if (duplex >= 0) {
466 		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
467 		if (duplex == DUPLEX_FULL)
468 			ctl |= BMCR_FULLDPLX;
469 	}
470 	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
471 		ctl |= BMCR_ANENABLE;
472 	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
473 }
474 
475 int t3_phy_lasi_intr_enable(struct cphy *phy)
476 {
477 	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
478 			     MDIO_PMA_LASI_LSALARM);
479 }
480 
481 int t3_phy_lasi_intr_disable(struct cphy *phy)
482 {
483 	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
484 }
485 
486 int t3_phy_lasi_intr_clear(struct cphy *phy)
487 {
488 	u32 val;
489 
490 	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
491 }
492 
493 int t3_phy_lasi_intr_handler(struct cphy *phy)
494 {
495 	unsigned int status;
496 	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
497 			       &status);
498 
499 	if (err)
500 		return err;
501 	return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
502 }
503 
504 static const struct adapter_info t3_adap_info[] = {
505 	{1, 1, 0,
506 	 F_GPIO2_OEN | F_GPIO4_OEN |
507 	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
508 	 &mi1_mdio_ops, "Chelsio PE9000"},
509 	{1, 1, 0,
510 	 F_GPIO2_OEN | F_GPIO4_OEN |
511 	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
512 	 &mi1_mdio_ops, "Chelsio T302"},
513 	{1, 0, 0,
514 	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
515 	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
516 	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
517 	 &mi1_mdio_ext_ops, "Chelsio T310"},
518 	{1, 1, 0,
519 	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
520 	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
521 	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
522 	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
523 	 &mi1_mdio_ext_ops, "Chelsio T320"},
524 	{},
525 	{},
526 	{1, 0, 0,
527 	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
528 	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
529 	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
530 	 &mi1_mdio_ext_ops, "Chelsio T310" },
531 	{1, 0, 0,
532 	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
533 	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
534 	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
535 	 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
536 };
537 
538 /*
539  * Return the adapter_info structure with a given index.  Out-of-range indices
540  * return NULL.
541  */
542 const struct adapter_info *t3_get_adapter_info(unsigned int id)
543 {
544 	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
545 }
546 
547 struct port_type_info {
548 	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
549 			int phy_addr, const struct mdio_ops *ops);
550 };
551 
552 static const struct port_type_info port_types[] = {
553 	{ NULL },
554 	{ t3_ael1002_phy_prep },
555 	{ t3_vsc8211_phy_prep },
	{ NULL },
557 	{ t3_xaui_direct_phy_prep },
558 	{ t3_ael2005_phy_prep },
559 	{ t3_qt2045_phy_prep },
560 	{ t3_ael1006_phy_prep },
561 	{ NULL },
562 	{ t3_aq100x_phy_prep },
563 	{ t3_ael2020_phy_prep },
564 };
565 
566 #define VPD_ENTRY(name, len) \
567 	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
568 
569 /*
570  * Partial EEPROM Vital Product Data structure.  Includes only the ID and
571  * VPD-R sections.
572  */
573 struct t3_vpd {
574 	u8 id_tag;
575 	u8 id_len[2];
576 	u8 id_data[16];
577 	u8 vpdr_tag;
578 	u8 vpdr_len[2];
579 	VPD_ENTRY(pn, 16);	/* part number */
580 	VPD_ENTRY(ec, 16);	/* EC level */
581 	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
582 	VPD_ENTRY(na, 12);	/* MAC address base */
583 	VPD_ENTRY(cclk, 6);	/* core clock */
584 	VPD_ENTRY(mclk, 6);	/* mem clock */
585 	VPD_ENTRY(uclk, 6);	/* uP clk */
586 	VPD_ENTRY(mdc, 6);	/* MDIO clk */
587 	VPD_ENTRY(mt, 2);	/* mem timing */
588 	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
589 	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
590 	VPD_ENTRY(port0, 2);	/* PHY0 complex */
591 	VPD_ENTRY(port1, 2);	/* PHY1 complex */
592 	VPD_ENTRY(port2, 2);	/* PHY2 complex */
593 	VPD_ENTRY(port3, 2);	/* PHY3 complex */
594 	VPD_ENTRY(rv, 1);	/* csum */
595 	u32 pad;		/* for multiple-of-4 sizing and alignment */
596 };
597 
598 #define EEPROM_MAX_POLL   40
599 #define EEPROM_STAT_ADDR  0x4000
600 #define VPD_BASE          0xc00
601 
602 /**
603  *	t3_seeprom_read - read a VPD EEPROM location
604  *	@adapter: adapter to read
605  *	@addr: EEPROM address
606  *	@data: where to store the read data
607  *
608  *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
609  *	VPD ROM capability.  A zero is written to the flag bit when the
610  *	address is written to the control register.  The hardware device will
611  *	set the flag to 1 when 4 bytes have been read into the data register.
612  */
613 int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
614 {
615 	u16 val;
616 	int attempts = EEPROM_MAX_POLL;
617 	u32 v;
618 	unsigned int base = adapter->params.pci.vpd_cap_addr;
619 
620 	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
621 		return -EINVAL;
622 
623 	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
624 	do {
625 		udelay(10);
626 		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
627 	} while (!(val & PCI_VPD_ADDR_F) && --attempts);
628 
629 	if (!(val & PCI_VPD_ADDR_F)) {
630 		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
631 		return -EIO;
632 	}
633 	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
634 	*data = cpu_to_le32(v);
635 	return 0;
636 }
637 
638 /**
639  *	t3_seeprom_write - write a VPD EEPROM location
640  *	@adapter: adapter to write
641  *	@addr: EEPROM address
642  *	@data: value to write
643  *
644  *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
645  *	VPD ROM capability.
646  */
647 int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
648 {
649 	u16 val;
650 	int attempts = EEPROM_MAX_POLL;
651 	unsigned int base = adapter->params.pci.vpd_cap_addr;
652 
653 	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
654 		return -EINVAL;
655 
656 	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
657 			       le32_to_cpu(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
659 			      addr | PCI_VPD_ADDR_F);
660 	do {
661 		msleep(1);
662 		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
663 	} while ((val & PCI_VPD_ADDR_F) && --attempts);
664 
665 	if (val & PCI_VPD_ADDR_F) {
666 		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
667 		return -EIO;
668 	}
669 	return 0;
670 }
671 
672 /**
673  *	t3_seeprom_wp - enable/disable EEPROM write protection
674  *	@adapter: the adapter
675  *	@enable: 1 to enable write protection, 0 to disable it
676  *
677  *	Enables or disables write protection on the serial EEPROM.
678  */
679 int t3_seeprom_wp(struct adapter *adapter, int enable)
680 {
681 	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
682 }
683 
684 /**
685  *	get_vpd_params - read VPD parameters from VPD EEPROM
686  *	@adapter: adapter to read
687  *	@p: where to store the parameters
688  *
689  *	Reads card parameters stored in VPD EEPROM.
690  */
691 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
692 {
693 	int i, addr, ret;
694 	struct t3_vpd vpd;
695 
696 	/*
697 	 * Card information is normally at VPD_BASE but some early cards had
698 	 * it at 0.
699 	 */
700 	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
701 	if (ret)
702 		return ret;
703 	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
704 
705 	for (i = 0; i < sizeof(vpd); i += 4) {
706 		ret = t3_seeprom_read(adapter, addr + i,
707 				      (__le32 *)((u8 *)&vpd + i));
708 		if (ret)
709 			return ret;
710 	}
711 
712 	ret = kstrtouint(vpd.cclk_data, 10, &p->cclk);
713 	if (ret)
714 		return ret;
715 	ret = kstrtouint(vpd.mclk_data, 10, &p->mclk);
716 	if (ret)
717 		return ret;
718 	ret = kstrtouint(vpd.uclk_data, 10, &p->uclk);
719 	if (ret)
720 		return ret;
721 	ret = kstrtouint(vpd.mdc_data, 10, &p->mdc);
722 	if (ret)
723 		return ret;
724 	ret = kstrtouint(vpd.mt_data, 10, &p->mem_timing);
725 	if (ret)
726 		return ret;
727 	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
728 
	/* Old EEPROMs didn't have port information */
730 	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
731 		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
732 		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
733 	} else {
734 		p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
735 		p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
736 		ret = kstrtou16(vpd.xaui0cfg_data, 16, &p->xauicfg[0]);
737 		if (ret)
738 			return ret;
739 		ret = kstrtou16(vpd.xaui1cfg_data, 16, &p->xauicfg[1]);
740 		if (ret)
741 			return ret;
742 	}
743 
744 	ret = hex2bin(p->eth_base, vpd.na_data, 6);
745 	if (ret < 0)
746 		return -EINVAL;
747 	return 0;
748 }
749 
750 /* serial flash and firmware constants */
751 enum {
752 	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
753 	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
754 	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */
755 
756 	/* flash command opcodes */
757 	SF_PROG_PAGE = 2,	/* program page */
758 	SF_WR_DISABLE = 4,	/* disable writes */
759 	SF_RD_STATUS = 5,	/* read status register */
760 	SF_WR_ENABLE = 6,	/* enable writes */
761 	SF_RD_DATA_FAST = 0xb,	/* read flash */
762 	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
763 
764 	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
765 	FW_VERS_ADDR = 0x7fffc,    /* flash address holding FW version */
766 	FW_MIN_SIZE = 8            /* at least version and csum */
767 };
768 
769 /**
770  *	sf1_read - read data from the serial flash
771  *	@adapter: the adapter
772  *	@byte_cnt: number of bytes to read
773  *	@cont: whether another operation will be chained
774  *	@valp: where to store the read data
775  *
776  *	Reads up to 4 bytes of data from the serial flash.  The location of
777  *	the read needs to be specified prior to calling this by issuing the
778  *	appropriate commands to the serial flash.
779  */
780 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
781 		    u32 *valp)
782 {
783 	int ret;
784 
785 	if (!byte_cnt || byte_cnt > 4)
786 		return -EINVAL;
787 	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
788 		return -EBUSY;
789 	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
790 	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
791 	if (!ret)
792 		*valp = t3_read_reg(adapter, A_SF_DATA);
793 	return ret;
794 }
795 
796 /**
797  *	sf1_write - write data to the serial flash
798  *	@adapter: the adapter
799  *	@byte_cnt: number of bytes to write
800  *	@cont: whether another operation will be chained
801  *	@val: value to write
802  *
803  *	Writes up to 4 bytes of data to the serial flash.  The location of
804  *	the write needs to be specified prior to calling this by issuing the
805  *	appropriate commands to the serial flash.
806  */
807 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
808 		     u32 val)
809 {
810 	if (!byte_cnt || byte_cnt > 4)
811 		return -EINVAL;
812 	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
813 		return -EBUSY;
814 	t3_write_reg(adapter, A_SF_DATA, val);
815 	t3_write_reg(adapter, A_SF_OP,
816 		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
817 	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
818 }
819 
820 /**
821  *	flash_wait_op - wait for a flash operation to complete
822  *	@adapter: the adapter
823  *	@attempts: max number of polls of the status register
824  *	@delay: delay between polls in ms
825  *
826  *	Wait for a flash operation to complete by polling the status register.
827  */
828 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
829 {
830 	int ret;
831 	u32 status;
832 
833 	while (1) {
834 		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
835 		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
836 			return ret;
		if (!(status & 1))	/* bit 0 is write-in-progress */
838 			return 0;
839 		if (--attempts == 0)
840 			return -EAGAIN;
841 		if (delay)
842 			msleep(delay);
843 	}
844 }
845 
846 /**
847  *	t3_read_flash - read words from serial flash
848  *	@adapter: the adapter
849  *	@addr: the start address for the read
850  *	@nwords: how many 32-bit words to read
851  *	@data: where to store the read data
852  *	@byte_oriented: whether to store data as bytes or as words
853  *
854  *	Read the specified number of 32-bit words from the serial flash.
855  *	If @byte_oriented is set the read data is stored as a byte array
856  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
857  *	natural endianness.
858  */
859 static int t3_read_flash(struct adapter *adapter, unsigned int addr,
860 			 unsigned int nwords, u32 *data, int byte_oriented)
861 {
862 	int ret;
863 
864 	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
865 		return -EINVAL;
866 
	addr = swab32(addr) | SF_RD_DATA_FAST;	/* opcode in low byte, 24-bit address byte-swapped */
868 
869 	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
870 	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
871 		return ret;
872 
873 	for (; nwords; nwords--, data++) {
874 		ret = sf1_read(adapter, 4, nwords > 1, data);
875 		if (ret)
876 			return ret;
877 		if (byte_oriented)
878 			*data = htonl(*data);
879 	}
880 	return 0;
881 }
882 
883 /**
884  *	t3_write_flash - write up to a page of data to the serial flash
885  *	@adapter: the adapter
886  *	@addr: the start address to write
887  *	@n: length of data to write
888  *	@data: the data to write
889  *
890  *	Writes up to a page of data (256 bytes) to the serial flash starting
891  *	at the given address.
892  */
893 static int t3_write_flash(struct adapter *adapter, unsigned int addr,
894 			  unsigned int n, const u8 *data)
895 {
896 	int ret;
897 	u32 buf[64];
898 	unsigned int i, c, left, val, offset = addr & 0xff;
899 
900 	if (addr + n > SF_SIZE || offset + n > 256)
901 		return -EINVAL;
902 
	val = swab32(addr) | SF_PROG_PAGE;	/* opcode in low byte, 24-bit address byte-swapped */
904 
905 	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
906 	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
907 		return ret;
908 
909 	for (left = n; left; left -= c) {
910 		c = min(left, 4U);
911 		for (val = 0, i = 0; i < c; ++i)
912 			val = (val << 8) + *data++;
913 
914 		ret = sf1_write(adapter, c, c != left, val);
915 		if (ret)
916 			return ret;
917 	}
918 	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
919 		return ret;
920 
921 	/* Read the page to verify the write succeeded */
922 	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
923 	if (ret)
924 		return ret;
925 
926 	if (memcmp(data - n, (u8 *) buf + offset, n))
927 		return -EIO;
928 	return 0;
929 }
930 
931 /**
932  *	t3_get_tp_version - read the tp sram version
933  *	@adapter: the adapter
934  *	@vers: where to place the version
935  *
936  *	Reads the protocol sram version from sram.
937  */
938 int t3_get_tp_version(struct adapter *adapter, u32 *vers)
939 {
940 	int ret;
941 
942 	/* Get version loaded in SRAM */
943 	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
944 	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
945 			      1, 1, 5, 1);
946 	if (ret)
947 		return ret;
948 
949 	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
950 
951 	return 0;
952 }
953 
954 /**
955  *	t3_check_tpsram_version - read the tp sram version
956  *	@adapter: the adapter
957  *
958  *	Reads the protocol sram version from flash.
959  */
960 int t3_check_tpsram_version(struct adapter *adapter)
961 {
962 	int ret;
963 	u32 vers;
964 	unsigned int major, minor;
965 
966 	if (adapter->params.rev == T3_REV_A)
967 		return 0;

	ret = t3_get_tp_version(adapter, &vers);
971 	if (ret)
972 		return ret;
973 
974 	major = G_TP_VERSION_MAJOR(vers);
975 	minor = G_TP_VERSION_MINOR(vers);
976 
	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;

	CH_ERR(adapter, "found wrong TP version (%u.%u), "
	       "driver compiled for version %d.%d\n", major, minor,
	       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	return -EINVAL;
985 }
986 
987 /**
988  *	t3_check_tpsram - check if provided protocol SRAM
989  *			  is compatible with this driver
990  *	@adapter: the adapter
991  *	@tp_sram: the firmware image to write
992  *	@size: image size
993  *
994  *	Checks if an adapter's tp sram is compatible with the driver.
995  *	Returns 0 if the versions are compatible, a negative error otherwise.
996  */
997 int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
998 		    unsigned int size)
999 {
1000 	u32 csum;
1001 	unsigned int i;
1002 	const __be32 *p = (const __be32 *)tp_sram;
1003 
1004 	/* Verify checksum */
1005 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1006 		csum += ntohl(p[i]);
1007 	if (csum != 0xffffffff) {
1008 		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
1009 		       csum);
1010 		return -EINVAL;
1011 	}
1012 
1013 	return 0;
1014 }
1015 
1016 enum fw_version_type {
1017 	FW_VERSION_N3,
1018 	FW_VERSION_T3
1019 };
1020 
1021 /**
1022  *	t3_get_fw_version - read the firmware version
1023  *	@adapter: the adapter
1024  *	@vers: where to place the version
1025  *
1026  *	Reads the FW version from flash.
1027  */
1028 int t3_get_fw_version(struct adapter *adapter, u32 *vers)
1029 {
1030 	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1031 }
1032 
1033 /**
1034  *	t3_check_fw_version - check if the FW is compatible with this driver
1035  *	@adapter: the adapter
1036  *
1037  *	Checks if an adapter's FW is compatible with the driver.  Returns 0
1038  *	if the versions are compatible, a negative error otherwise.
1039  */
1040 int t3_check_fw_version(struct adapter *adapter)
1041 {
1042 	int ret;
1043 	u32 vers;
1044 	unsigned int type, major, minor;
1045 
1046 	ret = t3_get_fw_version(adapter, &vers);
1047 	if (ret)
1048 		return ret;
1049 
1050 	type = G_FW_VERSION_TYPE(vers);
1051 	major = G_FW_VERSION_MAJOR(vers);
1052 	minor = G_FW_VERSION_MINOR(vers);
1053 
1054 	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1055 	    minor == FW_VERSION_MINOR)
1056 		return 0;
	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
		CH_WARN(adapter, "found old FW minor version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else {
		CH_WARN(adapter, "found newer FW version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
1067 	return -EINVAL;
1068 }
1069 
1070 /**
1071  *	t3_flash_erase_sectors - erase a range of flash sectors
1072  *	@adapter: the adapter
1073  *	@start: the first sector to erase
1074  *	@end: the last sector to erase
1075  *
1076  *	Erases the sectors in the given range.
1077  */
1078 static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1079 {
1080 	while (start <= end) {
1081 		int ret;
1082 
1083 		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1084 		    (ret = sf1_write(adapter, 4, 0,
1085 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
1086 		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
1087 			return ret;
1088 		start++;
1089 	}
1090 	return 0;
1091 }
1092 
1093 /**
1094  *	t3_load_fw - download firmware
1095  *	@adapter: the adapter
1096  *	@fw_data: the firmware image to write
1097  *	@size: image size
1098  *
1099  *	Write the supplied firmware image to the card's serial flash.
1100  *	The FW image has the following sections: @size - 8 bytes of code and
1101  *	data, followed by 4 bytes of FW version, followed by the 32-bit
1102  *	1's complement checksum of the whole image.
1103  */
1104 int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1105 {
1106 	u32 csum;
1107 	unsigned int i;
1108 	const __be32 *p = (const __be32 *)fw_data;
1109 	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1110 
1111 	if ((size & 3) || size < FW_MIN_SIZE)
1112 		return -EINVAL;
1113 	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1114 		return -EFBIG;
1115 
1116 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1117 		csum += ntohl(p[i]);
1118 	if (csum != 0xffffffff) {
1119 		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1120 		       csum);
1121 		return -EINVAL;
1122 	}
1123 
1124 	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1125 	if (ret)
1126 		goto out;
1127 
1128 	size -= 8;		/* trim off version and checksum */
1129 	for (addr = FW_FLASH_BOOT_ADDR; size;) {
1130 		unsigned int chunk_size = min(size, 256U);
1131 
1132 		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1133 		if (ret)
1134 			goto out;
1135 
1136 		addr += chunk_size;
1137 		fw_data += chunk_size;
1138 		size -= chunk_size;
1139 	}
1140 
1141 	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1142 out:
1143 	if (ret)
1144 		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1145 	return ret;
1146 }
1147 
1148 #define CIM_CTL_BASE 0x2000
1149 
1150 /**
1151  *      t3_cim_ctl_blk_read - read a block from CIM control region
1152  *
1153  *      @adap: the adapter
1154  *      @addr: the start address within the CIM control region
1155  *      @n: number of words to read
1156  *      @valp: where to store the result
1157  *
1158  *      Reads a block of 4-byte words from the CIM control region.
1159  */
1160 int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1161 			unsigned int n, unsigned int *valp)
1162 {
1163 	int ret = 0;
1164 
1165 	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1166 		return -EBUSY;
1167 
1168 	for ( ; !ret && n--; addr += 4) {
1169 		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1170 		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1171 				      0, 5, 2);
1172 		if (!ret)
1173 			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1174 	}
1175 	return ret;
1176 }
1177 
1178 static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
1179 			       u32 *rx_hash_high, u32 *rx_hash_low)
1180 {
1181 	/* stop Rx unicast traffic */
1182 	t3_mac_disable_exact_filters(mac);
1183 
1184 	/* stop broadcast, multicast, promiscuous mode traffic */
1185 	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
1186 	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1187 			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1188 			 F_DISBCAST);
1189 
1190 	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
1191 	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);
1192 
1193 	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
1194 	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);
1195 
1196 	/* Leave time to drain max RX fifo */
1197 	msleep(1);
1198 }
1199 
1200 static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
1201 			       u32 rx_hash_high, u32 rx_hash_low)
1202 {
1203 	t3_mac_enable_exact_filters(mac);
1204 	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1205 			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1206 			 rx_cfg);
1207 	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
1208 	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
1209 }
1210 
1211 /**
1212  *	t3_link_changed - handle interface link changes
1213  *	@adapter: the adapter
1214  *	@port_id: the port index that changed link state
1215  *
1216  *	Called when a port's link settings change to propagate the new values
1217  *	to the associated PHY and MAC.  After performing the common tasks it
1218  *	invokes an OS-specific handler.
1219  */
1220 void t3_link_changed(struct adapter *adapter, int port_id)
1221 {
1222 	int link_ok, speed, duplex, fc;
1223 	struct port_info *pi = adap2pinfo(adapter, port_id);
1224 	struct cphy *phy = &pi->phy;
1225 	struct cmac *mac = &pi->mac;
1226 	struct link_config *lc = &pi->link_config;
1227 
1228 	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1229 
1230 	if (!lc->link_ok && link_ok) {
1231 		u32 rx_cfg, rx_hash_high, rx_hash_low;
1232 		u32 status;
1233 
1234 		t3_xgm_intr_enable(adapter, port_id);
1235 		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1236 		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1237 		t3_mac_enable(mac, MAC_DIRECTION_RX);
1238 
1239 		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1240 		if (status & F_LINKFAULTCHANGE) {
1241 			mac->stats.link_faults++;
1242 			pi->link_fault = 1;
1243 		}
1244 		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1245 	}
1246 
1247 	if (lc->requested_fc & PAUSE_AUTONEG)
1248 		fc &= lc->requested_fc;
1249 	else
1250 		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1251 
1252 	if (link_ok == lc->link_ok && speed == lc->speed &&
1253 	    duplex == lc->duplex && fc == lc->fc)
1254 		return;                            /* nothing changed */
1255 
1256 	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1257 	    uses_xaui(adapter)) {
1258 		if (link_ok)
1259 			t3b_pcs_reset(mac);
1260 		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1261 			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
1262 	}
1263 	lc->link_ok = link_ok;
1264 	lc->speed = speed < 0 ? SPEED_INVALID : speed;
1265 	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1266 
1267 	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1268 		/* Set MAC speed, duplex, and flow control to match PHY. */
1269 		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1270 		lc->fc = fc;
1271 	}
1272 
1273 	t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
1274 			   speed, duplex, fc);
1275 }
1276 
1277 void t3_link_fault(struct adapter *adapter, int port_id)
1278 {
1279 	struct port_info *pi = adap2pinfo(adapter, port_id);
1280 	struct cmac *mac = &pi->mac;
1281 	struct cphy *phy = &pi->phy;
1282 	struct link_config *lc = &pi->link_config;
1283 	int link_ok, speed, duplex, fc, link_fault;
1284 	u32 rx_cfg, rx_hash_high, rx_hash_low;
1285 
1286 	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1287 
1288 	if (adapter->params.rev > 0 && uses_xaui(adapter))
1289 		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
1290 
1291 	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1292 	t3_mac_enable(mac, MAC_DIRECTION_RX);
1293 
1294 	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1295 
1296 	link_fault = t3_read_reg(adapter,
1297 				 A_XGM_INT_STATUS + mac->offset);
1298 	link_fault &= F_LINKFAULTCHANGE;
1299 
1300 	link_ok = lc->link_ok;
1301 	speed = lc->speed;
1302 	duplex = lc->duplex;
1303 	fc = lc->fc;
1304 
1305 	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1306 
1307 	if (link_fault) {
1308 		lc->link_ok = 0;
1309 		lc->speed = SPEED_INVALID;
1310 		lc->duplex = DUPLEX_INVALID;
1311 
1312 		t3_os_link_fault(adapter, port_id, 0);
1313 
		/* Account link faults only when the PHY reports link up */
1315 		if (link_ok)
1316 			mac->stats.link_faults++;
1317 	} else {
1318 		if (link_ok)
1319 			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1320 				     F_TXACTENABLE | F_RXEN);
1321 
1322 		pi->link_fault = 0;
1323 		lc->link_ok = (unsigned char)link_ok;
1324 		lc->speed = speed < 0 ? SPEED_INVALID : speed;
1325 		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1326 		t3_os_link_fault(adapter, port_id, link_ok);
1327 	}
1328 }
1329 
1330 /**
1331  *	t3_link_start - apply link configuration to MAC/PHY
1332  *	@phy: the PHY to setup
1333  *	@mac: the MAC to setup
1334  *	@lc: the requested link configuration
1335  *
1336  *	Set up a port's MAC and PHY according to a desired link configuration.
1337  *	- If the PHY can auto-negotiate first decide what to advertise, then
1338  *	  enable/disable auto-negotiation as desired, and reset.
1339  *	- If the PHY does not auto-negotiate just reset it.
1340  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1341  *	  otherwise do it later based on the outcome of auto-negotiation.
1342  */
1343 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1344 {
1345 	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1346 
1347 	lc->link_ok = 0;
1348 	if (lc->supported & SUPPORTED_Autoneg) {
1349 		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1350 		if (fc) {
1351 			lc->advertising |= ADVERTISED_Asym_Pause;
1352 			if (fc & PAUSE_RX)
1353 				lc->advertising |= ADVERTISED_Pause;
1354 		}
1355 		phy->ops->advertise(phy, lc->advertising);
1356 
1357 		if (lc->autoneg == AUTONEG_DISABLE) {
1358 			lc->speed = lc->requested_speed;
1359 			lc->duplex = lc->requested_duplex;
1360 			lc->fc = (unsigned char)fc;
1361 			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1362 						   fc);
1363 			/* Also disables autoneg */
1364 			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1365 		} else
1366 			phy->ops->autoneg_enable(phy);
1367 	} else {
1368 		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1369 		lc->fc = (unsigned char)fc;
1370 		phy->ops->reset(phy, 0);
1371 	}
1372 	return 0;
1373 }
1374 
1375 /**
1376  *	t3_set_vlan_accel - control HW VLAN extraction
1377  *	@adapter: the adapter
1378  *	@ports: bitmap of adapter ports to operate on
1379  *	@on: enable (1) or disable (0) HW VLAN extraction
1380  *
1381  *	Enables or disables HW extraction of VLAN tags for the given port.
1382  */
1383 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1384 {
1385 	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1386 			 ports << S_VLANEXTRACTIONENABLE,
1387 			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1388 }
1389 
1390 struct intr_info {
1391 	unsigned int mask;	/* bits to check in interrupt status */
1392 	const char *msg;	/* message to print or NULL */
1393 	short stat_idx;		/* stat counter to increment or -1 */
1394 	unsigned short fatal;	/* whether the condition reported is fatal */
1395 };
1396 
1397 /**
1398  *	t3_handle_intr_status - table driven interrupt handler
1399  *	@adapter: the adapter that generated the interrupt
1400  *	@reg: the interrupt status register to process
1401  *	@mask: a mask to apply to the interrupt status
1402  *	@acts: table of interrupt actions
1403  *	@stats: statistics counters tracking interrupt occurrences
1404  *
1405  *	A table driven interrupt handler that applies a set of masks to an
1406  *	interrupt status word and performs the corresponding actions if the
1407  *	interrupts described by the mask have occurred.  The actions include
1408  *	optionally printing a warning or alert message, and optionally
1409  *	incrementing a stat counter.  The table is terminated by an entry
1410  *	specifying mask 0.  Returns the number of fatal interrupt conditions.
1411  */
1412 static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1413 				 unsigned int mask,
1414 				 const struct intr_info *acts,
1415 				 unsigned long *stats)
1416 {
1417 	int fatal = 0;
1418 	unsigned int status = t3_read_reg(adapter, reg) & mask;
1419 
1420 	for (; acts->mask; ++acts) {
1421 		if (!(status & acts->mask))
1422 			continue;
1423 		if (acts->fatal) {
1424 			fatal++;
1425 			CH_ALERT(adapter, "%s (0x%x)\n",
1426 				 acts->msg, status & acts->mask);
1427 			status &= ~acts->mask;
1428 		} else if (acts->msg)
1429 			CH_WARN(adapter, "%s (0x%x)\n",
1430 				acts->msg, status & acts->mask);
1431 		if (acts->stat_idx >= 0)
1432 			stats[acts->stat_idx]++;
1433 	}
1434 	if (status)		/* clear processed interrupts */
1435 		t3_write_reg(adapter, reg, status);
1436 	return fatal;
1437 }
1438 
1439 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1440 		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1441 		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1442 		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1443 		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1444 		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1445 		       F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
1446 		       F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
1447 		       F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
1448 		       F_LOPIODRBDROPERR)
1449 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1450 		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1451 		       F_NFASRCHFAIL)
1452 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1453 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1454 		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1455 		       F_TXFIFO_UNDERRUN)
1456 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1457 			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1458 			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1459 			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1460 			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1461 			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1462 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1463 			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1464 			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1465 			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1466 			F_TXPARERR | V_BISTERR(M_BISTERR))
1467 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1468 			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1469 			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1470 #define ULPTX_INTR_MASK 0xfc
1471 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1472 			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1473 			 F_ZERO_SWITCH_ERROR)
1474 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1475 		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1476 		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1478 		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1479 		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1480 		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1481 		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1482 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1483 			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1484 			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1485 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1486 			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1487 			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1488 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1489 		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1490 		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1491 		       V_MCAPARERRENB(M_MCAPARERRENB))
1492 #define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
1493 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1494 		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1495 		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1496 		      F_MPS0 | F_CPL_SWITCH)
1497 /*
1498  * Interrupt handler for the PCIX1 module.
1499  */
1500 static void pci_intr_handler(struct adapter *adapter)
1501 {
1502 	static const struct intr_info pcix1_intr_info[] = {
1503 		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1504 		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
1505 		{F_RCVTARABT, "PCI received target abort", -1, 1},
1506 		{F_RCVMSTABT, "PCI received master abort", -1, 1},
1507 		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
1508 		{F_DETPARERR, "PCI detected parity error", -1, 1},
1509 		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1510 		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1511 		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
1512 		 1},
1513 		{F_DETCORECCERR, "PCI correctable ECC error",
1514 		 STAT_PCI_CORR_ECC, 0},
1515 		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1516 		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1517 		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1518 		 1},
1519 		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1520 		 1},
1521 		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1522 		 1},
1523 		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1524 		 "error", -1, 1},
1525 		{0}
1526 	};
1527 
1528 	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1529 				  pcix1_intr_info, adapter->irq_stats))
1530 		t3_fatal_err(adapter);
1531 }
1532 
1533 /*
1534  * Interrupt handler for the PCIE module.
1535  */
1536 static void pcie_intr_handler(struct adapter *adapter)
1537 {
1538 	static const struct intr_info pcie_intr_info[] = {
1539 		{F_PEXERR, "PCI PEX error", -1, 1},
1540 		{F_UNXSPLCPLERRR,
1541 		 "PCI unexpected split completion DMA read error", -1, 1},
1542 		{F_UNXSPLCPLERRC,
1543 		 "PCI unexpected split completion DMA command error", -1, 1},
1544 		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1545 		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1546 		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1547 		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1548 		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1549 		 "PCI MSI-X table/PBA parity error", -1, 1},
1550 		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
1551 		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
1552 		{F_RXPARERR, "PCI Rx parity error", -1, 1},
1553 		{F_TXPARERR, "PCI Tx parity error", -1, 1},
1554 		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1555 		{0}
1556 	};
1557 
1558 	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1559 		CH_ALERT(adapter, "PEX error code 0x%x\n",
1560 			 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1561 
1562 	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1563 				  pcie_intr_info, adapter->irq_stats))
1564 		t3_fatal_err(adapter);
1565 }
1566 
1567 /*
1568  * TP interrupt handler.
1569  */
1570 static void tp_intr_handler(struct adapter *adapter)
1571 {
1572 	static const struct intr_info tp_intr_info[] = {
1573 		{0xffffff, "TP parity error", -1, 1},
1574 		{0x1000000, "TP out of Rx pages", -1, 1},
1575 		{0x2000000, "TP out of Tx pages", -1, 1},
1576 		{0}
1577 	};
1578 
1579 	static const struct intr_info tp_intr_info_t3c[] = {
1580 		{0x1fffffff, "TP parity error", -1, 1},
1581 		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1582 		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1583 		{0}
1584 	};
1585 
1586 	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1587 				  adapter->params.rev < T3_REV_C ?
1588 				  tp_intr_info : tp_intr_info_t3c, NULL))
1589 		t3_fatal_err(adapter);
1590 }
1591 
1592 /*
1593  * CIM interrupt handler.
1594  */
1595 static void cim_intr_handler(struct adapter *adapter)
1596 {
1597 	static const struct intr_info cim_intr_info[] = {
1598 		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1599 		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1600 		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1601 		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1602 		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1603 		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1604 		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1605 		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1606 		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1607 		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1608 		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1609 		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1610 		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
1611 		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
1612 		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
1613 		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
1614 		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
1615 		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
1616 		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
1617 		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
1618 		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
1619 		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
1620 		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
1621 		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
1622 		{0}
1623 	};
1624 
1625 	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1626 				  cim_intr_info, NULL))
1627 		t3_fatal_err(adapter);
1628 }
1629 
1630 /*
1631  * ULP RX interrupt handler.
1632  */
1633 static void ulprx_intr_handler(struct adapter *adapter)
1634 {
1635 	static const struct intr_info ulprx_intr_info[] = {
1636 		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
1637 		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
1638 		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
1639 		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
1640 		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
1641 		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
1642 		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
1643 		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
1644 		{0}
1645 	};
1646 
1647 	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1648 				  ulprx_intr_info, NULL))
1649 		t3_fatal_err(adapter);
1650 }
1651 
1652 /*
1653  * ULP TX interrupt handler.
1654  */
1655 static void ulptx_intr_handler(struct adapter *adapter)
1656 {
1657 	static const struct intr_info ulptx_intr_info[] = {
1658 		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1659 		 STAT_ULP_CH0_PBL_OOB, 0},
1660 		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1661 		 STAT_ULP_CH1_PBL_OOB, 0},
1662 		{0xfc, "ULP TX parity error", -1, 1},
1663 		{0}
1664 	};
1665 
1666 	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1667 				  ulptx_intr_info, adapter->irq_stats))
1668 		t3_fatal_err(adapter);
1669 }
1670 
1671 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1672 	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1673 	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1674 	F_ICSPI1_TX_FRAMING_ERROR)
1675 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1676 	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1677 	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1678 	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1679 
1680 /*
1681  * PM TX interrupt handler.
1682  */
1683 static void pmtx_intr_handler(struct adapter *adapter)
1684 {
1685 	static const struct intr_info pmtx_intr_info[] = {
1686 		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1687 		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1688 		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1689 		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1690 		 "PMTX ispi parity error", -1, 1},
1691 		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1692 		 "PMTX ospi parity error", -1, 1},
1693 		{0}
1694 	};
1695 
1696 	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1697 				  pmtx_intr_info, NULL))
1698 		t3_fatal_err(adapter);
1699 }
1700 
1701 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1702 	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1703 	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1704 	F_IESPI1_TX_FRAMING_ERROR)
1705 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1706 	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1707 	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1708 	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1709 
1710 /*
1711  * PM RX interrupt handler.
1712  */
1713 static void pmrx_intr_handler(struct adapter *adapter)
1714 {
1715 	static const struct intr_info pmrx_intr_info[] = {
1716 		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1717 		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1718 		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1719 		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1720 		 "PMRX ispi parity error", -1, 1},
1721 		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1722 		 "PMRX ospi parity error", -1, 1},
1723 		{0}
1724 	};
1725 
1726 	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1727 				  pmrx_intr_info, NULL))
1728 		t3_fatal_err(adapter);
1729 }
1730 
1731 /*
1732  * CPL switch interrupt handler.
1733  */
1734 static void cplsw_intr_handler(struct adapter *adapter)
1735 {
1736 	static const struct intr_info cplsw_intr_info[] = {
1737 		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1738 		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1739 		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1740 		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1741 		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1742 		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1743 		{0}
1744 	};
1745 
1746 	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1747 				  cplsw_intr_info, NULL))
1748 		t3_fatal_err(adapter);
1749 }
1750 
1751 /*
1752  * MPS interrupt handler.
1753  */
1754 static void mps_intr_handler(struct adapter *adapter)
1755 {
1756 	static const struct intr_info mps_intr_info[] = {
1757 		{0x1ff, "MPS parity error", -1, 1},
1758 		{0}
1759 	};
1760 
1761 	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1762 				  mps_intr_info, NULL))
1763 		t3_fatal_err(adapter);
1764 }
1765 
1766 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1767 
1768 /*
1769  * MC7 interrupt handler.
1770  */
1771 static void mc7_intr_handler(struct mc7 *mc7)
1772 {
1773 	struct adapter *adapter = mc7->adapter;
1774 	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1775 
1776 	if (cause & F_CE) {
1777 		mc7->stats.corr_err++;
1778 		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1779 			"data 0x%x 0x%x 0x%x\n", mc7->name,
1780 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1781 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1782 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1783 			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1784 	}
1785 
1786 	if (cause & F_UE) {
1787 		mc7->stats.uncorr_err++;
1788 		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1789 			 "data 0x%x 0x%x 0x%x\n", mc7->name,
1790 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1791 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1792 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1793 			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1794 	}
1795 
1796 	if (G_PE(cause)) {
1797 		mc7->stats.parity_err++;
1798 		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1799 			 mc7->name, G_PE(cause));
1800 	}
1801 
1802 	if (cause & F_AE) {
1803 		u32 addr = 0;
1804 
1805 		if (adapter->params.rev > 0)
1806 			addr = t3_read_reg(adapter,
1807 					   mc7->offset + A_MC7_ERR_ADDR);
1808 		mc7->stats.addr_err++;
1809 		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1810 			 mc7->name, addr);
1811 	}
1812 
1813 	if (cause & MC7_INTR_FATAL)
1814 		t3_fatal_err(adapter);
1815 
1816 	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1817 }
1818 
1819 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1820 			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1821 /*
1822  * XGMAC interrupt handler.
1823  */
1824 static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1825 {
1826 	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1827 	/*
1828 	 * We mask out interrupt causes for which we're not taking interrupts.
1829 	 * This allows us to use polling logic to monitor some of the other
1830 	 * conditions when taking interrupts would impose too much load on the
1831 	 * system.
1832 	 */
1833 	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
1834 		    ~F_RXFIFO_OVERFLOW;
1835 
1836 	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1837 		mac->stats.tx_fifo_parity_err++;
1838 		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1839 	}
1840 	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1841 		mac->stats.rx_fifo_parity_err++;
1842 		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1843 	}
1844 	if (cause & F_TXFIFO_UNDERRUN)
1845 		mac->stats.tx_fifo_urun++;
1846 	if (cause & F_RXFIFO_OVERFLOW)
1847 		mac->stats.rx_fifo_ovfl++;
1848 	if (cause & V_SERDES_LOS(M_SERDES_LOS))
1849 		mac->stats.serdes_signal_loss++;
1850 	if (cause & F_XAUIPCSCTCERR)
1851 		mac->stats.xaui_pcs_ctc_err++;
1852 	if (cause & F_XAUIPCSALIGNCHANGE)
1853 		mac->stats.xaui_pcs_align_change++;
1854 	if (cause & F_XGM_INT) {
1855 		t3_set_reg_field(adap,
1856 				 A_XGM_INT_ENABLE + mac->offset,
1857 				 F_XGM_INT, 0);
1858 		mac->stats.link_faults++;
1859 
1860 		t3_os_link_fault_handler(adap, idx);
1861 	}
1862 
1863 	if (cause & XGM_INTR_FATAL)
1864 		t3_fatal_err(adap);
1865 
1866 	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1867 	return cause != 0;
1868 }
1869 
1870 /*
1871  * Interrupt handler for PHY events.
1872  */
1873 int t3_phy_intr_handler(struct adapter *adapter)
1874 {
1875 	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1876 
1877 	for_each_port(adapter, i) {
1878 		struct port_info *p = adap2pinfo(adapter, i);
1879 
1880 		if (!(p->phy.caps & SUPPORTED_IRQ))
1881 			continue;
1882 
1883 		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1884 			int phy_cause = p->phy.ops->intr_handler(&p->phy);
1885 
1886 			if (phy_cause & cphy_cause_link_change)
1887 				t3_link_changed(adapter, i);
1888 			if (phy_cause & cphy_cause_fifo_error)
1889 				p->phy.fifo_errors++;
1890 			if (phy_cause & cphy_cause_module_change)
1891 				t3_os_phymod_changed(adapter, i);
1892 		}
1893 	}
1894 
1895 	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1896 	return 0;
1897 }
1898 
1899 /*
1900  * T3 slow path (non-data) interrupt handler.
1901  */
1902 int t3_slow_intr_handler(struct adapter *adapter)
1903 {
1904 	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1905 
1906 	cause &= adapter->slow_intr_mask;
1907 	if (!cause)
1908 		return 0;
1909 	if (cause & F_PCIM0) {
1910 		if (is_pcie(adapter))
1911 			pcie_intr_handler(adapter);
1912 		else
1913 			pci_intr_handler(adapter);
1914 	}
1915 	if (cause & F_SGE3)
1916 		t3_sge_err_intr_handler(adapter);
1917 	if (cause & F_MC7_PMRX)
1918 		mc7_intr_handler(&adapter->pmrx);
1919 	if (cause & F_MC7_PMTX)
1920 		mc7_intr_handler(&adapter->pmtx);
1921 	if (cause & F_MC7_CM)
1922 		mc7_intr_handler(&adapter->cm);
1923 	if (cause & F_CIM)
1924 		cim_intr_handler(adapter);
1925 	if (cause & F_TP1)
1926 		tp_intr_handler(adapter);
1927 	if (cause & F_ULP2_RX)
1928 		ulprx_intr_handler(adapter);
1929 	if (cause & F_ULP2_TX)
1930 		ulptx_intr_handler(adapter);
1931 	if (cause & F_PM1_RX)
1932 		pmrx_intr_handler(adapter);
1933 	if (cause & F_PM1_TX)
1934 		pmtx_intr_handler(adapter);
1935 	if (cause & F_CPL_SWITCH)
1936 		cplsw_intr_handler(adapter);
1937 	if (cause & F_MPS0)
1938 		mps_intr_handler(adapter);
1939 	if (cause & F_MC5A)
1940 		t3_mc5_intr_handler(&adapter->mc5);
1941 	if (cause & F_XGMAC0_0)
1942 		mac_intr_handler(adapter, 0);
1943 	if (cause & F_XGMAC0_1)
1944 		mac_intr_handler(adapter, 1);
1945 	if (cause & F_T3DBG)
1946 		t3_os_ext_intr_handler(adapter);
1947 
1948 	/* Clear the interrupts just processed. */
1949 	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1950 	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
1951 	return 1;
1952 }
1953 
1954 static unsigned int calc_gpio_intr(struct adapter *adap)
1955 {
1956 	unsigned int i, gpi_intr = 0;
1957 
1958 	for_each_port(adap, i)
1959 		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1960 		    adapter_info(adap)->gpio_intr[i])
1961 			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1962 	return gpi_intr;
1963 }
1964 
1965 /**
1966  *	t3_intr_enable - enable interrupts
1967  *	@adapter: the adapter whose interrupts should be enabled
1968  *
1969  *	Enable interrupts by setting the interrupt enable registers of the
1970  *	various HW modules and then enabling the top-level interrupt
1971  *	concentrator.
1972  */
1973 void t3_intr_enable(struct adapter *adapter)
1974 {
1975 	static const struct addr_val_pair intr_en_avp[] = {
1976 		{A_SG_INT_ENABLE, SGE_INTR_MASK},
1977 		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
1978 		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1979 		 MC7_INTR_MASK},
1980 		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1981 		 MC7_INTR_MASK},
1982 		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1983 		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1984 		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1985 		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1986 		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1987 		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
1988 	};
1989 
1990 	adapter->slow_intr_mask = PL_INTR_MASK;
1991 
1992 	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1993 	t3_write_reg(adapter, A_TP_INT_ENABLE,
1994 		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1995 
1996 	if (adapter->params.rev > 0) {
1997 		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1998 			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1999 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
2000 			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
2001 			     F_PBL_BOUND_ERR_CH1);
2002 	} else {
2003 		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
2004 		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
2005 	}
2006 
2007 	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
2008 
2009 	if (is_pcie(adapter))
2010 		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
2011 	else
2012 		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
2013 	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
2014 	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
2015 }
2016 
2017 /**
2018  *	t3_intr_disable - disable a card's interrupts
2019  *	@adapter: the adapter whose interrupts should be disabled
2020  *
2021  *	Disable interrupts.  We only disable the top-level interrupt
2022  *	concentrator and the SGE data interrupts.
2023  */
2024 void t3_intr_disable(struct adapter *adapter)
2025 {
2026 	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2027 	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
2028 	adapter->slow_intr_mask = 0;
2029 }
2030 
2031 /**
2032  *	t3_intr_clear - clear all interrupts
2033  *	@adapter: the adapter whose interrupts should be cleared
2034  *
2035  *	Clears all interrupts.
2036  */
2037 void t3_intr_clear(struct adapter *adapter)
2038 {
2039 	static const unsigned int cause_reg_addr[] = {
2040 		A_SG_INT_CAUSE,
2041 		A_SG_RSPQ_FL_STATUS,
2042 		A_PCIX_INT_CAUSE,
2043 		A_MC7_INT_CAUSE,
2044 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2045 		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2046 		A_CIM_HOST_INT_CAUSE,
2047 		A_TP_INT_CAUSE,
2048 		A_MC5_DB_INT_CAUSE,
2049 		A_ULPRX_INT_CAUSE,
2050 		A_ULPTX_INT_CAUSE,
2051 		A_CPL_INTR_CAUSE,
2052 		A_PM1_TX_INT_CAUSE,
2053 		A_PM1_RX_INT_CAUSE,
2054 		A_MPS_INT_CAUSE,
2055 		A_T3DBG_INT_CAUSE,
2056 	};
2057 	unsigned int i;
2058 
2059 	/* Clear PHY and MAC interrupts for each port. */
2060 	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);
2062 
2063 	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2064 		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2065 
2066 	if (is_pcie(adapter))
2067 		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2068 	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2069 	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
2070 }
2071 
2072 void t3_xgm_intr_enable(struct adapter *adapter, int idx)
2073 {
2074 	struct port_info *pi = adap2pinfo(adapter, idx);
2075 
2076 	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2077 		     XGM_EXTRA_INTR_MASK);
2078 }
2079 
2080 void t3_xgm_intr_disable(struct adapter *adapter, int idx)
2081 {
2082 	struct port_info *pi = adap2pinfo(adapter, idx);
2083 
2084 	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2085 		     0x7ff);
2086 }
2087 
2088 /**
2089  *	t3_port_intr_enable - enable port-specific interrupts
2090  *	@adapter: associated adapter
2091  *	@idx: index of port whose interrupts should be enabled
2092  *
2093  *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
2094  *	adapter port.
2095  */
2096 void t3_port_intr_enable(struct adapter *adapter, int idx)
2097 {
2098 	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2099 
2100 	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
2101 	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2102 	phy->ops->intr_enable(phy);
2103 }
2104 
2105 /**
2106  *	t3_port_intr_disable - disable port-specific interrupts
2107  *	@adapter: associated adapter
2108  *	@idx: index of port whose interrupts should be disabled
2109  *
2110  *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
2111  *	adapter port.
2112  */
2113 void t3_port_intr_disable(struct adapter *adapter, int idx)
2114 {
2115 	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2116 
2117 	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
2118 	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2119 	phy->ops->intr_disable(phy);
2120 }
2121 
2122 /**
2123  *	t3_port_intr_clear - clear port-specific interrupts
2124  *	@adapter: associated adapter
2125  *	@idx: index of port whose interrupts to clear
2126  *
2127  *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
2128  *	adapter port.
2129  */
2130 static void t3_port_intr_clear(struct adapter *adapter, int idx)
2131 {
2132 	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2133 
2134 	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
2135 	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
2136 	phy->ops->intr_clear(phy);
2137 }
2138 
2139 #define SG_CONTEXT_CMD_ATTEMPTS 100
2140 
/**
 *	t3_sge_write_context - write an SGE context
 *	@adapter: the adapter
 *	@id: the context id
 *	@type: the context type
 *
 *	Program an SGE context with the values already loaded in the
 *	CONTEXT_DATA0..3 registers.
 */
2150 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
2151 				unsigned int type)
2152 {
2153 	if (type == F_RESPONSEQ) {
2154 		/*
2155 		 * Can't write the Response Queue Context bits for
2156 		 * Interrupt Armed or the Reserve bits after the chip
2157 		 * has been initialized out of reset.  Writing to these
2158 		 * bits can confuse the hardware.
2159 		 */
2160 		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2161 		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2162 		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2163 		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2164 	} else {
2165 		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2166 		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2167 		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2168 		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2169 	}
2170 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2171 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2172 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2173 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2174 }
2175 
2176 /**
2177  *	clear_sge_ctxt - completely clear an SGE context
2178  *	@adapter: the adapter
2179  *	@id: the context id
2180  *	@type: the context type
2181  *
2182  *	Completely clear an SGE context.  Used predominantly at post-reset
2183  *	initialization.  Note in particular that we don't skip writing to any
2184  *	"sensitive bits" in the contexts the way that t3_sge_write_context()
2185  *	does ...
2186  */
2187 static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2188 			  unsigned int type)
2189 {
2190 	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2191 	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2192 	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2193 	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2194 	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2195 	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2196 	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2197 	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2198 	t3_write_reg(adap, A_SG_CONTEXT_CMD,
2199 		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2200 	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2201 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2202 }
2203 
2204 /**
2205  *	t3_sge_init_ecntxt - initialize an SGE egress context
2206  *	@adapter: the adapter to configure
2207  *	@id: the context id
2208  *	@gts_enable: whether to enable GTS for the context
2209  *	@type: the egress context type
2210  *	@respq: associated response queue
2211  *	@base_addr: base address of queue
2212  *	@size: number of queue entries
2213  *	@token: uP token
2214  *	@gen: initial generation value for the context
2215  *	@cidx: consumer pointer
2216  *
2217  *	Initialize an SGE egress context and make it ready for use.  If the
2218  *	platform allows concurrent context operations, the caller is
2219  *	responsible for appropriate locking.
2220  */
2221 int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2222 		       enum sge_context_type type, int respq, u64 base_addr,
2223 		       unsigned int size, unsigned int token, int gen,
2224 		       unsigned int cidx)
2225 {
2226 	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2227 
2228 	if (base_addr & 0xfff)	/* must be 4K aligned */
2229 		return -EINVAL;
2230 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2231 		return -EBUSY;
2232 
2233 	base_addr >>= 12;
2234 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2235 		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2236 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2237 		     V_EC_BASE_LO(base_addr & 0xffff));
2238 	base_addr >>= 16;
2239 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2240 	base_addr >>= 32;
2241 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2242 		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2243 		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2244 		     F_EC_VALID);
2245 	return t3_sge_write_context(adapter, id, F_EGRESS);
2246 }
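
/*
 * A sketch of a hypothetical caller creating an Ethernet egress context.
 * "qid", "rspq_id", "dma_addr" and "nentries" are illustrative names only;
 * the ring must live in 4K-aligned DMA memory:
 *
 *	ret = t3_sge_init_ecntxt(adapter, qid, 1, SGE_CNTXT_ETH, rspq_id,
 *				 dma_addr, nentries, qid, 1, 0);
 *	if (ret)
 *		goto err_free_ring;
 */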
2247 
2248 /**
2249  *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2250  *	@adapter: the adapter to configure
2251  *	@id: the context id
2252  *	@gts_enable: whether to enable GTS for the context
2253  *	@base_addr: base address of queue
2254  *	@size: number of queue entries
2255  *	@bsize: size of each buffer for this queue
2256  *	@cong_thres: threshold to signal congestion to upstream producers
2257  *	@gen: initial generation value for the context
2258  *	@cidx: consumer pointer
2259  *
2260  *	Initialize an SGE free list context and make it ready for use.  The
2261  *	caller is responsible for ensuring only one context operation occurs
2262  *	at a time.
2263  */
2264 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2265 			int gts_enable, u64 base_addr, unsigned int size,
2266 			unsigned int bsize, unsigned int cong_thres, int gen,
2267 			unsigned int cidx)
2268 {
2269 	if (base_addr & 0xfff)	/* must be 4K aligned */
2270 		return -EINVAL;
2271 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2272 		return -EBUSY;
2273 
2274 	base_addr >>= 12;
2275 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2276 	base_addr >>= 32;
2277 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2278 		     V_FL_BASE_HI((u32) base_addr) |
2279 		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2280 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2281 		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2282 		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2283 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2284 		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2285 		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2286 	return t3_sge_write_context(adapter, id, F_FREELIST);
2287 }
2288 
2289 /**
2290  *	t3_sge_init_rspcntxt - initialize an SGE response queue context
2291  *	@adapter: the adapter to configure
2292  *	@id: the context id
2293  *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2294  *	@base_addr: base address of queue
2295  *	@size: number of queue entries
2296  *	@fl_thres: threshold for selecting the normal or jumbo free list
2297  *	@gen: initial generation value for the context
2298  *	@cidx: consumer pointer
2299  *
2300  *	Initialize an SGE response queue context and make it ready for use.
2301  *	The caller is responsible for ensuring only one context operation
2302  *	occurs at a time.
2303  */
2304 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2305 			 int irq_vec_idx, u64 base_addr, unsigned int size,
2306 			 unsigned int fl_thres, int gen, unsigned int cidx)
2307 {
2308 	unsigned int intr = 0;
2309 
2310 	if (base_addr & 0xfff)	/* must be 4K aligned */
2311 		return -EINVAL;
2312 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2313 		return -EBUSY;
2314 
2315 	base_addr >>= 12;
2316 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2317 		     V_CQ_INDEX(cidx));
2318 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2319 	base_addr >>= 32;
2320 	if (irq_vec_idx >= 0)
2321 		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2322 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2323 		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2324 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2325 	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2326 }
2327 
2328 /**
2329  *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
2330  *	@adapter: the adapter to configure
2331  *	@id: the context id
2332  *	@base_addr: base address of queue
2333  *	@size: number of queue entries
2334  *	@rspq: response queue for async notifications
2335  *	@ovfl_mode: CQ overflow mode
2336  *	@credits: completion queue credits
2337  *	@credit_thres: the credit threshold
2338  *
2339  *	Initialize an SGE completion queue context and make it ready for use.
2340  *	The caller is responsible for ensuring only one context operation
2341  *	occurs at a time.
2342  */
2343 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2344 			unsigned int size, int rspq, int ovfl_mode,
2345 			unsigned int credits, unsigned int credit_thres)
2346 {
2347 	if (base_addr & 0xfff)	/* must be 4K aligned */
2348 		return -EINVAL;
2349 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2350 		return -EBUSY;
2351 
2352 	base_addr >>= 12;
2353 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2354 	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2355 	base_addr >>= 32;
2356 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2357 		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2358 		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2359 		     V_CQ_ERR(ovfl_mode));
2360 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2361 		     V_CQ_CREDIT_THRES(credit_thres));
2362 	return t3_sge_write_context(adapter, id, F_CQ);
2363 }
2364 
2365 /**
2366  *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
2367  *	@adapter: the adapter
2368  *	@id: the egress context id
2369  *	@enable: enable (1) or disable (0) the context
2370  *
2371  *	Enable or disable an SGE egress context.  The caller is responsible for
2372  *	ensuring only one context operation occurs at a time.
2373  */
2374 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2375 {
2376 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2377 		return -EBUSY;
2378 
2379 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2380 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2381 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2382 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2383 	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2384 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2385 		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2386 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2387 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2388 }
2389 
2390 /**
2391  *	t3_sge_disable_fl - disable an SGE free-buffer list
2392  *	@adapter: the adapter
2393  *	@id: the free list context id
2394  *
2395  *	Disable an SGE free-buffer list.  The caller is responsible for
2396  *	ensuring only one context operation occurs at a time.
2397  */
2398 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2399 {
2400 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2401 		return -EBUSY;
2402 
2403 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2404 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2405 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2406 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2407 	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2408 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2409 		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2410 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2411 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2412 }
2413 
2414 /**
2415  *	t3_sge_disable_rspcntxt - disable an SGE response queue
2416  *	@adapter: the adapter
2417  *	@id: the response queue context id
2418  *
2419  *	Disable an SGE response queue.  The caller is responsible for
2420  *	ensuring only one context operation occurs at a time.
2421  */
2422 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2423 {
2424 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2425 		return -EBUSY;
2426 
2427 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2428 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2429 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2430 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2431 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2432 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2433 		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2434 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2435 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2436 }
2437 
2438 /**
2439  *	t3_sge_disable_cqcntxt - disable an SGE completion queue
2440  *	@adapter: the adapter
2441  *	@id: the completion queue context id
2442  *
2443  *	Disable an SGE completion queue.  The caller is responsible for
2444  *	ensuring only one context operation occurs at a time.
2445  */
2446 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2447 {
2448 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2449 		return -EBUSY;
2450 
2451 	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2452 	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2453 	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2454 	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2455 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2456 	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2457 		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2458 	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2459 			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2460 }
2461 
2462 /**
2463  *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
2464  *	@adapter: the adapter
2465  *	@id: the context id
2466  *	@op: the operation to perform
2467  *
2468  *	Perform the selected operation on an SGE completion queue context.
2469  *	The caller is responsible for ensuring only one context operation
2470  *	occurs at a time.
2471  */
2472 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2473 		      unsigned int credits)
2474 {
2475 	u32 val;
2476 
2477 	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2478 		return -EBUSY;
2479 
2480 	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2481 	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2482 		     V_CONTEXT(id) | F_CQ);
2483 	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2484 				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2485 		return -EIO;
2486 
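	/*
	 * Opcodes 2-6 return the current queue index.  Revisions above 0
	 * latch it in the command response read above; on rev 0 the context
	 * must be read back with opcode 0 and the index taken from
	 * SG_CONTEXT_DATA0 instead.
	 */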
2487 	if (op >= 2 && op < 7) {
2488 		if (adapter->params.rev > 0)
2489 			return G_CQ_INDEX(val);
2490 
2491 		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2492 			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2493 		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2494 				    F_CONTEXT_CMD_BUSY, 0,
2495 				    SG_CONTEXT_CMD_ATTEMPTS, 1))
2496 			return -EIO;
2497 		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2498 	}
2499 	return 0;
2500 }
2501 
2502 /**
2503  *	t3_config_rss - configure Rx packet steering
2504  *	@adapter: the adapter
2505  *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
2506  *	@cpus: values for the CPU lookup table (0xff terminated)
2507  *	@rspq: values for the response queue lookup table (0xffff terminated)
2508  *
2509  *	Programs the receive packet steering logic.  @cpus and @rspq provide
2510  *	the values for the CPU and response queue lookup tables.  If they
 *	provide fewer values than the sizes of the tables, the supplied
 *	values are reused cyclically until the tables are fully populated.
2513  */
2514 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
		   const u8 *cpus, const u16 *rspq)
2516 {
2517 	int i, j, cpu_idx = 0, q_idx = 0;
2518 
2519 	if (cpus)
2520 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2521 			u32 val = i << 16;
2522 
2523 			for (j = 0; j < 2; ++j) {
2524 				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2525 				if (cpus[cpu_idx] == 0xff)
2526 					cpu_idx = 0;
2527 			}
2528 			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2529 		}
2530 
2531 	if (rspq)
2532 		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2533 			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2534 				     (i << 16) | rspq[q_idx++]);
2535 			if (rspq[q_idx] == 0xffff)
2536 				q_idx = 0;
2537 		}
2538 
2539 	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2540 }
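
/*
 * A minimal (hypothetical) caller steering everything to CPU 0 and
 * response queue 0.  The 0xff/0xffff sentinels terminate the tables;
 * shorter tables are replayed until all RSS_TABLE_SIZE entries are
 * written.  The config bits shown are illustrative only:
 *
 *	static const u8 cpus[] = { 0, 0xff };
 *	static const u16 rspq[] = { 0, 0xffff };
 *
 *	t3_config_rss(adapter, F_TNLLKPEN | F_TNLMAPEN, cpus, rspq);
 */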
2541 
2542 /**
2543  *	t3_tp_set_offload_mode - put TP in NIC/offload mode
2544  *	@adap: the adapter
2545  *	@enable: 1 to select offload mode, 0 for regular NIC
2546  *
2547  *	Switches TP to NIC/offload mode.
2548  */
2549 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2550 {
2551 	if (is_offload(adap) || !enable)
2552 		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2553 				 V_NICMODE(!enable));
2554 }
2555 
2556 /**
2557  *	pm_num_pages - calculate the number of pages of the payload memory
2558  *	@mem_size: the size of the payload memory
2559  *	@pg_size: the size of each payload memory page
2560  *
2561  *	Calculate the number of pages, each of the given size, that fit in a
2562  *	memory of the specified size, respecting the HW requirement that the
2563  *	number of pages must be a multiple of 24.
2564  */
2565 static inline unsigned int pm_num_pages(unsigned int mem_size,
2566 					unsigned int pg_size)
2567 {
2568 	unsigned int n = mem_size / pg_size;
2569 
2570 	return n - n % 24;
2571 }
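
/*
 * For example, a hypothetical 64 MB payload memory with 16 KB pages
 * gives n = 4096, and pm_num_pages() returns 4096 - 4096 % 24 = 4080.
 */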
2572 
2573 #define mem_region(adap, start, size, reg) \
2574 	t3_write_reg((adap), A_ ## reg, (start)); \
2575 	start += size
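
/*
 * Note that mem_region() expands to two statements, so it is not safe
 * as the unbraced body of an if/else; all uses in partition_mem() below
 * are plain statements at function scope.
 */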
2576 
2577 /**
2578  *	partition_mem - partition memory and configure TP memory settings
2579  *	@adap: the adapter
2580  *	@p: the TP parameters
2581  *
2582  *	Partitions context and payload memory and configures TP's memory
2583  *	registers.
2584  */
2585 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2586 {
2587 	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2588 	unsigned int timers = 0, timers_shift = 22;
2589 
2590 	if (adap->params.rev > 0) {
2591 		if (tids <= 16 * 1024) {
2592 			timers = 1;
2593 			timers_shift = 16;
2594 		} else if (tids <= 64 * 1024) {
2595 			timers = 2;
2596 			timers_shift = 18;
2597 		} else if (tids <= 256 * 1024) {
2598 			timers = 3;
2599 			timers_shift = 20;
2600 		}
2601 	}
2602 
2603 	t3_write_reg(adap, A_TP_PMM_SIZE,
2604 		     p->chan_rx_size | (p->chan_tx_size >> 16));
2605 
2606 	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2607 	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2608 	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2609 	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2610 			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2611 
2612 	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2613 	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2614 	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2615 
2616 	pstructs = p->rx_num_pgs + p->tx_num_pgs;
	/* Add a bit of headroom and round down to a multiple of 24 */
2618 	pstructs += 48;
2619 	pstructs -= pstructs % 24;
2620 	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2621 
2622 	m = tids * TCB_SIZE;
2623 	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2624 	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2625 	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2626 	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2627 	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2628 	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2629 	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2630 	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2631 
2632 	m = (m + 4095) & ~0xfff;
2633 	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2634 	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2635 
2636 	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2637 	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2638 	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2639 	if (tids < m)
2640 		adap->params.mc5.nservers += m - tids;
2641 }
2642 
2643 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2644 				  u32 val)
2645 {
2646 	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2647 	t3_write_reg(adap, A_TP_PIO_DATA, val);
2648 }
2649 
2650 static void tp_config(struct adapter *adap, const struct tp_params *p)
2651 {
2652 	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2653 		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2654 		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2655 	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2656 		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2657 		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2658 	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2659 		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2660 		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2661 		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2662 	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2663 			 F_IPV6ENABLE | F_NICMODE);
2664 	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2665 	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2666 	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2667 			 adap->params.rev > 0 ? F_ENABLEESND :
2668 			 F_T3A_ENABLEESND);
2669 
2670 	t3_set_reg_field(adap, A_TP_PC_CONFIG,
2671 			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
2673 			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2674 	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2675 			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2676 			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2677 	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2678 	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2679 
2680 	if (adap->params.rev > 0) {
2681 		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2682 		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2683 				 F_TXPACEAUTO);
2684 		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2685 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2686 	} else
2687 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2688 
2689 	if (adap->params.rev == T3_REV_C)
2690 		t3_set_reg_field(adap, A_TP_PC_CONFIG,
2691 				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2692 				 V_TABLELATENCYDELTA(4));
2693 
2694 	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2695 	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2696 	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2697 	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2698 }
2699 
2700 /* Desired TP timer resolution in usec */
2701 #define TP_TMR_RES 50
2702 
2703 /* TCP timer values in ms */
2704 #define TP_DACK_TIMER 50
2705 #define TP_RTO_MIN    250
2706 
2707 /**
2708  *	tp_set_timers - set TP timing parameters
2709  *	@adap: the adapter to set
2710  *	@core_clk: the core clock frequency in Hz
2711  *
2712  *	Set TP's timing parameters, such as the various timer resolutions and
2713  *	the TCP timer values.
2714  */
2715 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2716 {
2717 	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2718 	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
2719 	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
2720 	unsigned int tps = core_clk >> tre;
2721 
2722 	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2723 		     V_DELAYEDACKRESOLUTION(dack_re) |
2724 		     V_TIMESTAMPRESOLUTION(tstamp_re));
2725 	t3_write_reg(adap, A_TP_DACK_TIMER,
2726 		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2727 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2728 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2729 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2730 	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2731 	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2732 		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2733 		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2734 		     V_KEEPALIVEMAX(9));
2735 
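/* "n SECONDS" below expands to "n * tps": n seconds in timer ticks */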
2736 #define SECONDS * tps
2737 
2738 	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2739 	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2740 	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2741 	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2742 	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2743 	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2744 	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2745 	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2746 	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2747 
2748 #undef SECONDS
2749 }
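
/*
 * Worked example (hypothetical 200 MHz core clock): the target generic
 * resolution is TP_TMR_RES = 50 us, so tre = fls(2e8 / 20000) - 1 =
 * fls(10000) - 1 = 13, giving a 2^13 / 2e8 s ~= 41 us tick (the largest
 * power-of-two tick not exceeding the target) and tps = 2e8 >> 13 ~=
 * 24414 ticks per second.
 */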
2750 
2751 /**
2752  *	t3_tp_set_coalescing_size - set receive coalescing size
2753  *	@adap: the adapter
2754  *	@size: the receive coalescing size
2755  *	@psh: whether a set PSH bit should deliver coalesced data
2756  *
2757  *	Set the receive coalescing size and PSH bit handling.
2758  */
2759 static int t3_tp_set_coalescing_size(struct adapter *adap,
2760 				     unsigned int size, int psh)
2761 {
2762 	u32 val;
2763 
2764 	if (size > MAX_RX_COALESCING_LEN)
2765 		return -EINVAL;
2766 
2767 	val = t3_read_reg(adap, A_TP_PARA_REG3);
2768 	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2769 
2770 	if (size) {
2771 		val |= F_RXCOALESCEENABLE;
2772 		if (psh)
2773 			val |= F_RXCOALESCEPSHEN;
2774 		size = min(MAX_RX_COALESCING_LEN, size);
2775 		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2776 			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2777 	}
2778 	t3_write_reg(adap, A_TP_PARA_REG3, val);
2779 	return 0;
2780 }
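
/*
 * A typical (hypothetical) use enables coalescing at the HW limit with
 * PSH-triggered delivery:
 *
 *	t3_tp_set_coalescing_size(adap, MAX_RX_COALESCING_LEN, 1);
 */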
2781 
2782 /**
2783  *	t3_tp_set_max_rxsize - set the max receive size
2784  *	@adap: the adapter
2785  *	@size: the max receive size
2786  *
2787  *	Set TP's max receive size.  This is the limit that applies when
2788  *	receive coalescing is disabled.
2789  */
2790 static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2791 {
2792 	t3_write_reg(adap, A_TP_PARA_REG7,
2793 		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2794 }
2795 
2796 static void init_mtus(unsigned short mtus[])
2797 {
2798 	/*
2799 	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
2800 	 * it can accommodate max size TCP/IP headers when SACK and timestamps
2801 	 * are enabled and still have at least 8 bytes of payload.
2802 	 */
2803 	mtus[0] = 88;
2804 	mtus[1] = 88;
2805 	mtus[2] = 256;
2806 	mtus[3] = 512;
2807 	mtus[4] = 576;
2808 	mtus[5] = 1024;
2809 	mtus[6] = 1280;
2810 	mtus[7] = 1492;
2811 	mtus[8] = 1500;
2812 	mtus[9] = 2002;
2813 	mtus[10] = 2048;
2814 	mtus[11] = 4096;
2815 	mtus[12] = 4352;
2816 	mtus[13] = 8192;
2817 	mtus[14] = 9000;
2818 	mtus[15] = 9600;
2819 }
2820 
2821 /*
2822  * Initial congestion control parameters.
2823  */
2824 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2825 {
2826 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2827 	a[9] = 2;
2828 	a[10] = 3;
2829 	a[11] = 4;
2830 	a[12] = 5;
2831 	a[13] = 6;
2832 	a[14] = 7;
2833 	a[15] = 8;
2834 	a[16] = 9;
2835 	a[17] = 10;
2836 	a[18] = 14;
2837 	a[19] = 17;
2838 	a[20] = 21;
2839 	a[21] = 25;
2840 	a[22] = 30;
2841 	a[23] = 35;
2842 	a[24] = 45;
2843 	a[25] = 60;
2844 	a[26] = 80;
2845 	a[27] = 100;
2846 	a[28] = 200;
2847 	a[29] = 300;
2848 	a[30] = 400;
2849 	a[31] = 500;
2850 
2851 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2852 	b[9] = b[10] = 1;
2853 	b[11] = b[12] = 2;
2854 	b[13] = b[14] = b[15] = b[16] = 3;
2855 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2856 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2857 	b[28] = b[29] = 6;
2858 	b[30] = b[31] = 7;
2859 }
2860 
2861 /* The minimum additive increment value for the congestion control table */
2862 #define CC_MIN_INCR 2U
2863 
2864 /**
2865  *	t3_load_mtus - write the MTU and congestion control HW tables
2866  *	@adap: the adapter
2867  *	@mtus: the unrestricted values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
2869  *	@beta: the values for the congestion control beta parameter
2870  *	@mtu_cap: the maximum permitted effective MTU
2871  *
 *	Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
 *	Update the high-speed congestion control table with the supplied alpha,
 *	beta, and MTUs.
2875  */
2876 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2877 		  unsigned short alpha[NCCTRL_WIN],
2878 		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2879 {
2880 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
2881 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2882 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2883 		28672, 40960, 57344, 81920, 114688, 163840, 229376
2884 	};
2885 
2886 	unsigned int i, w;
2887 
2888 	for (i = 0; i < NMTUS; ++i) {
2889 		unsigned int mtu = min(mtus[i], mtu_cap);
2890 		unsigned int log2 = fls(mtu);
2891 
2892 		if (!(mtu & ((1 << log2) >> 2)))	/* round */
2893 			log2--;
2894 		t3_write_reg(adap, A_TP_MTU_TABLE,
2895 			     (i << 24) | (log2 << 16) | mtu);
2896 
2897 		for (w = 0; w < NCCTRL_WIN; ++w) {
2898 			unsigned int inc;
2899 
2900 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2901 				  CC_MIN_INCR);
2902 
2903 			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2904 				     (w << 16) | (beta[w] << 13) | inc);
2905 		}
2906 	}
2907 }
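
/*
 * For instance, a hypothetical entry with mtu = 1500, alpha[w] = 2 and
 * avg_pkts[w] = 160 is programmed with the additive increment
 * max((1500 - 40) * 2 / 160, CC_MIN_INCR) = max(18, 2) = 18.
 */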
2908 
2909 /**
2910  *	t3_tp_get_mib_stats - read TP's MIB counters
2911  *	@adap: the adapter
2912  *	@tps: holds the returned counter values
2913  *
2914  *	Returns the values of TP's MIB counters.
2915  */
2916 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2917 {
2918 	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2919 			 sizeof(*tps) / sizeof(u32), 0);
2920 }
2921 
2922 #define ulp_region(adap, name, start, len) \
2923 	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2924 	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2925 		     (start) + (len) - 1); \
2926 	start += len
2927 
2928 #define ulptx_region(adap, name, start, len) \
2929 	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2930 	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2931 		     (start) + (len) - 1)
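
/*
 * Unlike ulp_region(), ulptx_region() does not advance @start, so each
 * ULP TX region shares its address range with the ULP RX region
 * programmed immediately after it in ulp_config() (TPT with STAG, PBL
 * with PBL).
 */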
2932 
2933 static void ulp_config(struct adapter *adap, const struct tp_params *p)
2934 {
2935 	unsigned int m = p->chan_rx_size;
2936 
2937 	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2938 	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2939 	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2940 	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2941 	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2942 	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2943 	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2944 	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2945 }
2946 
2947 /**
 *	t3_set_proto_sram - set the contents of the protocol SRAM
 *	@adap: the adapter
2950  *	@data: the protocol image
2951  *
2952  *	Write the contents of the protocol SRAM.
2953  */
2954 int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2955 {
2956 	int i;
2957 	const __be32 *buf = (const __be32 *)data;
2958 
2959 	for (i = 0; i < PROTO_SRAM_LINES; i++) {
2960 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2961 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2962 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2963 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2964 		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2965 
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1u << 31);
2967 		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2968 			return -EIO;
2969 	}
2970 	t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2971 
2972 	return 0;
2973 }
2974 
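/**
 *	t3_config_trace_filter - configure one of the tracing filters
 *	@adapter: the adapter
 *	@tp: the desired trace filter parameters
 *	@filter_index: which filter to configure (0 = Tx, nonzero = Rx)
 *	@invert: if set, invert the sense of the filter match
 *	@enable: whether to enable the filter
 *
 *	Configures one of the tracing filters available in HW.
 */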
2975 void t3_config_trace_filter(struct adapter *adapter,
2976 			    const struct trace_params *tp, int filter_index,
2977 			    int invert, int enable)
2978 {
2979 	u32 addr, key[4], mask[4];
2980 
2981 	key[0] = tp->sport | (tp->sip << 16);
2982 	key[1] = (tp->sip >> 16) | (tp->dport << 16);
2983 	key[2] = tp->dip;
2984 	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2985 
2986 	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2987 	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2988 	mask[2] = tp->dip_mask;
2989 	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2990 
2991 	if (invert)
2992 		key[3] |= (1 << 29);
2993 	if (enable)
2994 		key[3] |= (1 << 28);
2995 
2996 	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2997 	tp_wr_indirect(adapter, addr++, key[0]);
2998 	tp_wr_indirect(adapter, addr++, mask[0]);
2999 	tp_wr_indirect(adapter, addr++, key[1]);
3000 	tp_wr_indirect(adapter, addr++, mask[1]);
3001 	tp_wr_indirect(adapter, addr++, key[2]);
3002 	tp_wr_indirect(adapter, addr++, mask[2]);
3003 	tp_wr_indirect(adapter, addr++, key[3]);
3004 	tp_wr_indirect(adapter, addr, mask[3]);
3005 	t3_read_reg(adapter, A_TP_PIO_DATA);
3006 }
3007 
3008 /**
3009  *	t3_config_sched - configure a HW traffic scheduler
3010  *	@adap: the adapter
3011  *	@kbps: target rate in Kbps
3012  *	@sched: the scheduler index
3013  *
 *	Configure a HW scheduler for the target rate.
3015  */
3016 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
3017 {
3018 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3019 	unsigned int clk = adap->params.vpd.cclk * 1000;
3020 	unsigned int selected_cpt = 0, selected_bpt = 0;
3021 
3022 	if (kbps > 0) {
3023 		kbps *= 125;	/* -> bytes */
3024 		for (cpt = 1; cpt <= 255; cpt++) {
3025 			tps = clk / cpt;
3026 			bpt = (kbps + tps / 2) / tps;
3027 			if (bpt > 0 && bpt <= 255) {
3028 				v = bpt * tps;
3029 				delta = v >= kbps ? v - kbps : kbps - v;
3030 				if (delta <= mindelta) {
3031 					mindelta = delta;
3032 					selected_cpt = cpt;
3033 					selected_bpt = bpt;
3034 				}
3035 			} else if (selected_cpt)
3036 				break;
3037 		}
3038 		if (!selected_cpt)
3039 			return -EINVAL;
3040 	}
3041 	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3042 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3043 	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3044 	if (sched & 1)
3045 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3046 	else
3047 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3048 	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3049 	return 0;
3050 }
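
/*
 * Worked sketch: targeting 10 Mbps (kbps = 10000) with a hypothetical
 * 200 MHz scheduler clock, kbps * 125 = 1250000 bytes/s; cpt = 160
 * gives tps = 2e8 / 160 = 1250000 ticks/s and bpt = 1 byte/tick, an
 * exact match (delta = 0).
 */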
3051 
3052 static int tp_init(struct adapter *adap, const struct tp_params *p)
3053 {
3054 	int busy = 0;
3055 
3056 	tp_config(adap, p);
3057 	t3_set_vlan_accel(adap, 3, 0);
3058 
3059 	if (is_offload(adap)) {
3060 		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3061 		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3062 		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3063 				       0, 1000, 5);
3064 		if (busy)
3065 			CH_ERR(adap, "TP initialization timed out\n");
3066 	}
3067 
3068 	if (!busy)
3069 		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3070 	return busy;
3071 }
3072 
3073 /*
3074  * Perform the bits of HW initialization that are dependent on the Tx
3075  * channels being used.
3076  */
3077 static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
3078 {
3079 	int i;
3080 
3081 	if (chan_map != 3) {                                 /* one channel */
3082 		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3083 		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3084 		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3085 			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3086 					      F_TPTXPORT1EN | F_PORT1ACTIVE));
3087 		t3_write_reg(adap, A_PM1_TX_CFG,
3088 			     chan_map == 1 ? 0xffffffff : 0);
3089 	} else {                                             /* two channels */
3090 		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3091 		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3092 		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3093 			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3094 		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3095 			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3096 			     F_ENFORCEPKT);
3097 		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3098 		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3099 		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3100 			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3101 		for (i = 0; i < 16; i++)
3102 			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3103 				     (i << 16) | 0x1010);
3104 	}
3105 }
3106 
3107 static int calibrate_xgm(struct adapter *adapter)
3108 {
3109 	if (uses_xaui(adapter)) {
3110 		unsigned int v, i;
3111 
3112 		for (i = 0; i < 5; ++i) {
3113 			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3114 			t3_read_reg(adapter, A_XGM_XAUI_IMP);
3115 			msleep(1);
3116 			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3117 			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3118 				t3_write_reg(adapter, A_XGM_XAUI_IMP,
3119 					     V_XAUIIMP(G_CALIMP(v) >> 2));
3120 				return 0;
3121 			}
3122 		}
3123 		CH_ERR(adapter, "MAC calibration failed\n");
3124 		return -1;
3125 	} else {
3126 		t3_write_reg(adapter, A_XGM_RGMII_IMP,
3127 			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3128 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3129 				 F_XGM_IMPSETUPDATE);
3130 	}
3131 	return 0;
3132 }
3133 
3134 static void calibrate_xgm_t3b(struct adapter *adapter)
3135 {
3136 	if (!uses_xaui(adapter)) {
3137 		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3138 			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3139 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3140 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3141 				 F_XGM_IMPSETUPDATE);
3142 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3143 				 0);
3144 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3145 		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3146 	}
3147 }
3148 
3149 struct mc7_timing_params {
3150 	unsigned char ActToPreDly;
3151 	unsigned char ActToRdWrDly;
3152 	unsigned char PreCyc;
3153 	unsigned char RefCyc[5];
3154 	unsigned char BkCyc;
3155 	unsigned char WrToRdDly;
3156 	unsigned char RdToWrDly;
3157 };
3158 
3159 /*
3160  * Write a value to a register and check that the write completed.  These
3161  * writes normally complete in a cycle or two, so one read should suffice.
3162  * The very first read exists to flush the posted write to the device.
3163  */
3164 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3165 {
3166 	t3_write_reg(adapter, addr, val);
3167 	t3_read_reg(adapter, addr);	/* flush */
3168 	if (!(t3_read_reg(adapter, addr) & F_BUSY))
3169 		return 0;
3170 	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3171 	return -EIO;
3172 }

static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	if (!mc7->size)
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
	msleep(1);

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/*
	 * mc7_clock arrives in kHz; convert it to the number of memory-clock
	 * cycles in one 7812.5 ns (7.8125 us) refresh interval.
	 */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;
	mc7_clock /= 1000000;
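	/*
	 * Worked example: a 200 MHz memory clock arrives as mc7_clock =
	 * 200000 (kHz); 200000 * 7812 + 200000 / 2 = 1562500000, and the
	 * division by 1000000 leaves 1562 cycles per 7.8125 us interval.
	 */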

	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}
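	/*
	 * Poll budget above: 50 attempts x 250 ms gives the BIST up to
	 * 12.5 s to sweep the whole part.
	 */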

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}

static void config_pcie(struct adapter *adap)
{
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val, devid;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

	pci_read_config_word(adap->pdev, PCI_DEVICE_ID, &devid);
	if (devid == 0x37) {
		pcie_capability_write_word(adap->pdev, PCI_EXP_DEVCTL,
					   val & ~PCI_EXP_DEVCTL_READRQ &
					   ~PCI_EXP_DEVCTL_PAYLOAD);
		pldsize = 0;
	}

	pcie_capability_read_word(adap->pdev, PCI_EXP_LNKCTL, &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & PCI_EXP_LNKCTL_ASPM_L0S)	/* check L0s enable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}
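
/*
 * Example of the table lookups above: an x8 link gives log2_width =
 * fls(8) - 1 = 3, and a 256-byte max payload encodes as pldsize = 1,
 * so acklat starts from ack_lat[3][1] = 107 and rpllmt from
 * rpl_tmr[3][1] = 321 before the fast-training adjustments are added.
 */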

/*
 * Initialize and configure T3 HW modules.  This performs the
 * initialization steps that need to be done once after a card is reset.
 * MAC and PHY initialization is handled separately whenever a port is enabled.
 *
 * fw_params are passed to FW and their value is platform dependent.  Only the
 * top 8 bits are available for use, the rest must be 0.
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (vpd->mclk) {
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;

		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	chan_init_hw(adapter, adapter->params.chan_map);
	t3_sge_init(adapter, &adapter->params.sge);
	t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	attempts = 100;
	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}
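	/*
	 * The loop above gives the embedded uP up to 100 * 20 ms = 2 s to
	 * clear A_CIM_HOST_ACC_DATA and signal that it has booted.
	 */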

	err = 0;
out_err:
	return err;
}

/**
 *	get_pci_mode - determine a card's PCI mode
 *	@adapter: the adapter
 *	@p: where to store the PCI settings
 *
 *	Determines a card's PCI mode and associated parameters, such as speed
 *	and width.
 */
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	static const unsigned short speed_map[] = { 33, 66, 100, 133 };
	u32 pci_mode;

	if (pci_is_pcie(adapter->pdev)) {
		u16 val;

		p->variant = PCI_VARIANT_PCIE;
		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
		p->width = (val >> 4) & 0x3f;
		return;
	}

	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
	p->width = (pci_mode & F_64BIT) ? 64 : 32;
	pci_mode = G_PCIXINITPAT(pci_mode);
	if (pci_mode == 0)
		p->variant = PCI_VARIANT_PCI;
	else if (pci_mode < 4)
		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
	else if (pci_mode < 8)
		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
	else
		p->variant = PCI_VARIANT_PCIX_266_MODE2;
}
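
/*
 * PCIe width decode example for get_pci_mode(): bits 9:4 of the Link
 * Status register hold the negotiated link width, so a LNKSTA value of
 * 0x0081 yields (0x0081 >> 4) & 0x3f = 8, i.e. an x8 link.
 */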

/**
 *	init_link_config - initialize a link's SW state
 *	@lc: structure holding the link state
 *	@caps: link capabilities of the current card
 *
 *	Initializes the SW state maintained for each link, including the link's
 *	capabilities and default speed/duplex/flow-control/autonegotiation
 *	settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = lc->speed = SPEED_INVALID;
	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising = lc->supported;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

/**
 *	mc7_calc_size - calculate MC7 memory size
 *	@cfg: the MC7 configuration
 *
 *	Calculates the size of an MC7 memory in bytes from the value of its
 *	configuration register.
 */
static unsigned int mc7_calc_size(u32 cfg)
{
	unsigned int width = G_WIDTH(cfg);
	unsigned int banks = !!(cfg & F_BKS) + 1;
	unsigned int org = !!(cfg & F_ORG) + 1;
	unsigned int density = G_DEN(cfg);
	unsigned int MBs = ((256 << density) * banks) / (org << width);

	return MBs << 20;
}
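
/*
 * Worked example for mc7_calc_size(): with density = 0, F_BKS and F_ORG
 * clear, and width = 0, MBs = (256 << 0) * 1 / (1 << 0) = 256, i.e.
 * 256 MB; setting F_BKS (two banks) doubles that to 512 MB.
 */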

static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
		     unsigned int base_addr, const char *name)
{
	u32 cfg;

	mc7->adapter = adapter;
	mc7->name = name;
	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
	mc7->width = G_WIDTH(cfg);
}
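
/*
 * Note: mc7->offset is stored relative to the PMRX instance, so the
 * PMRX-relative A_MC7_* register constants work unchanged for the PMTX
 * and CM controllers once the offset is added, as mc7_init() does.
 */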

static void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	u16 devid;

	mac->adapter = adapter;
	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &devid);

	if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
		index = 0;
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;

	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}

static void early_hw_init(struct adapter *adapter,
			  const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80 kHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);

	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
}
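
/*
 * Note on the A_XGM_PORT_CFG sequence above: the first write (flushed
 * by the read-back) enables the MAC clocks; rewriting the same value
 * with F_CLKDIVRESET_ set then releases the clock-divider reset (the
 * trailing underscore presumably marks an active-low bit).
 */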

/*
 * Reset the adapter.
 * Older PCIe cards lose their config space during reset, PCI-X
 * ones don't.
 */
int t3_reset_adapter(struct adapter *adapter)
{
	int i, save_and_restore_pcie =
	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
	uint16_t devid = 0;

	if (save_and_restore_pcie)
		pci_save_state(adapter->pdev);
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

	/*
	 * Delay to give the device time to reset fully.
	 * XXX The delay duration should be tuned.
	 */
	for (i = 0; i < 10; i++) {
		msleep(50);
		pci_read_config_word(adapter->pdev, 0x00, &devid);
		if (devid == 0x1425)
			break;
	}

	if (devid != 0x1425)
		return -1;

	if (save_and_restore_pcie)
		pci_restore_state(adapter->pdev);
	return 0;
}
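
/*
 * The poll in t3_reset_adapter() re-reads the PCI vendor ID (config
 * offset 0x00) every 50 ms, up to 500 ms total, until it reads back
 * Chelsio's vendor ID 0x1425, i.e. until config space is reachable
 * again after the reset.
 */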

static int init_parity(struct adapter *adap)
{
	int i, err, addr;

	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	for (err = i = 0; !err && i < 16; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0xfff0; !err && i <= 0xffff; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0; !err && i < SGE_QSETS; i++)
		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
	if (err)
		return err;

	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
	for (i = 0; i < 4; i++)
		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
				     F_IBQDBGWR | V_IBQDBGQID(i) |
				     V_IBQDBGADDR(addr));
			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
					      F_IBQDBGBUSY, 0, 2, 1);
			if (err)
				return err;
		}
	return 0;
}
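
/*
 * By the look of it, init_parity() exists to seed otherwise
 * uninitialized on-chip RAMs so that later reads don't trip parity
 * errors: it zeroes a slice of egress and response queue contexts, then
 * writes zeros through the CIM IBQ debug interface to every address of
 * each of the four inbound queues.
 */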

/*
 * Initialize adapter SW state for the various HW modules, set initial values
 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
 * interface.
 */
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
		    int reset)
{
	int ret;
	unsigned int i, j = -1;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports0 + ai->nports1;
	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	/*
	 * We used to only run the "adapter check task" once a second if
	 * we had PHYs which didn't support interrupts (we would check
	 * their link status once a second).  Now we check other conditions
	 * in that routine which could potentially impose a very high
	 * interrupt load on the system.  As such, we now always scan the
	 * adapter state once a second ...
	 */
	adapter->params.linkpoll_period = 10;
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		if (!pti->phy_prep) {
			CH_ALERT(adapter, "Invalid port type index %d\n",
				 adapter->params.vpd.port_type[j]);
			return -EINVAL;
		}

		p->phy.mdio.dev = adapter->port[i];
		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				    ai->mdio_ops);
		if (ret)
			return ret;
		mac_prep(&p->mac, adapter, j);

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card.  A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
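		/*
		 * Example with a hypothetical base of 00:07:43:12:34:56:
		 * port 0 gets ...:56 and port 1 gets ...:57.  Only the low
		 * octet is adjusted; there is no carry into octet 4.
		 */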
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr, ETH_ALEN);
		init_link_config(&p->link_config, p->phy.caps);
		p->phy.ops->power_down(&p->phy, 1);

		/*
		 * If the PHY doesn't support interrupts for link status
		 * changes, schedule a scan of the adapter links at least
		 * once a second.
		 */
		if (!(p->phy.caps & SUPPORTED_IRQ) &&
		    adapter->params.linkpoll_period > 10)
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}

void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}

int t3_replay_prep_adapter(struct adapter *adapter)
{
	const struct adapter_info *ai = adapter->params.info;
	unsigned int i, j = -1;
	int ret;

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
		if (ret)
			return ret;
		p->phy.ops->power_down(&p->phy, 1);
	}

	return 0;
}