/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
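
/*
 * Illustrative sketch only (SOME_STATUS_REG and BUSY are hypothetical
 * names, not registers defined by this driver): a caller typically polls
 * a single status bit until it reaches the desired polarity, e.g.
 *
 *	ret = t4_wait_op_done(adap, SOME_STATUS_REG, BUSY, 0, 10, 5);
 *
 * which polls up to 10 times, 5 usecs apart, for BUSY to clear, and
 * returns -EAGAIN if it never does.
 */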

/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
}

/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals,
		      unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
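
/*
 * Illustrative sketch only (ADDR_REG and DATA_REG stand in for a real
 * address/data register pair): reading four consecutive indirect
 * registers starting at index 0x10 looks like
 *
 *	u32 vals[4];
 *
 *	t4_read_indirect(adap, ADDR_REG, DATA_REG, vals, 4, 0x10);
 *
 * t4_write_indirect() writes a block the same way in the other direction.
 */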

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 * to respond.  @sleep_ok determines whether we may sleep while awaiting
 * the response.  If sleeping is allowed we use progressive backoff
 * otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;
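
	/*
	 * We own the mailbox: copy the command in as 64-bit flits, then
	 * flag the message valid and pass ownership to the firmware.
	 */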
	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);

			if (FW_CMD_RETVAL_GET((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_GET((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}

/**
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @idx: which MC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it is
 * assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
	u32 mc_bist_status_rdata, mc_bist_data_pattern;

	if (is_t4(adap->chip)) {
		mc_bist_cmd = MC_BIST_CMD;
		mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
		mc_bist_cmd_len = MC_BIST_CMD_LEN;
		mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
		mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len, 64);
	t4_write_reg(adap, mc_bist_data_pattern, 0xc);
	t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}
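
/*
 * Illustrative use of the backdoor read above: fetching the 64-byte line
 * containing MC 0 address 0x12345 (the low 6 bits select the byte within
 * the line):
 *
 *	__be32 line[16];
 *	u64 ecc;
 *
 *	ret = t4_mc_read(adap, 0, 0x12345, line, &ecc);
 *
 * On success the requested byte is at ((u8 *)line)[0x12345 & 0x3f].
 */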

/**
 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it is
 * assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;

	if (is_t4(adap->chip)) {
		edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN, idx);
		edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA, idx);
	} else {
		edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern =
			EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
		edc_bist_status_rdata =
			EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
	}

	if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/*
 * t4_mem_win_rw - read/write memory through PCIE memory window
 * @adap: the adapter
 * @addr: address of first byte requested
 * @data: MEMWIN0_APERTURE bytes of data containing the requested address
 * @dir: direction of transfer 1 => read, 0 => write
 *
 * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
 * MEMWIN0_APERTURE-byte-aligned address that covers the requested
 * address @addr.
 */
static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
{
	int i;
	u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn);

	/*
	 * Setup offset into PCIE memory window.  Address must be a
	 * MEMWIN0_APERTURE-byte-aligned address.  (Read back MA register to
	 * ensure that changes propagate before we attempt to use the new
	 * values.)
	 */
	t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
		     (addr & ~(MEMWIN0_APERTURE - 1)) | win_pf);
	t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);

	/* Collect data 4 bytes at a time up to MEMWIN0_APERTURE */
	for (i = 0; i < MEMWIN0_APERTURE; i += 0x4) {
		if (dir)
			*data++ = (__force __be32) t4_read_reg(adap,
							(MEMWIN0_BASE + i));
		else
			t4_write_reg(adap, (MEMWIN0_BASE + i),
				     (__force u32) *data++);
	}

	return 0;
}

/**
 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 * @adap: the adapter
 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 * @addr: address within indicated memory type
 * @len: amount of memory to transfer
 * @buf: host memory buffer
 * @dir: direction of transfer 1 => read, 0 => write
 *
 * Reads/writes an [almost] arbitrary memory region in the firmware: the
 * firmware memory address, length and host buffer must be aligned on
 * 32-bit boundaries.  The memory is transferred as a raw byte sequence
 * from/to the firmware's memory.  If this memory contains data
 * structures which contain multi-byte integers, it's the caller's
 * responsibility to perform appropriate byte order conversions.
 */
static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
			__be32 *buf, int dir)
{
	u32 pos, start, end, offset, memoffset;
	u32 edc_size, mc_size;
	int ret = 0;
	__be32 *data;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	data = vmalloc(MEMWIN0_APERTURE);
	if (!data)
		return -ENOMEM;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- T4
	 * MEM_MC0  = 2 -- For T5
	 * MEM_MC1  = 3 -- For T5
	 */
	edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
	if (mtype != MEM_MC1)
		memoffset = (mtype * (edc_size * 1024 * 1024));
	else {
		mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
						       MA_EXT_MEMORY_BAR));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/*
	 * The underlying EDC/MC read routines read MEMWIN0_APERTURE bytes
	 * at a time so we need to round down the start and round up the end.
	 * We'll start copying out of the first line at (addr - start) a word
	 * at a time.
	 */
	start = addr & ~(MEMWIN0_APERTURE-1);
	end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
	offset = (addr - start)/sizeof(__be32);

	for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {

		/*
		 * If we're writing, copy the data from the caller's memory
		 * buffer
		 */
		if (!dir) {
			/*
			 * If we're doing a partial write, then we need to do
			 * a read-modify-write ...
			 */
			if (offset || len < MEMWIN0_APERTURE) {
				ret = t4_mem_win_rw(adap, pos, data, 1);
				if (ret)
					break;
			}
			while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
			       len > 0) {
				data[offset++] = *buf++;
				len -= sizeof(__be32);
			}
		}

		/*
		 * Transfer a block of memory and bail if there's an error.
		 */
		ret = t4_mem_win_rw(adap, pos, data, dir);
		if (ret)
			break;

		/*
		 * If we're reading, copy the data into the caller's memory
		 * buffer.
		 */
		if (dir)
			while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
			       len > 0) {
				*buf++ = data[offset++];
				len -= sizeof(__be32);
			}
	}

	vfree(data);
	return ret;
}
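
/*
 * Worked example for the rounding above, assuming a 2KB MEMWIN0_APERTURE:
 * an 8-byte write to EDC0 address 0x804 gives start = 0x800, end = 0x1000
 * and offset = 1, so a single aperture-sized read-modify-write is done
 * with the caller's data landing one 32-bit word into the window buffer.
 */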

int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
		    __be32 *buf)
{
	return t4_memory_rw(adap, mtype, addr, len, buf, 0);
}

#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024

/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	unsigned int v = enable ? 0xc : 0;
	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
	return ret < 0 ? ret : 0;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int i, ret, addr;
	int ec, sn;
	u8 *vpd, csum;
	unsigned int vpdr_len, kw_offset, id_len;

	vpd = vmalloc(VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
	if (ret < 0)
		goto out;
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
	if (ret < 0)
		goto out;

	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
		ret = -EINVAL;
		goto out;
	}

	id_len = pci_vpd_lrdt_size(vpd);
	if (id_len > ID_LEN)
		id_len = ID_LEN;

	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
		ret = -EINVAL;
		goto out;
	}

	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
	if (vpdr_len + kw_offset > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		ret = -EINVAL;
		goto out;
	}

#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		      FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
			      1, &cclk_param, &cclk_val);

out:
	vfree(vpd);
	if (ret)
		return ret;
	p->cclk = cclk_val;

	return 0;
}
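
/*
 * Note on the "RV" check in get_vpd_params() above: the RV keyword byte
 * is defined so that all VPD bytes from the start of the buffer up to
 * and including it sum to zero modulo 256, hence any non-zero csum
 * indicates a corrupted VPD EEPROM.
 */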

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 512 * 1024,
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_DATA, val);
	t4_write_reg(adapter, SF_OP, lock |
		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
	return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) (htonl(*data));
	}
	return 0;
}

/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
	return ret;
}
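
/*
 * Example of the same-page constraint in t4_write_flash() above: with a
 * 256-byte page, a 100-byte write starting at page offset 0xf0 would
 * spill into the next page and is rejected with -EINVAL; callers such as
 * t4_load_cfg() and t4_load_fw() below stay within pages by writing
 * SF_PAGE_SIZE chunks at page-aligned addresses.
 */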

/**
 * get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
static int get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, adapter->params.sf_fw_start +
			     offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
}

/**
 * get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
static int get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, adapter->params.sf_fw_start +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}

/**
 * t4_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if there's an exact match, a negative error if the version could not
 * be read or there's a major version mismatch, and a positive value if
 * the expected major version is found but there's a minor version
 * mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	u32 api_vers[2];
	int ret, major, minor, micro;
	int exp_major, exp_minor, exp_micro;

	ret = get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = get_tp_version(adapter, &adapter->params.tp_vers);
	if (!ret)
		ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
				    offsetof(struct fw_hdr, intfver_nic),
				    2, api_vers, 1);
	if (ret)
		return ret;

	major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
	minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
	micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);

	switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
	case CHELSIO_T4:
		exp_major = FW_VERSION_MAJOR;
		exp_minor = FW_VERSION_MINOR;
		exp_micro = FW_VERSION_MICRO;
		break;
	case CHELSIO_T5:
		exp_major = FW_VERSION_MAJOR_T5;
		exp_minor = FW_VERSION_MINOR_T5;
		exp_micro = FW_VERSION_MICRO_T5;
		break;
	default:
		dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n",
			adapter->chip);
		return -EINVAL;
	}

	memcpy(adapter->params.api_vers, api_vers,
	       sizeof(adapter->params.api_vers));

	if (major != exp_major) {            /* major mismatch - fail */
		dev_err(adapter->pdev_dev,
			"card FW has major version %u, driver wants %u\n",
			major, exp_major);
		return -EINVAL;
	}

	if (minor == exp_minor && micro == exp_micro)
		return 0;                    /* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}
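
/*
 * For reference (hypothetical values, shown only to make the checks above
 * concrete): a fw_vers word whose FW_HDR_FW_VER_*_GET accessors decode to
 * major 1, minor 6, micro 2 matches a driver expecting 1.6.2 exactly
 * (return 0), returns 1 for a driver expecting e.g. 1.6.4, and fails with
 * -EINVAL for a driver expecting major 2.
 */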

/**
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 * t4_flash_cfg_addr - return the address of the flash configuration file
 * @adapter: the adapter
 *
 * Return the address within the flash where the Firmware Configuration
 * File is stored.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
	if (adapter->params.sf_size == 0x100000)
		return FLASH_FPGA_CFG_START;
	else
		return FLASH_CFG_START;
}

/**
 * t4_load_cfg - download config file
 * @adap: the adapter
 * @cfg_data: the cfg text file to write
 * @size: text file size
 *
 * Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	addr = t4_flash_cfg_addr(adap);
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
			FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,    /* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		dev_err(adap->pdev_dev, "config file %s failed %d\n",
			(size == 0 ? "clear" : "download"), ret);
	return ret;
}
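
/*
 * Note the erase-before-write pattern in t4_load_cfg() above: NOR flash
 * programming can only clear bits, so the covering sectors are erased
 * first; calling t4_load_cfg() with size == 0 therefore just erases the
 * on-adapter config region.
 */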

/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);    /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)

/**
 * t4_link_start - apply link configuration to MAC/PHY
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_restart_aneg - restart autonegotiation
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 *
 * Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
	unsigned int mask;            /* bits to check in interrupt status */
	const char *msg;              /* message to print or NULL */
	short stat_idx;               /* stat counter to increment or -1 */
	unsigned short fatal;         /* whether the condition reported is fatal */
	int_handler_t int_handler;    /* platform-specific int handler */
};
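
/*
 * Tables of struct intr_info are terminated by an all-zero entry; a
 * minimal sketch for a hypothetical module with a single fatal condition
 * (FOO_PARERR and FOO_INT_CAUSE are made-up names):
 *
 *	static const struct intr_info foo_intr_info[] = {
 *		{ FOO_PARERR, "FOO parity error", -1, 1 },
 *		{ 0 }
 *	};
 *
 *	if (t4_handle_intr_status(adap, FOO_INT_CAUSE, foo_intr_info))
 *		t4_fatal_err(adap);
 *
 * This is exactly the pattern the module handlers below follow.
 */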

/**
 * t4_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally emitting a warning or alert message.  The table is terminated
 * by an entry specifying mask 0.  Returns the number of fatal interrupt
 * conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)                     /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info sysbus_intr_info[] = {
		{ RNPP, "RXNP array parity error", -1, 1 },
		{ RPCP, "RXPC array parity error", -1, 1 },
		{ RCIP, "RXCIF array parity error", -1, 1 },
		{ RCCP, "Rx completions control array parity error", -1, 1 },
		{ RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info[] = {
		{ TPCP, "TXPC array parity error", -1, 1 },
		{ TNPP, "TXNP array parity error", -1, 1 },
		{ TFTP, "TXFT array parity error", -1, 1 },
		{ TCAP, "TXCA array parity error", -1, 1 },
		{ TCIP, "TXCIF array parity error", -1, 1 },
		{ RCAP, "RXCA array parity error", -1, 1 },
		{ OTDD, "outbound request TLP discarded", -1, 1 },
		{ RDPE, "Rx data parity error", -1, 1 },
		{ TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_intr_info[] = {
		{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR, "PCI FID parity error", -1, 1 },
		{ INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ PCIESINT, "PCI core secondary fault", -1, 1 },
		{ PCIEPINT, "PCI core primary fault", -1, 1 },
		{ UNXSPLCPLERR, "PCI unexpected split completion error",
		  -1, 0 },
		{ 0 }
	};

	static struct intr_info t5_pcie_intr_info[] = {
		{ MSTGRPPERR, "Master Response Read Queue parity error",
		  -1, 1 },
		{ MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
		{ MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ DREQWRPERR, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR, "PCI FID parity error", -1, 1 },
		{ VFIDPERR, "PCI INTx clear parity error", -1, 1 },
		{ MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
		{ IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
		{ TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ READRSPERR, "Outbound read error", -1, 0 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter,
				    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				    sysbus_intr_info) +
	      t4_handle_intr_status(adapter,
				    PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				    pcie_port_intr_info) +
	      t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
				    is_t4(adapter->chip) ?
				    pcie_intr_info : t5_pcie_intr_info);

	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;

	static const struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
	if (v) {
		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
			  (unsigned long long)v);
		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
	}

	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
	    v != 0)
		t4_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
		{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT, "CIM illegal write", -1, 1 },
		{ ILLRDINT, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error",
		  -1, 1 },
		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error",
		  -1, 1 },
		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
		t4_fatal_err(adapter);
}

/*
 * LE interrupt handler.
 */
static void le_intr_handler(struct adapter *adap)
{
	static const struct intr_info le_intr_info[] = {
		{ LIPMISS, "LE LIP miss", -1, 0 },
		{ LIP0, "LE 0 LIP error", -1, 0 },
		{ PARITYERR, "LE parity error", -1, 1 },
		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
		t4_fatal_err(adap);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
		{ BUBBLE, "MPS Tx underflow", -1, 1 },
		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
		     RXINT | TXINT | STATINT);
	t4_read_reg(adapter, MPS_INT_CAUSE);                    /* flush */
	if (fat)
		t4_fatal_err(adapter);
}

#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else {
		addr = MC_INT_CAUSE;
		cnt_addr = MC_ECC_STATUS;
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));

		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	t4_write_reg(adapter, addr, v);
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}

/*
 * MA interrupt handler.
 */
static void ma_intr_handler(struct adapter *adap)
{
	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);

	if (status & MEM_PERR_INT_CAUSE)
		dev_alert(adap->pdev_dev,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
	if (status & MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  MEM_WRAP_CLIENT_NUM_GET(v),
			  MEM_WRAP_ADDRESS_GET(v) << 4);
	}
	t4_write_reg(adap, MA_INT_CAUSE, status);
	t4_fatal_err(adap);
}

/*
 * SMB interrupt handler.
 */
static void smb_intr_handler(struct adapter *adap)
{
	static const struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
		t4_fatal_err(adap);
}

/*
 * NC-SI interrupt handler.
 */
static void ncsi_intr_handler(struct adapter *adap)
{
	static const struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}
1771 */ 1772 static void xgmac_intr_handler(struct adapter *adap, int port) 1773 { 1774 u32 v, int_cause_reg; 1775 1776 if (is_t4(adap->chip)) 1777 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE); 1778 else 1779 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE); 1780 1781 v = t4_read_reg(adap, int_cause_reg); 1782 1783 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; 1784 if (!v) 1785 return; 1786 1787 if (v & TXFIFO_PRTY_ERR) 1788 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n", 1789 port); 1790 if (v & RXFIFO_PRTY_ERR) 1791 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n", 1792 port); 1793 t4_write_reg(adap, int_cause_reg, v); /* clear the chip-appropriate cause register */ 1794 t4_fatal_err(adap); 1795 } 1796 1797 /* 1798 * PL interrupt handler. 1799 */ 1800 static void pl_intr_handler(struct adapter *adap) 1801 { 1802 static const struct intr_info pl_intr_info[] = { 1803 { FATALPERR, "T4 fatal parity error", -1, 1 }, 1804 { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 1805 { 0 } 1806 }; 1807 1808 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info)) 1809 t4_fatal_err(adap); 1810 } 1811 1812 #define PF_INTR_MASK (PFSW) 1813 #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ 1814 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \ 1815 CPL_SWITCH | SGE | ULP_TX) 1816 1817 /** 1818 * t4_slow_intr_handler - control path interrupt handler 1819 * @adapter: the adapter 1820 * 1821 * T4 interrupt handler for non-data global interrupt events, e.g., errors. 1822 * The designation 'slow' is because it involves register reads, while 1823 * data interrupts typically don't involve any MMIOs. 1824 */ 1825 int t4_slow_intr_handler(struct adapter *adapter) 1826 { 1827 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE); 1828 1829 if (!(cause & GLBL_INTR_MASK)) 1830 return 0; 1831 if (cause & CIM) 1832 cim_intr_handler(adapter); 1833 if (cause & MPS) 1834 mps_intr_handler(adapter); 1835 if (cause & NCSI) 1836 ncsi_intr_handler(adapter); 1837 if (cause & PL) 1838 pl_intr_handler(adapter); 1839 if (cause & SMB) 1840 smb_intr_handler(adapter); 1841 if (cause & XGMAC0) 1842 xgmac_intr_handler(adapter, 0); 1843 if (cause & XGMAC1) 1844 xgmac_intr_handler(adapter, 1); 1845 if (cause & XGMAC_KR0) 1846 xgmac_intr_handler(adapter, 2); 1847 if (cause & XGMAC_KR1) 1848 xgmac_intr_handler(adapter, 3); 1849 if (cause & PCIE) 1850 pcie_intr_handler(adapter); 1851 if (cause & MC) 1852 mem_intr_handler(adapter, MEM_MC); 1853 if (cause & EDC0) 1854 mem_intr_handler(adapter, MEM_EDC0); 1855 if (cause & EDC1) 1856 mem_intr_handler(adapter, MEM_EDC1); 1857 if (cause & LE) 1858 le_intr_handler(adapter); 1859 if (cause & TP) 1860 tp_intr_handler(adapter); 1861 if (cause & MA) 1862 ma_intr_handler(adapter); 1863 if (cause & PM_TX) 1864 pmtx_intr_handler(adapter); 1865 if (cause & PM_RX) 1866 pmrx_intr_handler(adapter); 1867 if (cause & ULP_RX) 1868 ulprx_intr_handler(adapter); 1869 if (cause & CPL_SWITCH) 1870 cplsw_intr_handler(adapter); 1871 if (cause & SGE) 1872 sge_intr_handler(adapter); 1873 if (cause & ULP_TX) 1874 ulptx_intr_handler(adapter); 1875 1876 /* Clear the interrupts just processed for which we are the master.
*/ 1877 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK); 1878 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ 1879 return 1; 1880 } 1881 1882 /** 1883 * t4_intr_enable - enable interrupts 1884 * @adapter: the adapter whose interrupts should be enabled 1885 * 1886 * Enable PF-specific interrupts for the calling function and the top-level 1887 * interrupt concentrator for global interrupts. Interrupts are already 1888 * enabled at each module; here we just enable the roots of the interrupt 1889 * hierarchies. 1890 * 1891 * Note: this function should be called only when the driver manages 1892 * non PF-specific interrupts from the various HW modules. Only one PCI 1893 * function at a time should be doing this. 1894 */ 1895 void t4_intr_enable(struct adapter *adapter) 1896 { 1897 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); 1898 1899 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE | 1900 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 | 1901 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 | 1902 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | 1903 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | 1904 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | 1905 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | 1906 DBFIFO_HP_INT | DBFIFO_LP_INT | 1907 EGRESS_SIZE_ERR); 1908 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); 1909 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); 1910 } 1911 1912 /** 1913 * t4_intr_disable - disable interrupts 1914 * @adapter: the adapter whose interrupts should be disabled 1915 * 1916 * Disable interrupts. We only disable the top-level interrupt 1917 * concentrators. The caller must be a PCI function managing global 1918 * interrupts. 1919 */ 1920 void t4_intr_disable(struct adapter *adapter) 1921 { 1922 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); 1923 1924 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0); 1925 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0); 1926 } 1927 1928 /** 1929 * hash_mac_addr - return the hash value of a MAC address 1930 * @addr: the 48-bit Ethernet MAC address 1931 * 1932 * Hashes a MAC address according to the hash function used by HW inexact 1933 * (hash) address matching. 1934 */ 1935 static int hash_mac_addr(const u8 *addr) 1936 { 1937 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; 1938 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; 1939 a ^= b; 1940 a ^= (a >> 12); 1941 a ^= (a >> 6); 1942 return a & 0x3f; 1943 } 1944 1945 /** 1946 * t4_config_rss_range - configure a portion of the RSS mapping table 1947 * @adapter: the adapter 1948 * @mbox: mbox to use for the FW command 1949 * @viid: virtual interface whose RSS subtable is to be written 1950 * @start: start entry in the table to write 1951 * @n: how many table entries to write 1952 * @rspq: values for the response queue lookup table 1953 * @nrspq: number of values in @rspq 1954 * 1955 * Programs the selected part of the VI's RSS mapping table with the 1956 * provided values. If @nrspq < @n the supplied values are used repeatedly 1957 * until the full table range is populated. 1958 * 1959 * The caller must ensure the values in @rspq are in the range allowed for 1960 * @viid.
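* For example, writing @n = 8 entries from a 4-entry @rspq programs the repeating pattern rspq[0], rspq[1], rspq[2], rspq[3], rspq[0], ...; ranges larger than 32 entries are simply split across multiple firmware commands (see the loop below).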
1961 */ 1962 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, 1963 int start, int n, const u16 *rspq, unsigned int nrspq) 1964 { 1965 int ret; 1966 const u16 *rsp = rspq; 1967 const u16 *rsp_end = rspq + nrspq; 1968 struct fw_rss_ind_tbl_cmd cmd; 1969 1970 memset(&cmd, 0, sizeof(cmd)); 1971 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | 1972 FW_CMD_REQUEST | FW_CMD_WRITE | 1973 FW_RSS_IND_TBL_CMD_VIID(viid)); 1974 cmd.retval_len16 = htonl(FW_LEN16(cmd)); 1975 1976 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */ 1977 while (n > 0) { 1978 int nq = min(n, 32); 1979 __be32 *qp = &cmd.iq0_to_iq2; 1980 1981 cmd.niqid = htons(nq); 1982 cmd.startidx = htons(start); 1983 1984 start += nq; 1985 n -= nq; 1986 1987 while (nq > 0) { 1988 unsigned int v; 1989 1990 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp); 1991 if (++rsp >= rsp_end) 1992 rsp = rspq; 1993 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp); 1994 if (++rsp >= rsp_end) 1995 rsp = rspq; 1996 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp); 1997 if (++rsp >= rsp_end) 1998 rsp = rspq; 1999 2000 *qp++ = htonl(v); 2001 nq -= 3; 2002 } 2003 2004 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); 2005 if (ret) 2006 return ret; 2007 } 2008 return 0; 2009 } 2010 2011 /** 2012 * t4_config_glbl_rss - configure the global RSS mode 2013 * @adapter: the adapter 2014 * @mbox: mbox to use for the FW command 2015 * @mode: global RSS mode 2016 * @flags: mode-specific flags 2017 * 2018 * Sets the global RSS mode. 2019 */ 2020 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, 2021 unsigned int flags) 2022 { 2023 struct fw_rss_glb_config_cmd c; 2024 2025 memset(&c, 0, sizeof(c)); 2026 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | 2027 FW_CMD_REQUEST | FW_CMD_WRITE); 2028 c.retval_len16 = htonl(FW_LEN16(c)); 2029 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { 2030 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); 2031 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { 2032 c.u.basicvirtual.mode_pkd = 2033 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); 2034 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); 2035 } else 2036 return -EINVAL; 2037 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); 2038 } 2039 2040 /** 2041 * t4_tp_get_tcp_stats - read TP's TCP MIB counters 2042 * @adap: the adapter 2043 * @v4: holds the TCP/IP counter values 2044 * @v6: holds the TCP/IPv6 counter values 2045 * 2046 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. 2047 * Either @v4 or @v6 may be %NULL to skip the corresponding stats. 
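* Each 64-bit counter is assembled from a HI/LO pair of 32-bit MIB registers as ((u64)HI << 32) | LO; see the STAT64() helper below.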
2048 */ 2049 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, 2050 struct tp_tcp_stats *v6) 2051 { 2052 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1]; 2053 2054 #define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST) 2055 #define STAT(x) val[STAT_IDX(x)] 2056 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) 2057 2058 if (v4) { 2059 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, 2060 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST); 2061 v4->tcpOutRsts = STAT(OUT_RST); 2062 v4->tcpInSegs = STAT64(IN_SEG); 2063 v4->tcpOutSegs = STAT64(OUT_SEG); 2064 v4->tcpRetransSegs = STAT64(RXT_SEG); 2065 } 2066 if (v6) { 2067 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, 2068 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST); 2069 v6->tcpOutRsts = STAT(OUT_RST); 2070 v6->tcpInSegs = STAT64(IN_SEG); 2071 v6->tcpOutSegs = STAT64(OUT_SEG); 2072 v6->tcpRetransSegs = STAT64(RXT_SEG); 2073 } 2074 #undef STAT64 2075 #undef STAT 2076 #undef STAT_IDX 2077 } 2078 2079 /** 2080 * t4_read_mtu_tbl - returns the values in the HW path MTU table 2081 * @adap: the adapter 2082 * @mtus: where to store the MTU values 2083 * @mtu_log: where to store the MTU base-2 log (may be %NULL) 2084 * 2085 * Reads the HW path MTU table. 2086 */ 2087 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) 2088 { 2089 u32 v; 2090 int i; 2091 2092 for (i = 0; i < NMTUS; ++i) { 2093 t4_write_reg(adap, TP_MTU_TABLE, 2094 MTUINDEX(0xff) | MTUVALUE(i)); 2095 v = t4_read_reg(adap, TP_MTU_TABLE); 2096 mtus[i] = MTUVALUE_GET(v); 2097 if (mtu_log) 2098 mtu_log[i] = MTUWIDTH_GET(v); 2099 } 2100 } 2101 2102 /** 2103 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register 2104 * @adap: the adapter 2105 * @addr: the indirect TP register address 2106 * @mask: specifies the field within the register to modify 2107 * @val: new value for the field 2108 * 2109 * Sets a field of an indirect TP register to the given value. 2110 */ 2111 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, 2112 unsigned int mask, unsigned int val) 2113 { 2114 t4_write_reg(adap, TP_PIO_ADDR, addr); 2115 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask; 2116 t4_write_reg(adap, TP_PIO_DATA, val); 2117 } 2118 2119 /** 2120 * init_cong_ctrl - initialize congestion control parameters 2121 * @a: the alpha values for congestion control 2122 * @b: the beta values for congestion control 2123 * 2124 * Initialize the congestion control parameters. 
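* Each array holds one entry per congestion control window (NCCTRL_WIN in all); t4_load_mtus() later combines these with the MTU table to derive the per-window additive increments.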
2125 */ 2126 static void init_cong_ctrl(unsigned short *a, unsigned short *b) 2127 { 2128 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; 2129 a[9] = 2; 2130 a[10] = 3; 2131 a[11] = 4; 2132 a[12] = 5; 2133 a[13] = 6; 2134 a[14] = 7; 2135 a[15] = 8; 2136 a[16] = 9; 2137 a[17] = 10; 2138 a[18] = 14; 2139 a[19] = 17; 2140 a[20] = 21; 2141 a[21] = 25; 2142 a[22] = 30; 2143 a[23] = 35; 2144 a[24] = 45; 2145 a[25] = 60; 2146 a[26] = 80; 2147 a[27] = 100; 2148 a[28] = 200; 2149 a[29] = 300; 2150 a[30] = 400; 2151 a[31] = 500; 2152 2153 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; 2154 b[9] = b[10] = 1; 2155 b[11] = b[12] = 2; 2156 b[13] = b[14] = b[15] = b[16] = 3; 2157 b[17] = b[18] = b[19] = b[20] = b[21] = 4; 2158 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; 2159 b[28] = b[29] = 6; 2160 b[30] = b[31] = 7; 2161 } 2162 2163 /* The minimum additive increment value for the congestion control table */ 2164 #define CC_MIN_INCR 2U 2165 2166 /** 2167 * t4_load_mtus - write the MTU and congestion control HW tables 2168 * @adap: the adapter 2169 * @mtus: the values for the MTU table 2170 * @alpha: the values for the congestion control alpha parameter 2171 * @beta: the values for the congestion control beta parameter 2172 * 2173 * Write the HW MTU table with the supplied MTUs and the high-speed 2174 * congestion control table with the supplied alpha, beta, and MTUs. 2175 * We write the two tables together because the additive increments 2176 * depend on the MTUs. 2177 */ 2178 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, 2179 const unsigned short *alpha, const unsigned short *beta) 2180 { 2181 static const unsigned int avg_pkts[NCCTRL_WIN] = { 2182 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, 2183 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, 2184 28672, 40960, 57344, 81920, 114688, 163840, 229376 2185 }; 2186 2187 unsigned int i, w; 2188 2189 for (i = 0; i < NMTUS; ++i) { 2190 unsigned int mtu = mtus[i]; 2191 unsigned int log2 = fls(mtu); 2192 2193 if (!(mtu & ((1 << log2) >> 2))) /* round */ 2194 log2--; 2195 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) | 2196 MTUWIDTH(log2) | MTUVALUE(mtu)); 2197 2198 for (w = 0; w < NCCTRL_WIN; ++w) { 2199 unsigned int inc; 2200 2201 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], 2202 CC_MIN_INCR); 2203 2204 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) | 2205 (w << 16) | (beta[w] << 13) | inc); 2206 } 2207 } 2208 } 2209 2210 /** 2211 * get_mps_bg_map - return the buffer groups associated with a port 2212 * @adap: the adapter 2213 * @idx: the port index 2214 * 2215 * Returns a bitmap indicating which MPS buffer groups are associated 2216 * with the given port. Bit i is set if buffer group i is used by the 2217 * port. 2218 */ 2219 static unsigned int get_mps_bg_map(struct adapter *adap, int idx) 2220 { 2221 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL)); 2222 2223 if (n == 0) 2224 return idx == 0 ? 0xf : 0; 2225 if (n == 1) 2226 return idx < 2 ? (3 << (2 * idx)) : 0; 2227 return 1 << idx; 2228 } 2229 2230 /** 2231 * t4_get_port_stats - collect port statistics 2232 * @adap: the adapter 2233 * @idx: the port index 2234 * @p: the stats structure to fill 2235 * 2236 * Collect statistics related to the given port from HW. 2237 */ 2238 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) 2239 { 2240 u32 bgmap = get_mps_bg_map(adap, idx); 2241 2242 #define GET_STAT(name) \ 2243 t4_read_reg64(adap, \ 2244 (is_t4(adap->chip) ? 
PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ 2245 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L))) 2246 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) 2247 2248 p->tx_octets = GET_STAT(TX_PORT_BYTES); 2249 p->tx_frames = GET_STAT(TX_PORT_FRAMES); 2250 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); 2251 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); 2252 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); 2253 p->tx_error_frames = GET_STAT(TX_PORT_ERROR); 2254 p->tx_frames_64 = GET_STAT(TX_PORT_64B); 2255 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); 2256 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); 2257 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); 2258 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); 2259 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); 2260 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); 2261 p->tx_drop = GET_STAT(TX_PORT_DROP); 2262 p->tx_pause = GET_STAT(TX_PORT_PAUSE); 2263 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); 2264 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); 2265 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); 2266 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); 2267 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); 2268 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); 2269 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); 2270 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); 2271 2272 p->rx_octets = GET_STAT(RX_PORT_BYTES); 2273 p->rx_frames = GET_STAT(RX_PORT_FRAMES); 2274 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); 2275 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); 2276 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); 2277 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); 2278 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); 2279 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); 2280 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); 2281 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); 2282 p->rx_runt = GET_STAT(RX_PORT_LESS_64B); 2283 p->rx_frames_64 = GET_STAT(RX_PORT_64B); 2284 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); 2285 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); 2286 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); 2287 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); 2288 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); 2289 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); 2290 p->rx_pause = GET_STAT(RX_PORT_PAUSE); 2291 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); 2292 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); 2293 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); 2294 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); 2295 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); 2296 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); 2297 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); 2298 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); 2299 2300 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; 2301 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; 2302 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; 2303 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; 2304 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; 2305 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; 2306 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; 2307 p->rx_trunc3 = (bgmap & 8) ? 
GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; 2308 2309 #undef GET_STAT 2310 #undef GET_STAT_COM 2311 } 2312 2313 /** 2314 * t4_wol_magic_enable - enable/disable magic packet WoL 2315 * @adap: the adapter 2316 * @port: the physical port index 2317 * @addr: MAC address expected in magic packets, %NULL to disable 2318 * 2319 * Enables/disables magic packet wake-on-LAN for the selected port. 2320 */ 2321 void t4_wol_magic_enable(struct adapter *adap, unsigned int port, 2322 const u8 *addr) 2323 { 2324 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; 2325 2326 if (is_t4(adap->chip)) { 2327 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); 2328 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); 2329 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); 2330 } else { 2331 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO); 2332 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI); 2333 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); 2334 } 2335 2336 if (addr) { 2337 t4_write_reg(adap, mag_id_reg_l, 2338 (addr[2] << 24) | (addr[3] << 16) | 2339 (addr[4] << 8) | addr[5]); 2340 t4_write_reg(adap, mag_id_reg_h, 2341 (addr[0] << 8) | addr[1]); 2342 } 2343 t4_set_reg_field(adap, port_cfg_reg, MAGICEN, 2344 addr ? MAGICEN : 0); 2345 } 2346 2347 /** 2348 * t4_wol_pat_enable - enable/disable pattern-based WoL 2349 * @adap: the adapter 2350 * @port: the physical port index 2351 * @map: bitmap of which HW pattern filters to set 2352 * @mask0: byte mask for bytes 0-63 of a packet 2353 * @mask1: byte mask for bytes 64-127 of a packet 2354 * @crc: Ethernet CRC for selected bytes 2355 * @enable: enable/disable switch 2356 * 2357 * Sets the pattern filters indicated in @map to mask out the bytes 2358 * specified in @mask0/@mask1 in received packets and compare the CRC of 2359 * the resulting packet against @crc. If @enable is %true pattern-based 2360 * WoL is enabled, otherwise disabled. 2361 */ 2362 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, 2363 u64 mask0, u64 mask1, unsigned int crc, bool enable) 2364 { 2365 int i; 2366 u32 port_cfg_reg; 2367 2368 if (is_t4(adap->chip)) 2369 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); 2370 else 2371 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); 2372 2373 if (!enable) { 2374 t4_set_reg_field(adap, port_cfg_reg, PATEN, 0); 2375 return 0; 2376 } 2377 if (map > 0xff) 2378 return -EINVAL; 2379 2380 #define EPIO_REG(name) \ 2381 (is_t4(adap->chip) ? 
PORT_REG(port, XGMAC_PORT_EPIO_##name) : \ 2382 T5_PORT_REG(port, MAC_PORT_EPIO_##name)) 2383 2384 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); 2385 t4_write_reg(adap, EPIO_REG(DATA2), mask1); 2386 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); 2387 2388 for (i = 0; i < NWOL_PAT; i++, map >>= 1) { 2389 if (!(map & 1)) 2390 continue; 2391 2392 /* write byte masks */ 2393 t4_write_reg(adap, EPIO_REG(DATA0), mask0); 2394 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR); 2395 t4_read_reg(adap, EPIO_REG(OP)); /* flush */ 2396 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY) 2397 return -ETIMEDOUT; 2398 2399 /* write CRC */ 2400 t4_write_reg(adap, EPIO_REG(DATA0), crc); 2401 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR); 2402 t4_read_reg(adap, EPIO_REG(OP)); /* flush */ 2403 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY) 2404 return -ETIMEDOUT; 2405 } 2406 #undef EPIO_REG 2407 2408 t4_set_reg_field(adap, port_cfg_reg, 0, PATEN); /* use the T4/T5-appropriate CFG2 register */ 2409 return 0; 2410 } 2411 2412 /* t4_mk_filtdelwr - create a delete filter WR 2413 * @ftid: the filter ID 2414 * @wr: the filter work request to populate 2415 * @qid: ingress queue to receive the delete notification 2416 * 2417 * Creates a filter work request to delete the supplied filter. If @qid is 2418 * negative the delete notification is suppressed. 2419 */ 2420 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid) 2421 { 2422 memset(wr, 0, sizeof(*wr)); 2423 wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR)); 2424 wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16)); 2425 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) | 2426 V_FW_FILTER_WR_NOREPLY(qid < 0)); 2427 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER); 2428 if (qid >= 0) 2429 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid)); 2430 } 2431 2432 #define INIT_CMD(var, cmd, rd_wr) do { \ 2433 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \ 2434 FW_CMD_REQUEST | FW_CMD_##rd_wr); \ 2435 (var).retval_len16 = htonl(FW_LEN16(var)); \ 2436 } while (0) 2437 2438 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, 2439 u32 addr, u32 val) 2440 { 2441 struct fw_ldst_cmd c; 2442 2443 memset(&c, 0, sizeof(c)); 2444 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | 2445 FW_CMD_WRITE | 2446 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE)); 2447 c.cycles_to_len16 = htonl(FW_LEN16(c)); 2448 c.u.addrval.addr = htonl(addr); 2449 c.u.addrval.val = htonl(val); 2450 2451 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 2452 } 2453 2454 /** 2455 * t4_mem_win_read_len - read memory through PCIE memory window 2456 * @adap: the adapter 2457 * @addr: address of the first byte requested, must be 32-bit aligned 2458 * @data: buffer to hold the @len bytes of data read 2459 * @len: amount of data to read from window. Must be <= 2460 * MEMWIN0_APERTURE after adjusting for the 16B (T4) or 2461 * 128B (T5) alignment requirements of the memory window. 2462 * 2463 * Reads @len bytes of data from MC starting at @addr. 2464 */ 2465 int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) 2466 { 2467 int i, off; 2468 u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); 2469 2470 /* Align on a 2KB boundary.
2471 */ 2472 off = addr & MEMWIN0_APERTURE; 2473 if ((addr & 3) || (len + off) > MEMWIN0_APERTURE) 2474 return -EINVAL; 2475 2476 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, 2477 (addr & ~MEMWIN0_APERTURE) | win_pf); 2478 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET); 2479 2480 for (i = 0; i < len; i += 4) 2481 *data++ = (__force __be32) t4_read_reg(adap, 2482 (MEMWIN0_BASE + off + i)); 2483 2484 return 0; 2485 } 2486 2487 /** 2488 * t4_mdio_rd - read a PHY register through MDIO 2489 * @adap: the adapter 2490 * @mbox: mailbox to use for the FW command 2491 * @phy_addr: the PHY address 2492 * @mmd: the PHY MMD to access (0 for clause 22 PHYs) 2493 * @reg: the register to read 2494 * @valp: where to store the value 2495 * 2496 * Issues a FW command through the given mailbox to read a PHY register. 2497 */ 2498 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 2499 unsigned int mmd, unsigned int reg, u16 *valp) 2500 { 2501 int ret; 2502 struct fw_ldst_cmd c; 2503 2504 memset(&c, 0, sizeof(c)); 2505 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | 2506 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); 2507 c.cycles_to_len16 = htonl(FW_LEN16(c)); 2508 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | 2509 FW_LDST_CMD_MMD(mmd)); 2510 c.u.mdio.raddr = htons(reg); 2511 2512 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 2513 if (ret == 0) 2514 *valp = ntohs(c.u.mdio.rval); 2515 return ret; 2516 } 2517 2518 /** 2519 * t4_mdio_wr - write a PHY register through MDIO 2520 * @adap: the adapter 2521 * @mbox: mailbox to use for the FW command 2522 * @phy_addr: the PHY address 2523 * @mmd: the PHY MMD to access (0 for clause 22 PHYs) 2524 * @reg: the register to write 2525 * @valp: value to write 2526 * 2527 * Issues a FW command through the given mailbox to write a PHY register. 2528 */ 2529 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 2530 unsigned int mmd, unsigned int reg, u16 val) 2531 { 2532 struct fw_ldst_cmd c; 2533 2534 memset(&c, 0, sizeof(c)); 2535 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | 2536 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); 2537 c.cycles_to_len16 = htonl(FW_LEN16(c)); 2538 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | 2539 FW_LDST_CMD_MMD(mmd)); 2540 c.u.mdio.raddr = htons(reg); 2541 c.u.mdio.rval = htons(val); 2542 2543 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 2544 } 2545 2546 /** 2547 * t4_fw_hello - establish communication with FW 2548 * @adap: the adapter 2549 * @mbox: mailbox to use for the FW command 2550 * @evt_mbox: mailbox to receive async FW events 2551 * @master: specifies the caller's willingness to be the device master 2552 * @state: returns the current device state (if non-NULL) 2553 * 2554 * Issues a command to establish communication with FW. Returns either 2555 * an error (negative integer) or the mailbox of the Master PF. 2556 */ 2557 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, 2558 enum dev_master master, enum dev_state *state) 2559 { 2560 int ret; 2561 struct fw_hello_cmd c; 2562 u32 v; 2563 unsigned int master_mbox; 2564 int retries = FW_CMD_HELLO_RETRIES; 2565 2566 retry: 2567 memset(&c, 0, sizeof(c)); 2568 INIT_CMD(c, HELLO, WRITE); 2569 c.err_to_clearinit = htonl( 2570 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | 2571 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | 2572 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? 
mbox : 2573 FW_HELLO_CMD_MBMASTER_MASK) | 2574 FW_HELLO_CMD_MBASYNCNOT(evt_mbox) | 2575 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) | 2576 FW_HELLO_CMD_CLEARINIT); 2577 2578 /* 2579 * Issue the HELLO command to the firmware. If it's not successful 2580 * but indicates that we got a "busy" or "timeout" condition, retry 2581 * the HELLO until we exhaust our retry limit. 2582 */ 2583 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 2584 if (ret < 0) { 2585 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) 2586 goto retry; 2587 return ret; 2588 } 2589 2590 v = ntohl(c.err_to_clearinit); 2591 master_mbox = FW_HELLO_CMD_MBMASTER_GET(v); 2592 if (state) { 2593 if (v & FW_HELLO_CMD_ERR) 2594 *state = DEV_STATE_ERR; 2595 else if (v & FW_HELLO_CMD_INIT) 2596 *state = DEV_STATE_INIT; 2597 else 2598 *state = DEV_STATE_UNINIT; 2599 } 2600 2601 /* 2602 * If we're not the Master PF then we need to wait around for the 2603 * Master PF Driver to finish setting up the adapter. 2604 * 2605 * Note that we also do this wait if we're a non-Master-capable PF and 2606 * there is no current Master PF; a Master PF may show up momentarily 2607 * and we wouldn't want to fail pointlessly. (This can happen when an 2608 * OS loads lots of different drivers rapidly at the same time.) In 2609 * this case, the Master PF returned by the firmware will be 2610 * FW_PCIE_FW_MASTER_MASK so the test below will work ... 2611 */ 2612 if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 && 2613 master_mbox != mbox) { 2614 int waiting = FW_CMD_HELLO_TIMEOUT; 2615 2616 /* 2617 * Wait for the firmware to either indicate an error or 2618 * initialized state. If we see either of these we bail out 2619 * and report the issue to the caller. If we exhaust the 2620 * "hello timeout" and we haven't exhausted our retries, try 2621 * again. Otherwise bail with a timeout error. 2622 */ 2623 for (;;) { 2624 u32 pcie_fw; 2625 2626 msleep(50); 2627 waiting -= 50; 2628 2629 /* 2630 * If neither Error nor Initialized are indicated 2631 * by the firmware keep waiting till we exhaust our 2632 * timeout ... and then retry if we haven't exhausted 2633 * our retries ... 2634 */ 2635 pcie_fw = t4_read_reg(adap, MA_PCIE_FW); 2636 if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) { 2637 if (waiting <= 0) { 2638 if (retries-- > 0) 2639 goto retry; 2640 2641 return -ETIMEDOUT; 2642 } 2643 continue; 2644 } 2645 2646 /* 2647 * We either have an Error or Initialized condition; 2648 * report errors preferentially. 2649 */ 2650 if (state) { 2651 if (pcie_fw & FW_PCIE_FW_ERR) 2652 *state = DEV_STATE_ERR; 2653 else if (pcie_fw & FW_PCIE_FW_INIT) 2654 *state = DEV_STATE_INIT; 2655 } 2656 2657 /* 2658 * If we arrived before a Master PF was selected and 2659 * there's now a valid Master PF, grab its identity 2660 * for our caller. 2661 */ 2662 if (master_mbox == FW_PCIE_FW_MASTER_MASK && 2663 (pcie_fw & FW_PCIE_FW_MASTER_VLD)) 2664 master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw); 2665 break; 2666 } 2667 } 2668 2669 return master_mbox; 2670 } 2671 2672 /** 2673 * t4_fw_bye - end communication with FW 2674 * @adap: the adapter 2675 * @mbox: mailbox to use for the FW command 2676 * 2677 * Issues a command to terminate communication with FW.
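* This is the counterpart of t4_fw_hello() and is normally the last command a PF sends before detaching from the device.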
2678 */ 2679 int t4_fw_bye(struct adapter *adap, unsigned int mbox) 2680 { 2681 struct fw_bye_cmd c; 2682 2683 memset(&c, 0, sizeof(c)); 2684 INIT_CMD(c, BYE, WRITE); 2685 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 2686 } 2687 2688 /** 2689 * t4_early_init - ask FW to initialize the device 2690 * @adap: the adapter 2691 * @mbox: mailbox to use for the FW command 2692 * 2693 * Issues a command to FW to partially initialize the device. This 2694 * performs initialization that generally doesn't depend on user input. 2695 */ 2696 int t4_early_init(struct adapter *adap, unsigned int mbox) 2697 { 2698 struct fw_initialize_cmd c; 2699 2700 memset(&c, 0, sizeof(c)); 2701 INIT_CMD(c, INITIALIZE, WRITE); 2702 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 2703 } 2704 2705 /** 2706 * t4_fw_reset - issue a reset to FW 2707 * @adap: the adapter 2708 * @mbox: mailbox to use for the FW command 2709 * @reset: specifies the type of reset to perform 2710 * 2711 * Issues a reset command of the specified type to FW. 2712 */ 2713 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) 2714 { 2715 struct fw_reset_cmd c; 2716 2717 memset(&c, 0, sizeof(c)); 2718 INIT_CMD(c, RESET, WRITE); 2719 c.val = htonl(reset); 2720 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 2721 } 2722 2723 /** 2724 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET 2725 * @adap: the adapter 2726 * @mbox: mailbox to use for the FW RESET command (if desired) 2727 * @force: force uP into RESET even if FW RESET command fails 2728 * 2729 * Issues a RESET command to firmware (if desired) with a HALT indication 2730 * and then puts the microprocessor into RESET state. The RESET command 2731 * will only be issued if a legitimate mailbox is provided (mbox <= 2732 * FW_PCIE_FW_MASTER_MASK). 2733 * 2734 * This is generally used in order for the host to safely manipulate the 2735 * adapter without fear of conflicting with whatever the firmware might 2736 * be doing. The only way out of this state is to RESTART the firmware 2737 * ... 2738 */ 2739 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) 2740 { 2741 int ret = 0; 2742 2743 /* 2744 * If a legitimate mailbox is provided, issue a RESET command 2745 * with a HALT indication. 2746 */ 2747 if (mbox <= FW_PCIE_FW_MASTER_MASK) { 2748 struct fw_reset_cmd c; 2749 2750 memset(&c, 0, sizeof(c)); 2751 INIT_CMD(c, RESET, WRITE); 2752 c.val = htonl(PIORST | PIORSTMODE); 2753 c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U)); 2754 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 2755 } 2756 2757 /* 2758 * Normally we won't complete the operation if the firmware RESET 2759 * command fails but if our caller insists we'll go ahead and put the 2760 * uP into RESET. This can be useful if the firmware is hung or even 2761 * missing ... We'll have to take the risk of putting the uP into 2762 * RESET without the cooperation of firmware in that case. 2763 * 2764 * We also force the firmware's HALT flag to be on in case we bypassed 2765 * the firmware RESET command above or we're dealing with old firmware 2766 * which doesn't have the HALT capability. This will serve as a flag 2767 * for the incoming firmware to know that it's coming out of a HALT 2768 * rather than a RESET ... if it's new enough to understand that ...
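* (t4_fw_restart() keys off this same PCIE_FW.HALT flag, polling for it to clear once the uP is released from RESET.)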
2769 */ 2770 if (ret == 0 || force) { 2771 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST); 2772 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 2773 FW_PCIE_FW_HALT); 2774 } 2775 2776 /* 2777 * And we always return the result of the firmware RESET command 2778 * even when we force the uP into RESET ... 2779 */ 2780 return ret; 2781 } 2782 2783 /** 2784 * t4_fw_restart - restart the firmware by taking the uP out of RESET 2785 * @adap: the adapter 2786 * @reset: if we want to do a RESET to restart things 2787 * 2788 * Restart firmware previously halted by t4_fw_halt(). On successful 2789 * return the previous PF Master remains as the new PF Master and there 2790 * is no need to issue a new HELLO command, etc. 2791 * 2792 * We do this in two ways: 2793 * 2794 * 1. If we're dealing with newer firmware we'll simply want to take 2795 * the chip's microprocessor out of RESET. This will cause the 2796 * firmware to start up from its start vector. And then we'll loop 2797 * until the firmware indicates it's started again (PCIE_FW.HALT 2798 * reset to 0) or we timeout. 2799 * 2800 * 2. If we're dealing with older firmware then we'll need to RESET 2801 * the chip since older firmware won't recognize the PCIE_FW.HALT 2802 * flag and automatically RESET itself on startup. 2803 */ 2804 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) 2805 { 2806 if (reset) { 2807 /* 2808 * Since we're directing the RESET instead of the firmware 2809 * doing it automatically, we need to clear the PCIE_FW.HALT 2810 * bit. 2811 */ 2812 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0); 2813 2814 /* 2815 * If we've been given a valid mailbox, first try to get the 2816 * firmware to do the RESET. If that works, great and we can 2817 * return success. Otherwise, if we haven't been given a 2818 * valid mailbox or the RESET command failed, fall back to 2819 * hitting the chip with a hammer. 2820 */ 2821 if (mbox <= FW_PCIE_FW_MASTER_MASK) { 2822 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); 2823 msleep(100); 2824 if (t4_fw_reset(adap, mbox, 2825 PIORST | PIORSTMODE) == 0) 2826 return 0; 2827 } 2828 2829 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE); 2830 msleep(2000); 2831 } else { 2832 int ms; 2833 2834 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); 2835 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { 2836 if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT)) 2837 return 0; 2838 msleep(100); 2839 ms += 100; 2840 } 2841 return -ETIMEDOUT; 2842 } 2843 return 0; 2844 } 2845 2846 /** 2847 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW 2848 * @adap: the adapter 2849 * @mbox: mailbox to use for the FW RESET command (if desired) 2850 * @fw_data: the firmware image to write 2851 * @size: image size 2852 * @force: force upgrade even if firmware doesn't cooperate 2853 * 2854 * Perform all of the steps necessary for upgrading an adapter's 2855 * firmware image. Normally this requires the cooperation of the 2856 * existing firmware in order to halt all existing activities 2857 * but if an invalid mailbox token is passed in we skip that step 2858 * (though we'll still put the adapter microprocessor into RESET in 2859 * that case). 2860 * 2861 * On successful return the new firmware will have been loaded and 2862 * the adapter will have been fully RESET losing all previous setup 2863 * state. On unsuccessful return the adapter may be completely hosed ... 2864 * positive errno indicates that the adapter is ~probably~ intact, a 2865 * negative errno indicates that things are looking bad ... 
2866 */ 2867 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, 2868 const u8 *fw_data, unsigned int size, int force) 2869 { 2870 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; 2871 int reset, ret; 2872 2873 ret = t4_fw_halt(adap, mbox, force); 2874 if (ret < 0 && !force) 2875 return ret; 2876 2877 ret = t4_load_fw(adap, fw_data, size); 2878 if (ret < 0) 2879 return ret; 2880 2881 /* 2882 * Older versions of the firmware don't understand the new 2883 * PCIE_FW.HALT flag and so won't know to perform a RESET when they 2884 * restart. So for newly loaded older firmware we'll have to do the 2885 * RESET for it so it starts up on a clean slate. We can tell if 2886 * the newly loaded firmware will handle this right by checking 2887 * its header flags to see if it advertises the capability. 2888 */ 2889 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); 2890 return t4_fw_restart(adap, mbox, reset); 2891 } 2892 2893 2894 /** 2895 * t4_fw_config_file - setup an adapter via a Configuration File 2896 * @adap: the adapter 2897 * @mbox: mailbox to use for the FW command 2898 * @mtype: the memory type where the Configuration File is located 2899 * @maddr: the memory address where the Configuration File is located 2900 * @finiver: return value for CF [fini] version 2901 * @finicsum: return value for CF [fini] checksum 2902 * @cfcsum: return value for CF computed checksum 2903 * 2904 * Issue a command to get the firmware to process the Configuration 2905 * File located at the specified mtype/maddress. If the Configuration 2906 * File is processed successfully and return value pointers are 2907 * provided, the Configuration File "[fini] section version and 2908 * checksum values will be returned along with the computed checksum. 2909 * It's up to the caller to decide how it wants to respond to the 2910 * checksums not matching but it recommended that a prominant warning 2911 * be emitted in order to help people rapidly identify changed or 2912 * corrupted Configuration Files. 2913 * 2914 * Also note that it's possible to modify things like "niccaps", 2915 * "toecaps",etc. between processing the Configuration File and telling 2916 * the firmware to use the new configuration. Callers which want to 2917 * do this will need to "hand-roll" their own CAPS_CONFIGS commands for 2918 * Configuration Files if they want to do this. 2919 */ 2920 int t4_fw_config_file(struct adapter *adap, unsigned int mbox, 2921 unsigned int mtype, unsigned int maddr, 2922 u32 *finiver, u32 *finicsum, u32 *cfcsum) 2923 { 2924 struct fw_caps_config_cmd caps_cmd; 2925 int ret; 2926 2927 /* 2928 * Tell the firmware to process the indicated Configuration File. 2929 * If there are no errors and the caller has provided return value 2930 * pointers for the [fini] section version, checksum and computed 2931 * checksum, pass those back to the caller. 
2932 */ 2933 memset(&caps_cmd, 0, sizeof(caps_cmd)); 2934 caps_cmd.op_to_write = 2935 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 2936 FW_CMD_REQUEST | 2937 FW_CMD_READ); 2938 caps_cmd.cfvalid_to_len16 = 2939 htonl(FW_CAPS_CONFIG_CMD_CFVALID | 2940 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 2941 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | 2942 FW_LEN16(caps_cmd)); 2943 ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); 2944 if (ret < 0) 2945 return ret; 2946 2947 if (finiver) 2948 *finiver = ntohl(caps_cmd.finiver); 2949 if (finicsum) 2950 *finicsum = ntohl(caps_cmd.finicsum); 2951 if (cfcsum) 2952 *cfcsum = ntohl(caps_cmd.cfcsum); 2953 2954 /* 2955 * And now tell the firmware to use the configuration we just loaded. 2956 */ 2957 caps_cmd.op_to_write = 2958 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 2959 FW_CMD_REQUEST | 2960 FW_CMD_WRITE); 2961 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); 2962 return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL); 2963 } 2964 2965 /** 2966 * t4_fixup_host_params - fix up host-dependent parameters 2967 * @adap: the adapter 2968 * @page_size: the host's Base Page Size 2969 * @cache_line_size: the host's Cache Line Size 2970 * 2971 * Various registers in T4 contain values which are dependent on the 2972 * host's Base Page and Cache Line Sizes. This function will fix all of 2973 * those registers with the appropriate values as passed in ... 2974 */ 2975 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, 2976 unsigned int cache_line_size) 2977 { 2978 unsigned int page_shift = fls(page_size) - 1; 2979 unsigned int sge_hps = page_shift - 10; 2980 unsigned int stat_len = cache_line_size > 64 ? 128 : 64; 2981 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size; 2982 unsigned int fl_align_log = fls(fl_align) - 1; 2983 2984 t4_write_reg(adap, SGE_HOST_PAGE_SIZE, 2985 HOSTPAGESIZEPF0(sge_hps) | 2986 HOSTPAGESIZEPF1(sge_hps) | 2987 HOSTPAGESIZEPF2(sge_hps) | 2988 HOSTPAGESIZEPF3(sge_hps) | 2989 HOSTPAGESIZEPF4(sge_hps) | 2990 HOSTPAGESIZEPF5(sge_hps) | 2991 HOSTPAGESIZEPF6(sge_hps) | 2992 HOSTPAGESIZEPF7(sge_hps)); 2993 2994 t4_set_reg_field(adap, SGE_CONTROL, 2995 INGPADBOUNDARY_MASK | 2996 EGRSTATUSPAGESIZE_MASK, 2997 INGPADBOUNDARY(fl_align_log - 5) | 2998 EGRSTATUSPAGESIZE(stat_len != 64)); 2999 3000 /* 3001 * Adjust various SGE Free List Host Buffer Sizes. 3002 * 3003 * This is something of a crock since we're using fixed indices into 3004 * the array which are also known by the sge.c code and the T4 3005 * Firmware Configuration File. We need to come up with a much better 3006 * approach to managing this array. For now, the first four entries 3007 * are: 3008 * 3009 * 0: Host Page Size 3010 * 1: 64KB 3011 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode) 3012 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode) 3013 * 3014 * For the single-MTU buffers in unpacked mode we need to include 3015 * space for the SGE Control Packet Shift, 14 byte Ethernet header, 3016 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet 3017 * Padding boundry. All of these are accommodated in the Factory 3018 * Default Firmware Configuration File but we need to adjust it for 3019 * this host's cache line size. 
3020 */ 3021 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size); 3022 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2, 3023 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1) 3024 & ~(fl_align-1)); 3025 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3, 3026 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1) 3027 & ~(fl_align-1)); 3028 3029 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12)); 3030 3031 return 0; 3032 } 3033 3034 /** 3035 * t4_fw_initialize - ask FW to initialize the device 3036 * @adap: the adapter 3037 * @mbox: mailbox to use for the FW command 3038 * 3039 * Issues a command to FW to partially initialize the device. This 3040 * performs initialization that generally doesn't depend on user input. 3041 */ 3042 int t4_fw_initialize(struct adapter *adap, unsigned int mbox) 3043 { 3044 struct fw_initialize_cmd c; 3045 3046 memset(&c, 0, sizeof(c)); 3047 INIT_CMD(c, INITIALIZE, WRITE); 3048 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3049 } 3050 3051 /** 3052 * t4_query_params - query FW or device parameters 3053 * @adap: the adapter 3054 * @mbox: mailbox to use for the FW command 3055 * @pf: the PF 3056 * @vf: the VF 3057 * @nparams: the number of parameters 3058 * @params: the parameter names 3059 * @val: the parameter values 3060 * 3061 * Reads the value of FW or device parameters. Up to 7 parameters can be 3062 * queried at once. 3063 */ 3064 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 3065 unsigned int vf, unsigned int nparams, const u32 *params, 3066 u32 *val) 3067 { 3068 int i, ret; 3069 struct fw_params_cmd c; 3070 __be32 *p = &c.param[0].mnem; 3071 3072 if (nparams > 7) 3073 return -EINVAL; 3074 3075 memset(&c, 0, sizeof(c)); 3076 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | 3077 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) | 3078 FW_PARAMS_CMD_VFN(vf)); 3079 c.retval_len16 = htonl(FW_LEN16(c)); 3080 for (i = 0; i < nparams; i++, p += 2) 3081 *p = htonl(*params++); 3082 3083 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3084 if (ret == 0) 3085 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) 3086 *val++ = ntohl(*p); 3087 return ret; 3088 } 3089 3090 /** 3091 * t4_set_params - sets FW or device parameters 3092 * @adap: the adapter 3093 * @mbox: mailbox to use for the FW command 3094 * @pf: the PF 3095 * @vf: the VF 3096 * @nparams: the number of parameters 3097 * @params: the parameter names 3098 * @val: the parameter values 3099 * 3100 * Sets the value of FW or device parameters. Up to 7 parameters can be 3101 * specified at once. 
3102 */ 3103 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 3104 unsigned int vf, unsigned int nparams, const u32 *params, 3105 const u32 *val) 3106 { 3107 struct fw_params_cmd c; 3108 __be32 *p = &c.param[0].mnem; 3109 3110 if (nparams > 7) 3111 return -EINVAL; 3112 3113 memset(&c, 0, sizeof(c)); 3114 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | 3115 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) | 3116 FW_PARAMS_CMD_VFN(vf)); 3117 c.retval_len16 = htonl(FW_LEN16(c)); 3118 while (nparams--) { 3119 *p++ = htonl(*params++); 3120 *p++ = htonl(*val++); 3121 } 3122 3123 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3124 } 3125 3126 /** 3127 * t4_cfg_pfvf - configure PF/VF resource limits 3128 * @adap: the adapter 3129 * @mbox: mailbox to use for the FW command 3130 * @pf: the PF being configured 3131 * @vf: the VF being configured 3132 * @txq: the max number of egress queues 3133 * @txq_eth_ctrl: the max number of egress Ethernet or control queues 3134 * @rxqi: the max number of interrupt-capable ingress queues 3135 * @rxq: the max number of interruptless ingress queues 3136 * @tc: the PCI traffic class 3137 * @vi: the max number of virtual interfaces 3138 * @cmask: the channel access rights mask for the PF/VF 3139 * @pmask: the port access rights mask for the PF/VF 3140 * @nexact: the maximum number of exact MPS filters 3141 * @rcaps: read capabilities 3142 * @wxcaps: write/execute capabilities 3143 * 3144 * Configures resource limits and capabilities for a physical or virtual 3145 * function. 3146 */ 3147 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, 3148 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, 3149 unsigned int rxqi, unsigned int rxq, unsigned int tc, 3150 unsigned int vi, unsigned int cmask, unsigned int pmask, 3151 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) 3152 { 3153 struct fw_pfvf_cmd c; 3154 3155 memset(&c, 0, sizeof(c)); 3156 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST | 3157 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) | 3158 FW_PFVF_CMD_VFN(vf)); 3159 c.retval_len16 = htonl(FW_LEN16(c)); 3160 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) | 3161 FW_PFVF_CMD_NIQ(rxq)); 3162 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) | 3163 FW_PFVF_CMD_PMASK(pmask) | 3164 FW_PFVF_CMD_NEQ(txq)); 3165 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) | 3166 FW_PFVF_CMD_NEXACTF(nexact)); 3167 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) | 3168 FW_PFVF_CMD_WX_CAPS(wxcaps) | 3169 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); 3170 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3171 } 3172 3173 /** 3174 * t4_alloc_vi - allocate a virtual interface 3175 * @adap: the adapter 3176 * @mbox: mailbox to use for the FW command 3177 * @port: physical port associated with the VI 3178 * @pf: the PF owning the VI 3179 * @vf: the VF owning the VI 3180 * @nmac: number of MAC addresses needed (1 to 5) 3181 * @mac: the MAC addresses of the VI 3182 * @rss_size: size of RSS table slice associated with this VI 3183 * 3184 * Allocates a virtual interface for the given physical port. If @mac is 3185 * not %NULL it contains the MAC addresses of the VI as assigned by FW. 3186 * @mac should be large enough to hold @nmac Ethernet addresses, they are 3187 * stored consecutively so the space needed is @nmac * 6 bytes. 3188 * Returns a negative error number or the non-negative VI id. 
3189 */ 3190 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, 3191 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, 3192 unsigned int *rss_size) 3193 { 3194 int ret; 3195 struct fw_vi_cmd c; 3196 3197 memset(&c, 0, sizeof(c)); 3198 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST | 3199 FW_CMD_WRITE | FW_CMD_EXEC | 3200 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf)); 3201 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c)); 3202 c.portid_pkd = FW_VI_CMD_PORTID(port); 3203 c.nmac = nmac - 1; 3204 3205 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3206 if (ret) 3207 return ret; 3208 3209 if (mac) { 3210 memcpy(mac, c.mac, sizeof(c.mac)); 3211 switch (nmac) { 3212 case 5: 3213 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); 3214 case 4: 3215 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); 3216 case 3: 3217 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); 3218 case 2: 3219 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); 3220 } 3221 } 3222 if (rss_size) 3223 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd)); 3224 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid)); 3225 } 3226 3227 /** 3228 * t4_set_rxmode - set Rx properties of a virtual interface 3229 * @adap: the adapter 3230 * @mbox: mailbox to use for the FW command 3231 * @viid: the VI id 3232 * @mtu: the new MTU or -1 3233 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change 3234 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change 3235 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change 3236 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change 3237 * @sleep_ok: if true we may sleep while awaiting command completion 3238 * 3239 * Sets Rx properties of a virtual interface. 3240 */ 3241 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, 3242 int mtu, int promisc, int all_multi, int bcast, int vlanex, 3243 bool sleep_ok) 3244 { 3245 struct fw_vi_rxmode_cmd c; 3246 3247 /* convert to FW values */ 3248 if (mtu < 0) 3249 mtu = FW_RXMODE_MTU_NO_CHG; 3250 if (promisc < 0) 3251 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; 3252 if (all_multi < 0) 3253 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; 3254 if (bcast < 0) 3255 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; 3256 if (vlanex < 0) 3257 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK; 3258 3259 memset(&c, 0, sizeof(c)); 3260 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST | 3261 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid)); 3262 c.retval_len16 = htonl(FW_LEN16(c)); 3263 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) | 3264 FW_VI_RXMODE_CMD_PROMISCEN(promisc) | 3265 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | 3266 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | 3267 FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); 3268 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 3269 } 3270 3271 /** 3272 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses 3273 * @adap: the adapter 3274 * @mbox: mailbox to use for the FW command 3275 * @viid: the VI id 3276 * @free: if true any existing filters for this VI id are first removed 3277 * @naddr: the number of MAC addresses to allocate filters for (up to 7) 3278 * @addr: the MAC address(es) 3279 * @idx: where to store the index of each allocated filter 3280 * @hash: pointer to hash address filter bitmap 3281 * @sleep_ok: call is allowed to sleep 3282 * 3283 * Allocates an exact-match filter for each of the supplied addresses and 3284 * sets it to the corresponding address. 
If @idx is not %NULL it should 3285 * have at least @naddr entries, each of which will be set to the index of 3286 * the filter allocated for the corresponding MAC address. If a filter 3287 * could not be allocated for an address its index is set to 0xffff. 3288 * If @hash is not %NULL addresses that fail to allocate an exact filter 3289 * are hashed and update the hash filter bitmap pointed at by @hash. 3290 * 3291 * Returns a negative error number or the number of filters allocated. 3292 */ 3293 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, 3294 unsigned int viid, bool free, unsigned int naddr, 3295 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) 3296 { 3297 int i, ret; 3298 struct fw_vi_mac_cmd c; 3299 struct fw_vi_mac_exact *p; 3300 unsigned int max_naddr = is_t4(adap->chip) ? 3301 NUM_MPS_CLS_SRAM_L_INSTANCES : 3302 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 3303 3304 if (naddr > 7) 3305 return -EINVAL; 3306 3307 memset(&c, 0, sizeof(c)); 3308 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | 3309 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) | 3310 FW_VI_MAC_CMD_VIID(viid)); 3311 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) | 3312 FW_CMD_LEN16((naddr + 2) / 2)); 3313 3314 for (i = 0, p = c.u.exact; i < naddr; i++, p++) { 3315 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | 3316 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); 3317 memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); 3318 } 3319 3320 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); 3321 if (ret) 3322 return ret; 3323 3324 for (i = 0, p = c.u.exact; i < naddr; i++, p++) { 3325 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); 3326 3327 if (idx) 3328 idx[i] = index >= max_naddr ? 0xffff : index; 3329 if (index < max_naddr) 3330 ret++; 3331 else if (hash) 3332 *hash |= (1ULL << hash_mac_addr(addr[i])); 3333 } 3334 return ret; 3335 } 3336 3337 /** 3338 * t4_change_mac - modifies the exact-match filter for a MAC address 3339 * @adap: the adapter 3340 * @mbox: mailbox to use for the FW command 3341 * @viid: the VI id 3342 * @idx: index of existing filter for old value of MAC address, or -1 3343 * @addr: the new MAC address value 3344 * @persist: whether a new MAC allocation should be persistent 3345 * @add_smt: if true also add the address to the HW SMT 3346 * 3347 * Modifies an exact-match filter and sets it to the new MAC address. 3348 * Note that in general it is not possible to modify the value of a given 3349 * filter so the generic way to modify an address filter is to free the one 3350 * being used by the old address value and allocate a new filter for the 3351 * new address value. @idx can be -1 if the address is a new addition. 3352 * 3353 * Returns a negative error number or the index of the filter with the new 3354 * MAC value. 3355 */ 3356 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, 3357 int idx, const u8 *addr, bool persist, bool add_smt) 3358 { 3359 int ret, mode; 3360 struct fw_vi_mac_cmd c; 3361 struct fw_vi_mac_exact *p = c.u.exact; 3362 unsigned int max_mac_addr = is_t4(adap->chip) ? 3363 NUM_MPS_CLS_SRAM_L_INSTANCES : 3364 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 3365 3366 if (idx < 0) /* new allocation */ 3367 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; 3368 mode = add_smt ? 
FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; 3369 3370 memset(&c, 0, sizeof(c)); 3371 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | 3372 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid)); 3373 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1)); 3374 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | 3375 FW_VI_MAC_CMD_SMAC_RESULT(mode) | 3376 FW_VI_MAC_CMD_IDX(idx)); 3377 memcpy(p->macaddr, addr, sizeof(p->macaddr)); 3378 3379 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3380 if (ret == 0) { 3381 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); 3382 if (ret >= max_mac_addr) 3383 ret = -ENOMEM; 3384 } 3385 return ret; 3386 } 3387 3388 /** 3389 * t4_set_addr_hash - program the MAC inexact-match hash filter 3390 * @adap: the adapter 3391 * @mbox: mailbox to use for the FW command 3392 * @viid: the VI id 3393 * @ucast: whether the hash filter should also match unicast addresses 3394 * @vec: the value to be written to the hash filter 3395 * @sleep_ok: call is allowed to sleep 3396 * 3397 * Sets the 64-bit inexact-match hash filter for a virtual interface. 3398 */ 3399 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, 3400 bool ucast, u64 vec, bool sleep_ok) 3401 { 3402 struct fw_vi_mac_cmd c; 3403 3404 memset(&c, 0, sizeof(c)); 3405 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | 3406 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid)); 3407 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN | 3408 FW_VI_MAC_CMD_HASHUNIEN(ucast) | 3409 FW_CMD_LEN16(1)); 3410 c.u.hash.hashvec = cpu_to_be64(vec); 3411 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 3412 } 3413 3414 /** 3415 * t4_enable_vi - enable/disable a virtual interface 3416 * @adap: the adapter 3417 * @mbox: mailbox to use for the FW command 3418 * @viid: the VI id 3419 * @rx_en: 1=enable Rx, 0=disable Rx 3420 * @tx_en: 1=enable Tx, 0=disable Tx 3421 * 3422 * Enables/disables a virtual interface. 3423 */ 3424 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, 3425 bool rx_en, bool tx_en) 3426 { 3427 struct fw_vi_enable_cmd c; 3428 3429 memset(&c, 0, sizeof(c)); 3430 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | 3431 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); 3432 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) | 3433 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c)); 3434 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3435 } 3436 3437 /** 3438 * t4_identify_port - identify a VI's port by blinking its LED 3439 * @adap: the adapter 3440 * @mbox: mailbox to use for the FW command 3441 * @viid: the VI id 3442 * @nblinks: how many times to blink LED at 2.5 Hz 3443 * 3444 * Identifies a VI's port by blinking its LED. 
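* For example, @nblinks = 10 blinks the LED for roughly four seconds at the 2.5 Hz rate.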
/**
 * t4_identify_port - identify a VI's port by blinking its LED
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
	c.blinkdur = htons(nblinks);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_iq_free - free an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
			    FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
	c.iqid = htons(iqid);
	c.fl0id = htons(fl0id);
	c.fl1id = htons(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_eth_eq_free - free an Ethernet egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
			    FW_EQ_ETH_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ctrl_eq_free - free a control egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
			    FW_EQ_CTRL_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
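
/*
 * Illustrative sketch only: tearing down one Ethernet queue set with the
 * helpers above, releasing the egress queue first and then the ingress
 * queue together with its free list.  The parameter values are
 * assumptions made for the example.
 */
static inline int example_free_ethq(struct adapter *adap, unsigned int mbox,
				    unsigned int pf, unsigned int vf,
				    unsigned int eqid, unsigned int iqid,
				    unsigned int flid)
{
	int ret;

	ret = t4_eth_eq_free(adap, mbox, pf, vf, eqid);
	if (ret)
		return ret;

	/* 0xffff marks the unused FL1 slot as "no FL attached" */
	return t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP,
			  iqid, flid, 0xffff);
}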
/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
			    FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);

		if (stat & FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = SPEED_100;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = SPEED_1000;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = SPEED_10000;

		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {             /* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			t4_os_link_changed(adap, port, link_ok);
		}
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, port);
		}
	}
	return 0;
}
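
/*
 * Illustrative sketch only: the speed decode performed inside
 * t4_handle_fw_rpl() above, pulled out to show how the firmware's
 * FW_PORT_CAP_SPEED_* capability bits map onto ethtool SPEED_* values.
 */
static inline int example_decode_speed(u32 lstatus)
{
	if (lstatus & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
		return SPEED_100;
	if (lstatus & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
		return SPEED_1000;
	if (lstatus & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
		return SPEED_10000;
	return 0;	/* no link or unrecognized speed */
}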
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	u16 val;

	if (pci_is_pcie(adapter->pdev)) {
		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

int t4_wait_dev_ready(struct adapter *adap)
{
	if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
		return 0;
	msleep(500);
	return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
}

static int get_flash_params(struct adapter *adap)
{
	int ret;
	u32 info;

	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adap, 3, 0, 1, &info);
	t4_write_reg(adap, SF_OP, 0);           /* unlock SF */
	if (ret)
		return ret;

	if ((info & 0xff) != 0x20)              /* not a Numonix flash */
		return -EINVAL;
	info >>= 16;                            /* log2 of size */
	if (info >= 0x14 && info < 0x18)
		adap->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		adap->params.sf_nsec = 64;
	else
		return -EINVAL;
	adap->params.sf_size = 1 << info;
	adap->params.sf_fw_start =
		t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
	return 0;
}
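
/*
 * Worked example for get_flash_params() above: a 4 MB serial flash
 * reports 0x16 in the size field (log2(4 MB) = 22), giving
 * sf_size = 1 << 22 bytes and sf_nsec = 1 << (22 - 16) = 64 sectors
 * of 64 KB each.
 */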
/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	uint16_t device_id;

	ret = t4_wait_dev_ready(adapter);
	if (ret < 0)
		return ret;

	get_pci_mode(adapter, &adapter->params.pci);
	adapter->params.rev = t4_read_reg(adapter, PL_REV);

	ret = get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
		return ret;
	}

	/* Retrieve adapter's device ID */
	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
	ver = device_id >> 12;
	switch (ver) {
	case CHELSIO_T4:
		adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4,
						  adapter->params.rev);
		break;
	case CHELSIO_T5:
		adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5,
						  adapter->params.rev);
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			device_id);
		return -EINVAL;
	}

	/* Reassign the updated revision field */
	adapter->params.rev = adapter->chip;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;
	return 0;
}

/**
 * t4_port_init - initialize SW state for the adapter's ports
 * @adap: the adapter
 * @mbox: mailbox to use for the FW commands
 * @pf: the PF owning the ports
 * @vf: the VF owning the ports
 *
 * Queries each port's configuration from the firmware, allocates a VI
 * per port, and initializes the corresponding port_info state, including
 * the MAC address, RSS size and mode, and link configuration.
 *
 * Returns 0 on success or a negative error number.
 */
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;
	struct fw_port_cmd c;
	struct fw_rss_vi_config_cmd rvc;

	memset(&c, 0, sizeof(c));
	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		unsigned int rss_size;
		struct port_info *p = adap2pinfo(adap, i);

		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_PORT_CMD_PORTID(j));
		c.action_to_len16 = htonl(
			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;

		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
		if (ret < 0)
			return ret;

		p->viid = ret;
		p->tx_chan = j;
		p->lport = j;
		p->rss_size = rss_size;
		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
		adap->port[i]->dev_id = j;

		ret = ntohl(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
		p->mod_type = FW_PORT_MOD_TYPE_NA;

		rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
		rvc.retval_len16 = htonl(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);

		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
		j++;
	}
	return 0;
}
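
/*
 * Illustrative sketch only: the order in which a probe routine would
 * typically use the two functions above.  Firmware attach/configuration
 * and error unwinding are omitted; using the PF number for both @mbox
 * and @pf follows the main driver's convention but is an assumption
 * made for the example.
 */
static inline int example_probe_hw(struct adapter *adap, int pf)
{
	int ret;

	ret = t4_prep_adapter(adap);	/* basic SW state + chip identify */
	if (ret)
		return ret;

	/* ... FW_HELLO / device initialization would happen here ... */

	return t4_port_init(adap, pf, pf, 0);
}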