/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
			 const u8 *fw_data, unsigned int size, int force);

/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
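
/*
 * Example (illustrative sketch only; "adap" and the SF_OP/SF_BUSY flash
 * control register and busy bit, used by the flash helpers later in this
 * file, are assumed):
 *
 *	u32 val;
 *
 *	if (t4_wait_op_done_val(adap, SF_OP, SF_BUSY, 0, 10, 5, &val))
 *		return -EAGAIN;
 *
 * On success "val" holds SF_OP as sampled when SF_BUSY deasserted; the 0
 * polarity means "wait for the bit to clear".
 */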

/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
}

/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals,
		      unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
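
/*
 * Example (illustrative only): many TP registers are reached through the
 * TP_PIO_ADDR/TP_PIO_DATA pair (register names assumed from t4_regs.h), so
 * four consecutive TP PIO registers starting at index 0x10 can be
 * snapshotted with:
 *
 *	u32 vals[4];
 *
 *	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, vals, 4, 0x10);
 *
 * t4_write_indirect() writes a block back through the same pair.
 */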

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 * to respond.  @sleep_ok determines whether we may sleep while awaiting
 * the response.  If sleeping is allowed we use progressive backoff
 * otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);

			if (FW_CMD_RETVAL_GET((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_GET((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}
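
/*
 * Example (sketch, not taken from this file): issuing a firmware RESET
 * command.  It assumes the t4_wr_mbox() convenience wrapper from cxgb4.h,
 * which calls t4_wr_mbox_meat() with @sleep_ok set, and the fw_reset_cmd
 * layout from t4fw_api.h:
 *
 *	struct fw_reset_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) | FW_CMD_REQUEST |
 *			      FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 *	c.val = htonl(PIORST | PIORSTMODE);
 *	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 *
 * A non-zero firmware retval comes back negated, per the comment above.
 */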

/**
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @idx: which MC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
	u32 mc_bist_status_rdata, mc_bist_data_pattern;

	if (is_t4(adap->params.chip)) {
		mc_bist_cmd = MC_BIST_CMD;
		mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
		mc_bist_cmd_len = MC_BIST_CMD_LEN;
		mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
		mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len, 64);
	t4_write_reg(adap, mc_bist_data_pattern, 0xc);
	t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;

	if (is_t4(adap->params.chip)) {
		edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
						idx);
	} else {
		edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern =
			EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
		edc_bist_status_rdata =
			EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
	}

	if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/*
 * t4_mem_win_rw - read/write memory through PCIE memory window
 * @adap: the adapter
 * @addr: address of first byte requested
 * @data: MEMWIN0_APERTURE bytes of data containing the requested address
 * @dir: direction of transfer 1 => read, 0 => write
 *
 * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
 * MEMWIN0_APERTURE-byte-aligned address that covers the requested
 * address @addr.
 */
static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
{
	int i;
	u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);

	/*
	 * Setup offset into PCIE memory window.  Address must be a
	 * MEMWIN0_APERTURE-byte-aligned address.  (Read back MA register to
	 * ensure that changes propagate before we attempt to use the new
	 * values.)
	 */
	t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
		     (addr & ~(MEMWIN0_APERTURE - 1)) | win_pf);
	t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);

	/* Collecting data 4 bytes at a time up to MEMWIN0_APERTURE */
	for (i = 0; i < MEMWIN0_APERTURE; i = i + 0x4) {
		if (dir)
			*data++ = (__force __be32) t4_read_reg(adap,
							(MEMWIN0_BASE + i));
		else
			t4_write_reg(adap, (MEMWIN0_BASE + i),
				     (__force u32) *data++);
	}

	return 0;
}

/**
 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 * @adap: the adapter
 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 * @addr: address within indicated memory type
 * @len: amount of memory to transfer
 * @buf: host memory buffer
 * @dir: direction of transfer 1 => read, 0 => write
 *
 * Reads/writes an [almost] arbitrary memory region in the firmware: the
 * firmware memory address, length and host buffer must be aligned on
 * 32-bit boundaries.  The memory is transferred as a raw byte sequence
 * from/to the firmware's memory.  If this memory contains data
 * structures which contain multi-byte integers, it's the caller's
 * responsibility to perform appropriate byte order conversions.
 */
static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
			__be32 *buf, int dir)
{
	u32 pos, start, end, offset, memoffset;
	u32 edc_size, mc_size;
	int ret = 0;
	__be32 *data;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	data = vmalloc(MEMWIN0_APERTURE);
	if (!data)
		return -ENOMEM;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- T4
	 * MEM_MC0  = 2 -- For T5
	 * MEM_MC1  = 3 -- For T5
	 */
	edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
	if (mtype != MEM_MC1)
		memoffset = (mtype * (edc_size * 1024 * 1024));
	else {
		mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
						       MA_EXT_MEMORY_BAR));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/*
	 * The underlying EDC/MC read routines read MEMWIN0_APERTURE bytes
	 * at a time so we need to round down the start and round up the end.
	 * We'll start copying out of the first line at (addr - start) a word
	 * at a time.
	 */
	start = addr & ~(MEMWIN0_APERTURE-1);
	end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
	offset = (addr - start)/sizeof(__be32);

	for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
		/*
		 * If we're writing, copy the data from the caller's memory
		 * buffer
		 */
		if (!dir) {
			/*
			 * If we're doing a partial write, then we need to do
			 * a read-modify-write ...
			 */
			if (offset || len < MEMWIN0_APERTURE) {
				ret = t4_mem_win_rw(adap, pos, data, 1);
				if (ret)
					break;
			}
			while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
			       len > 0) {
				data[offset++] = *buf++;
				len -= sizeof(__be32);
			}
		}

		/*
		 * Transfer a block of memory and bail if there's an error.
		 */
		ret = t4_mem_win_rw(adap, pos, data, dir);
		if (ret)
			break;

		/*
		 * If we're reading, copy the data into the caller's memory
		 * buffer.
		 */
		if (dir)
			while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
			       len > 0) {
				*buf++ = data[offset++];
				len -= sizeof(__be32);
			}
	}

	vfree(data);
	return ret;
}

int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
		    __be32 *buf)
{
	return t4_memory_rw(adap, mtype, addr, len, buf, 0);
}
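
/*
 * Example (illustrative only): write a 32-bit-aligned blob of "len" bytes,
 * already in the byte order the consumer expects, to offset 0 of EDC0:
 *
 *	ret = t4_memory_write(adap, MEM_EDC0, 0, len, buf);
 *
 * A read of the same region would call t4_memory_rw() directly with
 * dir == 1.
 */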

#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024

/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	unsigned int v = enable ? 0xc : 0;
	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
	return ret < 0 ? ret : 0;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int i, ret, addr;
	int ec, sn, pn;
	u8 *vpd, csum;
	unsigned int vpdr_len, kw_offset, id_len;

	vpd = vmalloc(VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
	if (ret < 0)
		goto out;
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
	if (ret < 0)
		goto out;

	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
		ret = -EINVAL;
		goto out;
	}

	id_len = pci_vpd_lrdt_size(vpd);
	if (id_len > ID_LEN)
		id_len = ID_LEN;

	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
		ret = -EINVAL;
		goto out;
	}

	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
	if (vpdr_len + kw_offset > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		ret = -EINVAL;
		goto out;
	}

#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW

	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	/* the PN field has its own size; don't reuse the SN field's */
	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strim(p->pn);

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		      FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
			      1, &cclk_param, &cclk_val);

out:
	vfree(vpd);
	if (ret)
		return ret;
	p->cclk = cclk_val;

	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_DATA, val);
	t4_write_reg(adapter, SF_OP, lock |
		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
	return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
}
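
/*
 * Example (sketch of a typical flash probe sequence; not taken from this
 * file): chain a JEDEC read-ID command (SF_RD_ID above) to a three-byte
 * ID read, then drop the lock by clearing SF_OP:
 *
 *	u32 info;
 *
 *	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
 *	if (!ret)
 *		ret = sf1_read(adapter, 3, 0, 1, &info);
 *	t4_write_reg(adapter, SF_OP, 0);
 */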

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) (htonl(*data));
	}
	return 0;
}

/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */
	return ret;
}

/**
 * t4_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 * t4_get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}

/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}

/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
				int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
		"installing firmware %u.%u.%u.%u on card.\n",
		FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
		FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
		FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
		FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));

	return 1;
}

int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			     sizeof(*card_fw) / sizeof(uint32_t),
			     (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		*reset = 0;     /* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
			FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
			FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
			FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
			FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
			FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}

/**
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */
	return ret;
}

/**
 * t4_flash_cfg_addr - return the address of the flash configuration file
 * @adapter: the adapter
 *
 * Return the address within the flash where the Firmware Configuration
 * File is stored.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
	if (adapter->params.sf_size == 0x100000)
		return FLASH_FPGA_CFG_START;
	else
		return FLASH_CFG_START;
}

/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_ANEG)

/**
 * t4_link_start - apply link configuration to MAC/PHY
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: the port id
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_restart_aneg - restart autonegotiation
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 *
 * Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
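
/*
 * Example (illustrative only; "pi" stands for a hypothetical port_info of
 * the port being configured): force 10Gb/s with pause frames in both
 * directions, autoneg off:
 *
 *	lc->autoneg = AUTONEG_DISABLE;
 *	lc->requested_fc = PAUSE_RX | PAUSE_TX;
 *	lc->requested_speed = FW_PORT_CAP_SPEED_10G;
 *	ret = t4_link_start(adap, adap->mbox, pi->port_id, lc);
 */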

typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
	unsigned int mask;         /* bits to check in interrupt status */
	const char *msg;           /* message to print or NULL */
	short stat_idx;            /* stat counter to increment or -1 */
	unsigned short fatal;      /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};

/**
 * t4_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally emitting a warning or alert message.  The table is terminated
 * by an entry specifying mask 0.  Returns the number of fatal interrupt
 * conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)                     /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}
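
/*
 * Example (hypothetical, for illustration only -- the FOO_* names are
 * made up): a two-entry table where a parity error is fatal and a FIFO
 * overflow is merely logged, terminated by the mandatory { 0 } entry:
 *
 *	static const struct intr_info foo_intr_info[] = {
 *		{ FOO_PAR_ERR, "FOO parity error", -1, 1 },
 *		{ FOO_FIFO_OVFL, "FOO FIFO overflow", -1, 0 },
 *		{ 0 }
 *	};
 *
 *	if (t4_handle_intr_status(adap, FOO_INT_CAUSE, foo_intr_info))
 *		t4_fatal_err(adap);
 *
 * Every real handler below follows this shape.
 */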
secondary fault", -1, 1 }, 1332 { PCIEPINT, "PCI core primary fault", -1, 1 }, 1333 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 }, 1334 { 0 } 1335 }; 1336 1337 static struct intr_info t5_pcie_intr_info[] = { 1338 { MSTGRPPERR, "Master Response Read Queue parity error", 1339 -1, 1 }, 1340 { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 }, 1341 { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 }, 1342 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, 1343 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, 1344 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, 1345 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, 1346 { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error", 1347 -1, 1 }, 1348 { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error", 1349 -1, 1 }, 1350 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, 1351 { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 }, 1352 { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, 1353 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, 1354 { DREQWRPERR, "PCI DMA channel write request parity error", 1355 -1, 1 }, 1356 { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, 1357 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, 1358 { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 }, 1359 { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, 1360 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, 1361 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, 1362 { FIDPERR, "PCI FID parity error", -1, 1 }, 1363 { VFIDPERR, "PCI INTx clear parity error", -1, 1 }, 1364 { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 }, 1365 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, 1366 { IPRXHDRGRPPERR, "PCI IP Rx header group parity error", 1367 -1, 1 }, 1368 { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 }, 1369 { RPLPERR, "PCI IP replay buffer parity error", -1, 1 }, 1370 { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 }, 1371 { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 }, 1372 { READRSPERR, "Outbound read error", -1, 0 }, 1373 { 0 } 1374 }; 1375 1376 int fat; 1377 1378 fat = t4_handle_intr_status(adapter, 1379 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, 1380 sysbus_intr_info) + 1381 t4_handle_intr_status(adapter, 1382 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 1383 pcie_port_intr_info) + 1384 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, 1385 is_t4(adapter->params.chip) ? 1386 pcie_intr_info : t5_pcie_intr_info); 1387 1388 if (fat) 1389 t4_fatal_err(adapter); 1390 } 1391 1392 /* 1393 * TP interrupt handler. 1394 */ 1395 static void tp_intr_handler(struct adapter *adapter) 1396 { 1397 static const struct intr_info tp_intr_info[] = { 1398 { 0x3fffffff, "TP parity error", -1, 1 }, 1399 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, 1400 { 0 } 1401 }; 1402 1403 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info)) 1404 t4_fatal_err(adapter); 1405 } 1406 1407 /* 1408 * SGE interrupt handler. 

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;

	static const struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
	if (v) {
		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
			  (unsigned long long)v);
		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
	}

	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
	    v != 0)
		t4_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
		{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT, "CIM illegal write", -1, 1 },
		{ ILLRDINT, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
		t4_fatal_err(adapter);
}

/*
 * LE interrupt handler.
 */
static void le_intr_handler(struct adapter *adap)
{
	static const struct intr_info le_intr_info[] = {
		{ LIPMISS, "LE LIP miss", -1, 0 },
		{ LIP0, "LE 0 LIP error", -1, 0 },
		{ PARITYERR, "LE parity error", -1, 1 },
		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
		t4_fatal_err(adap);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
		{ BUBBLE, "MPS Tx underflow", -1, 1 },
		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
		     RXINT | TXINT | STATINT);
	t4_read_reg(adapter, MPS_INT_CAUSE);                    /* flush */
	if (fat)
		t4_fatal_err(adapter);
}
t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */ 1692 if (fat) 1693 t4_fatal_err(adapter); 1694 } 1695 1696 #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) 1697 1698 /* 1699 * EDC/MC interrupt handler. 1700 */ 1701 static void mem_intr_handler(struct adapter *adapter, int idx) 1702 { 1703 static const char name[3][5] = { "EDC0", "EDC1", "MC" }; 1704 1705 unsigned int addr, cnt_addr, v; 1706 1707 if (idx <= MEM_EDC1) { 1708 addr = EDC_REG(EDC_INT_CAUSE, idx); 1709 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); 1710 } else { 1711 addr = MC_INT_CAUSE; 1712 cnt_addr = MC_ECC_STATUS; 1713 } 1714 1715 v = t4_read_reg(adapter, addr) & MEM_INT_MASK; 1716 if (v & PERR_INT_CAUSE) 1717 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n", 1718 name[idx]); 1719 if (v & ECC_CE_INT_CAUSE) { 1720 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr)); 1721 1722 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK); 1723 if (printk_ratelimit()) 1724 dev_warn(adapter->pdev_dev, 1725 "%u %s correctable ECC data error%s\n", 1726 cnt, name[idx], cnt > 1 ? "s" : ""); 1727 } 1728 if (v & ECC_UE_INT_CAUSE) 1729 dev_alert(adapter->pdev_dev, 1730 "%s uncorrectable ECC data error\n", name[idx]); 1731 1732 t4_write_reg(adapter, addr, v); 1733 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) 1734 t4_fatal_err(adapter); 1735 } 1736 1737 /* 1738 * MA interrupt handler. 1739 */ 1740 static void ma_intr_handler(struct adapter *adap) 1741 { 1742 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); 1743 1744 if (status & MEM_PERR_INT_CAUSE) 1745 dev_alert(adap->pdev_dev, 1746 "MA parity error, parity status %#x\n", 1747 t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); 1748 if (status & MEM_WRAP_INT_CAUSE) { 1749 v = t4_read_reg(adap, MA_INT_WRAP_STATUS); 1750 dev_alert(adap->pdev_dev, "MA address wrap-around error by " 1751 "client %u to address %#x\n", 1752 MEM_WRAP_CLIENT_NUM_GET(v), 1753 MEM_WRAP_ADDRESS_GET(v) << 4); 1754 } 1755 t4_write_reg(adap, MA_INT_CAUSE, status); 1756 t4_fatal_err(adap); 1757 } 1758 1759 /* 1760 * SMB interrupt handler. 1761 */ 1762 static void smb_intr_handler(struct adapter *adap) 1763 { 1764 static const struct intr_info smb_intr_info[] = { 1765 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, 1766 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, 1767 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, 1768 { 0 } 1769 }; 1770 1771 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info)) 1772 t4_fatal_err(adap); 1773 } 1774 1775 /* 1776 * NC-SI interrupt handler. 1777 */ 1778 static void ncsi_intr_handler(struct adapter *adap) 1779 { 1780 static const struct intr_info ncsi_intr_info[] = { 1781 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, 1782 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, 1783 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, 1784 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, 1785 { 0 } 1786 }; 1787 1788 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info)) 1789 t4_fatal_err(adap); 1790 } 1791 1792 /* 1793 * XGMAC interrupt handler. 
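 * Reports Tx/Rx FIFO parity errors on the given port; any such error
 * is escalated to a fatal adapter error.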
1794  */
1795 static void xgmac_intr_handler(struct adapter *adap, int port)
1796 {
1797 	u32 v, int_cause_reg;
1798 
1799 	if (is_t4(adap->params.chip))
1800 		int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
1801 	else
1802 		int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
1803 
1804 	v = t4_read_reg(adap, int_cause_reg);
1805 
1806 	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1807 	if (!v)
1808 		return;
1809 
1810 	if (v & TXFIFO_PRTY_ERR)
1811 		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1812 			  port);
1813 	if (v & RXFIFO_PRTY_ERR)
1814 		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1815 			  port);
1816 	t4_write_reg(adap, int_cause_reg, v);
1817 	t4_fatal_err(adap);
1818 }
1819 
1820 /*
1821  * PL interrupt handler.
1822  */
1823 static void pl_intr_handler(struct adapter *adap)
1824 {
1825 	static const struct intr_info pl_intr_info[] = {
1826 		{ FATALPERR, "T4 fatal parity error", -1, 1 },
1827 		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1828 		{ 0 }
1829 	};
1830 
1831 	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1832 		t4_fatal_err(adap);
1833 }
1834 
1835 #define PF_INTR_MASK (PFSW)
1836 #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1837 		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1838 		CPL_SWITCH | SGE | ULP_TX)
1839 
1840 /**
1841  * t4_slow_intr_handler - control path interrupt handler
1842  * @adapter: the adapter
1843  *
1844  * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1845  * The designation 'slow' is because it involves register reads, while
1846  * data interrupts typically don't involve any MMIOs.
1847  */
1848 int t4_slow_intr_handler(struct adapter *adapter)
1849 {
1850 	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1851 
1852 	if (!(cause & GLBL_INTR_MASK))
1853 		return 0;
1854 	if (cause & CIM)
1855 		cim_intr_handler(adapter);
1856 	if (cause & MPS)
1857 		mps_intr_handler(adapter);
1858 	if (cause & NCSI)
1859 		ncsi_intr_handler(adapter);
1860 	if (cause & PL)
1861 		pl_intr_handler(adapter);
1862 	if (cause & SMB)
1863 		smb_intr_handler(adapter);
1864 	if (cause & XGMAC0)
1865 		xgmac_intr_handler(adapter, 0);
1866 	if (cause & XGMAC1)
1867 		xgmac_intr_handler(adapter, 1);
1868 	if (cause & XGMAC_KR0)
1869 		xgmac_intr_handler(adapter, 2);
1870 	if (cause & XGMAC_KR1)
1871 		xgmac_intr_handler(adapter, 3);
1872 	if (cause & PCIE)
1873 		pcie_intr_handler(adapter);
1874 	if (cause & MC)
1875 		mem_intr_handler(adapter, MEM_MC);
1876 	if (cause & EDC0)
1877 		mem_intr_handler(adapter, MEM_EDC0);
1878 	if (cause & EDC1)
1879 		mem_intr_handler(adapter, MEM_EDC1);
1880 	if (cause & LE)
1881 		le_intr_handler(adapter);
1882 	if (cause & TP)
1883 		tp_intr_handler(adapter);
1884 	if (cause & MA)
1885 		ma_intr_handler(adapter);
1886 	if (cause & PM_TX)
1887 		pmtx_intr_handler(adapter);
1888 	if (cause & PM_RX)
1889 		pmrx_intr_handler(adapter);
1890 	if (cause & ULP_RX)
1891 		ulprx_intr_handler(adapter);
1892 	if (cause & CPL_SWITCH)
1893 		cplsw_intr_handler(adapter);
1894 	if (cause & SGE)
1895 		sge_intr_handler(adapter);
1896 	if (cause & ULP_TX)
1897 		ulptx_intr_handler(adapter);
1898 
1899 	/* Clear the interrupts just processed for which we are the master.
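	 * Bits outside GLBL_INTR_MASK belong to other interrupt owners and
	 * are deliberately left untouched.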
	 */
1900 	t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1901 	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1902 	return 1;
1903 }
1904 
1905 /**
1906  * t4_intr_enable - enable interrupts
1907  * @adapter: the adapter whose interrupts should be enabled
1908  *
1909  * Enable PF-specific interrupts for the calling function and the top-level
1910  * interrupt concentrator for global interrupts. Interrupts are already
1911  * enabled at each module; here we just enable the roots of the interrupt
1912  * hierarchies.
1913  *
1914  * Note: this function should be called only when the driver manages
1915  * non-PF-specific interrupts from the various HW modules. Only one PCI
1916  * function at a time should be doing this.
1917  */
1918 void t4_intr_enable(struct adapter *adapter)
1919 {
1920 	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1921 
1922 	t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1923 		     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1924 		     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1925 		     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1926 		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1927 		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1928 		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
1929 		     DBFIFO_HP_INT | DBFIFO_LP_INT |
1930 		     EGRESS_SIZE_ERR);
1931 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1932 	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1933 }
1934 
1935 /**
1936  * t4_intr_disable - disable interrupts
1937  * @adapter: the adapter whose interrupts should be disabled
1938  *
1939  * Disable interrupts. We only disable the top-level interrupt
1940  * concentrators. The caller must be a PCI function managing global
1941  * interrupts.
1942  */
1943 void t4_intr_disable(struct adapter *adapter)
1944 {
1945 	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1946 
1947 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1948 	t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1949 }
1950 
1951 /**
1952  * hash_mac_addr - return the hash value of a MAC address
1953  * @addr: the 48-bit Ethernet MAC address
1954  *
1955  * Hashes a MAC address according to the hash function used by HW inexact
1956  * (hash) address matching.
1957  */
1958 static int hash_mac_addr(const u8 *addr)
1959 {
1960 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1961 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1962 	a ^= b;
1963 	a ^= (a >> 12);
1964 	a ^= (a >> 6);
1965 	return a & 0x3f;
1966 }
1967 
1968 /**
1969  * t4_config_rss_range - configure a portion of the RSS mapping table
1970  * @adapter: the adapter
1971  * @mbox: mbox to use for the FW command
1972  * @viid: virtual interface whose RSS subtable is to be written
1973  * @start: start entry in the table to write
1974  * @n: how many table entries to write
1975  * @rspq: values for the response queue lookup table
1976  * @nrspq: number of values in @rspq
1977  *
1978  * Programs the selected part of the VI's RSS mapping table with the
1979  * provided values. If @nrspq < @n the supplied values are used repeatedly
1980  * until the full table range is populated.
1981  *
1982  * The caller must ensure the values in @rspq are in the range allowed for
1983  * @viid.
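 *
 * For example, with @start = 0, @n = 128, @nrspq = 4 and
 * @rspq = {q0, q1, q2, q3}, the 128 entries are written with the
 * repeating pattern q0 q1 q2 q3 q0 q1 ..., one firmware command per
 * 32 entries.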
1984 */ 1985 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, 1986 int start, int n, const u16 *rspq, unsigned int nrspq) 1987 { 1988 int ret; 1989 const u16 *rsp = rspq; 1990 const u16 *rsp_end = rspq + nrspq; 1991 struct fw_rss_ind_tbl_cmd cmd; 1992 1993 memset(&cmd, 0, sizeof(cmd)); 1994 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | 1995 FW_CMD_REQUEST | FW_CMD_WRITE | 1996 FW_RSS_IND_TBL_CMD_VIID(viid)); 1997 cmd.retval_len16 = htonl(FW_LEN16(cmd)); 1998 1999 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */ 2000 while (n > 0) { 2001 int nq = min(n, 32); 2002 __be32 *qp = &cmd.iq0_to_iq2; 2003 2004 cmd.niqid = htons(nq); 2005 cmd.startidx = htons(start); 2006 2007 start += nq; 2008 n -= nq; 2009 2010 while (nq > 0) { 2011 unsigned int v; 2012 2013 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp); 2014 if (++rsp >= rsp_end) 2015 rsp = rspq; 2016 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp); 2017 if (++rsp >= rsp_end) 2018 rsp = rspq; 2019 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp); 2020 if (++rsp >= rsp_end) 2021 rsp = rspq; 2022 2023 *qp++ = htonl(v); 2024 nq -= 3; 2025 } 2026 2027 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); 2028 if (ret) 2029 return ret; 2030 } 2031 return 0; 2032 } 2033 2034 /** 2035 * t4_config_glbl_rss - configure the global RSS mode 2036 * @adapter: the adapter 2037 * @mbox: mbox to use for the FW command 2038 * @mode: global RSS mode 2039 * @flags: mode-specific flags 2040 * 2041 * Sets the global RSS mode. 2042 */ 2043 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, 2044 unsigned int flags) 2045 { 2046 struct fw_rss_glb_config_cmd c; 2047 2048 memset(&c, 0, sizeof(c)); 2049 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | 2050 FW_CMD_REQUEST | FW_CMD_WRITE); 2051 c.retval_len16 = htonl(FW_LEN16(c)); 2052 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { 2053 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); 2054 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { 2055 c.u.basicvirtual.mode_pkd = 2056 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); 2057 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); 2058 } else 2059 return -EINVAL; 2060 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); 2061 } 2062 2063 /** 2064 * t4_tp_get_tcp_stats - read TP's TCP MIB counters 2065 * @adap: the adapter 2066 * @v4: holds the TCP/IP counter values 2067 * @v6: holds the TCP/IPv6 counter values 2068 * 2069 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. 2070 * Either @v4 or @v6 may be %NULL to skip the corresponding stats. 
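 *
 * Each 64-bit counter is assembled from a HI/LO pair of 32-bit MIB
 * registers, e.g. tcpInSegs = ((u64)IN_SEG_HI << 32) | IN_SEG_LO.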
2071 */ 2072 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, 2073 struct tp_tcp_stats *v6) 2074 { 2075 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1]; 2076 2077 #define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST) 2078 #define STAT(x) val[STAT_IDX(x)] 2079 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) 2080 2081 if (v4) { 2082 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, 2083 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST); 2084 v4->tcpOutRsts = STAT(OUT_RST); 2085 v4->tcpInSegs = STAT64(IN_SEG); 2086 v4->tcpOutSegs = STAT64(OUT_SEG); 2087 v4->tcpRetransSegs = STAT64(RXT_SEG); 2088 } 2089 if (v6) { 2090 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, 2091 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST); 2092 v6->tcpOutRsts = STAT(OUT_RST); 2093 v6->tcpInSegs = STAT64(IN_SEG); 2094 v6->tcpOutSegs = STAT64(OUT_SEG); 2095 v6->tcpRetransSegs = STAT64(RXT_SEG); 2096 } 2097 #undef STAT64 2098 #undef STAT 2099 #undef STAT_IDX 2100 } 2101 2102 /** 2103 * t4_read_mtu_tbl - returns the values in the HW path MTU table 2104 * @adap: the adapter 2105 * @mtus: where to store the MTU values 2106 * @mtu_log: where to store the MTU base-2 log (may be %NULL) 2107 * 2108 * Reads the HW path MTU table. 2109 */ 2110 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) 2111 { 2112 u32 v; 2113 int i; 2114 2115 for (i = 0; i < NMTUS; ++i) { 2116 t4_write_reg(adap, TP_MTU_TABLE, 2117 MTUINDEX(0xff) | MTUVALUE(i)); 2118 v = t4_read_reg(adap, TP_MTU_TABLE); 2119 mtus[i] = MTUVALUE_GET(v); 2120 if (mtu_log) 2121 mtu_log[i] = MTUWIDTH_GET(v); 2122 } 2123 } 2124 2125 /** 2126 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register 2127 * @adap: the adapter 2128 * @addr: the indirect TP register address 2129 * @mask: specifies the field within the register to modify 2130 * @val: new value for the field 2131 * 2132 * Sets a field of an indirect TP register to the given value. 2133 */ 2134 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, 2135 unsigned int mask, unsigned int val) 2136 { 2137 t4_write_reg(adap, TP_PIO_ADDR, addr); 2138 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask; 2139 t4_write_reg(adap, TP_PIO_DATA, val); 2140 } 2141 2142 /** 2143 * init_cong_ctrl - initialize congestion control parameters 2144 * @a: the alpha values for congestion control 2145 * @b: the beta values for congestion control 2146 * 2147 * Initialize the congestion control parameters. 
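 *
 * Both tables are indexed by congestion control window. t4_load_mtus()
 * below combines @a with the MTU table to derive each window's additive
 * increment, inc = max((mtu - 40) * a[w] / avg_pkts[w], CC_MIN_INCR),
 * and programs @b[w] alongside it.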
2148 */ 2149 static void init_cong_ctrl(unsigned short *a, unsigned short *b) 2150 { 2151 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; 2152 a[9] = 2; 2153 a[10] = 3; 2154 a[11] = 4; 2155 a[12] = 5; 2156 a[13] = 6; 2157 a[14] = 7; 2158 a[15] = 8; 2159 a[16] = 9; 2160 a[17] = 10; 2161 a[18] = 14; 2162 a[19] = 17; 2163 a[20] = 21; 2164 a[21] = 25; 2165 a[22] = 30; 2166 a[23] = 35; 2167 a[24] = 45; 2168 a[25] = 60; 2169 a[26] = 80; 2170 a[27] = 100; 2171 a[28] = 200; 2172 a[29] = 300; 2173 a[30] = 400; 2174 a[31] = 500; 2175 2176 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; 2177 b[9] = b[10] = 1; 2178 b[11] = b[12] = 2; 2179 b[13] = b[14] = b[15] = b[16] = 3; 2180 b[17] = b[18] = b[19] = b[20] = b[21] = 4; 2181 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; 2182 b[28] = b[29] = 6; 2183 b[30] = b[31] = 7; 2184 } 2185 2186 /* The minimum additive increment value for the congestion control table */ 2187 #define CC_MIN_INCR 2U 2188 2189 /** 2190 * t4_load_mtus - write the MTU and congestion control HW tables 2191 * @adap: the adapter 2192 * @mtus: the values for the MTU table 2193 * @alpha: the values for the congestion control alpha parameter 2194 * @beta: the values for the congestion control beta parameter 2195 * 2196 * Write the HW MTU table with the supplied MTUs and the high-speed 2197 * congestion control table with the supplied alpha, beta, and MTUs. 2198 * We write the two tables together because the additive increments 2199 * depend on the MTUs. 2200 */ 2201 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, 2202 const unsigned short *alpha, const unsigned short *beta) 2203 { 2204 static const unsigned int avg_pkts[NCCTRL_WIN] = { 2205 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, 2206 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, 2207 28672, 40960, 57344, 81920, 114688, 163840, 229376 2208 }; 2209 2210 unsigned int i, w; 2211 2212 for (i = 0; i < NMTUS; ++i) { 2213 unsigned int mtu = mtus[i]; 2214 unsigned int log2 = fls(mtu); 2215 2216 if (!(mtu & ((1 << log2) >> 2))) /* round */ 2217 log2--; 2218 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) | 2219 MTUWIDTH(log2) | MTUVALUE(mtu)); 2220 2221 for (w = 0; w < NCCTRL_WIN; ++w) { 2222 unsigned int inc; 2223 2224 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], 2225 CC_MIN_INCR); 2226 2227 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) | 2228 (w << 16) | (beta[w] << 13) | inc); 2229 } 2230 } 2231 } 2232 2233 /** 2234 * get_mps_bg_map - return the buffer groups associated with a port 2235 * @adap: the adapter 2236 * @idx: the port index 2237 * 2238 * Returns a bitmap indicating which MPS buffer groups are associated 2239 * with the given port. Bit i is set if buffer group i is used by the 2240 * port. 2241 */ 2242 static unsigned int get_mps_bg_map(struct adapter *adap, int idx) 2243 { 2244 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL)); 2245 2246 if (n == 0) 2247 return idx == 0 ? 0xf : 0; 2248 if (n == 1) 2249 return idx < 2 ? 
(3 << (2 * idx)) : 0; 2250 return 1 << idx; 2251 } 2252 2253 /** 2254 * t4_get_port_type_description - return Port Type string description 2255 * @port_type: firmware Port Type enumeration 2256 */ 2257 const char *t4_get_port_type_description(enum fw_port_type port_type) 2258 { 2259 static const char *const port_type_description[] = { 2260 "R XFI", 2261 "R XAUI", 2262 "T SGMII", 2263 "T XFI", 2264 "T XAUI", 2265 "KX4", 2266 "CX4", 2267 "KX", 2268 "KR", 2269 "R SFP+", 2270 "KR/KX", 2271 "KR/KX/KX4", 2272 "R QSFP_10G", 2273 "", 2274 "R QSFP", 2275 "R BP40_BA", 2276 }; 2277 2278 if (port_type < ARRAY_SIZE(port_type_description)) 2279 return port_type_description[port_type]; 2280 return "UNKNOWN"; 2281 } 2282 2283 /** 2284 * t4_get_port_stats - collect port statistics 2285 * @adap: the adapter 2286 * @idx: the port index 2287 * @p: the stats structure to fill 2288 * 2289 * Collect statistics related to the given port from HW. 2290 */ 2291 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) 2292 { 2293 u32 bgmap = get_mps_bg_map(adap, idx); 2294 2295 #define GET_STAT(name) \ 2296 t4_read_reg64(adap, \ 2297 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ 2298 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L))) 2299 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) 2300 2301 p->tx_octets = GET_STAT(TX_PORT_BYTES); 2302 p->tx_frames = GET_STAT(TX_PORT_FRAMES); 2303 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); 2304 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); 2305 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); 2306 p->tx_error_frames = GET_STAT(TX_PORT_ERROR); 2307 p->tx_frames_64 = GET_STAT(TX_PORT_64B); 2308 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); 2309 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); 2310 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); 2311 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); 2312 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); 2313 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); 2314 p->tx_drop = GET_STAT(TX_PORT_DROP); 2315 p->tx_pause = GET_STAT(TX_PORT_PAUSE); 2316 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); 2317 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); 2318 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); 2319 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); 2320 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); 2321 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); 2322 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); 2323 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); 2324 2325 p->rx_octets = GET_STAT(RX_PORT_BYTES); 2326 p->rx_frames = GET_STAT(RX_PORT_FRAMES); 2327 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); 2328 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); 2329 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); 2330 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); 2331 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); 2332 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); 2333 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); 2334 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); 2335 p->rx_runt = GET_STAT(RX_PORT_LESS_64B); 2336 p->rx_frames_64 = GET_STAT(RX_PORT_64B); 2337 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); 2338 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); 2339 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); 2340 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); 2341 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); 2342 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); 2343 p->rx_pause = GET_STAT(RX_PORT_PAUSE); 2344 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); 2345 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); 2346 p->rx_ppp2 = 
GET_STAT(RX_PORT_PPP2); 2347 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); 2348 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); 2349 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); 2350 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); 2351 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); 2352 2353 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; 2354 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; 2355 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; 2356 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; 2357 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; 2358 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; 2359 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; 2360 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; 2361 2362 #undef GET_STAT 2363 #undef GET_STAT_COM 2364 } 2365 2366 /** 2367 * t4_wol_magic_enable - enable/disable magic packet WoL 2368 * @adap: the adapter 2369 * @port: the physical port index 2370 * @addr: MAC address expected in magic packets, %NULL to disable 2371 * 2372 * Enables/disables magic packet wake-on-LAN for the selected port. 2373 */ 2374 void t4_wol_magic_enable(struct adapter *adap, unsigned int port, 2375 const u8 *addr) 2376 { 2377 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; 2378 2379 if (is_t4(adap->params.chip)) { 2380 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); 2381 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); 2382 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); 2383 } else { 2384 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO); 2385 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI); 2386 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); 2387 } 2388 2389 if (addr) { 2390 t4_write_reg(adap, mag_id_reg_l, 2391 (addr[2] << 24) | (addr[3] << 16) | 2392 (addr[4] << 8) | addr[5]); 2393 t4_write_reg(adap, mag_id_reg_h, 2394 (addr[0] << 8) | addr[1]); 2395 } 2396 t4_set_reg_field(adap, port_cfg_reg, MAGICEN, 2397 addr ? MAGICEN : 0); 2398 } 2399 2400 /** 2401 * t4_wol_pat_enable - enable/disable pattern-based WoL 2402 * @adap: the adapter 2403 * @port: the physical port index 2404 * @map: bitmap of which HW pattern filters to set 2405 * @mask0: byte mask for bytes 0-63 of a packet 2406 * @mask1: byte mask for bytes 64-127 of a packet 2407 * @crc: Ethernet CRC for selected bytes 2408 * @enable: enable/disable switch 2409 * 2410 * Sets the pattern filters indicated in @map to mask out the bytes 2411 * specified in @mask0/@mask1 in received packets and compare the CRC of 2412 * the resulting packet against @crc. If @enable is %true pattern-based 2413 * WoL is enabled, otherwise disabled. 2414 */ 2415 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, 2416 u64 mask0, u64 mask1, unsigned int crc, bool enable) 2417 { 2418 int i; 2419 u32 port_cfg_reg; 2420 2421 if (is_t4(adap->params.chip)) 2422 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); 2423 else 2424 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); 2425 2426 if (!enable) { 2427 t4_set_reg_field(adap, port_cfg_reg, PATEN, 0); 2428 return 0; 2429 } 2430 if (map > 0xff) 2431 return -EINVAL; 2432 2433 #define EPIO_REG(name) \ 2434 (is_t4(adap->params.chip) ? 
PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
2435 	 T5_PORT_REG(port, MAC_PORT_EPIO_##name))
2436 
2437 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2438 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2439 	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2440 
2441 	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2442 		if (!(map & 1))
2443 			continue;
2444 
2445 		/* write byte masks */
2446 		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2447 		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2448 		t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2449 		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
2450 			return -ETIMEDOUT;
2451 
2452 		/* write CRC */
2453 		t4_write_reg(adap, EPIO_REG(DATA0), crc);
2454 		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2455 		t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2456 		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
2457 			return -ETIMEDOUT;
2458 	}
2459 #undef EPIO_REG
2460 
2461 	t4_set_reg_field(adap, port_cfg_reg, 0, PATEN);
2462 	return 0;
2463 }
2464 
2465 /* t4_mk_filtdelwr - create a delete filter WR
2466  * @ftid: the filter ID
2467  * @wr: the filter work request to populate
2468  * @qid: ingress queue to receive the delete notification
2469  *
2470  * Creates a filter work request to delete the supplied filter. If @qid is
2471  * negative the delete notification is suppressed.
2472  */
2473 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
2474 {
2475 	memset(wr, 0, sizeof(*wr));
2476 	wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
2477 	wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
2478 	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
2479 			      V_FW_FILTER_WR_NOREPLY(qid < 0));
2480 	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
2481 	if (qid >= 0)
2482 		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
2483 }
2484 
2485 #define INIT_CMD(var, cmd, rd_wr) do { \
2486 	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2487 				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2488 	(var).retval_len16 = htonl(FW_LEN16(var)); \
2489 } while (0)
2490 
2491 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2492 			 u32 addr, u32 val)
2493 {
2494 	struct fw_ldst_cmd c;
2495 
2496 	memset(&c, 0, sizeof(c));
2497 	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2498 				  FW_CMD_WRITE |
2499 				  FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
2500 	c.cycles_to_len16 = htonl(FW_LEN16(c));
2501 	c.u.addrval.addr = htonl(addr);
2502 	c.u.addrval.val = htonl(val);
2503 
2504 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2505 }
2506 
2507 /**
2508  * t4_mem_win_read_len - read memory through PCIE memory window
2509  * @adap: the adapter
2510  * @addr: address of first byte requested, aligned on a 32-bit boundary
2511  * @data: len bytes to hold the data read
2512  * @len: amount of data to read from window. Must be <=
2513  *	 MEMWIN0_APERTURE after adjusting for 16B for T4 and
2514  *	 128B for T5 alignment requirements of the memory window.
2515  *
2516  * Read len bytes of data from MC starting at @addr.
2517  */
2518 int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
2519 {
2520 	int i, off;
2521 	u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
2522 
2523 	/* Align on a 2KB boundary.
2524 	 */
2525 	off = addr & MEMWIN0_APERTURE;
2526 	if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
2527 		return -EINVAL;
2528 
2529 	t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
2530 		     (addr & ~MEMWIN0_APERTURE) | win_pf);
2531 	t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
2532 
2533 	for (i = 0; i < len; i += 4)
2534 		*data++ = (__force __be32) t4_read_reg(adap,
2535 						(MEMWIN0_BASE + off + i));
2536 
2537 	return 0;
2538 }
2539 
2540 /**
2541  * t4_mdio_rd - read a PHY register through MDIO
2542  * @adap: the adapter
2543  * @mbox: mailbox to use for the FW command
2544  * @phy_addr: the PHY address
2545  * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2546  * @reg: the register to read
2547  * @valp: where to store the value
2548  *
2549  * Issues a FW command through the given mailbox to read a PHY register.
2550  */
2551 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2552 	       unsigned int mmd, unsigned int reg, u16 *valp)
2553 {
2554 	int ret;
2555 	struct fw_ldst_cmd c;
2556 
2557 	memset(&c, 0, sizeof(c));
2558 	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2559 		FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2560 	c.cycles_to_len16 = htonl(FW_LEN16(c));
2561 	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2562 				   FW_LDST_CMD_MMD(mmd));
2563 	c.u.mdio.raddr = htons(reg);
2564 
2565 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2566 	if (ret == 0)
2567 		*valp = ntohs(c.u.mdio.rval);
2568 	return ret;
2569 }
2570 
2571 /**
2572  * t4_mdio_wr - write a PHY register through MDIO
2573  * @adap: the adapter
2574  * @mbox: mailbox to use for the FW command
2575  * @phy_addr: the PHY address
2576  * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2577  * @reg: the register to write
2578  * @val: value to write
2579  *
2580  * Issues a FW command through the given mailbox to write a PHY register.
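 *
 * Illustrative read-modify-write pairing with t4_mdio_rd(); the
 * register and bit here are hypothetical:
 *
 *	u16 v;
 *
 *	if (!t4_mdio_rd(adap, mbox, phy_addr, MDIO_MMD_PMAPMD, reg, &v))
 *		t4_mdio_wr(adap, mbox, phy_addr, MDIO_MMD_PMAPMD, reg,
 *			   v | bit_to_set);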
2581  */
2582 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2583 	       unsigned int mmd, unsigned int reg, u16 val)
2584 {
2585 	struct fw_ldst_cmd c;
2586 
2587 	memset(&c, 0, sizeof(c));
2588 	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2589 		FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2590 	c.cycles_to_len16 = htonl(FW_LEN16(c));
2591 	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2592 				   FW_LDST_CMD_MMD(mmd));
2593 	c.u.mdio.raddr = htons(reg);
2594 	c.u.mdio.rval = htons(val);
2595 
2596 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2597 }
2598 
2599 /**
2600  * t4_sge_decode_idma_state - decode the idma state
2601  * @adapter: the adapter
2602  * @state: the state idma is stuck in
2603  */
2604 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
2605 {
2606 	static const char * const t4_decode[] = {
2607 		"IDMA_IDLE",
2608 		"IDMA_PUSH_MORE_CPL_FIFO",
2609 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2610 		"Not used",
2611 		"IDMA_PHYSADDR_SEND_PCIEHDR",
2612 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2613 		"IDMA_PHYSADDR_SEND_PAYLOAD",
2614 		"IDMA_SEND_FIFO_TO_IMSG",
2615 		"IDMA_FL_REQ_DATA_FL_PREP",
2616 		"IDMA_FL_REQ_DATA_FL",
2617 		"IDMA_FL_DROP",
2618 		"IDMA_FL_H_REQ_HEADER_FL",
2619 		"IDMA_FL_H_SEND_PCIEHDR",
2620 		"IDMA_FL_H_PUSH_CPL_FIFO",
2621 		"IDMA_FL_H_SEND_CPL",
2622 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
2623 		"IDMA_FL_H_SEND_IP_HDR",
2624 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
2625 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
2626 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
2627 		"IDMA_FL_D_SEND_PCIEHDR",
2628 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2629 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
2630 		"IDMA_FL_SEND_PCIEHDR",
2631 		"IDMA_FL_PUSH_CPL_FIFO",
2632 		"IDMA_FL_SEND_CPL",
2633 		"IDMA_FL_SEND_PAYLOAD_FIRST",
2634 		"IDMA_FL_SEND_PAYLOAD",
2635 		"IDMA_FL_REQ_NEXT_DATA_FL",
2636 		"IDMA_FL_SEND_NEXT_PCIEHDR",
2637 		"IDMA_FL_SEND_PADDING",
2638 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
2639 		"IDMA_FL_SEND_FIFO_TO_IMSG",
2640 		"IDMA_FL_REQ_DATAFL_DONE",
2641 		"IDMA_FL_REQ_HEADERFL_DONE",
2642 	};
2643 	static const char * const t5_decode[] = {
2644 		"IDMA_IDLE",
2645 		"IDMA_ALMOST_IDLE",
2646 		"IDMA_PUSH_MORE_CPL_FIFO",
2647 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2648 		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
2649 		"IDMA_PHYSADDR_SEND_PCIEHDR",
2650 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2651 		"IDMA_PHYSADDR_SEND_PAYLOAD",
2652 		"IDMA_SEND_FIFO_TO_IMSG",
2653 		"IDMA_FL_REQ_DATA_FL",
2654 		"IDMA_FL_DROP",
2655 		"IDMA_FL_DROP_SEND_INC",
2656 		"IDMA_FL_H_REQ_HEADER_FL",
2657 		"IDMA_FL_H_SEND_PCIEHDR",
2658 		"IDMA_FL_H_PUSH_CPL_FIFO",
2659 		"IDMA_FL_H_SEND_CPL",
2660 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
2661 		"IDMA_FL_H_SEND_IP_HDR",
2662 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
2663 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
2664 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
2665 		"IDMA_FL_D_SEND_PCIEHDR",
2666 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2667 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
2668 		"IDMA_FL_SEND_PCIEHDR",
2669 		"IDMA_FL_PUSH_CPL_FIFO",
2670 		"IDMA_FL_SEND_CPL",
2671 		"IDMA_FL_SEND_PAYLOAD_FIRST",
2672 		"IDMA_FL_SEND_PAYLOAD",
2673 		"IDMA_FL_REQ_NEXT_DATA_FL",
2674 		"IDMA_FL_SEND_NEXT_PCIEHDR",
2675 		"IDMA_FL_SEND_PADDING",
2676 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
2677 	};
2678 	static const u32 sge_regs[] = {
2679 		SGE_DEBUG_DATA_LOW_INDEX_2,
2680 		SGE_DEBUG_DATA_LOW_INDEX_3,
2681 		SGE_DEBUG_DATA_HIGH_INDEX_10,
2682 	};
2683 	const char **sge_idma_decode;
2684 	int sge_idma_decode_nstates;
2685 	int i;
2686 
2687 	if (is_t4(adapter->params.chip)) {
2688 		sge_idma_decode = (const char **)t4_decode;
2689 		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
2690 	} else {
2691 		sge_idma_decode =
(const char **)t5_decode;
2692 		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
2693 	}
2694 
2695 	if (state < sge_idma_decode_nstates)
2696 		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
2697 	else
2698 		CH_WARN(adapter, "idma state %d unknown\n", state);
2699 
2700 	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
2701 		CH_WARN(adapter, "SGE register %#x value %#x\n",
2702 			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
2703 }
2704 
2705 /**
2706  * t4_fw_hello - establish communication with FW
2707  * @adap: the adapter
2708  * @mbox: mailbox to use for the FW command
2709  * @evt_mbox: mailbox to receive async FW events
2710  * @master: specifies the caller's willingness to be the device master
2711  * @state: returns the current device state (if non-NULL)
2712  *
2713  * Issues a command to establish communication with FW. Returns either
2714  * an error (negative integer) or the mailbox of the Master PF.
2715  */
2716 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2717 		enum dev_master master, enum dev_state *state)
2718 {
2719 	int ret;
2720 	struct fw_hello_cmd c;
2721 	u32 v;
2722 	unsigned int master_mbox;
2723 	int retries = FW_CMD_HELLO_RETRIES;
2724 
2725 retry:
2726 	memset(&c, 0, sizeof(c));
2727 	INIT_CMD(c, HELLO, WRITE);
2728 	c.err_to_clearinit = htonl(
2729 		FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2730 		FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2731 		FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2732 				      FW_HELLO_CMD_MBMASTER_MASK) |
2733 		FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2734 		FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2735 		FW_HELLO_CMD_CLEARINIT);
2736 
2737 	/*
2738 	 * Issue the HELLO command to the firmware. If it's not successful
2739 	 * but indicates that we got a "busy" or "timeout" condition, retry
2740 	 * the HELLO until we exhaust our retry limit.
2741 	 */
2742 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2743 	if (ret < 0) {
2744 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2745 			goto retry;
2746 		return ret;
2747 	}
2748 
2749 	v = ntohl(c.err_to_clearinit);
2750 	master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2751 	if (state) {
2752 		if (v & FW_HELLO_CMD_ERR)
2753 			*state = DEV_STATE_ERR;
2754 		else if (v & FW_HELLO_CMD_INIT)
2755 			*state = DEV_STATE_INIT;
2756 		else
2757 			*state = DEV_STATE_UNINIT;
2758 	}
2759 
2760 	/*
2761 	 * If we're not the Master PF then we need to wait around for the
2762 	 * Master PF Driver to finish setting up the adapter.
2763 	 *
2764 	 * Note that we also do this wait if we're a non-Master-capable PF and
2765 	 * there is no current Master PF; a Master PF may show up momentarily
2766 	 * and we wouldn't want to fail pointlessly. (This can happen when an
2767 	 * OS loads lots of different drivers rapidly at the same time.) In
2768 	 * this case, the Master PF returned by the firmware will be
2769 	 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2770 	 */
2771 	if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2772 	    master_mbox != mbox) {
2773 		int waiting = FW_CMD_HELLO_TIMEOUT;
2774 
2775 		/*
2776 		 * Wait for the firmware to either indicate an error or
2777 		 * initialized state. If we see either of these we bail out
2778 		 * and report the issue to the caller. If we exhaust the
2779 		 * "hello timeout" and we haven't exhausted our retries, try
2780 		 * again. Otherwise bail with a timeout error.
2781 		 */
2782 		for (;;) {
2783 			u32 pcie_fw;
2784 
2785 			msleep(50);
2786 			waiting -= 50;
2787 
2788 			/*
2789 			 * If neither Error nor Initialized are indicated
2790 			 * by the firmware keep waiting until we exhaust our
2791 			 * timeout ...
and then retry if we haven't exhausted
2792 			 * our retries ...
2793 			 */
2794 			pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2795 			if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2796 				if (waiting <= 0) {
2797 					if (retries-- > 0)
2798 						goto retry;
2799 
2800 					return -ETIMEDOUT;
2801 				}
2802 				continue;
2803 			}
2804 
2805 			/*
2806 			 * We either have an Error or Initialized condition;
2807 			 * report errors preferentially.
2808 			 */
2809 			if (state) {
2810 				if (pcie_fw & FW_PCIE_FW_ERR)
2811 					*state = DEV_STATE_ERR;
2812 				else if (pcie_fw & FW_PCIE_FW_INIT)
2813 					*state = DEV_STATE_INIT;
2814 			}
2815 
2816 			/*
2817 			 * If we arrived before a Master PF was selected and
2818 			 * there's now a valid Master PF, grab its identity
2819 			 * for our caller.
2820 			 */
2821 			if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2822 			    (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2823 				master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2824 			break;
2825 		}
2826 	}
2827 
2828 	return master_mbox;
2829 }
2830 
2831 /**
2832  * t4_fw_bye - end communication with FW
2833  * @adap: the adapter
2834  * @mbox: mailbox to use for the FW command
2835  *
2836  * Issues a command to terminate communication with FW.
2837  */
2838 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2839 {
2840 	struct fw_bye_cmd c;
2841 
2842 	memset(&c, 0, sizeof(c));
2843 	INIT_CMD(c, BYE, WRITE);
2844 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2845 }
2846 
2847 /**
2848  * t4_early_init - ask FW to initialize the device
2849  * @adap: the adapter
2850  * @mbox: mailbox to use for the FW command
2851  *
2852  * Issues a command to FW to partially initialize the device. This
2853  * performs initialization that generally doesn't depend on user input.
2854  */
2855 int t4_early_init(struct adapter *adap, unsigned int mbox)
2856 {
2857 	struct fw_initialize_cmd c;
2858 
2859 	memset(&c, 0, sizeof(c));
2860 	INIT_CMD(c, INITIALIZE, WRITE);
2861 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2862 }
2863 
2864 /**
2865  * t4_fw_reset - issue a reset to FW
2866  * @adap: the adapter
2867  * @mbox: mailbox to use for the FW command
2868  * @reset: specifies the type of reset to perform
2869  *
2870  * Issues a reset command of the specified type to FW.
2871  */
2872 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2873 {
2874 	struct fw_reset_cmd c;
2875 
2876 	memset(&c, 0, sizeof(c));
2877 	INIT_CMD(c, RESET, WRITE);
2878 	c.val = htonl(reset);
2879 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2880 }
2881 
2882 /**
2883  * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2884  * @adap: the adapter
2885  * @mbox: mailbox to use for the FW RESET command (if desired)
2886  * @force: force uP into RESET even if FW RESET command fails
2887  *
2888  * Issues a RESET command to firmware (if desired) with a HALT indication
2889  * and then puts the microprocessor into RESET state. The RESET command
2890  * will only be issued if a legitimate mailbox is provided (mbox <=
2891  * FW_PCIE_FW_MASTER_MASK).
2892  *
2893  * This is generally used in order for the host to safely manipulate the
2894  * adapter without fear of conflicting with whatever the firmware might
2895  * be doing. The only way out of this state is to RESTART the firmware
2896  * ...
2897  */
2898 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
2899 {
2900 	int ret = 0;
2901 
2902 	/*
2903 	 * If a legitimate mailbox is provided, issue a RESET command
2904 	 * with a HALT indication.
2905 	 */
2906 	if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2907 		struct fw_reset_cmd c;
2908 
2909 		memset(&c, 0, sizeof(c));
2910 		INIT_CMD(c, RESET, WRITE);
2911 		c.val = htonl(PIORST | PIORSTMODE);
2912 		c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
2913 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2914 	}
2915 
2916 	/*
2917 	 * Normally we won't complete the operation if the firmware RESET
2918 	 * command fails but if our caller insists we'll go ahead and put the
2919 	 * uP into RESET. This can be useful if the firmware is hung or even
2920 	 * missing ... We'll have to take the risk of putting the uP into
2921 	 * RESET without the cooperation of firmware in that case.
2922 	 *
2923 	 * We also force the firmware's HALT flag to be on in case we bypassed
2924 	 * the firmware RESET command above or we're dealing with old firmware
2925 	 * which doesn't have the HALT capability. This will serve as a flag
2926 	 * for the incoming firmware to know that it's coming out of a HALT
2927 	 * rather than a RESET ... if it's new enough to understand that ...
2928 	 */
2929 	if (ret == 0 || force) {
2930 		t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
2931 		t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
2932 				 FW_PCIE_FW_HALT);
2933 	}
2934 
2935 	/*
2936 	 * And we always return the result of the firmware RESET command
2937 	 * even when we force the uP into RESET ...
2938 	 */
2939 	return ret;
2940 }
2941 
2942 /**
2943  * t4_fw_restart - restart the firmware by taking the uP out of RESET
2944  * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
2945  * @reset: if we want to do a RESET to restart things
2946  *
2947  * Restart firmware previously halted by t4_fw_halt(). On successful
2948  * return the previous PF Master remains as the new PF Master and there
2949  * is no need to issue a new HELLO command, etc.
2950  *
2951  * We do this in two ways:
2952  *
2953  * 1. If we're dealing with newer firmware we'll simply want to take
2954  *    the chip's microprocessor out of RESET. This will cause the
2955  *    firmware to start up from its start vector. And then we'll loop
2956  *    until the firmware indicates it's started again (PCIE_FW.HALT
2957  *    reset to 0) or we timeout.
2958  *
2959  * 2. If we're dealing with older firmware then we'll need to RESET
2960  *    the chip since older firmware won't recognize the PCIE_FW.HALT
2961  *    flag and automatically RESET itself on startup.
2962  */
2963 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
2964 {
2965 	if (reset) {
2966 		/*
2967 		 * Since we're directing the RESET instead of the firmware
2968 		 * doing it automatically, we need to clear the PCIE_FW.HALT
2969 		 * bit.
2970 		 */
2971 		t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
2972 
2973 		/*
2974 		 * If we've been given a valid mailbox, first try to get the
2975 		 * firmware to do the RESET. If that works, great and we can
2976 		 * return success. Otherwise, if we haven't been given a
2977 		 * valid mailbox or the RESET command failed, fall back to
2978 		 * hitting the chip with a hammer.
2979 */ 2980 if (mbox <= FW_PCIE_FW_MASTER_MASK) { 2981 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); 2982 msleep(100); 2983 if (t4_fw_reset(adap, mbox, 2984 PIORST | PIORSTMODE) == 0) 2985 return 0; 2986 } 2987 2988 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE); 2989 msleep(2000); 2990 } else { 2991 int ms; 2992 2993 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); 2994 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { 2995 if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT)) 2996 return 0; 2997 msleep(100); 2998 ms += 100; 2999 } 3000 return -ETIMEDOUT; 3001 } 3002 return 0; 3003 } 3004 3005 /** 3006 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW 3007 * @adap: the adapter 3008 * @mbox: mailbox to use for the FW RESET command (if desired) 3009 * @fw_data: the firmware image to write 3010 * @size: image size 3011 * @force: force upgrade even if firmware doesn't cooperate 3012 * 3013 * Perform all of the steps necessary for upgrading an adapter's 3014 * firmware image. Normally this requires the cooperation of the 3015 * existing firmware in order to halt all existing activities 3016 * but if an invalid mailbox token is passed in we skip that step 3017 * (though we'll still put the adapter microprocessor into RESET in 3018 * that case). 3019 * 3020 * On successful return the new firmware will have been loaded and 3021 * the adapter will have been fully RESET losing all previous setup 3022 * state. On unsuccessful return the adapter may be completely hosed ... 3023 * positive errno indicates that the adapter is ~probably~ intact, a 3024 * negative errno indicates that things are looking bad ... 3025 */ 3026 static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, 3027 const u8 *fw_data, unsigned int size, int force) 3028 { 3029 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; 3030 int reset, ret; 3031 3032 ret = t4_fw_halt(adap, mbox, force); 3033 if (ret < 0 && !force) 3034 return ret; 3035 3036 ret = t4_load_fw(adap, fw_data, size); 3037 if (ret < 0) 3038 return ret; 3039 3040 /* 3041 * Older versions of the firmware don't understand the new 3042 * PCIE_FW.HALT flag and so won't know to perform a RESET when they 3043 * restart. So for newly loaded older firmware we'll have to do the 3044 * RESET for it so it starts up on a clean slate. We can tell if 3045 * the newly loaded firmware will handle this right by checking 3046 * its header flags to see if it advertises the capability. 3047 */ 3048 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); 3049 return t4_fw_restart(adap, mbox, reset); 3050 } 3051 3052 /** 3053 * t4_fixup_host_params - fix up host-dependent parameters 3054 * @adap: the adapter 3055 * @page_size: the host's Base Page Size 3056 * @cache_line_size: the host's Cache Line Size 3057 * 3058 * Various registers in T4 contain values which are dependent on the 3059 * host's Base Page and Cache Line Sizes. This function will fix all of 3060 * those registers with the appropriate values as passed in ... 3061 */ 3062 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, 3063 unsigned int cache_line_size) 3064 { 3065 unsigned int page_shift = fls(page_size) - 1; 3066 unsigned int sge_hps = page_shift - 10; 3067 unsigned int stat_len = cache_line_size > 64 ? 128 : 64; 3068 unsigned int fl_align = cache_line_size < 32 ? 
32 : cache_line_size;
3069 	unsigned int fl_align_log = fls(fl_align) - 1;
3070 
3071 	t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
3072 		     HOSTPAGESIZEPF0(sge_hps) |
3073 		     HOSTPAGESIZEPF1(sge_hps) |
3074 		     HOSTPAGESIZEPF2(sge_hps) |
3075 		     HOSTPAGESIZEPF3(sge_hps) |
3076 		     HOSTPAGESIZEPF4(sge_hps) |
3077 		     HOSTPAGESIZEPF5(sge_hps) |
3078 		     HOSTPAGESIZEPF6(sge_hps) |
3079 		     HOSTPAGESIZEPF7(sge_hps));
3080 
3081 	t4_set_reg_field(adap, SGE_CONTROL,
3082 			 INGPADBOUNDARY_MASK |
3083 			 EGRSTATUSPAGESIZE_MASK,
3084 			 INGPADBOUNDARY(fl_align_log - 5) |
3085 			 EGRSTATUSPAGESIZE(stat_len != 64));
3086 
3087 	/*
3088 	 * Adjust various SGE Free List Host Buffer Sizes.
3089 	 *
3090 	 * This is something of a crock since we're using fixed indices into
3091 	 * the array which are also known by the sge.c code and the T4
3092 	 * Firmware Configuration File. We need to come up with a much better
3093 	 * approach to managing this array. For now, the first four entries
3094 	 * are:
3095 	 *
3096 	 * 0: Host Page Size
3097 	 * 1: 64KB
3098 	 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
3099 	 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
3100 	 *
3101 	 * For the single-MTU buffers in unpacked mode we need to include
3102 	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
3103 	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
3104 	 * Padding boundary. All of these are accommodated in the Factory
3105 	 * Default Firmware Configuration File but we need to adjust it for
3106 	 * this host's cache line size.
3107 	 */
3108 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
3109 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
3110 		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
3111 		     & ~(fl_align-1));
3112 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
3113 		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
3114 		     & ~(fl_align-1));
3115 
3116 	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
3117 
3118 	return 0;
3119 }
3120 
3121 /**
3122  * t4_fw_initialize - ask FW to initialize the device
3123  * @adap: the adapter
3124  * @mbox: mailbox to use for the FW command
3125  *
3126  * Issues a command to FW to partially initialize the device. This
3127  * performs initialization that generally doesn't depend on user input.
3128  */
3129 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3130 {
3131 	struct fw_initialize_cmd c;
3132 
3133 	memset(&c, 0, sizeof(c));
3134 	INIT_CMD(c, INITIALIZE, WRITE);
3135 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3136 }
3137 
3138 /**
3139  * t4_query_params - query FW or device parameters
3140  * @adap: the adapter
3141  * @mbox: mailbox to use for the FW command
3142  * @pf: the PF
3143  * @vf: the VF
3144  * @nparams: the number of parameters
3145  * @params: the parameter names
3146  * @val: the parameter values
3147  *
3148  * Reads the value of FW or device parameters. Up to 7 parameters can be
3149  * queried at once.
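 *
 * A minimal sketch (the parameter ids param_id0/param_id1 are
 * placeholders):
 *
 *	u32 params[2] = { param_id0, param_id1 }, vals[2];
 *	int ret = t4_query_params(adap, mbox, pf, 0, 2, params, vals);
 *
 * On success (ret == 0) the queried values are returned in vals[0]
 * and vals[1], in the same order as @params.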
3150 */ 3151 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 3152 unsigned int vf, unsigned int nparams, const u32 *params, 3153 u32 *val) 3154 { 3155 int i, ret; 3156 struct fw_params_cmd c; 3157 __be32 *p = &c.param[0].mnem; 3158 3159 if (nparams > 7) 3160 return -EINVAL; 3161 3162 memset(&c, 0, sizeof(c)); 3163 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | 3164 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) | 3165 FW_PARAMS_CMD_VFN(vf)); 3166 c.retval_len16 = htonl(FW_LEN16(c)); 3167 for (i = 0; i < nparams; i++, p += 2) 3168 *p = htonl(*params++); 3169 3170 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3171 if (ret == 0) 3172 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) 3173 *val++ = ntohl(*p); 3174 return ret; 3175 } 3176 3177 /** 3178 * t4_set_params - sets FW or device parameters 3179 * @adap: the adapter 3180 * @mbox: mailbox to use for the FW command 3181 * @pf: the PF 3182 * @vf: the VF 3183 * @nparams: the number of parameters 3184 * @params: the parameter names 3185 * @val: the parameter values 3186 * 3187 * Sets the value of FW or device parameters. Up to 7 parameters can be 3188 * specified at once. 3189 */ 3190 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 3191 unsigned int vf, unsigned int nparams, const u32 *params, 3192 const u32 *val) 3193 { 3194 struct fw_params_cmd c; 3195 __be32 *p = &c.param[0].mnem; 3196 3197 if (nparams > 7) 3198 return -EINVAL; 3199 3200 memset(&c, 0, sizeof(c)); 3201 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | 3202 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) | 3203 FW_PARAMS_CMD_VFN(vf)); 3204 c.retval_len16 = htonl(FW_LEN16(c)); 3205 while (nparams--) { 3206 *p++ = htonl(*params++); 3207 *p++ = htonl(*val++); 3208 } 3209 3210 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3211 } 3212 3213 /** 3214 * t4_cfg_pfvf - configure PF/VF resource limits 3215 * @adap: the adapter 3216 * @mbox: mailbox to use for the FW command 3217 * @pf: the PF being configured 3218 * @vf: the VF being configured 3219 * @txq: the max number of egress queues 3220 * @txq_eth_ctrl: the max number of egress Ethernet or control queues 3221 * @rxqi: the max number of interrupt-capable ingress queues 3222 * @rxq: the max number of interruptless ingress queues 3223 * @tc: the PCI traffic class 3224 * @vi: the max number of virtual interfaces 3225 * @cmask: the channel access rights mask for the PF/VF 3226 * @pmask: the port access rights mask for the PF/VF 3227 * @nexact: the maximum number of exact MPS filters 3228 * @rcaps: read capabilities 3229 * @wxcaps: write/execute capabilities 3230 * 3231 * Configures resource limits and capabilities for a physical or virtual 3232 * function. 
3233 */ 3234 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, 3235 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, 3236 unsigned int rxqi, unsigned int rxq, unsigned int tc, 3237 unsigned int vi, unsigned int cmask, unsigned int pmask, 3238 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) 3239 { 3240 struct fw_pfvf_cmd c; 3241 3242 memset(&c, 0, sizeof(c)); 3243 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST | 3244 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) | 3245 FW_PFVF_CMD_VFN(vf)); 3246 c.retval_len16 = htonl(FW_LEN16(c)); 3247 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) | 3248 FW_PFVF_CMD_NIQ(rxq)); 3249 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) | 3250 FW_PFVF_CMD_PMASK(pmask) | 3251 FW_PFVF_CMD_NEQ(txq)); 3252 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) | 3253 FW_PFVF_CMD_NEXACTF(nexact)); 3254 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) | 3255 FW_PFVF_CMD_WX_CAPS(wxcaps) | 3256 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); 3257 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3258 } 3259 3260 /** 3261 * t4_alloc_vi - allocate a virtual interface 3262 * @adap: the adapter 3263 * @mbox: mailbox to use for the FW command 3264 * @port: physical port associated with the VI 3265 * @pf: the PF owning the VI 3266 * @vf: the VF owning the VI 3267 * @nmac: number of MAC addresses needed (1 to 5) 3268 * @mac: the MAC addresses of the VI 3269 * @rss_size: size of RSS table slice associated with this VI 3270 * 3271 * Allocates a virtual interface for the given physical port. If @mac is 3272 * not %NULL it contains the MAC addresses of the VI as assigned by FW. 3273 * @mac should be large enough to hold @nmac Ethernet addresses, they are 3274 * stored consecutively so the space needed is @nmac * 6 bytes. 3275 * Returns a negative error number or the non-negative VI id. 
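 *
 * For example, with @nmac = 2 the buffer holds the first address in
 * mac[0..5] and the second in mac[6..11].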
3276 */ 3277 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, 3278 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, 3279 unsigned int *rss_size) 3280 { 3281 int ret; 3282 struct fw_vi_cmd c; 3283 3284 memset(&c, 0, sizeof(c)); 3285 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST | 3286 FW_CMD_WRITE | FW_CMD_EXEC | 3287 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf)); 3288 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c)); 3289 c.portid_pkd = FW_VI_CMD_PORTID(port); 3290 c.nmac = nmac - 1; 3291 3292 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3293 if (ret) 3294 return ret; 3295 3296 if (mac) { 3297 memcpy(mac, c.mac, sizeof(c.mac)); 3298 switch (nmac) { 3299 case 5: 3300 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); 3301 case 4: 3302 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); 3303 case 3: 3304 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); 3305 case 2: 3306 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); 3307 } 3308 } 3309 if (rss_size) 3310 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd)); 3311 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid)); 3312 } 3313 3314 /** 3315 * t4_set_rxmode - set Rx properties of a virtual interface 3316 * @adap: the adapter 3317 * @mbox: mailbox to use for the FW command 3318 * @viid: the VI id 3319 * @mtu: the new MTU or -1 3320 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change 3321 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change 3322 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change 3323 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change 3324 * @sleep_ok: if true we may sleep while awaiting command completion 3325 * 3326 * Sets Rx properties of a virtual interface. 3327 */ 3328 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, 3329 int mtu, int promisc, int all_multi, int bcast, int vlanex, 3330 bool sleep_ok) 3331 { 3332 struct fw_vi_rxmode_cmd c; 3333 3334 /* convert to FW values */ 3335 if (mtu < 0) 3336 mtu = FW_RXMODE_MTU_NO_CHG; 3337 if (promisc < 0) 3338 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; 3339 if (all_multi < 0) 3340 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; 3341 if (bcast < 0) 3342 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; 3343 if (vlanex < 0) 3344 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK; 3345 3346 memset(&c, 0, sizeof(c)); 3347 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST | 3348 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid)); 3349 c.retval_len16 = htonl(FW_LEN16(c)); 3350 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) | 3351 FW_VI_RXMODE_CMD_PROMISCEN(promisc) | 3352 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | 3353 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | 3354 FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); 3355 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 3356 } 3357 3358 /** 3359 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses 3360 * @adap: the adapter 3361 * @mbox: mailbox to use for the FW command 3362 * @viid: the VI id 3363 * @free: if true any existing filters for this VI id are first removed 3364 * @naddr: the number of MAC addresses to allocate filters for (up to 7) 3365 * @addr: the MAC address(es) 3366 * @idx: where to store the index of each allocated filter 3367 * @hash: pointer to hash address filter bitmap 3368 * @sleep_ok: call is allowed to sleep 3369 * 3370 * Allocates an exact-match filter for each of the supplied addresses and 3371 * sets it to the corresponding address. 
/**
 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @free: if true any existing filters for this VI id are first removed
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @idx: where to store the index of each allocated filter
 * @hash: pointer to hash address filter bitmap
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an exact-match filter for each of the supplied addresses and
 * sets it to the corresponding address. If @idx is not %NULL it should
 * have at least @naddr entries, each of which will be set to the index of
 * the filter allocated for the corresponding MAC address. If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL, addresses that fail to allocate an exact filter
 * are hashed and the hash filter bitmap pointed to by @hash is updated.
 *
 * Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
                      unsigned int viid, bool free, unsigned int naddr,
                      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
        int i, ret;
        struct fw_vi_mac_cmd c;
        struct fw_vi_mac_exact *p;
        unsigned int max_naddr = is_t4(adap->params.chip) ?
                                       NUM_MPS_CLS_SRAM_L_INSTANCES :
                                       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;

        if (naddr > 7)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
                             FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
                             FW_VI_MAC_CMD_VIID(viid));
        c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
                                    FW_CMD_LEN16((naddr + 2) / 2));

        for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
                p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
                                        FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
                memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
        }

        ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
        if (ret)
                return ret;

        for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
                u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));

                if (idx)
                        idx[i] = index >= max_naddr ? 0xffff : index;
                if (index < max_naddr)
                        ret++;
                else if (hash)
                        *hash |= (1ULL << hash_mac_addr(addr[i]));
        }
        return ret;
}
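
/*
 * Usage sketch (illustrative; "maclist" and "secondary_mac" are
 * hypothetical caller state): program two unicast addresses, then fall
 * back to the inexact hash filter, via t4_set_addr_hash() below, for
 * any address that did not get an exact-match slot:
 *
 *	const u8 *maclist[2] = { netdev->dev_addr, secondary_mac };
 *	u16 filt_idx[2];
 *	u64 mhash = 0;
 *	int ret;
 *
 *	ret = t4_alloc_mac_filt(adap, mbox, viid, true, 2, maclist,
 *				filt_idx, &mhash, true);
 *	if (ret >= 0 && mhash)
 *		ret = t4_set_addr_hash(adap, mbox, viid, true, mhash, true);
 */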
/**
 * t4_change_mac - modifies the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @add_smt: if true also add the address to the HW SMT
 *
 * Modifies an exact-match filter and sets it to the new MAC address.
 * Note that in general it is not possible to modify the value of a given
 * filter, so the generic way to modify an address filter is to free the
 * one being used by the old address value and allocate a new filter for
 * the new address value. @idx can be -1 if the address is a new addition.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
                  int idx, const u8 *addr, bool persist, bool add_smt)
{
        int ret, mode;
        struct fw_vi_mac_cmd c;
        struct fw_vi_mac_exact *p = c.u.exact;
        unsigned int max_mac_addr = is_t4(adap->params.chip) ?
                                    NUM_MPS_CLS_SRAM_L_INSTANCES :
                                    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;

        if (idx < 0)                                     /* new allocation */
                idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
        mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

        memset(&c, 0, sizeof(c));
        c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
                             FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
        c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
        p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
                                FW_VI_MAC_CMD_SMAC_RESULT(mode) |
                                FW_VI_MAC_CMD_IDX(idx));
        memcpy(p->macaddr, addr, sizeof(p->macaddr));

        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
        if (ret == 0) {
                ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
                if (ret >= max_mac_addr)
                        ret = -ENOMEM;
        }
        return ret;
}

/**
 * t4_set_addr_hash - program the MAC inexact-match hash filter
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @ucast: whether the hash filter should also match unicast addresses
 * @vec: the value to be written to the hash filter
 * @sleep_ok: call is allowed to sleep
 *
 * Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
                     bool ucast, u64 vec, bool sleep_ok)
{
        struct fw_vi_mac_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
                             FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
        c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
                                    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
                                    FW_CMD_LEN16(1));
        c.u.hash.hashvec = cpu_to_be64(vec);
        return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 * t4_enable_vi - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface.
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
                 bool rx_en, bool tx_en)
{
        struct fw_vi_enable_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
                             FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
        c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
                               FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
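
/*
 * Usage sketch (illustrative): remember the filter slot returned by
 * t4_change_mac() so a later address change can reuse it -- the cxgb4
 * driver itself caches it in pi->xact_addr_filt -- then bring the
 * interface up:
 *
 *	ret = t4_change_mac(adap, mbox, pi->viid, pi->xact_addr_filt,
 *			    newaddr, true, true);
 *	if (ret >= 0) {
 *		pi->xact_addr_filt = ret;
 *		ret = t4_enable_vi(adap, mbox, pi->viid, true, true);
 *	}
 */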
/**
 * t4_identify_port - identify a VI's port by blinking its LED
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
                     unsigned int nblinks)
{
        struct fw_vi_enable_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
                             FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
        c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
        c.blinkdur = htons(nblinks);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_iq_free - free an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
               unsigned int vf, unsigned int iqtype, unsigned int iqid,
               unsigned int fl0id, unsigned int fl1id)
{
        struct fw_iq_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
                            FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
                            FW_IQ_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
        c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
        c.iqid = htons(iqid);
        c.fl0id = htons(fl0id);
        c.fl1id = htons(fl1id);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_eth_eq_free - free an Ethernet egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                   unsigned int vf, unsigned int eqid)
{
        struct fw_eq_eth_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
                            FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
                            FW_EQ_ETH_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
        c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ctrl_eq_free - free a control egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid)
{
        struct fw_eq_ctrl_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
                            FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
                            FW_EQ_CTRL_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
        c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
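
/*
 * Usage sketch (illustrative; the rxq/txq structures stand in for the
 * driver's own SGE bookkeeping): an Ethernet queue set is typically torn
 * down by freeing the egress queue and then the ingress queue together
 * with its free list, with 0xffff marking the absent second free list:
 *
 *	t4_eth_eq_free(adap, mbox, pf, 0, txq->q.cntxt_id);
 *	t4_iq_free(adap, mbox, pf, 0, FW_IQ_TYPE_FL_INT_CAP,
 *		   rxq->rspq.cntxt_id, rxq->fl.cntxt_id, 0xffff);
 */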
/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid)
{
        struct fw_eq_ofld_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
                            FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
                            FW_EQ_OFLD_CMD_VFN(vf));
        c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
        c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
        u8 opcode = *(const u8 *)rpl;

        if (opcode == FW_PORT_CMD) {    /* link/module state change message */
                int speed = 0, fc = 0;
                const struct fw_port_cmd *p = (void *)rpl;
                int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
                int port = adap->chan_map[chan];
                struct port_info *pi = adap2pinfo(adap, port);
                struct link_config *lc = &pi->link_cfg;
                u32 stat = ntohl(p->u.info.lstatus_to_modtype);
                int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
                u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);

                if (stat & FW_PORT_CMD_RXPAUSE)
                        fc |= PAUSE_RX;
                if (stat & FW_PORT_CMD_TXPAUSE)
                        fc |= PAUSE_TX;
                if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
                        speed = 100;
                else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
                        speed = 1000;
                else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
                        speed = 10000;
                else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
                        speed = 40000;

                if (link_ok != lc->link_ok || speed != lc->speed ||
                    fc != lc->fc) {                     /* something changed */
                        lc->link_ok = link_ok;
                        lc->speed = speed;
                        lc->fc = fc;
                        t4_os_link_changed(adap, port, link_ok);
                }
                if (mod != pi->mod_type) {
                        pi->mod_type = mod;
                        t4_os_portmod_changed(adap, port);
                }
        }
        return 0;
}

static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
        u16 val;

        if (pci_is_pcie(adapter->pdev)) {
                pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
                p->speed = val & PCI_EXP_LNKSTA_CLS;
                p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
        }
}

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
        lc->supported = caps;
        lc->requested_speed = 0;
        lc->speed = 0;
        lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
        if (lc->supported & FW_PORT_CAP_ANEG) {
                lc->advertising = lc->supported & ADVERT_MASK;
                lc->autoneg = AUTONEG_ENABLE;
                lc->requested_fc |= PAUSE_AUTONEG;
        } else {
                lc->advertising = 0;
                lc->autoneg = AUTONEG_DISABLE;
        }
}
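
/*
 * Example (illustrative): a PCI_EXP_LNKSTA value of 0x0042 read by
 * get_pci_mode() above decodes as current link speed code 2 (5 GT/s)
 * and a x4 negotiated link width:
 *
 *	speed = 0x0042 & PCI_EXP_LNKSTA_CLS;		yields 2
 *	width = (0x0042 & PCI_EXP_LNKSTA_NLW) >> 4;	yields 4
 */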
int t4_wait_dev_ready(struct adapter *adap)
{
        if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
                return 0;
        msleep(500);
        return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
}

static int get_flash_params(struct adapter *adap)
{
        int ret;
        u32 info;

        ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
        if (!ret)
                ret = sf1_read(adap, 3, 0, 1, &info);
        t4_write_reg(adap, SF_OP, 0);                   /* unlock SF */
        if (ret)
                return ret;

        if ((info & 0xff) != 0x20)                /* not a Numonix flash */
                return -EINVAL;
        info >>= 16;                              /* log2 of size */
        if (info >= 0x14 && info < 0x18)
                adap->params.sf_nsec = 1 << (info - 16);
        else if (info == 0x18)
                adap->params.sf_nsec = 64;
        else
                return -EINVAL;
        adap->params.sf_size = 1 << info;
        adap->params.sf_fw_start =
                t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
        return 0;
}

/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter)
{
        int ret, ver;
        uint16_t device_id;
        u32 pl_rev;

        ret = t4_wait_dev_ready(adapter);
        if (ret < 0)
                return ret;

        get_pci_mode(adapter, &adapter->params.pci);
        pl_rev = G_REV(t4_read_reg(adapter, PL_REV));

        ret = get_flash_params(adapter);
        if (ret < 0) {
                dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
                return ret;
        }

        /* Retrieve adapter's device ID */
        pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
        ver = device_id >> 12;
        adapter->params.chip = 0;
        switch (ver) {
        case CHELSIO_T4:
                adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
                break;
        case CHELSIO_T5:
                adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
                break;
        default:
                dev_err(adapter->pdev_dev, "Device %d is not supported\n",
                        device_id);
                return -EINVAL;
        }

        init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

        /*
         * Default port for debugging in case we can't reach FW.
         */
        adapter->params.nports = 1;
        adapter->params.portvec = 1;
        adapter->params.vpd.cclk = 50000;
        return 0;
}
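
/*
 * Usage sketch (illustrative): at probe time the driver prepares the
 * adapter first, and only once basic device access is working does it
 * query TP parameters and initialize the ports (adapter->fn is the
 * PCI function the cxgb4 driver uses as its mailbox):
 *
 *	ret = t4_prep_adapter(adapter);
 *	if (!ret)
 *		ret = t4_init_tp_params(adapter);
 *	if (!ret)
 *		ret = t4_port_init(adapter, adapter->fn, adapter->fn, 0);
 */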
/**
 * t4_init_tp_params - initialize adap->params.tp
 * @adap: the adapter
 *
 * Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap)
{
        int chan;
        u32 v;

        v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
        adap->params.tp.tre = TIMERRESOLUTION_GET(v);
        adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);

        /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
        for (chan = 0; chan < NCHAN; chan++)
                adap->params.tp.tx_modq[chan] = chan;

        /* Cache the adapter's Compressed Filter Mode and global Ingress
         * Configuration.
         */
        t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
                         &adap->params.tp.vlan_pri_map, 1,
                         TP_VLAN_PRI_MAP);
        t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
                         &adap->params.tp.ingress_config, 1,
                         TP_INGRESS_CONFIG);

        /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
         * shift positions of several elements of the Compressed Filter Tuple
         * for this adapter which we need frequently ...
         */
        adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
        adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
        adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
        adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
                                                               F_PROTOCOL);

        /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
         * represents the presence of an Outer VLAN instead of a VNIC ID.
         */
        if ((adap->params.tp.ingress_config & F_VNIC) == 0)
                adap->params.tp.vnic_shift = -1;

        return 0;
}

/**
 * t4_filter_field_shift - calculate filter field shift
 * @adap: the adapter
 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 * Return the shift position of a filter field within the Compressed
 * Filter Tuple. The filter field is specified via its selection bit
 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
 */
int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
{
        unsigned int filter_mode = adap->params.tp.vlan_pri_map;
        unsigned int sel;
        int field_shift;

        if ((filter_mode & filter_sel) == 0)
                return -1;

        for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
                switch (filter_mode & sel) {
                case F_FCOE:
                        field_shift += W_FT_FCOE;
                        break;
                case F_PORT:
                        field_shift += W_FT_PORT;
                        break;
                case F_VNIC_ID:
                        field_shift += W_FT_VNIC_ID;
                        break;
                case F_VLAN:
                        field_shift += W_FT_VLAN;
                        break;
                case F_TOS:
                        field_shift += W_FT_TOS;
                        break;
                case F_PROTOCOL:
                        field_shift += W_FT_PROTOCOL;
                        break;
                case F_ETHERTYPE:
                        field_shift += W_FT_ETHERTYPE;
                        break;
                case F_MACMATCH:
                        field_shift += W_FT_MACMATCH;
                        break;
                case F_MPSHITTYPE:
                        field_shift += W_FT_MPSHITTYPE;
                        break;
                case F_FRAGMENTATION:
                        field_shift += W_FT_FRAGMENTATION;
                        break;
                }
        }
        return field_shift;
}
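
/*
 * Worked example: if the filter mode selects only F_PORT, F_VLAN and
 * F_PROTOCOL, the compressed tuple packs those fields from bit 0
 * upwards in selection-bit order, so the loop above yields
 *
 *	t4_filter_field_shift(adap, F_PORT)     == 0
 *	t4_filter_field_shift(adap, F_VLAN)     == W_FT_PORT
 *	t4_filter_field_shift(adap, F_PROTOCOL) == W_FT_PORT + W_FT_VLAN
 *
 * and -1 for any field not selected in the mode, which is what
 * t4_init_tp_params() relies on for the VNIC_ID/outer-VLAN case.
 */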
/**
 * t4_port_init - initialize the SW state of the adapter's ports
 * @adap: the adapter
 * @mbox: mailbox to use for the FW commands
 * @pf: the PF owning the ports' VIs
 * @vf: the VF owning the ports' VIs
 *
 * Queries each physical port's configuration from FW, allocates a VI
 * for it, and initializes the corresponding port_info, MAC address,
 * RSS mode, and link configuration state.
 */
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
        u8 addr[6];
        int ret, i, j = 0;
        struct fw_port_cmd c;
        struct fw_rss_vi_config_cmd rvc;

        memset(&c, 0, sizeof(c));
        memset(&rvc, 0, sizeof(rvc));

        for_each_port(adap, i) {
                unsigned int rss_size;
                struct port_info *p = adap2pinfo(adap, i);

                while ((adap->params.portvec & (1 << j)) == 0)
                        j++;

                c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
                                       FW_CMD_REQUEST | FW_CMD_READ |
                                       FW_PORT_CMD_PORTID(j));
                c.action_to_len16 = htonl(
                        FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
                        FW_LEN16(c));
                ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
                if (ret)
                        return ret;

                ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
                if (ret < 0)
                        return ret;

                p->viid = ret;
                p->tx_chan = j;
                p->lport = j;
                p->rss_size = rss_size;
                memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
                adap->port[i]->dev_port = j;

                ret = ntohl(c.u.info.lstatus_to_modtype);
                p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
                        FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
                p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
                p->mod_type = FW_PORT_MOD_TYPE_NA;

                rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
                                       FW_CMD_REQUEST | FW_CMD_READ |
                                       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
                rvc.retval_len16 = htonl(FW_LEN16(rvc));
                ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
                if (ret)
                        return ret;
                p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);

                init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
                j++;
        }
        return 0;
}
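
/*
 * Example (illustrative): adap->params.portvec is a bitmap of the
 * physical ports this function owns.  With portvec == 0x5 the loop in
 * t4_port_init() skips bit 1, so logical port 0 attaches to channel 0
 * and logical port 1 to channel 2.
 */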