/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
                               int polarity, int attempts, int delay, u32 *valp)
{
        while (1) {
                u32 val = t4_read_reg(adapter, reg);

                if (!!(val & mask) == polarity) {
                        if (valp)
                                *valp = val;
                        return 0;
                }
                if (--attempts == 0)
                        return -EAGAIN;
                if (delay)
                        udelay(delay);
        }
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
                                  int polarity, int attempts, int delay)
{
        return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
                                   delay, NULL);
}
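
/*
 * Typical usage (see the BIST helpers further below) is to kick off a
 * hardware operation and then poll for its completion bit to clear,
 * e.g.:
 *
 *      ret = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
 *
 * which polls up to 10 times, 1 usec apart, for START_BIST to read 0.
 */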

/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
                      u32 val)
{
        u32 v = t4_read_reg(adapter, addr) & ~mask;

        t4_write_reg(adapter, addr, v | val);
        (void) t4_read_reg(adapter, addr);                    /* flush */
}

/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
                      unsigned int data_reg, u32 *vals,
                      unsigned int nregs, unsigned int start_idx)
{
        while (nregs--) {
                t4_write_reg(adap, addr_reg, start_idx);
                *vals++ = t4_read_reg(adap, data_reg);
                start_idx++;
        }
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: index of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
                       unsigned int data_reg, const u32 *vals,
                       unsigned int nregs, unsigned int start_idx)
{
        while (nregs--) {
                t4_write_reg(adap, addr_reg, start_idx++);
                t4_write_reg(adap, data_reg, *vals++);
        }
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
                         u32 mbox_addr)
{
        for ( ; nflit; nflit--, mbox_addr += 8)
                *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
        struct fw_debug_cmd asrt;

        get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
        dev_alert(adap->pdev_dev,
                  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
                  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
                  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
        dev_err(adap->pdev_dev,
                "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
                (unsigned long long)t4_read_reg64(adap, data_reg),
                (unsigned long long)t4_read_reg64(adap, data_reg + 8),
                (unsigned long long)t4_read_reg64(adap, data_reg + 16),
                (unsigned long long)t4_read_reg64(adap, data_reg + 24),
                (unsigned long long)t4_read_reg64(adap, data_reg + 32),
                (unsigned long long)t4_read_reg64(adap, data_reg + 40),
                (unsigned long long)t4_read_reg64(adap, data_reg + 48),
                (unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 * to respond.  @sleep_ok determines whether we may sleep while awaiting
 * the response.  If sleeping is allowed we use progressive backoff;
 * otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
                    void *rpl, bool sleep_ok)
{
        static const int delay[] = {
                1, 1, 3, 5, 10, 10, 20, 50, 100, 200
        };

        u32 v;
        u64 res;
        int i, ms, delay_idx;
        const __be64 *p = cmd;
        u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
        u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

        if ((size & 15) || size > MBOX_LEN)
                return -EINVAL;

        /*
         * If the device is off-line, as in EEH, commands will time out.
         * Fail them early so we don't waste time waiting.
         */
        if (adap->pdev->error_state != pci_channel_io_normal)
                return -EIO;

        v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
        for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
                v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

        if (v != MBOX_OWNER_DRV)
                return v ? -EBUSY : -ETIMEDOUT;
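
        /*
         * We now own the mailbox: copy the command in as big-endian
         * 64-bit flits, then set MBMSGVALID and hand ownership to the
         * firmware so it knows a new command is ready.
         */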
        for (i = 0; i < size; i += 8)
                t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

        t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
        t4_read_reg(adap, ctl_reg);                     /* flush write */

        delay_idx = 0;
        ms = delay[0];

        for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
                if (sleep_ok) {
                        ms = delay[delay_idx];  /* last element may repeat */
                        if (delay_idx < ARRAY_SIZE(delay) - 1)
                                delay_idx++;
                        msleep(ms);
                } else
                        mdelay(ms);

                v = t4_read_reg(adap, ctl_reg);
                if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
                        if (!(v & MBMSGVALID)) {
                                t4_write_reg(adap, ctl_reg, 0);
                                continue;
                        }

                        res = t4_read_reg64(adap, data_reg);
                        if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
                                fw_asrt(adap, data_reg);
                                res = FW_CMD_RETVAL(EIO);
                        } else if (rpl)
                                get_mbox_rpl(adap, rpl, size / 8, data_reg);

                        if (FW_CMD_RETVAL_GET((int)res))
                                dump_mbox(adap, mbox, data_reg);
                        t4_write_reg(adap, ctl_reg, 0);
                        return -FW_CMD_RETVAL_GET((int)res);
                }
        }

        dump_mbox(adap, mbox, data_reg);
        dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
                *(const u8 *)cmd, mbox);
        return -ETIMEDOUT;
}

/**
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @idx: which MC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
        int i;
        u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
        u32 mc_bist_status_rdata, mc_bist_data_pattern;

        if (is_t4(adap->chip)) {
                mc_bist_cmd = MC_BIST_CMD;
                mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
                mc_bist_cmd_len = MC_BIST_CMD_LEN;
                mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
                mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
        } else {
                mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
                mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
                mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
                mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
                mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
        }

        if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
                return -EBUSY;
        t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
        t4_write_reg(adap, mc_bist_cmd_len, 64);
        t4_write_reg(adap, mc_bist_data_pattern, 0xc);
        t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
                     BIST_CMD_GAP(1));
        i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
        if (i)
                return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)

        for (i = 15; i >= 0; i--)
                *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
        if (ecc)
                *ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
        return 0;
}
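
/*
 * t4_edc_read() below follows the same backdoor BIST sequence as
 * t4_mc_read() above: program the 64-byte-aligned address and a length
 * of 64 bytes, select the read data pattern, fire START_BIST, poll for
 * completion, and then copy the returned data (BIST status registers
 * 15..0) and the ECC word (register 16) out of the status area.
 */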

/**
 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
        int i;
        u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
        u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;

        if (is_t4(adap->chip)) {
                edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
                edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
                edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
                edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
                                                    idx);
                edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
                                                idx);
        } else {
                edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
                edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
                edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
                edc_bist_cmd_data_pattern =
                        EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
                edc_bist_status_rdata =
                        EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
        }

        if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
                return -EBUSY;
        t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
        t4_write_reg(adap, edc_bist_cmd_len, 64);
        t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
        t4_write_reg(adap, edc_bist_cmd,
                     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
        i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
        if (i)
                return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))

        for (i = 15; i >= 0; i--)
                *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
        if (ecc)
                *ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
        return 0;
}

/*
 * t4_mem_win_rw - read/write memory through PCIE memory window
 * @adap: the adapter
 * @addr: address of first byte requested
 * @data: MEMWIN0_APERTURE bytes of data containing the requested address
 * @dir: direction of transfer 1 => read, 0 => write
 *
 * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
 * MEMWIN0_APERTURE-byte-aligned address that covers the requested
 * address @addr.
 */
static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
{
        int i;
        u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn);

        /*
         * Setup offset into PCIE memory window.  Address must be a
         * MEMWIN0_APERTURE-byte-aligned address.  (Read back MA register
         * to ensure that changes propagate before we attempt to use the
         * new values.)
         */
        t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
                     (addr & ~(MEMWIN0_APERTURE - 1)) | win_pf);
        t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);

        /* Collecting data 4 bytes at a time up to MEMWIN0_APERTURE */
        for (i = 0; i < MEMWIN0_APERTURE; i = i + 0x4) {
                if (dir)
                        *data++ = (__force __be32) t4_read_reg(adap,
                                                        (MEMWIN0_BASE + i));
                else
                        t4_write_reg(adap, (MEMWIN0_BASE + i),
                                     (__force u32) *data++);
        }

        return 0;
}

/**
 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 * @adap: the adapter
 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 * @addr: address within indicated memory type
 * @len: amount of memory to transfer
 * @buf: host memory buffer
 * @dir: direction of transfer 1 => read, 0 => write
 *
 * Reads/writes an [almost] arbitrary memory region in the firmware: the
 * firmware memory address, length and host buffer must be aligned on
 * 32-bit boundaries.  The memory is transferred as a raw byte sequence
 * from/to the firmware's memory.  If this memory contains data
 * structures which contain multi-byte integers, it's the caller's
 * responsibility to perform appropriate byte order conversions.
 */
static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
                        __be32 *buf, int dir)
{
        u32 pos, start, end, offset, memoffset;
        u32 edc_size, mc_size;
        int ret = 0;
        __be32 *data;

        /*
         * Argument sanity checks ...
         */
        if ((addr & 0x3) || (len & 0x3))
                return -EINVAL;

        data = vmalloc(MEMWIN0_APERTURE);
        if (!data)
                return -ENOMEM;

        /* Offset into the region of memory which is being accessed
         * MEM_EDC0 = 0
         * MEM_EDC1 = 1
         * MEM_MC   = 2 -- T4
         * MEM_MC0  = 2 -- For T5
         * MEM_MC1  = 3 -- For T5
         */
        edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
        if (mtype != MEM_MC1)
                memoffset = (mtype * (edc_size * 1024 * 1024));
        else {
                mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
                                                       MA_EXT_MEMORY_BAR));
                memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
        }

        /* Determine the PCIE_MEM_ACCESS_OFFSET */
        addr = addr + memoffset;

        /*
         * The underlying EDC/MC read routines read MEMWIN0_APERTURE bytes
         * at a time so we need to round down the start and round up the
         * end.  We'll start copying out of the first line at (addr - start)
         * a word at a time.
         */
        start = addr & ~(MEMWIN0_APERTURE-1);
        end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
        offset = (addr - start)/sizeof(__be32);

        for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {

                /*
                 * If we're writing, copy the data from the caller's memory
                 * buffer
                 */
                if (!dir) {
                        /*
                         * If we're doing a partial write, then we need to do
                         * a read-modify-write ...
                         */
                        if (offset || len < MEMWIN0_APERTURE) {
                                ret = t4_mem_win_rw(adap, pos, data, 1);
                                if (ret)
                                        break;
                        }
                        while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
                               len > 0) {
                                data[offset++] = *buf++;
                                len -= sizeof(__be32);
                        }
                }

                /*
                 * Transfer a block of memory and bail if there's an error.
                 */
                ret = t4_mem_win_rw(adap, pos, data, dir);
                if (ret)
                        break;
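
                /*
                 * Note that "offset" is non-zero only on the first pass
                 * through this loop: the "offset = 0" in the for-loop
                 * increment means every subsequent aperture is copied
                 * from/to the start of data[].
                 */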
                /*
                 * If we're reading, copy the data into the caller's memory
                 * buffer.
                 */
                if (dir)
                        while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
                               len > 0) {
                                *buf++ = data[offset++];
                                len -= sizeof(__be32);
                        }
        }

        vfree(data);
        return ret;
}

int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
                    __be32 *buf)
{
        return t4_memory_rw(adap, mtype, addr, len, buf, 0);
}

#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024

/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
        unsigned int v = enable ? 0xc : 0;
        int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
        return ret < 0 ? ret : 0;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
        u32 cclk_param, cclk_val;
        int i, ret, addr;
        int ec, sn;
        u8 *vpd, csum;
        unsigned int vpdr_len, kw_offset, id_len;

        vpd = vmalloc(VPD_LEN);
        if (!vpd)
                return -ENOMEM;

        ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
        if (ret < 0)
                goto out;
        addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

        ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
        if (ret < 0)
                goto out;

        if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
                dev_err(adapter->pdev_dev, "missing VPD ID string\n");
                ret = -EINVAL;
                goto out;
        }

        id_len = pci_vpd_lrdt_size(vpd);
        if (id_len > ID_LEN)
                id_len = ID_LEN;

        i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
        if (i < 0) {
                dev_err(adapter->pdev_dev, "missing VPD-R section\n");
                ret = -EINVAL;
                goto out;
        }

        vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
        kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
        if (vpdr_len + kw_offset > VPD_LEN) {
                dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
                ret = -EINVAL;
                goto out;
        }

#define FIND_VPD_KW(var, name) do { \
        var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
        if (var < 0) { \
                dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
                ret = -EINVAL; \
                goto out; \
        } \
        var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

        FIND_VPD_KW(i, "RV");
        for (csum = 0; i >= 0; i--)
                csum += vpd[i];

        if (csum) {
                dev_err(adapter->pdev_dev,
                        "corrupted VPD EEPROM, actual csum %u\n", csum);
                ret = -EINVAL;
                goto out;
        }

        FIND_VPD_KW(ec, "EC");
        FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

        memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
        strim(p->id);
        memcpy(p->ec, vpd + ec, EC_LEN);
        strim(p->ec);
        i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
        memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
        strim(p->sn);

        /*
         * Ask firmware for the Core Clock since it knows how to translate the
         * Reference Clock ('V2') VPD field into a Core Clock value ...
         */
        cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
                      FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
        ret = t4_query_params(adapter, adapter->mbox, 0, 0,
                              1, &cclk_param, &cclk_val);

out:
        vfree(vpd);
        if (ret)
                return ret;
        p->cclk = cclk_val;

        return 0;
}

/* serial flash and firmware constants */
enum {
        SF_ATTEMPTS = 10,             /* max retries for SF operations */

        /* flash command opcodes */
        SF_PROG_PAGE    = 2,          /* program page */
        SF_WR_DISABLE   = 4,          /* disable writes */
        SF_RD_STATUS    = 5,          /* read status register */
        SF_WR_ENABLE    = 6,          /* enable writes */
        SF_RD_DATA_FAST = 0xb,        /* read flash */
        SF_RD_ID        = 0x9f,       /* read ID */
        SF_ERASE_SECTOR = 0xd8,       /* erase sector */

        FW_MAX_SIZE = 512 * 1024,
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
                    int lock, u32 *valp)
{
        int ret;

        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
        if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
                return -EBUSY;
        cont = cont ? SF_CONT : 0;
        lock = lock ? SF_LOCK : 0;
        t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
        ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
        if (!ret)
                *valp = t4_read_reg(adapter, SF_DATA);
        return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
                     int lock, u32 val)
{
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
        if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
                return -EBUSY;
        cont = cont ? SF_CONT : 0;
        lock = lock ? SF_LOCK : 0;
        t4_write_reg(adapter, SF_DATA, val);
        t4_write_reg(adapter, SF_OP, lock |
                     cont | BYTECNT(byte_cnt - 1) | OP_WR);
        return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
}
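
/*
 * Serial flash transactions are built by chaining sf1_write()/sf1_read()
 * calls with @cont set on every operation except the last.  For example,
 * reading the flash status register is a one-byte SF_RD_STATUS command
 * followed by a one-byte read, which is exactly what flash_wait_op()
 * below does while polling for a program/erase operation to finish.
 */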
/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
        int ret;
        u32 status;

        while (1) {
                if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
                    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
                        return ret;
                if (!(status & 1))
                        return 0;
                if (--attempts == 0)
                        return -EAGAIN;
                if (delay)
                        msleep(delay);
        }
}

/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
                         unsigned int nwords, u32 *data, int byte_oriented)
{
        int ret;

        if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
                return -EINVAL;

        addr = swab32(addr) | SF_RD_DATA_FAST;

        if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
            (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
                return ret;

        for ( ; nwords; nwords--, data++) {
                ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
                if (nwords == 1)
                        t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
                if (ret)
                        return ret;
                if (byte_oriented)
                        *data = (__force __u32) (htonl(*data));
        }
        return 0;
}

/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
                          unsigned int n, const u8 *data)
{
        int ret;
        u32 buf[64];
        unsigned int i, c, left, val, offset = addr & 0xff;

        if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
                return -EINVAL;

        val = swab32(addr) | SF_PROG_PAGE;

        if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
            (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
                goto unlock;

        for (left = n; left; left -= c) {
                c = min(left, 4U);
                for (val = 0, i = 0; i < c; ++i)
                        val = (val << 8) + *data++;

                ret = sf1_write(adapter, c, c != left, 1, val);
                if (ret)
                        goto unlock;
        }
        ret = flash_wait_op(adapter, 8, 1);
        if (ret)
                goto unlock;

        t4_write_reg(adapter, SF_OP, 0);                    /* unlock SF */

        /* Read the page to verify the write succeeded */
        ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
        if (ret)
                return ret;

        if (memcmp(data - n, (u8 *)buf + offset, n)) {
                dev_err(adapter->pdev_dev,
                        "failed to correctly write the flash page at %#x\n",
                        addr);
                return -EIO;
        }
        return 0;

unlock:
        t4_write_reg(adapter, SF_OP, 0);                    /* unlock SF */
        return ret;
}
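
/*
 * The firmware image begins with a struct fw_hdr, so individual header
 * fields can be fetched straight from flash with t4_read_flash() at
 * their offsets within the header, as the two helpers below do.
 */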
/**
 * get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
static int get_fw_version(struct adapter *adapter, u32 *vers)
{
        return t4_read_flash(adapter, adapter->params.sf_fw_start +
                             offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
}

/**
 * get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
static int get_tp_version(struct adapter *adapter, u32 *vers)
{
        return t4_read_flash(adapter, adapter->params.sf_fw_start +
                             offsetof(struct fw_hdr, tp_microcode_ver),
                             1, vers, 0);
}

/**
 * t4_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if there's an exact match, a negative error if the version could not
 * be read or there's a major version mismatch, and a positive value if
 * the expected major version is found but there's a minor version
 * mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
        u32 api_vers[2];
        int ret, major, minor, micro;
        int exp_major, exp_minor, exp_micro;

        ret = get_fw_version(adapter, &adapter->params.fw_vers);
        if (!ret)
                ret = get_tp_version(adapter, &adapter->params.tp_vers);
        if (!ret)
                ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
                                    offsetof(struct fw_hdr, intfver_nic),
                                    2, api_vers, 1);
        if (ret)
                return ret;

        major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
        minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
        micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);

        switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
        case CHELSIO_T4:
                exp_major = FW_VERSION_MAJOR;
                exp_minor = FW_VERSION_MINOR;
                exp_micro = FW_VERSION_MICRO;
                break;
        case CHELSIO_T5:
                exp_major = FW_VERSION_MAJOR_T5;
                exp_minor = FW_VERSION_MINOR_T5;
                exp_micro = FW_VERSION_MICRO_T5;
                break;
        default:
                dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n",
                        adapter->chip);
                return -EINVAL;
        }

        memcpy(adapter->params.api_vers, api_vers,
               sizeof(adapter->params.api_vers));

        if (major < exp_major || (major == exp_major && minor < exp_minor) ||
            (major == exp_major && minor == exp_minor && micro < exp_micro)) {
                dev_err(adapter->pdev_dev,
                        "Card has firmware version %u.%u.%u, minimum "
                        "supported firmware is %u.%u.%u.\n", major, minor,
                        micro, exp_major, exp_minor, exp_micro);
                return -EFAULT;
        }

        if (major != exp_major) {            /* major mismatch - fail */
                dev_err(adapter->pdev_dev,
                        "card FW has major version %u, driver wants %u\n",
                        major, exp_major);
                return -EINVAL;
        }

        if (minor == exp_minor && micro == exp_micro)
                return 0;                    /* perfect match */

        /* Minor/micro version mismatch.  Report it but often it's OK. */
        return 1;
}
/**
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
        int ret = 0;

        while (start <= end) {
                if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
                    (ret = sf1_write(adapter, 4, 0, 1,
                                     SF_ERASE_SECTOR | (start << 8))) != 0 ||
                    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
                        dev_err(adapter->pdev_dev,
                                "erase of flash sector %d failed, error %d\n",
                                start, ret);
                        break;
                }
                start++;
        }
        t4_write_reg(adapter, SF_OP, 0);                    /* unlock SF */
        return ret;
}

/**
 * t4_flash_cfg_addr - return the address of the flash configuration file
 * @adapter: the adapter
 *
 * Return the address within the flash where the Firmware Configuration
 * File is stored.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
        if (adapter->params.sf_size == 0x100000)
                return FLASH_FPGA_CFG_START;
        else
                return FLASH_CFG_START;
}

/**
 * t4_load_cfg - download config file
 * @adap: the adapter
 * @cfg_data: the cfg text file to write
 * @size: text file size
 *
 * Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
        int ret, i, n;
        unsigned int addr;
        unsigned int flash_cfg_start_sec;
        unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

        addr = t4_flash_cfg_addr(adap);
        flash_cfg_start_sec = addr / SF_SEC_SIZE;

        if (size > FLASH_CFG_MAX_SIZE) {
                dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
                        FLASH_CFG_MAX_SIZE);
                return -EFBIG;
        }

        i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,    /* # of sectors spanned */
                         sf_sec_size);
        ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
                                     flash_cfg_start_sec + i - 1);
        /*
         * If size == 0 then we're simply erasing the FLASH sectors associated
         * with the on-adapter Firmware Configuration File.
         */
        if (ret || size == 0)
                goto out;

        /* this will write to the flash up to SF_PAGE_SIZE at a time */
        for (i = 0; i < size; i += SF_PAGE_SIZE) {
                if ((size - i) < SF_PAGE_SIZE)
                        n = size - i;
                else
                        n = SF_PAGE_SIZE;
                ret = t4_write_flash(adap, addr, n, cfg_data);
                if (ret)
                        goto out;

                addr += SF_PAGE_SIZE;
                cfg_data += SF_PAGE_SIZE;
        }

out:
        if (ret)
                dev_err(adap->pdev_dev, "config file %s failed %d\n",
                        (size == 0 ? "clear" : "download"), ret);
        return ret;
}
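
/*
 * A valid FW image is self-checking: its 32-bit big-endian words sum
 * (mod 2^32) to 0xffffffff, and the header's len512 field records the
 * image size in 512-byte units.  t4_load_fw() below verifies both
 * before it touches the flash.
 */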
/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
        u32 csum;
        int ret, addr;
        unsigned int i;
        u8 first_page[SF_PAGE_SIZE];
        const __be32 *p = (const __be32 *)fw_data;
        const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
        unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
        unsigned int fw_img_start = adap->params.sf_fw_start;
        unsigned int fw_start_sec = fw_img_start / sf_sec_size;

        if (!size) {
                dev_err(adap->pdev_dev, "FW image has no data\n");
                return -EINVAL;
        }
        if (size & 511) {
                dev_err(adap->pdev_dev,
                        "FW image size not multiple of 512 bytes\n");
                return -EINVAL;
        }
        if (ntohs(hdr->len512) * 512 != size) {
                dev_err(adap->pdev_dev,
                        "FW image size differs from size in FW header\n");
                return -EINVAL;
        }
        if (size > FW_MAX_SIZE) {
                dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
                        FW_MAX_SIZE);
                return -EFBIG;
        }

        for (csum = 0, i = 0; i < size / sizeof(csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                dev_err(adap->pdev_dev,
                        "corrupted firmware image, checksum %#x\n", csum);
                return -EINVAL;
        }

        i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
        ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
        if (ret)
                goto out;

        /*
         * We write the correct version at the end so the driver can see a bad
         * version if the FW write fails.  Start by writing a copy of the
         * first page with a bad version.
         */
        memcpy(first_page, fw_data, SF_PAGE_SIZE);
        ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
        ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
        if (ret)
                goto out;

        addr = fw_img_start;
        for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
                addr += SF_PAGE_SIZE;
                fw_data += SF_PAGE_SIZE;
                ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
                if (ret)
                        goto out;
        }

        ret = t4_write_flash(adap,
                             fw_img_start + offsetof(struct fw_hdr, fw_ver),
                             sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
        if (ret)
                dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
                        ret);
        return ret;
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
                     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)

/**
 * t4_link_start - apply link configuration to MAC/PHY
 * @adap: the adapter
 * @mbox: the mailbox to use for the FW command
 * @port: the port id
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate, first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate, just reset it.
 * - If auto-negotiation is off, set the MAC to the proper
 *   speed/duplex/FC, otherwise do it later based on the outcome of
 *   auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
                  struct link_config *lc)
{
        struct fw_port_cmd c;
        unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

        lc->link_ok = 0;
        if (lc->requested_fc & PAUSE_RX)
                fc |= FW_PORT_CAP_FC_RX;
        if (lc->requested_fc & PAUSE_TX)
                fc |= FW_PORT_CAP_FC_TX;

        memset(&c, 0, sizeof(c));
        c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
                               FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
        c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
                                  FW_LEN16(c));

        if (!(lc->supported & FW_PORT_CAP_ANEG)) {
                c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
        } else if (lc->autoneg == AUTONEG_DISABLE) {
                c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
        } else
                c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_restart_aneg - restart autonegotiation
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 *
 * Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
        struct fw_port_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
                               FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
        c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
                                  FW_LEN16(c));
        c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
        unsigned int mask;         /* bits to check in interrupt status */
        const char *msg;           /* message to print or NULL */
        short stat_idx;            /* stat counter to increment or -1 */
        unsigned short fatal;      /* whether the condition reported is fatal */
        int_handler_t int_handler; /* platform-specific int handler */
};
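
/*
 * Each table entry ties one or more status bits to a message and a
 * severity, e.g.:
 *
 *      { RNPP, "RXNP array parity error", -1, 1 },
 *
 * reports RNPP as a fatal condition with no statistics counter, as in
 * the PCIE tables further below.
 */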

/**
 * t4_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally emitting a warning or alert message.  The table is
 * terminated by an entry specifying mask 0.  Returns the number of fatal
 * interrupt conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
                                 const struct intr_info *acts)
{
        int fatal = 0;
        unsigned int mask = 0;
        unsigned int status = t4_read_reg(adapter, reg);

        for ( ; acts->mask; ++acts) {
                if (!(status & acts->mask))
                        continue;
                if (acts->fatal) {
                        fatal++;
                        dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
                                  status & acts->mask);
                } else if (acts->msg && printk_ratelimit())
                        dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
                                 status & acts->mask);
                if (acts->int_handler)
                        acts->int_handler(adapter);
                mask |= acts->mask;
        }
        status &= mask;
        if (status)                         /* clear processed interrupts */
                t4_write_reg(adapter, reg, status);
        return fatal;
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
        static const struct intr_info sysbus_intr_info[] = {
                { RNPP, "RXNP array parity error", -1, 1 },
                { RPCP, "RXPC array parity error", -1, 1 },
                { RCIP, "RXCIF array parity error", -1, 1 },
                { RCCP, "Rx completions control array parity error", -1, 1 },
                { RFTP, "RXFT array parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info pcie_port_intr_info[] = {
                { TPCP, "TXPC array parity error", -1, 1 },
                { TNPP, "TXNP array parity error", -1, 1 },
                { TFTP, "TXFT array parity error", -1, 1 },
                { TCAP, "TXCA array parity error", -1, 1 },
                { TCIP, "TXCIF array parity error", -1, 1 },
                { RCAP, "RXCA array parity error", -1, 1 },
                { OTDD, "outbound request TLP discarded", -1, 1 },
                { RDPE, "Rx data parity error", -1, 1 },
                { TDUE, "Tx uncorrectable data error", -1, 1 },
                { 0 }
        };
        static const struct intr_info pcie_intr_info[] = {
                { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
                { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
                { MSIDATAPERR, "MSI data parity error", -1, 1 },
                { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
                { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
                { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
                { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
                { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
                { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
                { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
                { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
                { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
                { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
                { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
                { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
                { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
                { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
                { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
                { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
                { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
                { FIDPERR, "PCI FID parity error", -1, 1 },
                { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
                { MATAGPERR, "PCI MA tag parity error", -1, 1 },
                { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
                { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
                { RXWRPERR, "PCI Rx write parity error", -1, 1 },
                { RPLPERR, "PCI replay buffer parity error", -1, 1 },
                { PCIESINT, "PCI core secondary fault", -1, 1 },
                { PCIEPINT, "PCI core primary fault", -1, 1 },
                { UNXSPLCPLERR, "PCI unexpected split completion error",
                  -1, 0 },
                { 0 }
        };

        static struct intr_info t5_pcie_intr_info[] = {
                { MSTGRPPERR, "Master Response Read Queue parity error",
                  -1, 1 },
                { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
                { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
                { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
                { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
                { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
                { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
                { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
                  -1, 1 },
                { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
                  -1, 1 },
                { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
                { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
                { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
                { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
                { DREQWRPERR, "PCI DMA channel write request parity error",
                  -1, 1 },
                { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
                { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
                { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
                { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
                { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
                { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
                { FIDPERR, "PCI FID parity error", -1, 1 },
                { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
                { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
                { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
                { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
                  -1, 1 },
                { IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
                  -1, 1 },
                { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
                { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
                { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
                { READRSPERR, "Outbound read error", -1, 0 },
                { 0 }
        };

        int fat;

        fat = t4_handle_intr_status(adapter,
                                    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
                                    sysbus_intr_info) +
              t4_handle_intr_status(adapter,
                                    PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
                                    pcie_port_intr_info) +
              t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
                                    is_t4(adapter->chip) ?
                                    pcie_intr_info : t5_pcie_intr_info);

        if (fat)
                t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
        static const struct intr_info tp_intr_info[] = {
                { 0x3fffffff, "TP parity error", -1, 1 },
                { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
                { 0 }
        };

        if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
                t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
        u64 v;

        static const struct intr_info sge_intr_info[] = {
                { ERR_CPL_EXCEED_IQE_SIZE,
                  "SGE received CPL exceeding IQE size", -1, 1 },
                { ERR_INVALID_CIDX_INC,
                  "SGE GTS CIDX increment too large", -1, 0 },
                { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
                { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
                { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
                { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
                { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
                  "SGE IQID > 1023 received CPL for FL", -1, 0 },
                { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
                  0 },
                { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
                  0 },
                { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
                  0 },
                { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
                  0 },
                { ERR_ING_CTXT_PRIO,
                  "SGE too many priority ingress contexts", -1, 0 },
                { ERR_EGR_CTXT_PRIO,
                  "SGE too many priority egress contexts", -1, 0 },
                { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
                { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
                { 0 }
        };

        v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
            ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
        if (v) {
                dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
                          (unsigned long long)v);
                t4_write_reg(adapter, SGE_INT_CAUSE1, v);
                t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
        }

        if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
            v != 0)
                t4_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
        static const struct intr_info cim_intr_info[] = {
                { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
                { OBQPARERR, "CIM OBQ parity error", -1, 1 },
                { IBQPARERR, "CIM IBQ parity error", -1, 1 },
                { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
                { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
                { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
                { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info cim_upintr_info[] = {
                { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
                { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
                { ILLWRINT, "CIM illegal write", -1, 1 },
                { ILLRDINT, "CIM illegal read", -1, 1 },
                { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
                { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
                { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
                { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
                { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
                { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
                { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
                { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
                { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
                { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
                { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
                { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
                { SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
                { SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
                { BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
                { BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
                { SGLRDPLINT, "CIM single read from PL space", -1, 1 },
                { SGLWRPLINT, "CIM single write to PL space", -1, 1 },
                { BLKRDPLINT, "CIM block read from PL space", -1, 1 },
                { BLKWRPLINT, "CIM block write to PL space", -1, 1 },
                { REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
                { RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
                { TIMEOUTINT, "CIM PIF timeout", -1, 1 },
                { TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
                { 0 }
        };

        int fat;

        fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
                                    cim_intr_info) +
              t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
                                    cim_upintr_info);
        if (fat)
                t4_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info ulprx_intr_info[] = {
                { 0x1800000, "ULPRX context error", -1, 1 },
                { 0x7fffff, "ULPRX parity error", -1, 1 },
                { 0 }
        };

        if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
                t4_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info ulptx_intr_info[] = {
                { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
                  0 },
                { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
                  0 },
                { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
                  0 },
                { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
                  0 },
                { 0xfffffff, "ULPTX parity error", -1, 1 },
                { 0 }
        };

        if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
                t4_fatal_err(adapter);
}

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pmtx_intr_info[] = {
                { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
                { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
                { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
                { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
                { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
                { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
                { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error",
                  -1, 1 },
                { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
                { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1 },
                { 0 }
        };

        if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
                t4_fatal_err(adapter);
}

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pmrx_intr_info[] = {
                { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
                { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
                { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
                { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error",
                  -1, 1 },
                { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
                { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1 },
                { 0 }
        };

        if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
                t4_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
        static const struct intr_info cplsw_intr_info[] = {
                { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
                { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
                { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
                { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
                { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
                { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
                { 0 }
        };

        if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
                t4_fatal_err(adapter);
}

/*
 * LE interrupt handler.
 */
static void le_intr_handler(struct adapter *adap)
{
        static const struct intr_info le_intr_info[] = {
                { LIPMISS, "LE LIP miss", -1, 0 },
                { LIP0, "LE 0 LIP error", -1, 0 },
                { PARITYERR, "LE parity error", -1, 1 },
                { UNKNOWNCMD, "LE unknown command", -1, 1 },
                { REQQPARERR, "LE request queue parity error", -1, 1 },
                { 0 }
        };

        if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
                t4_fatal_err(adap);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
        static const struct intr_info mps_rx_intr_info[] = {
                { 0xffffff, "MPS Rx parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info mps_tx_intr_info[] = {
                { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
                { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
                { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
                { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
                { BUBBLE, "MPS Tx underflow", -1, 1 },
                { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
                { FRMERR, "MPS Tx framing error", -1, 1 },
                { 0 }
        };
        static const struct intr_info mps_trc_intr_info[] = {
                { FILTMEM, "MPS TRC filter parity error", -1, 1 },
                { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
                { MISCPERR, "MPS TRC misc parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info mps_stat_sram_intr_info[] = {
                { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info mps_stat_tx_intr_info[] = {
                { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info mps_stat_rx_intr_info[] = {
                { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
                { 0 }
        };
        static const struct intr_info mps_cls_intr_info[] = {
                { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
                { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
                { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
                { 0 }
        };

        int fat;

        fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
                                    mps_rx_intr_info) +
              t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
                                    mps_tx_intr_info) +
              t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
                                    mps_trc_intr_info) +
              t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
                                    mps_stat_sram_intr_info) +
              t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
                                    mps_stat_tx_intr_info) +
              t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
                                    mps_stat_rx_intr_info) +
              t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
                                    mps_cls_intr_info);
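
        /*
         * The top-level MPS_INT_CAUSE bits (CLSINT, TRCINT, RXINT, TXINT,
         * STATINT) cover the per-block causes handled above, so clear
         * them (and flush the write) only after the leaf causes have
         * been serviced.
         */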
	t4_read_reg(adapter, MPS_INT_CAUSE);                    /* flush */
	if (fat)
		t4_fatal_err(adapter);
}

#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else {
		addr = MC_INT_CAUSE;
		cnt_addr = MC_ECC_STATUS;
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));

		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	t4_write_reg(adapter, addr, v);
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}

/*
 * MA interrupt handler.
 */
static void ma_intr_handler(struct adapter *adap)
{
	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);

	if (status & MEM_PERR_INT_CAUSE)
		dev_alert(adap->pdev_dev,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
	if (status & MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  MEM_WRAP_CLIENT_NUM_GET(v),
			  MEM_WRAP_ADDRESS_GET(v) << 4);
	}
	t4_write_reg(adap, MA_INT_CAUSE, status);
	t4_fatal_err(adap);
}

/*
 * SMB interrupt handler.
 */
static void smb_intr_handler(struct adapter *adap)
{
	static const struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
		t4_fatal_err(adap);
}

/*
 * NC-SI interrupt handler.
 */
static void ncsi_intr_handler(struct adapter *adap)
{
	static const struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}
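/*
 * Illustrative sketch (not part of the driver): every module interrupt
 * handler above is table driven.  Assuming a table whose terminating
 * entry has a zero mask, t4_handle_intr_status() conceptually walks it
 * like this, logging each set cause bit and reporting back whether any
 * of the matched causes was fatal:
 *
 *	static int example_decode(u32 status, const struct intr_info *acts)
 *	{
 *		int fatal = 0;
 *
 *		for ( ; acts->mask; ++acts) {
 *			if (!(status & acts->mask))
 *				continue;
 *			if (acts->fatal)
 *				fatal++;
 *		}
 *		return fatal;
 *	}
 */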
/*
 * XGMAC interrupt handler.
 */
static void xgmac_intr_handler(struct adapter *adap, int port)
{
	u32 v, int_cause_reg;

	if (is_t4(adap->chip))
		int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
	else
		int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);

	v = t4_read_reg(adap, int_cause_reg);

	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR)
		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
			  port);
	if (v & RXFIFO_PRTY_ERR)
		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
			  port);
	/* clear through the same T4/T5 register we read the cause from */
	t4_write_reg(adap, int_cause_reg, v);
	t4_fatal_err(adap);
}

/*
 * PL interrupt handler.
 */
static void pl_intr_handler(struct adapter *adap)
{
	static const struct intr_info pl_intr_info[] = {
		{ FATALPERR, "T4 fatal parity error", -1, 1 },
		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
		t4_fatal_err(adap);
}

#define PF_INTR_MASK (PFSW)
#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
		CPL_SWITCH | SGE | ULP_TX)

/**
 * t4_slow_intr_handler - control path interrupt handler
 * @adapter: the adapter
 *
 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
 * The designation 'slow' is because it involves register reads, while
 * data interrupts typically don't involve any MMIOs.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);

	if (!(cause & GLBL_INTR_MASK))
		return 0;
	if (cause & CIM)
		cim_intr_handler(adapter);
	if (cause & MPS)
		mps_intr_handler(adapter);
	if (cause & NCSI)
		ncsi_intr_handler(adapter);
	if (cause & PL)
		pl_intr_handler(adapter);
	if (cause & SMB)
		smb_intr_handler(adapter);
	if (cause & XGMAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE)
		pcie_intr_handler(adapter);
	if (cause & MC)
		mem_intr_handler(adapter, MEM_MC);
	if (cause & EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE)
		le_intr_handler(adapter);
	if (cause & TP)
		tp_intr_handler(adapter);
	if (cause & MA)
		ma_intr_handler(adapter);
	if (cause & PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & SGE)
		sge_intr_handler(adapter);
	if (cause & ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
	return 1;
}

/**
 * t4_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable PF-specific interrupts for the calling function and the top-level
 * interrupt concentrator for global interrupts. Interrupts are already
 * enabled at each module, here we just enable the roots of the interrupt
 * hierarchies.
 *
 * Note: this function should be called only when the driver manages
 * non PF-specific interrupts from the various HW modules. Only one PCI
 * function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
		     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
		     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
		     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
		     DBFIFO_HP_INT | DBFIFO_LP_INT |
		     EGRESS_SIZE_ERR);
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
}

/**
 * t4_intr_disable - disable interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts. We only disable the top-level interrupt
 * concentrators. The caller must be a PCI function managing global
 * interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
}

/**
 * hash_mac_addr - return the hash value of a MAC address
 * @addr: the 48-bit Ethernet MAC address
 *
 * Hashes a MAC address according to the hash function used by HW inexact
 * (hash) address matching.
 */
static int hash_mac_addr(const u8 *addr)
{
	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;
}
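/*
 * Worked example (illustrative only): for the sample address
 * 00:07:43:12:34:56 the folding above proceeds as
 *
 *	a = 0x000743, b = 0x123456
 *	a ^= b        ->  0x123315
 *	a ^= a >> 12  ->  0x123236
 *	a ^= a >> 6   ->  0x127afe
 *	a & 0x3f      ->  0x3e
 *
 * so this address lands in bit 62 of the 64-bit inexact-match hash
 * vector that t4_set_addr_hash() programs later in this file.
 */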
/**
 * t4_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: virtual interface whose RSS subtable is to be written
 * @start: start entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the response queue lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values. If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range allowed for
 * @viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE |
			       FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = htonl(FW_LEN16(cmd));

	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
	while (n > 0) {
		int nq = min(n, 32);
		__be32 *qp = &cmd.iq0_to_iq2;

		cmd.niqid = htons(nq);
		cmd.startidx = htons(start);

		start += nq;
		n -= nq;

		while (nq > 0) {
			unsigned int v;

			v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;

			*qp++ = htonl(v);
			nq -= 3;
		}

		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * t4_config_glbl_rss - configure the global RSS mode
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @mode: global RSS mode
 * @flags: mode-specific flags
 *
 * Sets the global RSS mode.
 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags)
{
	struct fw_rss_glb_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
			      FW_CMD_REQUEST | FW_CMD_WRITE);
	c.retval_len16 = htonl(FW_LEN16(c));
	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
		c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		c.u.basicvirtual.mode_pkd =
			htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
	} else
		return -EINVAL;
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
 * @adap: the adapter
 * @v4: holds the TCP/IP counter values
 * @v6: holds the TCP/IPv6 counter values
 *
 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];

#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
		v4->tcpOutRsts = STAT(OUT_RST);
		v4->tcpInSegs  = STAT64(IN_SEG);
		v4->tcpOutSegs = STAT64(OUT_SEG);
		v4->tcpRetransSegs = STAT64(RXT_SEG);
	}
	if (v6) {
		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
		v6->tcpOutRsts = STAT(OUT_RST);
		v6->tcpInSegs  = STAT64(IN_SEG);
		v6->tcpOutSegs = STAT64(OUT_SEG);
		v6->tcpRetransSegs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}

/**
 * t4_read_mtu_tbl - returns the values in the HW path MTU table
 * @adap: the adapter
 * @mtus: where to store the MTU values
 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 * Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		t4_write_reg(adap, TP_MTU_TABLE,
			     MTUINDEX(0xff) | MTUVALUE(i));
		v = t4_read_reg(adap, TP_MTU_TABLE);
		mtus[i] = MTUVALUE_GET(v);
		if (mtu_log)
			mtu_log[i] = MTUWIDTH_GET(v);
	}
}

/**
 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 * @adap: the adapter
 * @addr: the indirect TP register address
 * @mask: specifies the field within the register to modify
 * @val: new value for the field
 *
 * Sets a field of an indirect TP register to the given value.
 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	t4_write_reg(adap, TP_PIO_ADDR, addr);
	val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, TP_PIO_DATA, val);
}
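/*
 * Usage sketch (illustrative only, not referenced by the driver):
 * dumping the 16-entry path-MTU table together with the base-2 logs the
 * hardware stored for each entry, assuming an initialized adapter:
 *
 *	static void example_dump_mtus(struct adapter *adap)
 *	{
 *		u16 mtus[NMTUS];
 *		u8 mtu_log[NMTUS];
 *		int i;
 *
 *		t4_read_mtu_tbl(adap, mtus, mtu_log);
 *		for (i = 0; i < NMTUS; i++)
 *			dev_info(adap->pdev_dev, "MTU[%d] = %u (log2 %u)\n",
 *				 i, mtus[i], mtu_log[i]);
 *	}
 */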
/**
 * init_cong_ctrl - initialize congestion control parameters
 * @a: the alpha values for congestion control
 * @b: the beta values for congestion control
 *
 * Initialize the congestion control parameters.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = 6;
	b[30] = b[31] = 7;
}

/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 * t4_load_mtus - write the MTU and congestion control HW tables
 * @adap: the adapter
 * @mtus: the values for the MTU table
 * @alpha: the values for the congestion control alpha parameter
 * @beta: the values for the congestion control beta parameter
 *
 * Write the HW MTU table with the supplied MTUs and the high-speed
 * congestion control table with the supplied alpha, beta, and MTUs.
 * We write the two tables together because the additive increments
 * depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
			     MTUWIDTH(log2) | MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}

/**
 * get_mps_bg_map - return the buffer groups associated with a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Returns a bitmap indicating which MPS buffer groups are associated
 * with the given port. Bit i is set if buffer group i is used by the
 * port.
 */
static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
{
	u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));

	if (n == 0)
		return idx == 0 ? 0xf : 0;
	if (n == 1)
		return idx < 2 ? (3 << (2 * idx)) : 0;
	return 1 << idx;
}

/**
 * t4_get_port_stats - collect port statistics
 * @adap: the adapter
 * @idx: the port index
 * @p: the stats structure to fill
 *
 * Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = get_mps_bg_map(adap, idx);

#define GET_STAT(name) \
	t4_read_reg64(adap, \
		      (is_t4(adap->chip) ? \
		       PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
		       T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop             = GET_STAT(TX_PORT_DROP);
	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);

	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);

	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 * t4_wol_magic_enable - enable/disable magic packet WoL
 * @adap: the adapter
 * @port: the physical port index
 * @addr: MAC address expected in magic packets, %NULL to disable
 *
 * Enables/disables magic packet wake-on-LAN for the selected port.
 */
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr)
{
	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;

	if (is_t4(adap->chip)) {
		mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
	} else {
		mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
	}

	if (addr) {
		t4_write_reg(adap, mag_id_reg_l,
			     (addr[2] << 24) | (addr[3] << 16) |
			     (addr[4] << 8) | addr[5]);
		t4_write_reg(adap, mag_id_reg_h,
			     (addr[0] << 8) | addr[1]);
	}
	t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
			 addr ? MAGICEN : 0);
}
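/*
 * Usage sketch (illustrative only): arming magic-packet WoL on port 0
 * for a sample station address before suspending, and disarming it on
 * resume.  The address used here is hypothetical.
 *
 *	static const u8 wol_mac[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };
 *
 *	t4_wol_magic_enable(adap, 0, wol_mac);	// arm on suspend
 *	t4_wol_magic_enable(adap, 0, NULL);	// disarm on resume
 */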
/**
 * t4_wol_pat_enable - enable/disable pattern-based WoL
 * @adap: the adapter
 * @port: the physical port index
 * @map: bitmap of which HW pattern filters to set
 * @mask0: byte mask for bytes 0-63 of a packet
 * @mask1: byte mask for bytes 64-127 of a packet
 * @crc: Ethernet CRC for selected bytes
 * @enable: enable/disable switch
 *
 * Sets the pattern filters indicated in @map to mask out the bytes
 * specified in @mask0/@mask1 in received packets and compare the CRC of
 * the resulting packet against @crc. If @enable is %true pattern-based
 * WoL is enabled, otherwise disabled.
 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
	int i;
	u32 port_cfg_reg;

	if (is_t4(adap->chip))
		port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
	else
		port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);

	if (!enable) {
		t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
		return 0;
	}
	if (map > 0xff)
		return -EINVAL;

#define EPIO_REG(name) \
	(is_t4(adap->chip) ? \
	 PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
	 T5_PORT_REG(port, MAC_PORT_EPIO_##name))

	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
			return -ETIMEDOUT;

		/* write CRC */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	/* use the T4/T5-specific register computed above */
	t4_set_reg_field(adap, port_cfg_reg, 0, PATEN);
	return 0;
}

/**
 * t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter. If @qid is
 * negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
			      V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)

int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
				  FW_CMD_WRITE |
				  FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	c.u.addrval.addr = htonl(addr);
	c.u.addrval.val = htonl(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
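/*
 * Illustrative note: INIT_CMD() above just fills in the two header
 * words shared by the simple firmware commands.  For example,
 * INIT_CMD(c, BYE, WRITE) expands (conceptually) to
 *
 *	c.op_to_write = htonl(FW_CMD_OP(FW_BYE_CMD) |
 *			      FW_CMD_REQUEST | FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 */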
2480 */ 2481 off = addr & MEMWIN0_APERTURE; 2482 if ((addr & 3) || (len + off) > MEMWIN0_APERTURE) 2483 return -EINVAL; 2484 2485 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, 2486 (addr & ~MEMWIN0_APERTURE) | win_pf); 2487 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET); 2488 2489 for (i = 0; i < len; i += 4) 2490 *data++ = (__force __be32) t4_read_reg(adap, 2491 (MEMWIN0_BASE + off + i)); 2492 2493 return 0; 2494 } 2495 2496 /** 2497 * t4_mdio_rd - read a PHY register through MDIO 2498 * @adap: the adapter 2499 * @mbox: mailbox to use for the FW command 2500 * @phy_addr: the PHY address 2501 * @mmd: the PHY MMD to access (0 for clause 22 PHYs) 2502 * @reg: the register to read 2503 * @valp: where to store the value 2504 * 2505 * Issues a FW command through the given mailbox to read a PHY register. 2506 */ 2507 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 2508 unsigned int mmd, unsigned int reg, u16 *valp) 2509 { 2510 int ret; 2511 struct fw_ldst_cmd c; 2512 2513 memset(&c, 0, sizeof(c)); 2514 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | 2515 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); 2516 c.cycles_to_len16 = htonl(FW_LEN16(c)); 2517 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | 2518 FW_LDST_CMD_MMD(mmd)); 2519 c.u.mdio.raddr = htons(reg); 2520 2521 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 2522 if (ret == 0) 2523 *valp = ntohs(c.u.mdio.rval); 2524 return ret; 2525 } 2526 2527 /** 2528 * t4_mdio_wr - write a PHY register through MDIO 2529 * @adap: the adapter 2530 * @mbox: mailbox to use for the FW command 2531 * @phy_addr: the PHY address 2532 * @mmd: the PHY MMD to access (0 for clause 22 PHYs) 2533 * @reg: the register to write 2534 * @valp: value to write 2535 * 2536 * Issues a FW command through the given mailbox to write a PHY register. 2537 */ 2538 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 2539 unsigned int mmd, unsigned int reg, u16 val) 2540 { 2541 struct fw_ldst_cmd c; 2542 2543 memset(&c, 0, sizeof(c)); 2544 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | 2545 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); 2546 c.cycles_to_len16 = htonl(FW_LEN16(c)); 2547 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | 2548 FW_LDST_CMD_MMD(mmd)); 2549 c.u.mdio.raddr = htons(reg); 2550 c.u.mdio.rval = htons(val); 2551 2552 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 2553 } 2554 2555 /** 2556 * t4_fw_hello - establish communication with FW 2557 * @adap: the adapter 2558 * @mbox: mailbox to use for the FW command 2559 * @evt_mbox: mailbox to receive async FW events 2560 * @master: specifies the caller's willingness to be the device master 2561 * @state: returns the current device state (if non-NULL) 2562 * 2563 * Issues a command to establish communication with FW. Returns either 2564 * an error (negative integer) or the mailbox of the Master PF. 2565 */ 2566 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, 2567 enum dev_master master, enum dev_state *state) 2568 { 2569 int ret; 2570 struct fw_hello_cmd c; 2571 u32 v; 2572 unsigned int master_mbox; 2573 int retries = FW_CMD_HELLO_RETRIES; 2574 2575 retry: 2576 memset(&c, 0, sizeof(c)); 2577 INIT_CMD(c, HELLO, WRITE); 2578 c.err_to_clearinit = htonl( 2579 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | 2580 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | 2581 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? 
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = htonl(
		FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
				      FW_HELLO_CMD_MBMASTER_MASK) |
		FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
		FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret < 0) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		return ret;
	}

	v = ntohl(c.err_to_clearinit);
	master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
	if (state) {
		if (v & FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
	 */
	if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized is indicated
			 * by the firmware, keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
			if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition;
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & FW_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & FW_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
			    (pcie_fw & FW_PCIE_FW_MASTER_VLD))
				master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
			break;
		}
	}

	return master_mbox;
}
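/*
 * Usage sketch (illustrative only): a PF driver typically issues the
 * HELLO during attach, willing but not insisting on mastership, then
 * branches on the returned master mailbox and device state:
 *
 *	enum dev_state state;
 *	int master = t4_fw_hello(adap, adap->mbox, adap->mbox,
 *				 MASTER_MAY, &state);
 *
 *	if (master < 0)
 *		return master;		// no contact with the firmware
 *	if (master != adap->mbox)
 *		;			// another PF initializes the chip
 *	if (state == DEV_STATE_INIT)
 *		;			// adapter is already initialized
 */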
/**
 * t4_fw_bye - end communication with FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a command to terminate communication with FW.
 */
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
{
	struct fw_bye_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, BYE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_early_init - ask FW to initialize the device
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a command to FW to partially initialize the device. This
 * performs initialization that generally doesn't depend on user input.
 */
int t4_early_init(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_fw_reset - issue a reset to FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @reset: specifies the type of reset to perform
 *
 * Issues a reset command of the specified type to FW.
 */
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, RESET, WRITE);
	c.val = htonl(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
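/*
 * Usage sketch (illustrative only): a full chip reset through the
 * firmware is requested with the PIO reset bits, e.g.
 *
 *	ret = t4_fw_reset(adap, adap->mbox, PIORST | PIORSTMODE);
 *
 * which is the same reset type t4_fw_halt()/t4_fw_restart() below use
 * when they have to reset the chip themselves.
 */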
/**
 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @force: force uP into RESET even if FW RESET command fails
 *
 * Issues a RESET command to firmware (if desired) with a HALT indication
 * and then puts the microprocessor into RESET state. The RESET command
 * will only be issued if a legitimate mailbox is provided (mbox <=
 * FW_PCIE_FW_MASTER_MASK).
 *
 * This is generally used in order for the host to safely manipulate the
 * adapter without fear of conflicting with whatever the firmware might
 * be doing. The only way out of this state is to RESTART the firmware
 * ...
 */
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= FW_PCIE_FW_MASTER_MASK) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = htonl(PIORST | PIORSTMODE);
		c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
		t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
				 FW_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}

/**
 * t4_fw_restart - restart the firmware by taking the uP out of RESET
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by t4_fw_halt(). On successful
 * return the previous PF Master remains as the new PF Master and there
 * is no need to issue a new HELLO command, etc.
 *
 * We do this in two ways:
 *
 * 1. If we're dealing with newer firmware we'll simply want to take
 *    the chip's microprocessor out of RESET.  This will cause the
 *    firmware to start up from its start vector.  And then we'll loop
 *    until the firmware indicates it's started again (PCIE_FW.HALT
 *    reset to 0) or we timeout.
 *
 * 2. If we're dealing with older firmware then we'll need to RESET
 *    the chip since older firmware won't recognize the PCIE_FW.HALT
 *    flag and automatically RESET itself on startup.
 */
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= FW_PCIE_FW_MASTER_MASK) {
			t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					PIORST | PIORSTMODE) == 0)
				return 0;
		}

		t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
		msleep(2000);
	} else {
		int ms;

		t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}

/**
 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @fw_data: the firmware image to write
 * @size: image size
 * @force: force upgrade even if firmware doesn't cooperate
 *
 * Perform all of the steps necessary for upgrading an adapter's
 * firmware image. Normally this requires the cooperation of the
 * existing firmware in order to halt all existing activities
 * but if an invalid mailbox token is passed in we skip that step
 * (though we'll still put the adapter microprocessor into RESET in
 * that case).
 *
 * On successful return the new firmware will have been loaded and
 * the adapter will have been fully RESET losing all previous setup
 * state. On unsuccessful return the adapter may be completely hosed ...
 * positive errno indicates that the adapter is ~probably~ intact, a
 * negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	ret = t4_fw_halt(adap, mbox, force);
	if (ret < 0 && !force)
		return ret;

	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return t4_fw_restart(adap, mbox, reset);
}


/**
 * t4_fw_config_file - setup an adapter via a Configuration File
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @mtype: the memory type where the Configuration File is located
 * @maddr: the memory address where the Configuration File is located
 * @finiver: return value for CF [fini] version
 * @finicsum: return value for CF [fini] checksum
 * @cfcsum: return value for CF computed checksum
 *
 * Issue a command to get the firmware to process the Configuration
 * File located at the specified mtype/maddr. If the Configuration
 * File is processed successfully and return value pointers are
 * provided, the Configuration File "[fini]" section version and
 * checksum values will be returned along with the computed checksum.
 * It's up to the caller to decide how it wants to respond to the
 * checksums not matching, but it is recommended that a prominent
 * warning be emitted in order to help people rapidly identify changed
 * or corrupted Configuration Files.
 *
 * Also note that it's possible to modify things like "niccaps",
 * "toecaps", etc. between processing the Configuration File and telling
 * the firmware to use the new configuration. Callers which want to
 * do this will need to "hand-roll" their own CAPS_CONFIG commands.
 */
int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
		      unsigned int mtype, unsigned int maddr,
		      u32 *finiver, u32 *finicsum, u32 *cfcsum)
{
	struct fw_caps_config_cmd caps_cmd;
	int ret;

	/*
	 * Tell the firmware to process the indicated Configuration File.
	 * If there are no errors and the caller has provided return value
	 * pointers for the [fini] section version, checksum and computed
	 * checksum, pass those back to the caller.
	 */
2941 */ 2942 memset(&caps_cmd, 0, sizeof(caps_cmd)); 2943 caps_cmd.op_to_write = 2944 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 2945 FW_CMD_REQUEST | 2946 FW_CMD_READ); 2947 caps_cmd.cfvalid_to_len16 = 2948 htonl(FW_CAPS_CONFIG_CMD_CFVALID | 2949 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 2950 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | 2951 FW_LEN16(caps_cmd)); 2952 ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); 2953 if (ret < 0) 2954 return ret; 2955 2956 if (finiver) 2957 *finiver = ntohl(caps_cmd.finiver); 2958 if (finicsum) 2959 *finicsum = ntohl(caps_cmd.finicsum); 2960 if (cfcsum) 2961 *cfcsum = ntohl(caps_cmd.cfcsum); 2962 2963 /* 2964 * And now tell the firmware to use the configuration we just loaded. 2965 */ 2966 caps_cmd.op_to_write = 2967 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 2968 FW_CMD_REQUEST | 2969 FW_CMD_WRITE); 2970 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); 2971 return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL); 2972 } 2973 2974 /** 2975 * t4_fixup_host_params - fix up host-dependent parameters 2976 * @adap: the adapter 2977 * @page_size: the host's Base Page Size 2978 * @cache_line_size: the host's Cache Line Size 2979 * 2980 * Various registers in T4 contain values which are dependent on the 2981 * host's Base Page and Cache Line Sizes. This function will fix all of 2982 * those registers with the appropriate values as passed in ... 2983 */ 2984 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, 2985 unsigned int cache_line_size) 2986 { 2987 unsigned int page_shift = fls(page_size) - 1; 2988 unsigned int sge_hps = page_shift - 10; 2989 unsigned int stat_len = cache_line_size > 64 ? 128 : 64; 2990 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size; 2991 unsigned int fl_align_log = fls(fl_align) - 1; 2992 2993 t4_write_reg(adap, SGE_HOST_PAGE_SIZE, 2994 HOSTPAGESIZEPF0(sge_hps) | 2995 HOSTPAGESIZEPF1(sge_hps) | 2996 HOSTPAGESIZEPF2(sge_hps) | 2997 HOSTPAGESIZEPF3(sge_hps) | 2998 HOSTPAGESIZEPF4(sge_hps) | 2999 HOSTPAGESIZEPF5(sge_hps) | 3000 HOSTPAGESIZEPF6(sge_hps) | 3001 HOSTPAGESIZEPF7(sge_hps)); 3002 3003 t4_set_reg_field(adap, SGE_CONTROL, 3004 INGPADBOUNDARY_MASK | 3005 EGRSTATUSPAGESIZE_MASK, 3006 INGPADBOUNDARY(fl_align_log - 5) | 3007 EGRSTATUSPAGESIZE(stat_len != 64)); 3008 3009 /* 3010 * Adjust various SGE Free List Host Buffer Sizes. 3011 * 3012 * This is something of a crock since we're using fixed indices into 3013 * the array which are also known by the sge.c code and the T4 3014 * Firmware Configuration File. We need to come up with a much better 3015 * approach to managing this array. For now, the first four entries 3016 * are: 3017 * 3018 * 0: Host Page Size 3019 * 1: 64KB 3020 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode) 3021 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode) 3022 * 3023 * For the single-MTU buffers in unpacked mode we need to include 3024 * space for the SGE Control Packet Shift, 14 byte Ethernet header, 3025 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet 3026 * Padding boundry. All of these are accommodated in the Factory 3027 * Default Firmware Configuration File but we need to adjust it for 3028 * this host's cache line size. 
/**
 * t4_fixup_host_params - fix up host-dependent parameters
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 *
 * Various registers in T4 contain values which are dependent on the
 * host's Base Page and Cache Line Sizes. This function will fix all of
 * those registers with the appropriate values as passed in ...
 */
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
{
	unsigned int page_shift = fls(page_size) - 1;
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
		     HOSTPAGESIZEPF0(sge_hps) |
		     HOSTPAGESIZEPF1(sge_hps) |
		     HOSTPAGESIZEPF2(sge_hps) |
		     HOSTPAGESIZEPF3(sge_hps) |
		     HOSTPAGESIZEPF4(sge_hps) |
		     HOSTPAGESIZEPF5(sge_hps) |
		     HOSTPAGESIZEPF6(sge_hps) |
		     HOSTPAGESIZEPF7(sge_hps));

	t4_set_reg_field(adap, SGE_CONTROL,
			 INGPADBOUNDARY_MASK |
			 EGRSTATUSPAGESIZE_MASK,
			 INGPADBOUNDARY(fl_align_log - 5) |
			 EGRSTATUSPAGESIZE(stat_len != 64));

	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 * are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
		     & ~(fl_align-1));
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
		     & ~(fl_align-1));

	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));

	return 0;
}

/**
 * t4_fw_initialize - ask FW to initialize the device
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a command to FW to partially initialize the device. This
 * performs initialization that generally doesn't depend on user input.
 */
int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_query_params - query FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 *
 * Reads the value of FW or device parameters. Up to 7 parameters can be
 * queried at once.
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	int i, ret;
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
			    FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
			    FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	for (i = 0; i < nparams; i++, p += 2)
		*p = htonl(*params++);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
			*val++ = ntohl(*p);
	return ret;
}

/**
 * t4_set_params - sets FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 *
 * Sets the value of FW or device parameters. Up to 7 parameters can be
 * specified at once.
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
			    FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	while (nparams--) {
		*p++ = htonl(*params++);
		*p++ = htonl(*val++);
	}

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_cfg_pfvf - configure PF/VF resource limits
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF being configured
 * @vf: the VF being configured
 * @txq: the max number of egress queues
 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
 * @rxqi: the max number of interrupt-capable ingress queues
 * @rxq: the max number of interruptless ingress queues
 * @tc: the PCI traffic class
 * @vi: the max number of virtual interfaces
 * @cmask: the channel access rights mask for the PF/VF
 * @pmask: the port access rights mask for the PF/VF
 * @nexact: the maximum number of exact MPS filters
 * @rcaps: read capabilities
 * @wxcaps: write/execute capabilities
 *
 * Configures resource limits and capabilities for a physical or virtual
 * function.
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
			    FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
			       FW_PFVF_CMD_NIQ(rxq));
	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
			      FW_PFVF_CMD_PMASK(pmask) |
			      FW_PFVF_CMD_NEQ(txq));
	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
				FW_PFVF_CMD_NEXACTF(nexact));
	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
				     FW_PFVF_CMD_WX_CAPS(wxcaps) |
				     FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
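/*
 * Usage sketch (illustrative only): reading a single firmware parameter
 * and writing it back.  The mnemonic built here is hypothetical; real
 * callers construct it with the FW_PARAMS_MNEM/FW_PARAMS_PARAM macros
 * from t4fw_api.h.
 *
 *	u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *		    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF);
 *	u32 val;
 *	int ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
 *				  1, &param, &val);
 *
 *	if (!ret)
 *		ret = t4_set_params(adap, adap->mbox, adap->fn, 0,
 *				    1, &param, &val);
 */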
/**
 * t4_alloc_vi - allocate a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 *
 * Allocates a virtual interface for the given physical port. If @mac is
 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 * @mac should be large enough to hold @nmac Ethernet addresses, they are
 * stored consecutively so the space needed is @nmac * 6 bytes.
 * Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID(port);
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
	return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
}

/**
 * t4_set_rxmode - set Rx properties of a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @mtu: the new MTU or -1
 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/* convert to FW values */
	if (mtu < 0)
		mtu = FW_RXMODE_MTU_NO_CHG;
	if (promisc < 0)
		promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
	if (all_multi < 0)
		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
	if (bcast < 0)
		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
	if (vlanex < 0)
		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
				  FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
				  FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
				  FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
				  FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
/**
 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @free: if true any existing filters for this VI id are first removed
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @idx: where to store the index of each allocated filter
 * @hash: pointer to hash address filter bitmap
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an exact-match filter for each of the supplied addresses and
 * sets it to the corresponding address. If @idx is not %NULL it should
 * have at least @naddr entries, each of which will be set to the index of
 * the filter allocated for the corresponding MAC address. If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL addresses that fail to allocate an exact filter
 * are hashed and update the hash filter bitmap pointed at by @hash.
 *
 * Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int i, ret;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p;
	unsigned int max_naddr = is_t4(adap->chip) ?
				       NUM_MPS_CLS_SRAM_L_INSTANCES :
				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;

	if (naddr > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
			     FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
				    FW_CMD_LEN16((naddr + 2) / 2));

	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
					FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
	}

	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret)
		return ret;

	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));

		if (idx)
			idx[i] = index >= max_naddr ? 0xffff : index;
		if (index < max_naddr)
			ret++;
		else if (hash)
			*hash |= (1ULL << hash_mac_addr(addr[i]));
	}
	return ret;
}
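/*
 * Usage sketch (illustrative only): installing two unicast addresses on
 * a VI, then falling back to the inexact-match hash for any address
 * that could not get an exact filter.  The addresses are hypothetical.
 *
 *	static const u8 a0[6] = { 0x00, 0x07, 0x43, 0x00, 0x00, 0x01 };
 *	static const u8 a1[6] = { 0x00, 0x07, 0x43, 0x00, 0x00, 0x02 };
 *	const u8 *addrs[2] = { a0, a1 };
 *	u16 idx[2];
 *	u64 hash = 0;
 *	int nfilters = t4_alloc_mac_filt(adap, adap->mbox, viid, false,
 *					 2, addrs, idx, &hash, true);
 *
 *	if (nfilters >= 0 && hash)
 *		t4_set_addr_hash(adap, adap->mbox, viid, true, hash, true);
 */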
/**
 * t4_change_mac - modifies the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @add_smt: if true also add the address to the HW SMT
 *
 * Modifies an exact-match filter and sets it to the new MAC address.
 * Note that in general it is not possible to modify the value of a given
 * filter, so the generic way to modify an address filter is to free the
 * one being used by the old address value and allocate a new filter for
 * the new address value.  @idx can be -1 if the address is a new addition.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = is_t4(adap->chip) ?
				    NUM_MPS_CLS_SRAM_L_INSTANCES :
				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;

	if (idx < 0)                             /* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
	p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
				FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}

/**
 * t4_set_addr_hash - program the MAC inexact-match hash filter
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @ucast: whether the hash filter should also match unicast addresses
 * @vec: the value to be written to the hash filter
 * @sleep_ok: call is allowed to sleep
 *
 * Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
				    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
				    FW_CMD_LEN16(1));
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 * t4_enable_vi - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface.
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
			       FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
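/*
 * A minimal usage sketch (hypothetical, not part of the driver): change a
 * VI's primary MAC address and then enable Rx/Tx on it, e.g. in response
 * to a set-mac-address request from the stack.  @tcam_idx would be the
 * index returned by a previous t4_change_mac() call, or -1 on first use;
 * the helper name is an assumption for illustration.
 */
static inline int example_set_mac_and_enable(struct adapter *adap,
					     unsigned int mbox,
					     unsigned int viid, int tcam_idx,
					     const u8 *new_mac)
{
	int ret;

	/* persistent allocation, no SMT entry; on success @ret holds the
	 * filter index a real caller would save for the next change */
	ret = t4_change_mac(adap, mbox, viid, tcam_idx, new_mac, true, false);
	if (ret < 0)
		return ret;

	return t4_enable_vi(adap, mbox, viid, true, true);
}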
/**
 * t4_identify_port - identify a VI's port by blinking its LED
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
	c.blinkdur = htons(nblinks);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_iq_free - free an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
			    FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
	c.iqid = htons(iqid);
	c.fl0id = htons(fl0id);
	c.fl1id = htons(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_eth_eq_free - free an Ethernet egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
			    FW_EQ_ETH_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ctrl_eq_free - free a control egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
			    FW_EQ_CTRL_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
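/*
 * A minimal usage sketch (hypothetical, not part of the driver): tear down
 * an Ethernet Rx/Tx queue set consisting of one ingress queue with a
 * single free list plus one egress queue.  0xffff marks the absent FL1;
 * the helper and the queue-id parameters are assumptions for illustration.
 */
static inline int example_free_eth_queues(struct adapter *adap,
					  unsigned int mbox, unsigned int pf,
					  unsigned int vf, unsigned int iqid,
					  unsigned int flid,
					  unsigned int eqid)
{
	int ret;

	/* free the ingress queue and its FL0; no FL1 attached */
	ret = t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP,
			 iqid, flid, 0xffff);
	if (ret)
		return ret;

	return t4_eth_eq_free(adap, mbox, pf, vf, eqid);
}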
/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
			    FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);

		if (stat & FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = SPEED_100;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = SPEED_1000;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = SPEED_10000;

		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                    /* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			t4_os_link_changed(adap, port, link_ok);
		}
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, port);
		}
	}
	return 0;
}

static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	u16 val;

	if (pci_is_pcie(adapter->pdev)) {
		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}
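/*
 * A minimal usage sketch (hypothetical, not part of the driver): hand an
 * asynchronous FW message that arrived in an ingress-queue buffer to
 * t4_handle_fw_rpl.  The caller and buffer handling are assumptions for
 * illustration; in the driver proper this dispatch would typically happen
 * in the SGE response-processing path.
 */
static inline void example_dispatch_fw_msg(struct adapter *adap,
					   const __be64 *msg)
{
	u8 opcode = *(const u8 *)msg;

	/* t4_handle_fw_rpl acts only on FW_PORT_CMD messages (link/module
	 * state changes) and ignores everything else */
	if (opcode == FW_PORT_CMD)
		t4_handle_fw_rpl(adap, msg);
}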
int t4_wait_dev_ready(struct adapter *adap)
{
	if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
		return 0;
	msleep(500);
	return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
}

static int get_flash_params(struct adapter *adap)
{
	int ret;
	u32 info;

	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adap, 3, 0, 1, &info);
	t4_write_reg(adap, SF_OP, 0);                  /* unlock SF */
	if (ret)
		return ret;

	if ((info & 0xff) != 0x20)           /* not a Numonix flash */
		return -EINVAL;
	info >>= 16;                         /* log2 of size */
	if (info >= 0x14 && info < 0x18)
		adap->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		adap->params.sf_nsec = 64;
	else
		return -EINVAL;
	adap->params.sf_size = 1 << info;
	adap->params.sf_fw_start =
		t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
	return 0;
}

/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	uint16_t device_id;

	ret = t4_wait_dev_ready(adapter);
	if (ret < 0)
		return ret;

	get_pci_mode(adapter, &adapter->params.pci);
	adapter->params.rev = t4_read_reg(adapter, PL_REV);

	ret = get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
		return ret;
	}

	/* Retrieve the adapter's device ID to work out the chip version. */
	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
	ver = device_id >> 12;
	switch (ver) {
	case CHELSIO_T4:
		adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4,
						  adapter->params.rev);
		break;
	case CHELSIO_T5:
		adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5,
						  adapter->params.rev);
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			device_id);
		return -EINVAL;
	}

	/* Reassign the updated revision field */
	adapter->params.rev = adapter->chip;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;
	return 0;
}
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;
	struct fw_port_cmd c;
	struct fw_rss_vi_config_cmd rvc;

	memset(&c, 0, sizeof(c));
	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		unsigned int rss_size;
		struct port_info *p = adap2pinfo(adap, i);

		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_PORT_CMD_PORTID(j));
		c.action_to_len16 = htonl(
			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;

		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
		if (ret < 0)
			return ret;

		p->viid = ret;
		p->tx_chan = j;
		p->lport = j;
		p->rss_size = rss_size;
		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);

		ret = ntohl(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
		p->mod_type = FW_PORT_MOD_TYPE_NA;

		rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
		rvc.retval_len16 = htonl(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);

		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
		j++;
	}
	return 0;
}
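/*
 * A minimal usage sketch (hypothetical, not part of the driver): the
 * adapter bring-up ordering implied by the two routines above, as it
 * would run early in PCI probe.  Error handling beyond bailing out is
 * omitted and the helper name is an assumption for illustration.
 */
static inline int example_bring_up(struct adapter *adap, int mbox,
				   int pf, int vf)
{
	int ret;

	/* basic SW state, chip identification, flash parameters */
	ret = t4_prep_adapter(adap);
	if (ret)
		return ret;

	/* query the FW for per-port info and allocate one VI per port */
	return t4_port_init(adap, mbox, pf, vf);
}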