/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4fw_api.h"

/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
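
/*
 * Usage note (illustrative, not part of the original source): callers poll a
 * single status bit with t4_wait_op_done().  For example, the serial flash
 * helpers later in this file wait for SF_BUSY_F in SF_OP_A to clear, with up
 * to SF_ATTEMPTS polls spaced 5 us apart:
 *
 *	ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
 */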

/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
}

/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals,
		      unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
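
/*
 * Usage sketch (illustrative only): reading a block of indirectly addressed
 * registers through an address/data pair.  The register names below are
 * placeholders, not real T4 registers:
 *
 *	u32 vals[4];
 *
 *	t4_read_indirect(adap, EXAMPLE_ADDR_REG_A, EXAMPLE_DATA_REG_A,
 *			 vals, ARRAY_SIZE(vals), 0);
 */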

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
	u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg);

	if (is_t4(adap->params.chip))
		req |= LOCALCFG_F;

	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
	*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);

	/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read.  (None of the other fields matter when
	 * ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
}

/*
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *const reason[] = {
		"Crash",                        /* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",    /* PCIE_FW_EVAL_PREP */
		"During Device Configuration",  /* PCIE_FW_EVAL_CONF */
		"During Device Initialization", /* PCIE_FW_EVAL_INIT */
		"Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",                     /* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
	if (pcie_fw & PCIE_FW_ERR_F)
		dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
			reason[PCIE_FW_EVAL_G(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 * to respond.  @sleep_ok determines whether we may sleep while awaiting
 * the response.  If sleeping is allowed we use progressive backoff
 * otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID_F)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL_V(EIO);
			} else if (rpl) {
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			}

			if (FW_CMD_RETVAL_G((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_G((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	t4_report_fw_error(adap);
	return -ETIMEDOUT;
}
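
/*
 * Usage sketch (illustrative only): most callers go through the t4_wr_mbox()
 * wrapper, filling in a firmware command structure and optionally passing a
 * reply buffer.  t4_fwcache() further down in this file is a small concrete
 * example of the pattern:
 *
 *	struct fw_params_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	... fill in c.op_to_vfn, c.retval_len16 and c.param[0] ...
 *	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
 */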

/**
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @idx: which MC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
	u32 mc_bist_status_rdata, mc_bist_data_pattern;

	if (is_t4(adap->params.chip)) {
		mc_bist_cmd = MC_BIST_CMD_A;
		mc_bist_cmd_addr = MC_BIST_CMD_ADDR_A;
		mc_bist_cmd_len = MC_BIST_CMD_LEN_A;
		mc_bist_status_rdata = MC_BIST_STATUS_RDATA_A;
		mc_bist_data_pattern = MC_BIST_DATA_PATTERN_A;
	} else {
		mc_bist_cmd = MC_REG(MC_P_BIST_CMD_A, idx);
		mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
		mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
		mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
		mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd) & START_BIST_F)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len, 64);
	t4_write_reg(adap, mc_bist_data_pattern, 0xc);
	t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE_V(1) | START_BIST_F |
		     BIST_CMD_GAP_V(1));
	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST_F, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}
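
/*
 * Usage sketch (illustrative only): a backdoor MC read always returns the
 * 64-byte aligned block covering @addr plus its ECC word:
 *
 *	__be32 data[16];
 *	u64 ecc;
 *
 *	ret = t4_mc_read(adap, 0, addr, data, &ecc);
 */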

/**
 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;

	if (is_t4(adap->params.chip)) {
		edc_bist_cmd = EDC_REG(EDC_BIST_CMD_A, idx);
		edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR_A, idx);
		edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN_A, idx);
		edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN_A,
						    idx);
		edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA_A,
						idx);
	} else {
		edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
		edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
		edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
		edc_bist_cmd_data_pattern =
			EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
		edc_bist_status_rdata =
			EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
	}

	if (t4_read_reg(adap, edc_bist_cmd) & START_BIST_F)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd,
		     BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F);
	i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST_F, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 * @adap: the adapter
 * @win: PCI-E Memory Window to use
 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 * @addr: address within indicated memory type
 * @len: amount of memory to transfer
 * @hbuf: host memory buffer
 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 * Reads/writes an [almost] arbitrary memory region in the firmware: the
 * firmware memory address and host buffer must be aligned on 32-bit
 * boundaries; the length may be arbitrary.  The memory is transferred as
 * a raw byte sequence from/to the firmware's memory.  If this memory
 * contains data structures which contain multi-byte integers, it's the
 * caller's responsibility to perform appropriate byte order conversions.
 */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
		 u32 len, void *hbuf, int dir)
{
	u32 pos, offset, resid, memoffset;
	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
	u32 *buf;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;
	buf = (u32 *)hbuf;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- T4
	 * MEM_MC0  = 2 -- For T5
	 * MEM_MC1  = 3 -- For T5
	 */
	edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
	if (mtype != MEM_MC1)
		memoffset = (mtype * (edc_size * 1024 * 1024));
	else {
		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
						      MA_EXT_MEMORY1_BAR_A));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory.  We need to grab that aperture in order to know
	 * how to use the specified window.  The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
						  win));
	mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
	mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.  (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 *
	 * A note on Endianness issues:
	 *
	 * The "register" reads and writes below from/to the PCI-E Memory
	 * Window invoke the standard adapter Big-Endian to PCI-E Link
	 * Little-Endian "swizzle."  As a result, if we have the following
	 * data in adapter memory:
	 *
	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
	 *     Address:      i+0  i+1  i+2  i+3
	 *
	 * Then a read of the adapter memory via the PCI-E Memory Window
	 * will yield:
	 *
	 *     x = readl(i)
	 *     31                  0
	 *     [ b3 | b2 | b1 | b0 ]
	 *
	 * If this value is stored into local memory on a Little-Endian system
	 * it will show up correctly in local memory as:
	 *
	 *     ( ..., b0, b1, b2, b3, ... )
	 *
	 * But on a Big-Endian system, the store will show up in memory
	 * incorrectly swizzled as:
	 *
	 *     ( ..., b3, b2, b1, b0, ... )
	 *
	 * So we need to account for this in the reads and writes to the
	 * PCI-E Memory Window below by undoing the register read/write
	 * swizzles.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
						mem_base + offset));
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(*buf++));
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
							 win), pos | win_pf);
			t4_read_reg(adap,
				    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
							win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			u32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = le32_to_cpu(
					(__force __le32)t4_read_reg(adap,
						mem_base + offset));
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(last.word));
		}
	}

	return 0;
}

#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024
#define CHELSIO_VPD_UNIQUE_ID 0x82

/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	unsigned int v = enable ? 0xc : 0;
	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
	return ret < 0 ? ret : 0;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int i, ret, addr;
	int ec, sn, pn;
	u8 *vpd, csum;
	unsigned int vpdr_len, kw_offset, id_len;

	vpd = vmalloc(VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
	if (ret < 0)
		goto out;

	/* The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82.  The first byte of
	 * a VPD shall be CHELSIO_VPD_UNIQUE_ID (0x82).  The VPD programming
	 * software is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
	if (ret < 0)
		goto out;

	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
		ret = -EINVAL;
		goto out;
	}

	id_len = pci_vpd_lrdt_size(vpd);
	if (id_len > ID_LEN)
		id_len = ID_LEN;

	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
		ret = -EINVAL;
		goto out;
	}

	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
	if (vpdr_len + kw_offset > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		ret = -EINVAL;
		goto out;
	}

#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW

	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strim(p->pn);

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		      FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
			      1, &cclk_param, &cclk_val);

out:
	vfree(vpd);
	if (ret)
		return ret;
	p->cclk = cclk_val;

	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};
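
/*
 * Usage sketch (illustrative only): the helpers below combine these opcodes
 * into the usual serial flash sequences.  A sector rewrite, for example, is
 * roughly:
 *
 *	SF_WR_ENABLE -> SF_ERASE_SECTOR -> poll SF_RD_STATUS
 *	SF_WR_ENABLE -> SF_PROG_PAGE    -> poll SF_RD_STATUS
 *
 * as implemented by t4_flash_erase_sectors() and t4_write_flash() further
 * down in this file.
 */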

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;
	t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
		     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA_A);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;
	t4_write_reg(adapter, SF_DATA_A, val);
	t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
		     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
	return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) (htonl(*data));
	}
	return 0;
}

/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
	return ret;
}

/**
 * t4_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 * t4_get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}

/**
 * t4_get_exprom_version - return the Expansion ROM version (if any)
 * @adap: the adapter
 * @vers: where to place the version
 *
 * Reads the Expansion ROM header from FLASH and returns the version
 * number (if present) through the @vers return value pointer.  We return
 * this in the Firmware Version Format since it's convenient.  Return
 * 0 on success, -ENOENT if no Expansion ROM is present.
 */
int t4_get_exprom_version(struct adapter *adap, u32 *vers)
{
	struct exprom_header {
		unsigned char hdr_arr[16];      /* must start with 0x55aa */
		unsigned char hdr_ver[4];       /* Expansion ROM version */
	} *hdr;
	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
					   sizeof(u32))];
	int ret;

	ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
			    0);
	if (ret)
		return ret;

	hdr = (struct exprom_header *)exprom_header_buf;
	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
		return -ENOENT;

	*vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
		 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
		 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
		 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
	return 0;
}

/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{

	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}

/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
				int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
		"installing firmware %u.%u.%u.%u on card.\n",
		FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
		FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
		FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
		FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));

	return 1;
}

int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			     sizeof(*card_fw) / sizeof(uint32_t),
			     (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		*card_fw = *fs_fw;
		card_fw_usable = 1;
		*reset = 0;     /* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}

/**
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	if (end >= adapter->params.sf_nsec)
		return -EINVAL;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
	return ret;
}

/**
 * t4_flash_cfg_addr - return the address of the flash configuration file
 * @adapter: the adapter
 *
 * Return the address within the flash where the Firmware Configuration
 * File is stored.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
	if (adapter->params.sf_size == 0x100000)
		return FLASH_FPGA_CFG_START;
	else
		return FLASH_CFG_START;
}
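
/*
 * Worked example (illustrative only): firmware version words such as
 * adap->params.fw_vers pack major.minor.micro.build into a single 32-bit
 * value, which is why the messages above decode them with
 * FW_HDR_FW_VER_MAJOR_G() and friends:
 *
 *	u32 v = adap->params.fw_vers;
 *
 *	pr_info("fw %u.%u.%u.%u\n",
 *		FW_HDR_FW_VER_MAJOR_G(v), FW_HDR_FW_VER_MINOR_G(v),
 *		FW_HDR_FW_VER_MICRO_G(v), FW_HDR_FW_VER_BUILD_G(v));
 */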

/* Return TRUE if the specified firmware matches the adapter.  I.e. T4
 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
 * and emit an error message for mismatched firmware to save our caller the
 * effort ...
 */
static bool t4_fw_matches_chip(const struct adapter *adap,
			       const struct fw_hdr *hdr)
{
	/* The expression below will return FALSE for any unsupported adapter
	 * which will keep us "honest" in the future ...
	 */
	if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
	    (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5))
		return true;

	dev_err(adap->pdev_dev,
		"FW image (%d) is not suitable for this adapter (%d)\n",
		hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
	return false;
}

/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	else
		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	return ret;
}

/**
 * t4_fwcache - firmware cache operation
 * @adap: the adapter
 * @op: the operation (flush or flush and invalidate)
 */
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
{
	struct fw_params_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn =
		cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
			    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
			    FW_PARAMS_CMD_PFN_V(adap->fn) |
			    FW_PARAMS_CMD_VFN_V(0));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.param[0].mnem =
		cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
	c.param[0].val = (__force __be32)op;

	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}

void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
		j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
		t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
	}
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_ANEG)

/**
 * t4_link_start - apply link configuration to MAC/PHY
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: the port id
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
			       FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_restart_aneg - restart autonegotiation
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 *
 * Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
			       FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
	unsigned int mask;         /* bits to check in interrupt status */
	const char *msg;           /* message to print or NULL */
	short stat_idx;            /* stat counter to increment or -1 */
	unsigned short fatal;      /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};

/**
 * t4_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally emitting a warning or alert message.  The table is terminated
 * by an entry specifying mask 0.  Returns the number of fatal interrupt
 * conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)                           /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info sysbus_intr_info[] = {
		{ RNPP_F, "RXNP array parity error", -1, 1 },
		{ RPCP_F, "RXPC array parity error", -1, 1 },
		{ RCIP_F, "RXCIF array parity error", -1, 1 },
		{ RCCP_F, "Rx completions control array parity error", -1, 1 },
		{ RFTP_F, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info[] = {
		{ TPCP_F, "TXPC array parity error", -1, 1 },
		{ TNPP_F, "TXNP array parity error", -1, 1 },
		{ TFTP_F, "TXFT array parity error", -1, 1 },
		{ TCAP_F, "TXCA array parity error", -1, 1 },
		{ TCIP_F, "TXCIF array parity error", -1, 1 },
		{ RCAP_F, "RXCA array parity error", -1, 1 },
		{ OTDD_F, "outbound request TLP discarded", -1, 1 },
		{ RDPE_F, "Rx data parity error", -1, 1 },
		{ TDUE_F, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_intr_info[] = {
		{ MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
		{ MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
		{ MSIDATAPERR_F, "MSI data parity error", -1, 1 },
		{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
		{ PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
		{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
		{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
		{ DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
		{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
		{ HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR_F, "PCI FID parity error", -1, 1 },
		{ INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
		{ MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
		{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
		{ RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
		{ RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
		{ RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
		{ PCIESINT_F, "PCI core secondary fault", -1, 1 },
		{ PCIEPINT_F, "PCI core primary fault", -1, 1 },
		{ UNXSPLCPLERR_F, "PCI unexpected split completion error",
		  -1, 0 },
		{ 0 }
	};

	static struct intr_info t5_pcie_intr_info[] = {
		{ MSTGRPPERR_F, "Master Response Read Queue parity error",
		  -1, 1 },
		{ MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
		{ MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
		{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
		{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
		{ DREQWRPERR_F, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
		{ HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR_F, "PCI FID parity error", -1, 1 },
		{ VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
		{ MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
		{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
		{ IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
		{ IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
		{ TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ READRSPERR_F, "Outbound read error", -1, 0 },
		{ 0 }
	};

	int fat;

	if (is_t4(adapter->params.chip))
		fat = t4_handle_intr_status(adapter,
				PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
				sysbus_intr_info) +
		      t4_handle_intr_status(adapter,
				PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
				pcie_port_intr_info) +
		      t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
					    pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
					    t5_pcie_intr_info);

	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
		t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;

	static const struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE_F,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC_F,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
		{ DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
		{ DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
		{ ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
		{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO_F,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO_F,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
		((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
	if (v) {
		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
			  (unsigned long long)v);
		t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
		t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
	}

	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
	    v != 0)
		t4_fatal_err(adapter);
}

#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)

/*
 * CIM interrupt handler.
 */
1660 */ 1661 static void cim_intr_handler(struct adapter *adapter) 1662 { 1663 static const struct intr_info cim_intr_info[] = { 1664 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 }, 1665 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, 1666 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, 1667 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 }, 1668 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 }, 1669 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 }, 1670 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 }, 1671 { 0 } 1672 }; 1673 static const struct intr_info cim_upintr_info[] = { 1674 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 }, 1675 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 }, 1676 { ILLWRINT_F, "CIM illegal write", -1, 1 }, 1677 { ILLRDINT_F, "CIM illegal read", -1, 1 }, 1678 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 }, 1679 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 }, 1680 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 }, 1681 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 }, 1682 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 }, 1683 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 }, 1684 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 }, 1685 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 }, 1686 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 }, 1687 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 }, 1688 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 }, 1689 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 }, 1690 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 }, 1691 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 }, 1692 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 }, 1693 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 }, 1694 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 }, 1695 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 }, 1696 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 }, 1697 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 }, 1698 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 }, 1699 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 }, 1700 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 }, 1701 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 }, 1702 { 0 } 1703 }; 1704 1705 int fat; 1706 1707 if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F) 1708 t4_report_fw_error(adapter); 1709 1710 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A, 1711 cim_intr_info) + 1712 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A, 1713 cim_upintr_info); 1714 if (fat) 1715 t4_fatal_err(adapter); 1716 } 1717 1718 /* 1719 * ULP RX interrupt handler. 1720 */ 1721 static void ulprx_intr_handler(struct adapter *adapter) 1722 { 1723 static const struct intr_info ulprx_intr_info[] = { 1724 { 0x1800000, "ULPRX context error", -1, 1 }, 1725 { 0x7fffff, "ULPRX parity error", -1, 1 }, 1726 { 0 } 1727 }; 1728 1729 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info)) 1730 t4_fatal_err(adapter); 1731 } 1732 1733 /* 1734 * ULP TX interrupt handler. 
1735 */ 1736 static void ulptx_intr_handler(struct adapter *adapter) 1737 { 1738 static const struct intr_info ulptx_intr_info[] = { 1739 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1, 1740 0 }, 1741 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1, 1742 0 }, 1743 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1, 1744 0 }, 1745 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1, 1746 0 }, 1747 { 0xfffffff, "ULPTX parity error", -1, 1 }, 1748 { 0 } 1749 }; 1750 1751 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info)) 1752 t4_fatal_err(adapter); 1753 } 1754 1755 /* 1756 * PM TX interrupt handler. 1757 */ 1758 static void pmtx_intr_handler(struct adapter *adapter) 1759 { 1760 static const struct intr_info pmtx_intr_info[] = { 1761 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 }, 1762 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 }, 1763 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 }, 1764 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 }, 1765 { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 }, 1766 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 }, 1767 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", 1768 -1, 1 }, 1769 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 }, 1770 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1}, 1771 { 0 } 1772 }; 1773 1774 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info)) 1775 t4_fatal_err(adapter); 1776 } 1777 1778 /* 1779 * PM RX interrupt handler. 1780 */ 1781 static void pmrx_intr_handler(struct adapter *adapter) 1782 { 1783 static const struct intr_info pmrx_intr_info[] = { 1784 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 }, 1785 { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 }, 1786 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 }, 1787 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", 1788 -1, 1 }, 1789 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 }, 1790 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1}, 1791 { 0 } 1792 }; 1793 1794 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info)) 1795 t4_fatal_err(adapter); 1796 } 1797 1798 /* 1799 * CPL switch interrupt handler. 1800 */ 1801 static void cplsw_intr_handler(struct adapter *adapter) 1802 { 1803 static const struct intr_info cplsw_intr_info[] = { 1804 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 }, 1805 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 }, 1806 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 }, 1807 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 }, 1808 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 }, 1809 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 }, 1810 { 0 } 1811 }; 1812 1813 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info)) 1814 t4_fatal_err(adapter); 1815 } 1816 1817 /* 1818 * LE interrupt handler. 1819 */ 1820 static void le_intr_handler(struct adapter *adap) 1821 { 1822 static const struct intr_info le_intr_info[] = { 1823 { LIPMISS_F, "LE LIP miss", -1, 0 }, 1824 { LIP0_F, "LE 0 LIP error", -1, 0 }, 1825 { PARITYERR_F, "LE parity error", -1, 1 }, 1826 { UNKNOWNCMD_F, "LE unknown command", -1, 1 }, 1827 { REQQPARERR_F, "LE request queue parity error", -1, 1 }, 1828 { 0 } 1829 }; 1830 1831 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info)) 1832 t4_fatal_err(adap); 1833 } 1834 1835 /* 1836 * MPS interrupt handler. 
1837 */ 1838 static void mps_intr_handler(struct adapter *adapter) 1839 { 1840 static const struct intr_info mps_rx_intr_info[] = { 1841 { 0xffffff, "MPS Rx parity error", -1, 1 }, 1842 { 0 } 1843 }; 1844 static const struct intr_info mps_tx_intr_info[] = { 1845 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 }, 1846 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 }, 1847 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error", 1848 -1, 1 }, 1849 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error", 1850 -1, 1 }, 1851 { BUBBLE_F, "MPS Tx underflow", -1, 1 }, 1852 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 }, 1853 { FRMERR_F, "MPS Tx framing error", -1, 1 }, 1854 { 0 } 1855 }; 1856 static const struct intr_info mps_trc_intr_info[] = { 1857 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 }, 1858 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error", 1859 -1, 1 }, 1860 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 }, 1861 { 0 } 1862 }; 1863 static const struct intr_info mps_stat_sram_intr_info[] = { 1864 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, 1865 { 0 } 1866 }; 1867 static const struct intr_info mps_stat_tx_intr_info[] = { 1868 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, 1869 { 0 } 1870 }; 1871 static const struct intr_info mps_stat_rx_intr_info[] = { 1872 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, 1873 { 0 } 1874 }; 1875 static const struct intr_info mps_cls_intr_info[] = { 1876 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 }, 1877 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 }, 1878 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 }, 1879 { 0 } 1880 }; 1881 1882 int fat; 1883 1884 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A, 1885 mps_rx_intr_info) + 1886 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A, 1887 mps_tx_intr_info) + 1888 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A, 1889 mps_trc_intr_info) + 1890 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A, 1891 mps_stat_sram_intr_info) + 1892 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A, 1893 mps_stat_tx_intr_info) + 1894 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A, 1895 mps_stat_rx_intr_info) + 1896 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A, 1897 mps_cls_intr_info); 1898 1899 t4_write_reg(adapter, MPS_INT_CAUSE_A, 0); 1900 t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */ 1901 if (fat) 1902 t4_fatal_err(adapter); 1903 } 1904 1905 #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \ 1906 ECC_UE_INT_CAUSE_F) 1907 1908 /* 1909 * EDC/MC interrupt handler. 
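 *
 * For reference, @idx selects which register pair is consulted below:
 * MEM_EDC0/MEM_EDC1 use EDC_REG(EDC_INT_CAUSE_A/EDC_ECC_STATUS_A, idx),
 * MEM_MC uses MC_INT_CAUSE_A/MC_ECC_STATUS_A on T4 and the MC_P_*
 * equivalents on T5, and any other index is treated as MC1 via
 * MC_REG(..., 1). Parity and uncorrectable-ECC causes are fatal, while
 * correctable ECC errors only produce a rate-limited warning carrying the
 * count read from the ECC status register.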
1910 */ 1911 static void mem_intr_handler(struct adapter *adapter, int idx) 1912 { 1913 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" }; 1914 1915 unsigned int addr, cnt_addr, v; 1916 1917 if (idx <= MEM_EDC1) { 1918 addr = EDC_REG(EDC_INT_CAUSE_A, idx); 1919 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx); 1920 } else if (idx == MEM_MC) { 1921 if (is_t4(adapter->params.chip)) { 1922 addr = MC_INT_CAUSE_A; 1923 cnt_addr = MC_ECC_STATUS_A; 1924 } else { 1925 addr = MC_P_INT_CAUSE_A; 1926 cnt_addr = MC_P_ECC_STATUS_A; 1927 } 1928 } else { 1929 addr = MC_REG(MC_P_INT_CAUSE_A, 1); 1930 cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1); 1931 } 1932 1933 v = t4_read_reg(adapter, addr) & MEM_INT_MASK; 1934 if (v & PERR_INT_CAUSE_F) 1935 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n", 1936 name[idx]); 1937 if (v & ECC_CE_INT_CAUSE_F) { 1938 u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr)); 1939 1940 t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M)); 1941 if (printk_ratelimit()) 1942 dev_warn(adapter->pdev_dev, 1943 "%u %s correctable ECC data error%s\n", 1944 cnt, name[idx], cnt > 1 ? "s" : ""); 1945 } 1946 if (v & ECC_UE_INT_CAUSE_F) 1947 dev_alert(adapter->pdev_dev, 1948 "%s uncorrectable ECC data error\n", name[idx]); 1949 1950 t4_write_reg(adapter, addr, v); 1951 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F)) 1952 t4_fatal_err(adapter); 1953 } 1954 1955 /* 1956 * MA interrupt handler. 1957 */ 1958 static void ma_intr_handler(struct adapter *adap) 1959 { 1960 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A); 1961 1962 if (status & MEM_PERR_INT_CAUSE_F) { 1963 dev_alert(adap->pdev_dev, 1964 "MA parity error, parity status %#x\n", 1965 t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A)); 1966 if (is_t5(adap->params.chip)) 1967 dev_alert(adap->pdev_dev, 1968 "MA parity error, parity status %#x\n", 1969 t4_read_reg(adap, 1970 MA_PARITY_ERROR_STATUS2_A)); 1971 } 1972 if (status & MEM_WRAP_INT_CAUSE_F) { 1973 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A); 1974 dev_alert(adap->pdev_dev, "MA address wrap-around error by " 1975 "client %u to address %#x\n", 1976 MEM_WRAP_CLIENT_NUM_G(v), 1977 MEM_WRAP_ADDRESS_G(v) << 4); 1978 } 1979 t4_write_reg(adap, MA_INT_CAUSE_A, status); 1980 t4_fatal_err(adap); 1981 } 1982 1983 /* 1984 * SMB interrupt handler. 1985 */ 1986 static void smb_intr_handler(struct adapter *adap) 1987 { 1988 static const struct intr_info smb_intr_info[] = { 1989 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 }, 1990 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 }, 1991 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 }, 1992 { 0 } 1993 }; 1994 1995 if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info)) 1996 t4_fatal_err(adap); 1997 } 1998 1999 /* 2000 * NC-SI interrupt handler. 2001 */ 2002 static void ncsi_intr_handler(struct adapter *adap) 2003 { 2004 static const struct intr_info ncsi_intr_info[] = { 2005 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 }, 2006 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 }, 2007 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 }, 2008 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 }, 2009 { 0 } 2010 }; 2011 2012 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info)) 2013 t4_fatal_err(adap); 2014 } 2015 2016 /* 2017 * XGMAC interrupt handler. 
2018 */
2019 static void xgmac_intr_handler(struct adapter *adap, int port)
2020 {
2021 u32 v, int_cause_reg;
2022
2023 if (is_t4(adap->params.chip))
2024 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
2025 else
2026 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
2027
2028 v = t4_read_reg(adap, int_cause_reg);
2029
2030 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
2031 if (!v)
2032 return;
2033
2034 if (v & TXFIFO_PRTY_ERR_F)
2035 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
2036 port);
2037 if (v & RXFIFO_PRTY_ERR_F)
2038 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
2039 port);
2040 t4_write_reg(adap, int_cause_reg, v);
2041 t4_fatal_err(adap);
2042 }
2043
2044 /*
2045 * PL interrupt handler.
2046 */
2047 static void pl_intr_handler(struct adapter *adap)
2048 {
2049 static const struct intr_info pl_intr_info[] = {
2050 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
2051 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
2052 { 0 }
2053 };
2054
2055 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
2056 t4_fatal_err(adap);
2057 }
2058
2059 #define PF_INTR_MASK (PFSW_F)
2060 #define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
2061 EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
2062 CPL_SWITCH_F | SGE_F | ULP_TX_F)
2063
2064 /**
2065 * t4_slow_intr_handler - control path interrupt handler
2066 * @adapter: the adapter
2067 *
2068 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
2069 * The designation 'slow' is because it involves register reads, while
2070 * data interrupts typically don't involve any MMIOs.
2071 */
2072 int t4_slow_intr_handler(struct adapter *adapter)
2073 {
2074 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
2075
2076 if (!(cause & GLBL_INTR_MASK))
2077 return 0;
2078 if (cause & CIM_F)
2079 cim_intr_handler(adapter);
2080 if (cause & MPS_F)
2081 mps_intr_handler(adapter);
2082 if (cause & NCSI_F)
2083 ncsi_intr_handler(adapter);
2084 if (cause & PL_F)
2085 pl_intr_handler(adapter);
2086 if (cause & SMB_F)
2087 smb_intr_handler(adapter);
2088 if (cause & XGMAC0_F)
2089 xgmac_intr_handler(adapter, 0);
2090 if (cause & XGMAC1_F)
2091 xgmac_intr_handler(adapter, 1);
2092 if (cause & XGMAC_KR0_F)
2093 xgmac_intr_handler(adapter, 2);
2094 if (cause & XGMAC_KR1_F)
2095 xgmac_intr_handler(adapter, 3);
2096 if (cause & PCIE_F)
2097 pcie_intr_handler(adapter);
2098 if (cause & MC_F)
2099 mem_intr_handler(adapter, MEM_MC);
2100 if (!is_t4(adapter->params.chip) && (cause & MC1_F))
2101 mem_intr_handler(adapter, MEM_MC1);
2102 if (cause & EDC0_F)
2103 mem_intr_handler(adapter, MEM_EDC0);
2104 if (cause & EDC1_F)
2105 mem_intr_handler(adapter, MEM_EDC1);
2106 if (cause & LE_F)
2107 le_intr_handler(adapter);
2108 if (cause & TP_F)
2109 tp_intr_handler(adapter);
2110 if (cause & MA_F)
2111 ma_intr_handler(adapter);
2112 if (cause & PM_TX_F)
2113 pmtx_intr_handler(adapter);
2114 if (cause & PM_RX_F)
2115 pmrx_intr_handler(adapter);
2116 if (cause & ULP_RX_F)
2117 ulprx_intr_handler(adapter);
2118 if (cause & CPL_SWITCH_F)
2119 cplsw_intr_handler(adapter);
2120 if (cause & SGE_F)
2121 sge_intr_handler(adapter);
2122 if (cause & ULP_TX_F)
2123 ulptx_intr_handler(adapter);
2124
2125 /* Clear the interrupts just processed for which we are the master.
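 *
 * Illustrative note: only the bits in GLBL_INTR_MASK are cleared here, so
 * causes owned by other PFs are left pending. The return value (0 when no
 * global cause was set, 1 after handling) lets an interrupt service
 * routine decide whether the interrupt was ours, e.g. a sketch (not
 * necessarily how the driver's ISRs are actually written):
 *
 *	return t4_slow_intr_handler(adap) ? IRQ_HANDLED : IRQ_NONE;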
*/ 2126 t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK); 2127 (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */ 2128 return 1; 2129 } 2130 2131 /** 2132 * t4_intr_enable - enable interrupts 2133 * @adapter: the adapter whose interrupts should be enabled 2134 * 2135 * Enable PF-specific interrupts for the calling function and the top-level 2136 * interrupt concentrator for global interrupts. Interrupts are already 2137 * enabled at each module, here we just enable the roots of the interrupt 2138 * hierarchies. 2139 * 2140 * Note: this function should be called only when the driver manages 2141 * non PF-specific interrupts from the various HW modules. Only one PCI 2142 * function at a time should be doing this. 2143 */ 2144 void t4_intr_enable(struct adapter *adapter) 2145 { 2146 u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A)); 2147 2148 t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F | 2149 ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F | 2150 ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F | 2151 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F | 2152 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F | 2153 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F | 2154 ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F | 2155 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F | 2156 EGRESS_SIZE_ERR_F); 2157 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK); 2158 t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf); 2159 } 2160 2161 /** 2162 * t4_intr_disable - disable interrupts 2163 * @adapter: the adapter whose interrupts should be disabled 2164 * 2165 * Disable interrupts. We only disable the top-level interrupt 2166 * concentrators. The caller must be a PCI function managing global 2167 * interrupts. 2168 */ 2169 void t4_intr_disable(struct adapter *adapter) 2170 { 2171 u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A)); 2172 2173 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0); 2174 t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0); 2175 } 2176 2177 /** 2178 * hash_mac_addr - return the hash value of a MAC address 2179 * @addr: the 48-bit Ethernet MAC address 2180 * 2181 * Hashes a MAC address according to the hash function used by HW inexact 2182 * (hash) address matching. 2183 */ 2184 static int hash_mac_addr(const u8 *addr) 2185 { 2186 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; 2187 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; 2188 a ^= b; 2189 a ^= (a >> 12); 2190 a ^= (a >> 6); 2191 return a & 0x3f; 2192 } 2193 2194 /** 2195 * t4_config_rss_range - configure a portion of the RSS mapping table 2196 * @adapter: the adapter 2197 * @mbox: mbox to use for the FW command 2198 * @viid: virtual interface whose RSS subtable is to be written 2199 * @start: start entry in the table to write 2200 * @n: how many table entries to write 2201 * @rspq: values for the response queue lookup table 2202 * @nrspq: number of values in @rspq 2203 * 2204 * Programs the selected part of the VI's RSS mapping table with the 2205 * provided values. If @nrspq < @n the supplied values are used repeatedly 2206 * until the full table range is populated. 2207 * 2208 * The caller must ensure the values in @rspq are in the range allowed for 2209 * @viid. 
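 *
 * Illustrative example (the queue IDs and the adap->mbox mailbox are
 * placeholders): each FW_RSS_IND_TBL_CMD carries at most 32 entries,
 * packed three per 32-bit word via the IQ0/IQ1/IQ2 fields, so
 *
 *	static const u16 rspq[] = { 100, 101, 102, 103 };
 *	int err = t4_config_rss_range(adap, adap->mbox, viid, 0, 64, rspq, 4);
 *
 * issues two mailbox commands and fills entries 0..63 of the VI's table
 * with the repeating pattern 100, 101, 102, 103, ...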
2210 */ 2211 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, 2212 int start, int n, const u16 *rspq, unsigned int nrspq) 2213 { 2214 int ret; 2215 const u16 *rsp = rspq; 2216 const u16 *rsp_end = rspq + nrspq; 2217 struct fw_rss_ind_tbl_cmd cmd; 2218 2219 memset(&cmd, 0, sizeof(cmd)); 2220 cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) | 2221 FW_CMD_REQUEST_F | FW_CMD_WRITE_F | 2222 FW_RSS_IND_TBL_CMD_VIID_V(viid)); 2223 cmd.retval_len16 = htonl(FW_LEN16(cmd)); 2224 2225 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */ 2226 while (n > 0) { 2227 int nq = min(n, 32); 2228 __be32 *qp = &cmd.iq0_to_iq2; 2229 2230 cmd.niqid = htons(nq); 2231 cmd.startidx = htons(start); 2232 2233 start += nq; 2234 n -= nq; 2235 2236 while (nq > 0) { 2237 unsigned int v; 2238 2239 v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp); 2240 if (++rsp >= rsp_end) 2241 rsp = rspq; 2242 v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp); 2243 if (++rsp >= rsp_end) 2244 rsp = rspq; 2245 v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp); 2246 if (++rsp >= rsp_end) 2247 rsp = rspq; 2248 2249 *qp++ = htonl(v); 2250 nq -= 3; 2251 } 2252 2253 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); 2254 if (ret) 2255 return ret; 2256 } 2257 return 0; 2258 } 2259 2260 /** 2261 * t4_config_glbl_rss - configure the global RSS mode 2262 * @adapter: the adapter 2263 * @mbox: mbox to use for the FW command 2264 * @mode: global RSS mode 2265 * @flags: mode-specific flags 2266 * 2267 * Sets the global RSS mode. 2268 */ 2269 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, 2270 unsigned int flags) 2271 { 2272 struct fw_rss_glb_config_cmd c; 2273 2274 memset(&c, 0, sizeof(c)); 2275 c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) | 2276 FW_CMD_REQUEST_F | FW_CMD_WRITE_F); 2277 c.retval_len16 = htonl(FW_LEN16(c)); 2278 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { 2279 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode)); 2280 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { 2281 c.u.basicvirtual.mode_pkd = 2282 htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode)); 2283 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); 2284 } else 2285 return -EINVAL; 2286 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); 2287 } 2288 2289 /* Read an RSS table row */ 2290 static int rd_rss_row(struct adapter *adap, int row, u32 *val) 2291 { 2292 t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row); 2293 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1, 2294 5, 0, val); 2295 } 2296 2297 /** 2298 * t4_read_rss - read the contents of the RSS mapping table 2299 * @adapter: the adapter 2300 * @map: holds the contents of the RSS mapping table 2301 * 2302 * Reads the contents of the RSS hash->queue mapping table. 2303 */ 2304 int t4_read_rss(struct adapter *adapter, u16 *map) 2305 { 2306 u32 val; 2307 int i, ret; 2308 2309 for (i = 0; i < RSS_NENTRIES / 2; ++i) { 2310 ret = rd_rss_row(adapter, i, &val); 2311 if (ret) 2312 return ret; 2313 *map++ = LKPTBLQUEUE0_G(val); 2314 *map++ = LKPTBLQUEUE1_G(val); 2315 } 2316 return 0; 2317 } 2318 2319 /** 2320 * t4_read_rss_key - read the global RSS key 2321 * @adap: the adapter 2322 * @key: 10-entry array holding the 320-bit RSS key 2323 * 2324 * Reads the global 320-bit RSS key. 
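 *
 * The key is returned as ten 32-bit words read through the
 * TP_PIO_ADDR_A/TP_PIO_DATA_A indirect pair starting at
 * TP_RSS_SECRET_KEY0_A, so a caller simply provides the storage, e.g.
 *
 *	u32 key[10];
 *	t4_read_rss_key(adap, key);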
2325 */ 2326 void t4_read_rss_key(struct adapter *adap, u32 *key) 2327 { 2328 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, 2329 TP_RSS_SECRET_KEY0_A); 2330 } 2331 2332 /** 2333 * t4_write_rss_key - program one of the RSS keys 2334 * @adap: the adapter 2335 * @key: 10-entry array holding the 320-bit RSS key 2336 * @idx: which RSS key to write 2337 * 2338 * Writes one of the RSS keys with the given 320-bit value. If @idx is 2339 * 0..15 the corresponding entry in the RSS key table is written, 2340 * otherwise the global RSS key is written. 2341 */ 2342 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx) 2343 { 2344 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, 2345 TP_RSS_SECRET_KEY0_A); 2346 if (idx >= 0 && idx < 16) 2347 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A, 2348 KEYWRADDR_V(idx) | KEYWREN_F); 2349 } 2350 2351 /** 2352 * t4_read_rss_pf_config - read PF RSS Configuration Table 2353 * @adapter: the adapter 2354 * @index: the entry in the PF RSS table to read 2355 * @valp: where to store the returned value 2356 * 2357 * Reads the PF RSS Configuration Table at the specified index and returns 2358 * the value found there. 2359 */ 2360 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, 2361 u32 *valp) 2362 { 2363 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, 2364 valp, 1, TP_RSS_PF0_CONFIG_A + index); 2365 } 2366 2367 /** 2368 * t4_read_rss_vf_config - read VF RSS Configuration Table 2369 * @adapter: the adapter 2370 * @index: the entry in the VF RSS table to read 2371 * @vfl: where to store the returned VFL 2372 * @vfh: where to store the returned VFH 2373 * 2374 * Reads the VF RSS Configuration Table at the specified index and returns 2375 * the (VFL, VFH) values found there. 2376 */ 2377 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, 2378 u32 *vfl, u32 *vfh) 2379 { 2380 u32 vrt, mask, data; 2381 2382 mask = VFWRADDR_V(VFWRADDR_M); 2383 data = VFWRADDR_V(index); 2384 2385 /* Request that the index'th VF Table values be read into VFL/VFH. 2386 */ 2387 vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A); 2388 vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask); 2389 vrt |= data | VFRDEN_F; 2390 t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt); 2391 2392 /* Grab the VFL/VFH values ... 2393 */ 2394 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, 2395 vfl, 1, TP_RSS_VFL_CONFIG_A); 2396 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, 2397 vfh, 1, TP_RSS_VFH_CONFIG_A); 2398 } 2399 2400 /** 2401 * t4_read_rss_pf_map - read PF RSS Map 2402 * @adapter: the adapter 2403 * 2404 * Reads the PF RSS Map register and returns its value. 2405 */ 2406 u32 t4_read_rss_pf_map(struct adapter *adapter) 2407 { 2408 u32 pfmap; 2409 2410 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, 2411 &pfmap, 1, TP_RSS_PF_MAP_A); 2412 return pfmap; 2413 } 2414 2415 /** 2416 * t4_read_rss_pf_mask - read PF RSS Mask 2417 * @adapter: the adapter 2418 * 2419 * Reads the PF RSS Mask register and returns its value. 2420 */ 2421 u32 t4_read_rss_pf_mask(struct adapter *adapter) 2422 { 2423 u32 pfmask; 2424 2425 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, 2426 &pfmask, 1, TP_RSS_PF_MSK_A); 2427 return pfmask; 2428 } 2429 2430 /** 2431 * t4_tp_get_tcp_stats - read TP's TCP MIB counters 2432 * @adap: the adapter 2433 * @v4: holds the TCP/IP counter values 2434 * @v6: holds the TCP/IPv6 counter values 2435 * 2436 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. 
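 *
 * The 64-bit counters are assembled from consecutive HI/LO words of the
 * TP MIB, read through the TP_MIB_INDEX_A/TP_MIB_DATA_A indirect pair;
 * for example tcpInSegs is built from the TP_MIB_TCP_IN_SEG_HI_A and
 * TP_MIB_TCP_IN_SEG_LO_A words as ((u64)hi << 32) | lo.
 *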
2437 * Either @v4 or @v6 may be %NULL to skip the corresponding stats. 2438 */ 2439 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, 2440 struct tp_tcp_stats *v6) 2441 { 2442 u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1]; 2443 2444 #define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A) 2445 #define STAT(x) val[STAT_IDX(x)] 2446 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) 2447 2448 if (v4) { 2449 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 2450 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A); 2451 v4->tcpOutRsts = STAT(OUT_RST); 2452 v4->tcpInSegs = STAT64(IN_SEG); 2453 v4->tcpOutSegs = STAT64(OUT_SEG); 2454 v4->tcpRetransSegs = STAT64(RXT_SEG); 2455 } 2456 if (v6) { 2457 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 2458 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A); 2459 v6->tcpOutRsts = STAT(OUT_RST); 2460 v6->tcpInSegs = STAT64(IN_SEG); 2461 v6->tcpOutSegs = STAT64(OUT_SEG); 2462 v6->tcpRetransSegs = STAT64(RXT_SEG); 2463 } 2464 #undef STAT64 2465 #undef STAT 2466 #undef STAT_IDX 2467 } 2468 2469 /** 2470 * t4_read_mtu_tbl - returns the values in the HW path MTU table 2471 * @adap: the adapter 2472 * @mtus: where to store the MTU values 2473 * @mtu_log: where to store the MTU base-2 log (may be %NULL) 2474 * 2475 * Reads the HW path MTU table. 2476 */ 2477 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) 2478 { 2479 u32 v; 2480 int i; 2481 2482 for (i = 0; i < NMTUS; ++i) { 2483 t4_write_reg(adap, TP_MTU_TABLE_A, 2484 MTUINDEX_V(0xff) | MTUVALUE_V(i)); 2485 v = t4_read_reg(adap, TP_MTU_TABLE_A); 2486 mtus[i] = MTUVALUE_G(v); 2487 if (mtu_log) 2488 mtu_log[i] = MTUWIDTH_G(v); 2489 } 2490 } 2491 2492 /** 2493 * t4_read_cong_tbl - reads the congestion control table 2494 * @adap: the adapter 2495 * @incr: where to store the alpha values 2496 * 2497 * Reads the additive increments programmed into the HW congestion 2498 * control table. 2499 */ 2500 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]) 2501 { 2502 unsigned int mtu, w; 2503 2504 for (mtu = 0; mtu < NMTUS; ++mtu) 2505 for (w = 0; w < NCCTRL_WIN; ++w) { 2506 t4_write_reg(adap, TP_CCTRL_TABLE_A, 2507 ROWINDEX_V(0xffff) | (mtu << 5) | w); 2508 incr[mtu][w] = (u16)t4_read_reg(adap, 2509 TP_CCTRL_TABLE_A) & 0x1fff; 2510 } 2511 } 2512 2513 /** 2514 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register 2515 * @adap: the adapter 2516 * @addr: the indirect TP register address 2517 * @mask: specifies the field within the register to modify 2518 * @val: new value for the field 2519 * 2520 * Sets a field of an indirect TP register to the given value. 2521 */ 2522 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, 2523 unsigned int mask, unsigned int val) 2524 { 2525 t4_write_reg(adap, TP_PIO_ADDR_A, addr); 2526 val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask; 2527 t4_write_reg(adap, TP_PIO_DATA_A, val); 2528 } 2529 2530 /** 2531 * init_cong_ctrl - initialize congestion control parameters 2532 * @a: the alpha values for congestion control 2533 * @b: the beta values for congestion control 2534 * 2535 * Initialize the congestion control parameters. 
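 *
 * @a holds the per-window additive-increment ("alpha") values and @b the
 * per-window "beta" values later programmed by t4_load_mtus(). As a
 * worked example with these defaults: window 9 has alpha = 2 and
 * avg_pkts = 112, so an MTU of 1500 yields an additive increment of
 * max((1500 - 40) * 2 / 112, CC_MIN_INCR) = 26.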
2536 */ 2537 static void init_cong_ctrl(unsigned short *a, unsigned short *b) 2538 { 2539 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; 2540 a[9] = 2; 2541 a[10] = 3; 2542 a[11] = 4; 2543 a[12] = 5; 2544 a[13] = 6; 2545 a[14] = 7; 2546 a[15] = 8; 2547 a[16] = 9; 2548 a[17] = 10; 2549 a[18] = 14; 2550 a[19] = 17; 2551 a[20] = 21; 2552 a[21] = 25; 2553 a[22] = 30; 2554 a[23] = 35; 2555 a[24] = 45; 2556 a[25] = 60; 2557 a[26] = 80; 2558 a[27] = 100; 2559 a[28] = 200; 2560 a[29] = 300; 2561 a[30] = 400; 2562 a[31] = 500; 2563 2564 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; 2565 b[9] = b[10] = 1; 2566 b[11] = b[12] = 2; 2567 b[13] = b[14] = b[15] = b[16] = 3; 2568 b[17] = b[18] = b[19] = b[20] = b[21] = 4; 2569 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; 2570 b[28] = b[29] = 6; 2571 b[30] = b[31] = 7; 2572 } 2573 2574 /* The minimum additive increment value for the congestion control table */ 2575 #define CC_MIN_INCR 2U 2576 2577 /** 2578 * t4_load_mtus - write the MTU and congestion control HW tables 2579 * @adap: the adapter 2580 * @mtus: the values for the MTU table 2581 * @alpha: the values for the congestion control alpha parameter 2582 * @beta: the values for the congestion control beta parameter 2583 * 2584 * Write the HW MTU table with the supplied MTUs and the high-speed 2585 * congestion control table with the supplied alpha, beta, and MTUs. 2586 * We write the two tables together because the additive increments 2587 * depend on the MTUs. 2588 */ 2589 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, 2590 const unsigned short *alpha, const unsigned short *beta) 2591 { 2592 static const unsigned int avg_pkts[NCCTRL_WIN] = { 2593 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, 2594 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, 2595 28672, 40960, 57344, 81920, 114688, 163840, 229376 2596 }; 2597 2598 unsigned int i, w; 2599 2600 for (i = 0; i < NMTUS; ++i) { 2601 unsigned int mtu = mtus[i]; 2602 unsigned int log2 = fls(mtu); 2603 2604 if (!(mtu & ((1 << log2) >> 2))) /* round */ 2605 log2--; 2606 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) | 2607 MTUWIDTH_V(log2) | MTUVALUE_V(mtu)); 2608 2609 for (w = 0; w < NCCTRL_WIN; ++w) { 2610 unsigned int inc; 2611 2612 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], 2613 CC_MIN_INCR); 2614 2615 t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) | 2616 (w << 16) | (beta[w] << 13) | inc); 2617 } 2618 } 2619 } 2620 2621 /** 2622 * t4_pmtx_get_stats - returns the HW stats from PMTX 2623 * @adap: the adapter 2624 * @cnt: where to store the count statistics 2625 * @cycles: where to store the cycle statistics 2626 * 2627 * Returns performance statistics from PMTX. 
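 *
 * For each of the PM_NSTATS statistics the count is read from
 * PM_TX_STAT_COUNT_A after selecting the statistic in PM_TX_STAT_CONFIG_A.
 * The cycle counter is a single 64-bit read of PM_TX_STAT_LSB_A on T4;
 * on T5 it is assembled from two 32-bit debug-data words (MSB first),
 * i.e. cycles[i] = ((u64)msb << 32) | lsb.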
2628 */ 2629 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) 2630 { 2631 int i; 2632 u32 data[2]; 2633 2634 for (i = 0; i < PM_NSTATS; i++) { 2635 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1); 2636 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A); 2637 if (is_t4(adap->params.chip)) { 2638 cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A); 2639 } else { 2640 t4_read_indirect(adap, PM_TX_DBG_CTRL_A, 2641 PM_TX_DBG_DATA_A, data, 2, 2642 PM_TX_DBG_STAT_MSB_A); 2643 cycles[i] = (((u64)data[0] << 32) | data[1]); 2644 } 2645 } 2646 } 2647 2648 /** 2649 * t4_pmrx_get_stats - returns the HW stats from PMRX 2650 * @adap: the adapter 2651 * @cnt: where to store the count statistics 2652 * @cycles: where to store the cycle statistics 2653 * 2654 * Returns performance statistics from PMRX. 2655 */ 2656 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) 2657 { 2658 int i; 2659 u32 data[2]; 2660 2661 for (i = 0; i < PM_NSTATS; i++) { 2662 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1); 2663 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A); 2664 if (is_t4(adap->params.chip)) { 2665 cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A); 2666 } else { 2667 t4_read_indirect(adap, PM_RX_DBG_CTRL_A, 2668 PM_RX_DBG_DATA_A, data, 2, 2669 PM_RX_DBG_STAT_MSB_A); 2670 cycles[i] = (((u64)data[0] << 32) | data[1]); 2671 } 2672 } 2673 } 2674 2675 /** 2676 * get_mps_bg_map - return the buffer groups associated with a port 2677 * @adap: the adapter 2678 * @idx: the port index 2679 * 2680 * Returns a bitmap indicating which MPS buffer groups are associated 2681 * with the given port. Bit i is set if buffer group i is used by the 2682 * port. 2683 */ 2684 static unsigned int get_mps_bg_map(struct adapter *adap, int idx) 2685 { 2686 u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A)); 2687 2688 if (n == 0) 2689 return idx == 0 ? 0xf : 0; 2690 if (n == 1) 2691 return idx < 2 ? (3 << (2 * idx)) : 0; 2692 return 1 << idx; 2693 } 2694 2695 /** 2696 * t4_get_port_type_description - return Port Type string description 2697 * @port_type: firmware Port Type enumeration 2698 */ 2699 const char *t4_get_port_type_description(enum fw_port_type port_type) 2700 { 2701 static const char *const port_type_description[] = { 2702 "R XFI", 2703 "R XAUI", 2704 "T SGMII", 2705 "T XFI", 2706 "T XAUI", 2707 "KX4", 2708 "CX4", 2709 "KX", 2710 "KR", 2711 "R SFP+", 2712 "KR/KX", 2713 "KR/KX/KX4", 2714 "R QSFP_10G", 2715 "R QSA", 2716 "R QSFP", 2717 "R BP40_BA", 2718 }; 2719 2720 if (port_type < ARRAY_SIZE(port_type_description)) 2721 return port_type_description[port_type]; 2722 return "UNKNOWN"; 2723 } 2724 2725 /** 2726 * t4_get_port_stats - collect port statistics 2727 * @adap: the adapter 2728 * @idx: the port index 2729 * @p: the stats structure to fill 2730 * 2731 * Collect statistics related to the given port from HW. 2732 */ 2733 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) 2734 { 2735 u32 bgmap = get_mps_bg_map(adap, idx); 2736 2737 #define GET_STAT(name) \ 2738 t4_read_reg64(adap, \ 2739 (is_t4(adap->params.chip) ? 
PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ 2740 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L))) 2741 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) 2742 2743 p->tx_octets = GET_STAT(TX_PORT_BYTES); 2744 p->tx_frames = GET_STAT(TX_PORT_FRAMES); 2745 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); 2746 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); 2747 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); 2748 p->tx_error_frames = GET_STAT(TX_PORT_ERROR); 2749 p->tx_frames_64 = GET_STAT(TX_PORT_64B); 2750 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); 2751 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); 2752 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); 2753 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); 2754 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); 2755 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); 2756 p->tx_drop = GET_STAT(TX_PORT_DROP); 2757 p->tx_pause = GET_STAT(TX_PORT_PAUSE); 2758 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); 2759 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); 2760 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); 2761 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); 2762 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); 2763 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); 2764 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); 2765 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); 2766 2767 p->rx_octets = GET_STAT(RX_PORT_BYTES); 2768 p->rx_frames = GET_STAT(RX_PORT_FRAMES); 2769 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); 2770 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); 2771 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); 2772 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); 2773 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); 2774 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); 2775 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); 2776 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); 2777 p->rx_runt = GET_STAT(RX_PORT_LESS_64B); 2778 p->rx_frames_64 = GET_STAT(RX_PORT_64B); 2779 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); 2780 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); 2781 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); 2782 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); 2783 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); 2784 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); 2785 p->rx_pause = GET_STAT(RX_PORT_PAUSE); 2786 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); 2787 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); 2788 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); 2789 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); 2790 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); 2791 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); 2792 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); 2793 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); 2794 2795 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; 2796 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; 2797 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; 2798 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; 2799 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; 2800 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; 2801 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; 2802 p->rx_trunc3 = (bgmap & 8) ? 
GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; 2803 2804 #undef GET_STAT 2805 #undef GET_STAT_COM 2806 } 2807 2808 /** 2809 * t4_wol_magic_enable - enable/disable magic packet WoL 2810 * @adap: the adapter 2811 * @port: the physical port index 2812 * @addr: MAC address expected in magic packets, %NULL to disable 2813 * 2814 * Enables/disables magic packet wake-on-LAN for the selected port. 2815 */ 2816 void t4_wol_magic_enable(struct adapter *adap, unsigned int port, 2817 const u8 *addr) 2818 { 2819 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; 2820 2821 if (is_t4(adap->params.chip)) { 2822 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); 2823 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); 2824 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A); 2825 } else { 2826 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO); 2827 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI); 2828 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A); 2829 } 2830 2831 if (addr) { 2832 t4_write_reg(adap, mag_id_reg_l, 2833 (addr[2] << 24) | (addr[3] << 16) | 2834 (addr[4] << 8) | addr[5]); 2835 t4_write_reg(adap, mag_id_reg_h, 2836 (addr[0] << 8) | addr[1]); 2837 } 2838 t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F, 2839 addr ? MAGICEN_F : 0); 2840 } 2841 2842 /** 2843 * t4_wol_pat_enable - enable/disable pattern-based WoL 2844 * @adap: the adapter 2845 * @port: the physical port index 2846 * @map: bitmap of which HW pattern filters to set 2847 * @mask0: byte mask for bytes 0-63 of a packet 2848 * @mask1: byte mask for bytes 64-127 of a packet 2849 * @crc: Ethernet CRC for selected bytes 2850 * @enable: enable/disable switch 2851 * 2852 * Sets the pattern filters indicated in @map to mask out the bytes 2853 * specified in @mask0/@mask1 in received packets and compare the CRC of 2854 * the resulting packet against @crc. If @enable is %true pattern-based 2855 * WoL is enabled, otherwise disabled. 2856 */ 2857 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, 2858 u64 mask0, u64 mask1, unsigned int crc, bool enable) 2859 { 2860 int i; 2861 u32 port_cfg_reg; 2862 2863 if (is_t4(adap->params.chip)) 2864 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A); 2865 else 2866 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A); 2867 2868 if (!enable) { 2869 t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0); 2870 return 0; 2871 } 2872 if (map > 0xff) 2873 return -EINVAL; 2874 2875 #define EPIO_REG(name) \ 2876 (is_t4(adap->params.chip) ? 
\
2877 PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
2878 T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
2879
2880 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2881 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2882 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2883
2884 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2885 if (!(map & 1))
2886 continue;
2887
2888 /* write byte masks */
2889 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2890 t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F);
2891 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2892 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
2893 return -ETIMEDOUT;
2894
2895 /* write CRC */
2896 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2897 t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F);
2898 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2899 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
2900 return -ETIMEDOUT;
2901 }
2902 #undef EPIO_REG
2903
2904 t4_set_reg_field(adap, port_cfg_reg, 0, PATEN_F);
2905 return 0;
2906 }
2907
2908 /* t4_mk_filtdelwr - create a delete filter WR
2909 * @ftid: the filter ID
2910 * @wr: the filter work request to populate
2911 * @qid: ingress queue to receive the delete notification
2912 *
2913 * Creates a filter work request to delete the supplied filter. If @qid is
2914 * negative the delete notification is suppressed.
2915 */
2916 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
2917 {
2918 memset(wr, 0, sizeof(*wr));
2919 wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
2920 wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16));
2921 wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) |
2922 FW_FILTER_WR_NOREPLY_V(qid < 0));
2923 wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F);
2924 if (qid >= 0)
2925 wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid));
2926 }
2927
2928 #define INIT_CMD(var, cmd, rd_wr) do { \
2929 (var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \
2930 FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \
2931 (var).retval_len16 = htonl(FW_LEN16(var)); \
2932 } while (0)
2933
2934 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2935 u32 addr, u32 val)
2936 {
2937 struct fw_ldst_cmd c;
2938
2939 memset(&c, 0, sizeof(c));
2940 c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
2941 FW_CMD_WRITE_F |
2942 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE));
2943 c.cycles_to_len16 = htonl(FW_LEN16(c));
2944 c.u.addrval.addr = htonl(addr);
2945 c.u.addrval.val = htonl(val);
2946
2947 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2948 }
2949
2950 /**
2951 * t4_mdio_rd - read a PHY register through MDIO
2952 * @adap: the adapter
2953 * @mbox: mailbox to use for the FW command
2954 * @phy_addr: the PHY address
2955 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2956 * @reg: the register to read
2957 * @valp: where to store the value
2958 *
2959 * Issues a FW command through the given mailbox to read a PHY register.
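 *
 * Illustrative usage (MDIO_MMD_PMAPMD and MDIO_STAT1 are the standard
 * <linux/mdio.h> constants; adap->mbox stands in for whatever mailbox the
 * caller owns):
 *
 *	u16 stat;
 *	int ret = t4_mdio_rd(adap, adap->mbox, phy_addr, MDIO_MMD_PMAPMD,
 *			     MDIO_STAT1, &stat);
 *
 * On success the 16-bit register value is returned through @valp.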
2960 */ 2961 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 2962 unsigned int mmd, unsigned int reg, u16 *valp) 2963 { 2964 int ret; 2965 struct fw_ldst_cmd c; 2966 2967 memset(&c, 0, sizeof(c)); 2968 c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | 2969 FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO)); 2970 c.cycles_to_len16 = htonl(FW_LEN16(c)); 2971 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) | 2972 FW_LDST_CMD_MMD_V(mmd)); 2973 c.u.mdio.raddr = htons(reg); 2974 2975 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 2976 if (ret == 0) 2977 *valp = ntohs(c.u.mdio.rval); 2978 return ret; 2979 } 2980 2981 /** 2982 * t4_mdio_wr - write a PHY register through MDIO 2983 * @adap: the adapter 2984 * @mbox: mailbox to use for the FW command 2985 * @phy_addr: the PHY address 2986 * @mmd: the PHY MMD to access (0 for clause 22 PHYs) 2987 * @reg: the register to write 2988 * @valp: value to write 2989 * 2990 * Issues a FW command through the given mailbox to write a PHY register. 2991 */ 2992 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 2993 unsigned int mmd, unsigned int reg, u16 val) 2994 { 2995 struct fw_ldst_cmd c; 2996 2997 memset(&c, 0, sizeof(c)); 2998 c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | 2999 FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO)); 3000 c.cycles_to_len16 = htonl(FW_LEN16(c)); 3001 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) | 3002 FW_LDST_CMD_MMD_V(mmd)); 3003 c.u.mdio.raddr = htons(reg); 3004 c.u.mdio.rval = htons(val); 3005 3006 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3007 } 3008 3009 /** 3010 * t4_sge_decode_idma_state - decode the idma state 3011 * @adap: the adapter 3012 * @state: the state idma is stuck in 3013 */ 3014 void t4_sge_decode_idma_state(struct adapter *adapter, int state) 3015 { 3016 static const char * const t4_decode[] = { 3017 "IDMA_IDLE", 3018 "IDMA_PUSH_MORE_CPL_FIFO", 3019 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", 3020 "Not used", 3021 "IDMA_PHYSADDR_SEND_PCIEHDR", 3022 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", 3023 "IDMA_PHYSADDR_SEND_PAYLOAD", 3024 "IDMA_SEND_FIFO_TO_IMSG", 3025 "IDMA_FL_REQ_DATA_FL_PREP", 3026 "IDMA_FL_REQ_DATA_FL", 3027 "IDMA_FL_DROP", 3028 "IDMA_FL_H_REQ_HEADER_FL", 3029 "IDMA_FL_H_SEND_PCIEHDR", 3030 "IDMA_FL_H_PUSH_CPL_FIFO", 3031 "IDMA_FL_H_SEND_CPL", 3032 "IDMA_FL_H_SEND_IP_HDR_FIRST", 3033 "IDMA_FL_H_SEND_IP_HDR", 3034 "IDMA_FL_H_REQ_NEXT_HEADER_FL", 3035 "IDMA_FL_H_SEND_NEXT_PCIEHDR", 3036 "IDMA_FL_H_SEND_IP_HDR_PADDING", 3037 "IDMA_FL_D_SEND_PCIEHDR", 3038 "IDMA_FL_D_SEND_CPL_AND_IP_HDR", 3039 "IDMA_FL_D_REQ_NEXT_DATA_FL", 3040 "IDMA_FL_SEND_PCIEHDR", 3041 "IDMA_FL_PUSH_CPL_FIFO", 3042 "IDMA_FL_SEND_CPL", 3043 "IDMA_FL_SEND_PAYLOAD_FIRST", 3044 "IDMA_FL_SEND_PAYLOAD", 3045 "IDMA_FL_REQ_NEXT_DATA_FL", 3046 "IDMA_FL_SEND_NEXT_PCIEHDR", 3047 "IDMA_FL_SEND_PADDING", 3048 "IDMA_FL_SEND_COMPLETION_TO_IMSG", 3049 "IDMA_FL_SEND_FIFO_TO_IMSG", 3050 "IDMA_FL_REQ_DATAFL_DONE", 3051 "IDMA_FL_REQ_HEADERFL_DONE", 3052 }; 3053 static const char * const t5_decode[] = { 3054 "IDMA_IDLE", 3055 "IDMA_ALMOST_IDLE", 3056 "IDMA_PUSH_MORE_CPL_FIFO", 3057 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", 3058 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR", 3059 "IDMA_PHYSADDR_SEND_PCIEHDR", 3060 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", 3061 "IDMA_PHYSADDR_SEND_PAYLOAD", 3062 "IDMA_SEND_FIFO_TO_IMSG", 3063 "IDMA_FL_REQ_DATA_FL", 3064 "IDMA_FL_DROP", 3065 "IDMA_FL_DROP_SEND_INC", 3066 "IDMA_FL_H_REQ_HEADER_FL", 
3067 "IDMA_FL_H_SEND_PCIEHDR", 3068 "IDMA_FL_H_PUSH_CPL_FIFO", 3069 "IDMA_FL_H_SEND_CPL", 3070 "IDMA_FL_H_SEND_IP_HDR_FIRST", 3071 "IDMA_FL_H_SEND_IP_HDR", 3072 "IDMA_FL_H_REQ_NEXT_HEADER_FL", 3073 "IDMA_FL_H_SEND_NEXT_PCIEHDR", 3074 "IDMA_FL_H_SEND_IP_HDR_PADDING", 3075 "IDMA_FL_D_SEND_PCIEHDR", 3076 "IDMA_FL_D_SEND_CPL_AND_IP_HDR", 3077 "IDMA_FL_D_REQ_NEXT_DATA_FL", 3078 "IDMA_FL_SEND_PCIEHDR", 3079 "IDMA_FL_PUSH_CPL_FIFO", 3080 "IDMA_FL_SEND_CPL", 3081 "IDMA_FL_SEND_PAYLOAD_FIRST", 3082 "IDMA_FL_SEND_PAYLOAD", 3083 "IDMA_FL_REQ_NEXT_DATA_FL", 3084 "IDMA_FL_SEND_NEXT_PCIEHDR", 3085 "IDMA_FL_SEND_PADDING", 3086 "IDMA_FL_SEND_COMPLETION_TO_IMSG", 3087 }; 3088 static const u32 sge_regs[] = { 3089 SGE_DEBUG_DATA_LOW_INDEX_2_A, 3090 SGE_DEBUG_DATA_LOW_INDEX_3_A, 3091 SGE_DEBUG_DATA_HIGH_INDEX_10_A, 3092 }; 3093 const char **sge_idma_decode; 3094 int sge_idma_decode_nstates; 3095 int i; 3096 3097 if (is_t4(adapter->params.chip)) { 3098 sge_idma_decode = (const char **)t4_decode; 3099 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode); 3100 } else { 3101 sge_idma_decode = (const char **)t5_decode; 3102 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode); 3103 } 3104 3105 if (state < sge_idma_decode_nstates) 3106 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]); 3107 else 3108 CH_WARN(adapter, "idma state %d unknown\n", state); 3109 3110 for (i = 0; i < ARRAY_SIZE(sge_regs); i++) 3111 CH_WARN(adapter, "SGE register %#x value %#x\n", 3112 sge_regs[i], t4_read_reg(adapter, sge_regs[i])); 3113 } 3114 3115 /** 3116 * t4_fw_hello - establish communication with FW 3117 * @adap: the adapter 3118 * @mbox: mailbox to use for the FW command 3119 * @evt_mbox: mailbox to receive async FW events 3120 * @master: specifies the caller's willingness to be the device master 3121 * @state: returns the current device state (if non-NULL) 3122 * 3123 * Issues a command to establish communication with FW. Returns either 3124 * an error (negative integer) or the mailbox of the Master PF. 3125 */ 3126 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, 3127 enum dev_master master, enum dev_state *state) 3128 { 3129 int ret; 3130 struct fw_hello_cmd c; 3131 u32 v; 3132 unsigned int master_mbox; 3133 int retries = FW_CMD_HELLO_RETRIES; 3134 3135 retry: 3136 memset(&c, 0, sizeof(c)); 3137 INIT_CMD(c, HELLO, WRITE); 3138 c.err_to_clearinit = htonl( 3139 FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) | 3140 FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) | 3141 FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox : 3142 FW_HELLO_CMD_MBMASTER_M) | 3143 FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) | 3144 FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) | 3145 FW_HELLO_CMD_CLEARINIT_F); 3146 3147 /* 3148 * Issue the HELLO command to the firmware. If it's not successful 3149 * but indicates that we got a "busy" or "timeout" condition, retry 3150 * the HELLO until we exhaust our retry limit. If we do exceed our 3151 * retry limit, check to see if the firmware left us any error 3152 * information and report that if so. 
3153 */ 3154 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3155 if (ret < 0) { 3156 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) 3157 goto retry; 3158 if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F) 3159 t4_report_fw_error(adap); 3160 return ret; 3161 } 3162 3163 v = ntohl(c.err_to_clearinit); 3164 master_mbox = FW_HELLO_CMD_MBMASTER_G(v); 3165 if (state) { 3166 if (v & FW_HELLO_CMD_ERR_F) 3167 *state = DEV_STATE_ERR; 3168 else if (v & FW_HELLO_CMD_INIT_F) 3169 *state = DEV_STATE_INIT; 3170 else 3171 *state = DEV_STATE_UNINIT; 3172 } 3173 3174 /* 3175 * If we're not the Master PF then we need to wait around for the 3176 * Master PF Driver to finish setting up the adapter. 3177 * 3178 * Note that we also do this wait if we're a non-Master-capable PF and 3179 * there is no current Master PF; a Master PF may show up momentarily 3180 * and we wouldn't want to fail pointlessly. (This can happen when an 3181 * OS loads lots of different drivers rapidly at the same time). In 3182 * this case, the Master PF returned by the firmware will be 3183 * PCIE_FW_MASTER_M so the test below will work ... 3184 */ 3185 if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 && 3186 master_mbox != mbox) { 3187 int waiting = FW_CMD_HELLO_TIMEOUT; 3188 3189 /* 3190 * Wait for the firmware to either indicate an error or 3191 * initialized state. If we see either of these we bail out 3192 * and report the issue to the caller. If we exhaust the 3193 * "hello timeout" and we haven't exhausted our retries, try 3194 * again. Otherwise bail with a timeout error. 3195 */ 3196 for (;;) { 3197 u32 pcie_fw; 3198 3199 msleep(50); 3200 waiting -= 50; 3201 3202 /* 3203 * If neither Error nor Initialialized are indicated 3204 * by the firmware keep waiting till we exaust our 3205 * timeout ... and then retry if we haven't exhausted 3206 * our retries ... 3207 */ 3208 pcie_fw = t4_read_reg(adap, PCIE_FW_A); 3209 if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) { 3210 if (waiting <= 0) { 3211 if (retries-- > 0) 3212 goto retry; 3213 3214 return -ETIMEDOUT; 3215 } 3216 continue; 3217 } 3218 3219 /* 3220 * We either have an Error or Initialized condition 3221 * report errors preferentially. 3222 */ 3223 if (state) { 3224 if (pcie_fw & PCIE_FW_ERR_F) 3225 *state = DEV_STATE_ERR; 3226 else if (pcie_fw & PCIE_FW_INIT_F) 3227 *state = DEV_STATE_INIT; 3228 } 3229 3230 /* 3231 * If we arrived before a Master PF was selected and 3232 * there's not a valid Master PF, grab its identity 3233 * for our caller. 3234 */ 3235 if (master_mbox == PCIE_FW_MASTER_M && 3236 (pcie_fw & PCIE_FW_MASTER_VLD_F)) 3237 master_mbox = PCIE_FW_MASTER_G(pcie_fw); 3238 break; 3239 } 3240 } 3241 3242 return master_mbox; 3243 } 3244 3245 /** 3246 * t4_fw_bye - end communication with FW 3247 * @adap: the adapter 3248 * @mbox: mailbox to use for the FW command 3249 * 3250 * Issues a command to terminate communication with FW. 3251 */ 3252 int t4_fw_bye(struct adapter *adap, unsigned int mbox) 3253 { 3254 struct fw_bye_cmd c; 3255 3256 memset(&c, 0, sizeof(c)); 3257 INIT_CMD(c, BYE, WRITE); 3258 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3259 } 3260 3261 /** 3262 * t4_init_cmd - ask FW to initialize the device 3263 * @adap: the adapter 3264 * @mbox: mailbox to use for the FW command 3265 * 3266 * Issues a command to FW to partially initialize the device. This 3267 * performs initialization that generally doesn't depend on user input. 
3268 */ 3269 int t4_early_init(struct adapter *adap, unsigned int mbox) 3270 { 3271 struct fw_initialize_cmd c; 3272 3273 memset(&c, 0, sizeof(c)); 3274 INIT_CMD(c, INITIALIZE, WRITE); 3275 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3276 } 3277 3278 /** 3279 * t4_fw_reset - issue a reset to FW 3280 * @adap: the adapter 3281 * @mbox: mailbox to use for the FW command 3282 * @reset: specifies the type of reset to perform 3283 * 3284 * Issues a reset command of the specified type to FW. 3285 */ 3286 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) 3287 { 3288 struct fw_reset_cmd c; 3289 3290 memset(&c, 0, sizeof(c)); 3291 INIT_CMD(c, RESET, WRITE); 3292 c.val = htonl(reset); 3293 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3294 } 3295 3296 /** 3297 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET 3298 * @adap: the adapter 3299 * @mbox: mailbox to use for the FW RESET command (if desired) 3300 * @force: force uP into RESET even if FW RESET command fails 3301 * 3302 * Issues a RESET command to firmware (if desired) with a HALT indication 3303 * and then puts the microprocessor into RESET state. The RESET command 3304 * will only be issued if a legitimate mailbox is provided (mbox <= 3305 * PCIE_FW_MASTER_M). 3306 * 3307 * This is generally used in order for the host to safely manipulate the 3308 * adapter without fear of conflicting with whatever the firmware might 3309 * be doing. The only way out of this state is to RESTART the firmware 3310 * ... 3311 */ 3312 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) 3313 { 3314 int ret = 0; 3315 3316 /* 3317 * If a legitimate mailbox is provided, issue a RESET command 3318 * with a HALT indication. 3319 */ 3320 if (mbox <= PCIE_FW_MASTER_M) { 3321 struct fw_reset_cmd c; 3322 3323 memset(&c, 0, sizeof(c)); 3324 INIT_CMD(c, RESET, WRITE); 3325 c.val = htonl(PIORST_F | PIORSTMODE_F); 3326 c.halt_pkd = htonl(FW_RESET_CMD_HALT_F); 3327 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3328 } 3329 3330 /* 3331 * Normally we won't complete the operation if the firmware RESET 3332 * command fails but if our caller insists we'll go ahead and put the 3333 * uP into RESET. This can be useful if the firmware is hung or even 3334 * missing ... We'll have to take the risk of putting the uP into 3335 * RESET without the cooperation of firmware in that case. 3336 * 3337 * We also force the firmware's HALT flag to be on in case we bypassed 3338 * the firmware RESET command above or we're dealing with old firmware 3339 * which doesn't have the HALT capability. This will serve as a flag 3340 * for the incoming firmware to know that it's coming out of a HALT 3341 * rather than a RESET ... if it's new enough to understand that ... 3342 */ 3343 if (ret == 0 || force) { 3344 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F); 3345 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 3346 PCIE_FW_HALT_F); 3347 } 3348 3349 /* 3350 * And we always return the result of the firmware RESET command 3351 * even when we force the uP into RESET ... 3352 */ 3353 return ret; 3354 } 3355 3356 /** 3357 * t4_fw_restart - restart the firmware by taking the uP out of RESET 3358 * @adap: the adapter 3359 * @reset: if we want to do a RESET to restart things 3360 * 3361 * Restart firmware previously halted by t4_fw_halt(). On successful 3362 * return the previous PF Master remains as the new PF Master and there 3363 * is no need to issue a new HELLO command, etc. 
3364 * 3365 * We do this in two ways: 3366 * 3367 * 1. If we're dealing with newer firmware we'll simply want to take 3368 * the chip's microprocessor out of RESET. This will cause the 3369 * firmware to start up from its start vector. And then we'll loop 3370 * until the firmware indicates it's started again (PCIE_FW.HALT 3371 * reset to 0) or we timeout. 3372 * 3373 * 2. If we're dealing with older firmware then we'll need to RESET 3374 * the chip since older firmware won't recognize the PCIE_FW.HALT 3375 * flag and automatically RESET itself on startup. 3376 */ 3377 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) 3378 { 3379 if (reset) { 3380 /* 3381 * Since we're directing the RESET instead of the firmware 3382 * doing it automatically, we need to clear the PCIE_FW.HALT 3383 * bit. 3384 */ 3385 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0); 3386 3387 /* 3388 * If we've been given a valid mailbox, first try to get the 3389 * firmware to do the RESET. If that works, great and we can 3390 * return success. Otherwise, if we haven't been given a 3391 * valid mailbox or the RESET command failed, fall back to 3392 * hitting the chip with a hammer. 3393 */ 3394 if (mbox <= PCIE_FW_MASTER_M) { 3395 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0); 3396 msleep(100); 3397 if (t4_fw_reset(adap, mbox, 3398 PIORST_F | PIORSTMODE_F) == 0) 3399 return 0; 3400 } 3401 3402 t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F); 3403 msleep(2000); 3404 } else { 3405 int ms; 3406 3407 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0); 3408 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { 3409 if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F)) 3410 return 0; 3411 msleep(100); 3412 ms += 100; 3413 } 3414 return -ETIMEDOUT; 3415 } 3416 return 0; 3417 } 3418 3419 /** 3420 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW 3421 * @adap: the adapter 3422 * @mbox: mailbox to use for the FW RESET command (if desired) 3423 * @fw_data: the firmware image to write 3424 * @size: image size 3425 * @force: force upgrade even if firmware doesn't cooperate 3426 * 3427 * Perform all of the steps necessary for upgrading an adapter's 3428 * firmware image. Normally this requires the cooperation of the 3429 * existing firmware in order to halt all existing activities 3430 * but if an invalid mailbox token is passed in we skip that step 3431 * (though we'll still put the adapter microprocessor into RESET in 3432 * that case). 3433 * 3434 * On successful return the new firmware will have been loaded and 3435 * the adapter will have been fully RESET losing all previous setup 3436 * state. On unsuccessful return the adapter may be completely hosed ... 3437 * positive errno indicates that the adapter is ~probably~ intact, a 3438 * negative errno indicates that things are looking bad ... 3439 */ 3440 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, 3441 const u8 *fw_data, unsigned int size, int force) 3442 { 3443 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; 3444 int reset, ret; 3445 3446 if (!t4_fw_matches_chip(adap, fw_hdr)) 3447 return -EINVAL; 3448 3449 ret = t4_fw_halt(adap, mbox, force); 3450 if (ret < 0 && !force) 3451 return ret; 3452 3453 ret = t4_load_fw(adap, fw_data, size); 3454 if (ret < 0) 3455 return ret; 3456 3457 /* 3458 * Older versions of the firmware don't understand the new 3459 * PCIE_FW.HALT flag and so won't know to perform a RESET when they 3460 * restart. 
So for newly loaded older firmware we'll have to do the 3461 * RESET for it so it starts up on a clean slate. We can tell if 3462 * the newly loaded firmware will handle this right by checking 3463 * its header flags to see if it advertises the capability. 3464 */ 3465 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); 3466 return t4_fw_restart(adap, mbox, reset); 3467 } 3468 3469 /** 3470 * t4_fixup_host_params - fix up host-dependent parameters 3471 * @adap: the adapter 3472 * @page_size: the host's Base Page Size 3473 * @cache_line_size: the host's Cache Line Size 3474 * 3475 * Various registers in T4 contain values which are dependent on the 3476 * host's Base Page and Cache Line Sizes. This function will fix all of 3477 * those registers with the appropriate values as passed in ... 3478 */ 3479 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, 3480 unsigned int cache_line_size) 3481 { 3482 unsigned int page_shift = fls(page_size) - 1; 3483 unsigned int sge_hps = page_shift - 10; 3484 unsigned int stat_len = cache_line_size > 64 ? 128 : 64; 3485 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size; 3486 unsigned int fl_align_log = fls(fl_align) - 1; 3487 3488 t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A, 3489 HOSTPAGESIZEPF0_V(sge_hps) | 3490 HOSTPAGESIZEPF1_V(sge_hps) | 3491 HOSTPAGESIZEPF2_V(sge_hps) | 3492 HOSTPAGESIZEPF3_V(sge_hps) | 3493 HOSTPAGESIZEPF4_V(sge_hps) | 3494 HOSTPAGESIZEPF5_V(sge_hps) | 3495 HOSTPAGESIZEPF6_V(sge_hps) | 3496 HOSTPAGESIZEPF7_V(sge_hps)); 3497 3498 if (is_t4(adap->params.chip)) { 3499 t4_set_reg_field(adap, SGE_CONTROL_A, 3500 INGPADBOUNDARY_V(INGPADBOUNDARY_M) | 3501 EGRSTATUSPAGESIZE_F, 3502 INGPADBOUNDARY_V(fl_align_log - 3503 INGPADBOUNDARY_SHIFT_X) | 3504 EGRSTATUSPAGESIZE_V(stat_len != 64)); 3505 } else { 3506 /* T5 introduced the separation of the Free List Padding and 3507 * Packing Boundaries. Thus, we can select a smaller Padding 3508 * Boundary to avoid uselessly chewing up PCIe Link and Memory 3509 * Bandwidth, and use a Packing Boundary which is large enough 3510 * to avoid false sharing between CPUs, etc. 3511 * 3512 * For the PCI Link, the smaller the Padding Boundary the 3513 * better. For the Memory Controller, a smaller Padding 3514 * Boundary is better until we cross under the Memory Line 3515 * Size (the minimum unit of transfer to/from Memory). If we 3516 * have a Padding Boundary which is smaller than the Memory 3517 * Line Size, that'll involve a Read-Modify-Write cycle on the 3518 * Memory Controller which is never good. For T5 the smallest 3519 * Padding Boundary which we can select is 32 bytes which is 3520 * larger than any known Memory Controller Line Size so we'll 3521 * use that. 3522 * 3523 * T5 has a different interpretation of the "0" value for the 3524 * Packing Boundary. This corresponds to 16 bytes instead of 3525 * the expected 32 bytes. We never have a Packing Boundary 3526 * less than 32 bytes so we can't use that special value but 3527 * on the other hand, if we wanted 32 bytes, the best we can 3528 * really do is 64 bytes. 
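 *
 * As a worked example (illustrative only): a host with 4KB pages and
 * 64-byte cache lines arrives here with page_shift = 12, sge_hps = 2,
 * stat_len = 64 and fl_align/fl_align_log = 64/6, so the code below
 * leaves fl_align alone and programs a Packing Boundary of
 * fl_align_log - INGPACKBOUNDARY_SHIFT_X.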
3529 */ 3530 if (fl_align <= 32) { 3531 fl_align = 64; 3532 fl_align_log = 6; 3533 } 3534 t4_set_reg_field(adap, SGE_CONTROL_A, 3535 INGPADBOUNDARY_V(INGPADBOUNDARY_M) | 3536 EGRSTATUSPAGESIZE_F, 3537 INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) | 3538 EGRSTATUSPAGESIZE_V(stat_len != 64)); 3539 t4_set_reg_field(adap, SGE_CONTROL2_A, 3540 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M), 3541 INGPACKBOUNDARY_V(fl_align_log - 3542 INGPACKBOUNDARY_SHIFT_X)); 3543 }
3544 /* 3545 * Adjust various SGE Free List Host Buffer Sizes. 3546 * 3547 * This is something of a crock since we're using fixed indices into 3548 * the array which are also known by the sge.c code and the T4 3549 * Firmware Configuration File. We need to come up with a much better 3550 * approach to managing this array. For now, the first four entries 3551 * are: 3552 * 3553 * 0: Host Page Size 3554 * 1: 64KB 3555 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode) 3556 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode) 3557 * 3558 * For the single-MTU buffers in unpacked mode we need to include 3559 * space for the SGE Control Packet Shift, 14 byte Ethernet header, 3560 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet 3561 * Padding boundary. All of these are accommodated in the Factory 3562 * Default Firmware Configuration File but we need to adjust it for 3563 * this host's cache line size. 3564 */ 3565 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size); 3566 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A, 3567 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1) 3568 & ~(fl_align-1)); 3569 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A, 3570 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1) 3571 & ~(fl_align-1)); 3572
3573 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12)); 3574 3575 return 0; 3576 } 3577
3578 /** 3579 * t4_fw_initialize - ask FW to initialize the device 3580 * @adap: the adapter 3581 * @mbox: mailbox to use for the FW command 3582 * 3583 * Issues a command to FW to partially initialize the device. This 3584 * performs initialization that generally doesn't depend on user input. 3585 */ 3586 int t4_fw_initialize(struct adapter *adap, unsigned int mbox) 3587 { 3588 struct fw_initialize_cmd c; 3589 3590 memset(&c, 0, sizeof(c)); 3591 INIT_CMD(c, INITIALIZE, WRITE); 3592 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3593 } 3594
3595 /** 3596 * t4_query_params - query FW or device parameters 3597 * @adap: the adapter 3598 * @mbox: mailbox to use for the FW command 3599 * @pf: the PF 3600 * @vf: the VF 3601 * @nparams: the number of parameters 3602 * @params: the parameter names 3603 * @val: the parameter values 3604 * 3605 * Reads the value of FW or device parameters. Up to 7 parameters can be 3606 * queried at once.
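 *
 * A minimal usage sketch (illustrative only); the parameter word is
 * assumed to be built with the FW_PARAMS_* helpers from t4fw_api.h:
 *
 *	u32 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
 *		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	u32 val;
 *
 *	ret = t4_query_params(adap, mbox, pf, 0, 1, &param, &val);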
3607 */ 3608 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 3609 unsigned int vf, unsigned int nparams, const u32 *params, 3610 u32 *val) 3611 { 3612 int i, ret; 3613 struct fw_params_cmd c; 3614 __be32 *p = &c.param[0].mnem; 3615 3616 if (nparams > 7) 3617 return -EINVAL; 3618 3619 memset(&c, 0, sizeof(c)); 3620 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F | 3621 FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) | 3622 FW_PARAMS_CMD_VFN_V(vf)); 3623 c.retval_len16 = htonl(FW_LEN16(c)); 3624 for (i = 0; i < nparams; i++, p += 2) 3625 *p = htonl(*params++); 3626 3627 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3628 if (ret == 0) 3629 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) 3630 *val++ = ntohl(*p); 3631 return ret; 3632 } 3633 3634 /** 3635 * t4_set_params_nosleep - sets FW or device parameters 3636 * @adap: the adapter 3637 * @mbox: mailbox to use for the FW command 3638 * @pf: the PF 3639 * @vf: the VF 3640 * @nparams: the number of parameters 3641 * @params: the parameter names 3642 * @val: the parameter values 3643 * 3644 * Does not ever sleep 3645 * Sets the value of FW or device parameters. Up to 7 parameters can be 3646 * specified at once. 3647 */ 3648 int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox, 3649 unsigned int pf, unsigned int vf, 3650 unsigned int nparams, const u32 *params, 3651 const u32 *val) 3652 { 3653 struct fw_params_cmd c; 3654 __be32 *p = &c.param[0].mnem; 3655 3656 if (nparams > 7) 3657 return -EINVAL; 3658 3659 memset(&c, 0, sizeof(c)); 3660 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | 3661 FW_CMD_REQUEST_F | FW_CMD_WRITE_F | 3662 FW_PARAMS_CMD_PFN_V(pf) | 3663 FW_PARAMS_CMD_VFN_V(vf)); 3664 c.retval_len16 = cpu_to_be32(FW_LEN16(c)); 3665 3666 while (nparams--) { 3667 *p++ = cpu_to_be32(*params++); 3668 *p++ = cpu_to_be32(*val++); 3669 } 3670 3671 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); 3672 } 3673 3674 /** 3675 * t4_set_params - sets FW or device parameters 3676 * @adap: the adapter 3677 * @mbox: mailbox to use for the FW command 3678 * @pf: the PF 3679 * @vf: the VF 3680 * @nparams: the number of parameters 3681 * @params: the parameter names 3682 * @val: the parameter values 3683 * 3684 * Sets the value of FW or device parameters. Up to 7 parameters can be 3685 * specified at once. 
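 *
 * Usage mirrors t4_query_params(); an illustrative sketch with
 * placeholder values:
 *
 *	u32 param = <parameter mnemonic>, val = <new value>;
 *
 *	ret = t4_set_params(adap, mbox, pf, 0, 1, &param, &val);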
3686 */ 3687 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 3688 unsigned int vf, unsigned int nparams, const u32 *params, 3689 const u32 *val) 3690 { 3691 struct fw_params_cmd c; 3692 __be32 *p = &c.param[0].mnem; 3693 3694 if (nparams > 7) 3695 return -EINVAL; 3696 3697 memset(&c, 0, sizeof(c)); 3698 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F | 3699 FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) | 3700 FW_PARAMS_CMD_VFN_V(vf)); 3701 c.retval_len16 = htonl(FW_LEN16(c)); 3702 while (nparams--) { 3703 *p++ = htonl(*params++); 3704 *p++ = htonl(*val++); 3705 } 3706 3707 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3708 } 3709 3710 /** 3711 * t4_cfg_pfvf - configure PF/VF resource limits 3712 * @adap: the adapter 3713 * @mbox: mailbox to use for the FW command 3714 * @pf: the PF being configured 3715 * @vf: the VF being configured 3716 * @txq: the max number of egress queues 3717 * @txq_eth_ctrl: the max number of egress Ethernet or control queues 3718 * @rxqi: the max number of interrupt-capable ingress queues 3719 * @rxq: the max number of interruptless ingress queues 3720 * @tc: the PCI traffic class 3721 * @vi: the max number of virtual interfaces 3722 * @cmask: the channel access rights mask for the PF/VF 3723 * @pmask: the port access rights mask for the PF/VF 3724 * @nexact: the maximum number of exact MPS filters 3725 * @rcaps: read capabilities 3726 * @wxcaps: write/execute capabilities 3727 * 3728 * Configures resource limits and capabilities for a physical or virtual 3729 * function. 3730 */ 3731 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, 3732 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, 3733 unsigned int rxqi, unsigned int rxq, unsigned int tc, 3734 unsigned int vi, unsigned int cmask, unsigned int pmask, 3735 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) 3736 { 3737 struct fw_pfvf_cmd c; 3738 3739 memset(&c, 0, sizeof(c)); 3740 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F | 3741 FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) | 3742 FW_PFVF_CMD_VFN_V(vf)); 3743 c.retval_len16 = htonl(FW_LEN16(c)); 3744 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) | 3745 FW_PFVF_CMD_NIQ_V(rxq)); 3746 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) | 3747 FW_PFVF_CMD_PMASK_V(pmask) | 3748 FW_PFVF_CMD_NEQ_V(txq)); 3749 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) | 3750 FW_PFVF_CMD_NEXACTF_V(nexact)); 3751 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) | 3752 FW_PFVF_CMD_WX_CAPS_V(wxcaps) | 3753 FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl)); 3754 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3755 } 3756 3757 /** 3758 * t4_alloc_vi - allocate a virtual interface 3759 * @adap: the adapter 3760 * @mbox: mailbox to use for the FW command 3761 * @port: physical port associated with the VI 3762 * @pf: the PF owning the VI 3763 * @vf: the VF owning the VI 3764 * @nmac: number of MAC addresses needed (1 to 5) 3765 * @mac: the MAC addresses of the VI 3766 * @rss_size: size of RSS table slice associated with this VI 3767 * 3768 * Allocates a virtual interface for the given physical port. If @mac is 3769 * not %NULL it contains the MAC addresses of the VI as assigned by FW. 3770 * @mac should be large enough to hold @nmac Ethernet addresses, they are 3771 * stored consecutively so the space needed is @nmac * 6 bytes. 3772 * Returns a negative error number or the non-negative VI id. 
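 *
 * A minimal usage sketch (illustrative only), requesting a single MAC
 * address for the new VI:
 *
 *	u8 mac[ETH_ALEN];
 *	unsigned int rss_size;
 *	int viid;
 *
 *	viid = t4_alloc_vi(adap, mbox, port, pf, 0, 1, mac, &rss_size);
 *	if (viid < 0)
 *		return viid;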
3773 */ 3774 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, 3775 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, 3776 unsigned int *rss_size) 3777 { 3778 int ret; 3779 struct fw_vi_cmd c; 3780 3781 memset(&c, 0, sizeof(c)); 3782 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F | 3783 FW_CMD_WRITE_F | FW_CMD_EXEC_F | 3784 FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf)); 3785 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC_F | FW_LEN16(c)); 3786 c.portid_pkd = FW_VI_CMD_PORTID_V(port); 3787 c.nmac = nmac - 1; 3788 3789 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3790 if (ret) 3791 return ret; 3792 3793 if (mac) { 3794 memcpy(mac, c.mac, sizeof(c.mac)); 3795 switch (nmac) { 3796 case 5: 3797 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); 3798 case 4: 3799 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); 3800 case 3: 3801 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); 3802 case 2: 3803 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); 3804 } 3805 } 3806 if (rss_size) 3807 *rss_size = FW_VI_CMD_RSSSIZE_G(ntohs(c.rsssize_pkd)); 3808 return FW_VI_CMD_VIID_G(ntohs(c.type_viid)); 3809 } 3810 3811 /** 3812 * t4_set_rxmode - set Rx properties of a virtual interface 3813 * @adap: the adapter 3814 * @mbox: mailbox to use for the FW command 3815 * @viid: the VI id 3816 * @mtu: the new MTU or -1 3817 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change 3818 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change 3819 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change 3820 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change 3821 * @sleep_ok: if true we may sleep while awaiting command completion 3822 * 3823 * Sets Rx properties of a virtual interface. 3824 */ 3825 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, 3826 int mtu, int promisc, int all_multi, int bcast, int vlanex, 3827 bool sleep_ok) 3828 { 3829 struct fw_vi_rxmode_cmd c; 3830 3831 /* convert to FW values */ 3832 if (mtu < 0) 3833 mtu = FW_RXMODE_MTU_NO_CHG; 3834 if (promisc < 0) 3835 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M; 3836 if (all_multi < 0) 3837 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M; 3838 if (bcast < 0) 3839 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M; 3840 if (vlanex < 0) 3841 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M; 3842 3843 memset(&c, 0, sizeof(c)); 3844 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F | 3845 FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID_V(viid)); 3846 c.retval_len16 = htonl(FW_LEN16(c)); 3847 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU_V(mtu) | 3848 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) | 3849 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) | 3850 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) | 3851 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex)); 3852 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 3853 } 3854 3855 /** 3856 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses 3857 * @adap: the adapter 3858 * @mbox: mailbox to use for the FW command 3859 * @viid: the VI id 3860 * @free: if true any existing filters for this VI id are first removed 3861 * @naddr: the number of MAC addresses to allocate filters for (up to 7) 3862 * @addr: the MAC address(es) 3863 * @idx: where to store the index of each allocated filter 3864 * @hash: pointer to hash address filter bitmap 3865 * @sleep_ok: call is allowed to sleep 3866 * 3867 * Allocates an exact-match filter for each of the supplied addresses and 3868 * sets it to the corresponding address. 
If @idx is not %NULL it should 3869 * have at least @naddr entries, each of which will be set to the index of 3870 * the filter allocated for the corresponding MAC address. If a filter 3871 * could not be allocated for an address its index is set to 0xffff. 3872 * If @hash is not %NULL addresses that fail to allocate an exact filter 3873 * are hashed and update the hash filter bitmap pointed at by @hash. 3874 * 3875 * Returns a negative error number or the number of filters allocated. 3876 */ 3877 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, 3878 unsigned int viid, bool free, unsigned int naddr, 3879 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) 3880 { 3881 int i, ret; 3882 struct fw_vi_mac_cmd c; 3883 struct fw_vi_mac_exact *p; 3884 unsigned int max_naddr = is_t4(adap->params.chip) ? 3885 NUM_MPS_CLS_SRAM_L_INSTANCES : 3886 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 3887 3888 if (naddr > 7) 3889 return -EINVAL; 3890 3891 memset(&c, 0, sizeof(c)); 3892 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F | 3893 FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) | 3894 FW_VI_MAC_CMD_VIID_V(viid)); 3895 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS_V(free) | 3896 FW_CMD_LEN16_V((naddr + 2) / 2)); 3897 3898 for (i = 0, p = c.u.exact; i < naddr; i++, p++) { 3899 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F | 3900 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC)); 3901 memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); 3902 } 3903 3904 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); 3905 if (ret) 3906 return ret; 3907 3908 for (i = 0, p = c.u.exact; i < naddr; i++, p++) { 3909 u16 index = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx)); 3910 3911 if (idx) 3912 idx[i] = index >= max_naddr ? 0xffff : index; 3913 if (index < max_naddr) 3914 ret++; 3915 else if (hash) 3916 *hash |= (1ULL << hash_mac_addr(addr[i])); 3917 } 3918 return ret; 3919 } 3920 3921 /** 3922 * t4_change_mac - modifies the exact-match filter for a MAC address 3923 * @adap: the adapter 3924 * @mbox: mailbox to use for the FW command 3925 * @viid: the VI id 3926 * @idx: index of existing filter for old value of MAC address, or -1 3927 * @addr: the new MAC address value 3928 * @persist: whether a new MAC allocation should be persistent 3929 * @add_smt: if true also add the address to the HW SMT 3930 * 3931 * Modifies an exact-match filter and sets it to the new MAC address. 3932 * Note that in general it is not possible to modify the value of a given 3933 * filter so the generic way to modify an address filter is to free the one 3934 * being used by the old address value and allocate a new filter for the 3935 * new address value. @idx can be -1 if the address is a new addition. 3936 * 3937 * Returns a negative error number or the index of the filter with the new 3938 * MAC value. 3939 */ 3940 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, 3941 int idx, const u8 *addr, bool persist, bool add_smt) 3942 { 3943 int ret, mode; 3944 struct fw_vi_mac_cmd c; 3945 struct fw_vi_mac_exact *p = c.u.exact; 3946 unsigned int max_mac_addr = is_t4(adap->params.chip) ? 3947 NUM_MPS_CLS_SRAM_L_INSTANCES : 3948 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 3949 3950 if (idx < 0) /* new allocation */ 3951 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; 3952 mode = add_smt ? 
FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; 3953 3954 memset(&c, 0, sizeof(c)); 3955 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F | 3956 FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID_V(viid)); 3957 c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1)); 3958 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F | 3959 FW_VI_MAC_CMD_SMAC_RESULT_V(mode) | 3960 FW_VI_MAC_CMD_IDX_V(idx)); 3961 memcpy(p->macaddr, addr, sizeof(p->macaddr)); 3962 3963 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3964 if (ret == 0) { 3965 ret = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx)); 3966 if (ret >= max_mac_addr) 3967 ret = -ENOMEM; 3968 } 3969 return ret; 3970 } 3971 3972 /** 3973 * t4_set_addr_hash - program the MAC inexact-match hash filter 3974 * @adap: the adapter 3975 * @mbox: mailbox to use for the FW command 3976 * @viid: the VI id 3977 * @ucast: whether the hash filter should also match unicast addresses 3978 * @vec: the value to be written to the hash filter 3979 * @sleep_ok: call is allowed to sleep 3980 * 3981 * Sets the 64-bit inexact-match hash filter for a virtual interface. 3982 */ 3983 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, 3984 bool ucast, u64 vec, bool sleep_ok) 3985 { 3986 struct fw_vi_mac_cmd c; 3987 3988 memset(&c, 0, sizeof(c)); 3989 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F | 3990 FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID_V(viid)); 3991 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN_F | 3992 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) | 3993 FW_CMD_LEN16_V(1)); 3994 c.u.hash.hashvec = cpu_to_be64(vec); 3995 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 3996 } 3997 3998 /** 3999 * t4_enable_vi_params - enable/disable a virtual interface 4000 * @adap: the adapter 4001 * @mbox: mailbox to use for the FW command 4002 * @viid: the VI id 4003 * @rx_en: 1=enable Rx, 0=disable Rx 4004 * @tx_en: 1=enable Tx, 0=disable Tx 4005 * @dcb_en: 1=enable delivery of Data Center Bridging messages. 4006 * 4007 * Enables/disables a virtual interface. Note that setting DCB Enable 4008 * only makes sense when enabling a Virtual Interface ... 4009 */ 4010 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, 4011 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en) 4012 { 4013 struct fw_vi_enable_cmd c; 4014 4015 memset(&c, 0, sizeof(c)); 4016 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F | 4017 FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid)); 4018 4019 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN_V(rx_en) | 4020 FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(c) | 4021 FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en)); 4022 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); 4023 } 4024 4025 /** 4026 * t4_enable_vi - enable/disable a virtual interface 4027 * @adap: the adapter 4028 * @mbox: mailbox to use for the FW command 4029 * @viid: the VI id 4030 * @rx_en: 1=enable Rx, 0=disable Rx 4031 * @tx_en: 1=enable Tx, 0=disable Tx 4032 * 4033 * Enables/disables a virtual interface. 4034 */ 4035 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, 4036 bool rx_en, bool tx_en) 4037 { 4038 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0); 4039 } 4040 4041 /** 4042 * t4_identify_port - identify a VI's port by blinking its LED 4043 * @adap: the adapter 4044 * @mbox: mailbox to use for the FW command 4045 * @viid: the VI id 4046 * @nblinks: how many times to blink LED at 2.5 Hz 4047 * 4048 * Identifies a VI's port by blinking its LED. 
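 *
 * For example (illustrative only), blinking a port's LED for roughly
 * ten seconds at 2.5 Hz amounts to 25 blinks:
 *
 *	t4_identify_port(adap, mbox, pi->viid, 25);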
4049 */ 4050 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, 4051 unsigned int nblinks) 4052 { 4053 struct fw_vi_enable_cmd c; 4054 4055 memset(&c, 0, sizeof(c)); 4056 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F | 4057 FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid)); 4058 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c)); 4059 c.blinkdur = htons(nblinks); 4060 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4061 } 4062 4063 /** 4064 * t4_iq_free - free an ingress queue and its FLs 4065 * @adap: the adapter 4066 * @mbox: mailbox to use for the FW command 4067 * @pf: the PF owning the queues 4068 * @vf: the VF owning the queues 4069 * @iqtype: the ingress queue type 4070 * @iqid: ingress queue id 4071 * @fl0id: FL0 queue id or 0xffff if no attached FL0 4072 * @fl1id: FL1 queue id or 0xffff if no attached FL1 4073 * 4074 * Frees an ingress queue and its associated FLs, if any. 4075 */ 4076 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 4077 unsigned int vf, unsigned int iqtype, unsigned int iqid, 4078 unsigned int fl0id, unsigned int fl1id) 4079 { 4080 struct fw_iq_cmd c; 4081 4082 memset(&c, 0, sizeof(c)); 4083 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | 4084 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) | 4085 FW_IQ_CMD_VFN_V(vf)); 4086 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c)); 4087 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iqtype)); 4088 c.iqid = htons(iqid); 4089 c.fl0id = htons(fl0id); 4090 c.fl1id = htons(fl1id); 4091 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4092 } 4093 4094 /** 4095 * t4_eth_eq_free - free an Ethernet egress queue 4096 * @adap: the adapter 4097 * @mbox: mailbox to use for the FW command 4098 * @pf: the PF owning the queue 4099 * @vf: the VF owning the queue 4100 * @eqid: egress queue id 4101 * 4102 * Frees an Ethernet egress queue. 4103 */ 4104 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 4105 unsigned int vf, unsigned int eqid) 4106 { 4107 struct fw_eq_eth_cmd c; 4108 4109 memset(&c, 0, sizeof(c)); 4110 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | 4111 FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) | 4112 FW_EQ_ETH_CMD_VFN_V(vf)); 4113 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c)); 4114 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid)); 4115 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4116 } 4117 4118 /** 4119 * t4_ctrl_eq_free - free a control egress queue 4120 * @adap: the adapter 4121 * @mbox: mailbox to use for the FW command 4122 * @pf: the PF owning the queue 4123 * @vf: the VF owning the queue 4124 * @eqid: egress queue id 4125 * 4126 * Frees a control egress queue. 
4127 */ 4128 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 4129 unsigned int vf, unsigned int eqid) 4130 { 4131 struct fw_eq_ctrl_cmd c; 4132 4133 memset(&c, 0, sizeof(c)); 4134 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | 4135 FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) | 4136 FW_EQ_CTRL_CMD_VFN_V(vf)); 4137 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c)); 4138 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid)); 4139 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4140 } 4141
4142 /** 4143 * t4_ofld_eq_free - free an offload egress queue 4144 * @adap: the adapter 4145 * @mbox: mailbox to use for the FW command 4146 * @pf: the PF owning the queue 4147 * @vf: the VF owning the queue 4148 * @eqid: egress queue id 4149 * 4150 * Frees an offload egress queue. 4151 */ 4152 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 4153 unsigned int vf, unsigned int eqid) 4154 { 4155 struct fw_eq_ofld_cmd c; 4156 4157 memset(&c, 0, sizeof(c)); 4158 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F | 4159 FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) | 4160 FW_EQ_OFLD_CMD_VFN_V(vf)); 4161 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c)); 4162 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid)); 4163 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 4164 } 4165
4166 /** 4167 * t4_handle_fw_rpl - process a FW reply message 4168 * @adap: the adapter 4169 * @rpl: start of the FW message 4170 * 4171 * Processes a FW message, such as link state change messages. 4172 */ 4173 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) 4174 { 4175 u8 opcode = *(const u8 *)rpl; 4176
4177 if (opcode == FW_PORT_CMD) { /* link/module state change message */ 4178 int speed = 0, fc = 0; 4179 const struct fw_port_cmd *p = (void *)rpl; 4180 int chan = FW_PORT_CMD_PORTID_G(ntohl(p->op_to_portid)); 4181 int port = adap->chan_map[chan]; 4182 struct port_info *pi = adap2pinfo(adap, port); 4183 struct link_config *lc = &pi->link_cfg; 4184 u32 stat = ntohl(p->u.info.lstatus_to_modtype); 4185 int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; 4186 u32 mod = FW_PORT_CMD_MODTYPE_G(stat); 4187
4188 if (stat & FW_PORT_CMD_RXPAUSE_F) 4189 fc |= PAUSE_RX; 4190 if (stat & FW_PORT_CMD_TXPAUSE_F) 4191 fc |= PAUSE_TX; 4192 if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) 4193 speed = 100; 4194 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) 4195 speed = 1000; 4196 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) 4197 speed = 10000; 4198 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) 4199 speed = 40000; 4200
4201 if (link_ok != lc->link_ok || speed != lc->speed || 4202 fc != lc->fc) { /* something changed */ 4203 lc->link_ok = link_ok; 4204 lc->speed = speed; 4205 lc->fc = fc; 4206 lc->supported = be16_to_cpu(p->u.info.pcap); 4207 t4_os_link_changed(adap, port, link_ok); 4208 } 4209 if (mod != pi->mod_type) { 4210 pi->mod_type = mod; 4211 t4_os_portmod_changed(adap, port); 4212 } 4213 } 4214 return 0; 4215 } 4216
4217 static void get_pci_mode(struct adapter *adapter, struct pci_params *p) 4218 { 4219 u16 val; 4220 4221 if (pci_is_pcie(adapter->pdev)) { 4222 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val); 4223 p->speed = val & PCI_EXP_LNKSTA_CLS; 4224 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; 4225 } 4226 } 4227
4228 /** 4229 * init_link_config - initialize a link's SW state 4230 * @lc: structure holding the link state 4231 * @caps: link capabilities 4232
* 4233 * Initializes the SW state maintained for each link, including the link's 4234 * capabilities and default speed/flow-control/autonegotiation settings. 4235 */ 4236 static void init_link_config(struct link_config *lc, unsigned int caps) 4237 { 4238 lc->supported = caps; 4239 lc->requested_speed = 0; 4240 lc->speed = 0; 4241 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; 4242 if (lc->supported & FW_PORT_CAP_ANEG) { 4243 lc->advertising = lc->supported & ADVERT_MASK; 4244 lc->autoneg = AUTONEG_ENABLE; 4245 lc->requested_fc |= PAUSE_AUTONEG; 4246 } else { 4247 lc->advertising = 0; 4248 lc->autoneg = AUTONEG_DISABLE; 4249 } 4250 } 4251 4252 #define CIM_PF_NOACCESS 0xeeeeeeee 4253 4254 int t4_wait_dev_ready(void __iomem *regs) 4255 { 4256 u32 whoami; 4257 4258 whoami = readl(regs + PL_WHOAMI_A); 4259 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS) 4260 return 0; 4261 4262 msleep(500); 4263 whoami = readl(regs + PL_WHOAMI_A); 4264 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO); 4265 } 4266 4267 struct flash_desc { 4268 u32 vendor_and_model_id; 4269 u32 size_mb; 4270 }; 4271 4272 static int get_flash_params(struct adapter *adap) 4273 { 4274 /* Table for non-Numonix supported flash parts. Numonix parts are left 4275 * to the preexisting code. All flash parts have 64KB sectors. 4276 */ 4277 static struct flash_desc supported_flash[] = { 4278 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ 4279 }; 4280 4281 int ret; 4282 u32 info; 4283 4284 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID); 4285 if (!ret) 4286 ret = sf1_read(adap, 3, 0, 1, &info); 4287 t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */ 4288 if (ret) 4289 return ret; 4290 4291 for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret) 4292 if (supported_flash[ret].vendor_and_model_id == info) { 4293 adap->params.sf_size = supported_flash[ret].size_mb; 4294 adap->params.sf_nsec = 4295 adap->params.sf_size / SF_SEC_SIZE; 4296 return 0; 4297 } 4298 4299 if ((info & 0xff) != 0x20) /* not a Numonix flash */ 4300 return -EINVAL; 4301 info >>= 16; /* log2 of size */ 4302 if (info >= 0x14 && info < 0x18) 4303 adap->params.sf_nsec = 1 << (info - 16); 4304 else if (info == 0x18) 4305 adap->params.sf_nsec = 64; 4306 else 4307 return -EINVAL; 4308 adap->params.sf_size = 1 << info; 4309 adap->params.sf_fw_start = 4310 t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M; 4311 4312 if (adap->params.sf_size < FLASH_MIN_SIZE) 4313 dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n", 4314 adap->params.sf_size, FLASH_MIN_SIZE); 4315 return 0; 4316 } 4317 4318 /** 4319 * t4_prep_adapter - prepare SW and HW for operation 4320 * @adapter: the adapter 4321 * @reset: if true perform a HW reset 4322 * 4323 * Initialize adapter SW state for the various HW modules, set initial 4324 * values for some adapter tunables, take PHYs out of reset, and 4325 * initialize the MDIO interface. 
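 *
 * A typical probe-time sequence would be (illustrative sketch only,
 * assuming the mapped BAR0 registers live in adap->regs):
 *
 *	ret = t4_wait_dev_ready(adap->regs);
 *	if (ret < 0)
 *		return ret;
 *	ret = t4_prep_adapter(adap);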
4326 */ 4327 int t4_prep_adapter(struct adapter *adapter) 4328 { 4329 int ret, ver; 4330 uint16_t device_id; 4331 u32 pl_rev; 4332 4333 get_pci_mode(adapter, &adapter->params.pci); 4334 pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A)); 4335 4336 ret = get_flash_params(adapter); 4337 if (ret < 0) { 4338 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret); 4339 return ret; 4340 } 4341 4342 /* Retrieve adapter's device ID 4343 */ 4344 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id); 4345 ver = device_id >> 12; 4346 adapter->params.chip = 0; 4347 switch (ver) { 4348 case CHELSIO_T4: 4349 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev); 4350 break; 4351 case CHELSIO_T5: 4352 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); 4353 break; 4354 default: 4355 dev_err(adapter->pdev_dev, "Device %d is not supported\n", 4356 device_id); 4357 return -EINVAL; 4358 } 4359 4360 adapter->params.cim_la_size = CIMLA_SIZE; 4361 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); 4362 4363 /* 4364 * Default port for debugging in case we can't reach FW. 4365 */ 4366 adapter->params.nports = 1; 4367 adapter->params.portvec = 1; 4368 adapter->params.vpd.cclk = 50000; 4369 return 0; 4370 } 4371 4372 /** 4373 * cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information 4374 * @adapter: the adapter 4375 * @qid: the Queue ID 4376 * @qtype: the Ingress or Egress type for @qid 4377 * @pbar2_qoffset: BAR2 Queue Offset 4378 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues 4379 * 4380 * Returns the BAR2 SGE Queue Registers information associated with the 4381 * indicated Absolute Queue ID. These are passed back in return value 4382 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue 4383 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues. 4384 * 4385 * This may return an error which indicates that BAR2 SGE Queue 4386 * registers aren't available. If an error is not returned, then the 4387 * following values are returned: 4388 * 4389 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers 4390 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid 4391 * 4392 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which 4393 * require the "Inferred Queue ID" ability may be used. E.g. the 4394 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0, 4395 * then these "Inferred Queue ID" register may not be used. 4396 */ 4397 int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, 4398 unsigned int qid, 4399 enum t4_bar2_qtype qtype, 4400 u64 *pbar2_qoffset, 4401 unsigned int *pbar2_qid) 4402 { 4403 unsigned int page_shift, page_size, qpp_shift, qpp_mask; 4404 u64 bar2_page_offset, bar2_qoffset; 4405 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred; 4406 4407 /* T4 doesn't support BAR2 SGE Queue registers. 4408 */ 4409 if (is_t4(adapter->params.chip)) 4410 return -EINVAL; 4411 4412 /* Get our SGE Page Size parameters. 4413 */ 4414 page_shift = adapter->params.sge.hps + 10; 4415 page_size = 1 << page_shift; 4416 4417 /* Get the right Queues per Page parameters for our Queue. 4418 */ 4419 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS 4420 ? adapter->params.sge.eq_qpp 4421 : adapter->params.sge.iq_qpp); 4422 qpp_mask = (1 << qpp_shift) - 1; 4423 4424 /* Calculate the basics of the BAR2 SGE Queue register area: 4425 * o The BAR2 page the Queue registers will be in. 4426 * o The BAR2 Queue ID. 4427 * o The BAR2 Queue ID Offset into the BAR2 page. 
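 *
 * Worked example (illustrative only): with a 4KB SGE Page Size and
 * 4 Queues Per Page (qpp_shift = 2), Absolute Queue ID 7 lands in
 * BAR2 page 1 (bar2_page_offset = 4096) with BAR2 Queue ID 3 and a
 * Queue ID Offset of 3 * SGE_UDB_SIZE within that page.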
4428 */ 4429 bar2_page_offset = ((qid >> qpp_shift) << page_shift); 4430 bar2_qid = qid & qpp_mask; 4431 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; 4432
4433 /* If the BAR2 Queue ID Offset is less than the Page Size, then the 4434 * hardware will infer the Absolute Queue ID simply from the writes to 4435 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a 4436 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply 4437 * write to the first BAR2 SGE Queue Area within the BAR2 Page with 4438 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID 4439 * from the BAR2 Page and BAR2 Queue ID. 4440 * 4441 * One important consequence of this is that some BAR2 SGE registers 4442 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID 4443 * there. But other registers synthesize the SGE Queue ID purely 4444 * from the writes to the registers -- the Write Combined Doorbell 4445 * Buffer is a good example. These BAR2 SGE Registers are only 4446 * available for those BAR2 SGE Register areas where the SGE Absolute 4447 * Queue ID can be inferred from simple writes. 4448 */ 4449 bar2_qoffset = bar2_page_offset; 4450 bar2_qinferred = (bar2_qid_offset < page_size); 4451 if (bar2_qinferred) { 4452 bar2_qoffset += bar2_qid_offset; 4453 bar2_qid = 0; 4454 } 4455
4456 *pbar2_qoffset = bar2_qoffset; 4457 *pbar2_qid = bar2_qid; 4458 return 0; 4459 } 4460
4461 /** 4462 * t4_init_sge_params - initialize adap->params.sge 4463 * @adapter: the adapter 4464 * 4465 * Initialize various fields of the adapter's SGE Parameters structure. 4466 */ 4467 int t4_init_sge_params(struct adapter *adapter) 4468 { 4469 struct sge_params *sge_params = &adapter->params.sge; 4470 u32 hps, qpp; 4471 unsigned int s_hps, s_qpp; 4472
4473 /* Extract the SGE Page Size for our PF. 4474 */ 4475 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A); 4476 s_hps = (HOSTPAGESIZEPF0_S + 4477 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn); 4478 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M); 4479
4480 /* Extract the SGE Egress and Ingress Queues Per Page for our PF. 4481 */ 4482 s_qpp = (QUEUESPERPAGEPF0_S + 4483 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn); 4484 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A); 4485 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M); 4486 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A); 4487 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M); 4488
4489 return 0; 4490 } 4491
4492 /** 4493 * t4_init_tp_params - initialize adap->params.tp 4494 * @adap: the adapter 4495 * 4496 * Initialize various fields of the adapter's TP Parameters structure. 4497 */ 4498 int t4_init_tp_params(struct adapter *adap) 4499 { 4500 int chan; 4501 u32 v; 4502
4503 v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A); 4504 adap->params.tp.tre = TIMERRESOLUTION_G(v); 4505 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v); 4506
4507 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ 4508 for (chan = 0; chan < NCHAN; chan++) 4509 adap->params.tp.tx_modq[chan] = chan; 4510
4511 /* Cache the adapter's Compressed Filter Mode and global Ingress 4512 * Configuration.
4513 */ 4514 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, 4515 &adap->params.tp.vlan_pri_map, 1, 4516 TP_VLAN_PRI_MAP_A); 4517 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, 4518 &adap->params.tp.ingress_config, 1, 4519 TP_INGRESS_CONFIG_A); 4520
4521 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field 4522 * shift positions of several elements of the Compressed Filter Tuple 4523 * for this adapter which we need frequently ... 4524 */ 4525 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F); 4526 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F); 4527 adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F); 4528 adap->params.tp.protocol_shift = t4_filter_field_shift(adap, 4529 PROTOCOL_F); 4530
4531 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID 4532 * represents the presence of an Outer VLAN instead of a VNIC ID. 4533 */ 4534 if ((adap->params.tp.ingress_config & VNIC_F) == 0) 4535 adap->params.tp.vnic_shift = -1; 4536
4537 return 0; 4538 } 4539
4540 /** 4541 * t4_filter_field_shift - calculate filter field shift 4542 * @adap: the adapter 4543 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits) 4544 * 4545 * Return the shift position of a filter field within the Compressed 4546 * Filter Tuple. The filter field is specified via its selection bit 4547 * within TP_VLAN_PRI_MAP (filter mode). E.g. VLAN_F. 4548 */ 4549 int t4_filter_field_shift(const struct adapter *adap, int filter_sel) 4550 { 4551 unsigned int filter_mode = adap->params.tp.vlan_pri_map; 4552 unsigned int sel; 4553 int field_shift; 4554
4555 if ((filter_mode & filter_sel) == 0) 4556 return -1; 4557
4558 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { 4559 switch (filter_mode & sel) { 4560 case FCOE_F: 4561 field_shift += FT_FCOE_W; 4562 break; 4563 case PORT_F: 4564 field_shift += FT_PORT_W; 4565 break; 4566 case VNIC_ID_F: 4567 field_shift += FT_VNIC_ID_W; 4568 break; 4569 case VLAN_F: 4570 field_shift += FT_VLAN_W; 4571 break; 4572 case TOS_F: 4573 field_shift += FT_TOS_W; 4574 break; 4575 case PROTOCOL_F: 4576 field_shift += FT_PROTOCOL_W; 4577 break; 4578 case ETHERTYPE_F: 4579 field_shift += FT_ETHERTYPE_W; 4580 break; 4581 case MACMATCH_F: 4582 field_shift += FT_MACMATCH_W; 4583 break; 4584 case MPSHITTYPE_F: 4585 field_shift += FT_MPSHITTYPE_W; 4586 break; 4587 case FRAGMENTATION_F: 4588 field_shift += FT_FRAGMENTATION_W; 4589 break; 4590 } 4591 } 4592 return field_shift; 4593 } 4594
4595 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) 4596 { 4597 u8 addr[6]; 4598 int ret, i, j = 0; 4599 struct fw_port_cmd c; 4600 struct fw_rss_vi_config_cmd rvc; 4601
4602 memset(&c, 0, sizeof(c)); 4603 memset(&rvc, 0, sizeof(rvc)); 4604
4605 for_each_port(adap, i) { 4606 unsigned int rss_size; 4607 struct port_info *p = adap2pinfo(adap, i); 4608
4609 while ((adap->params.portvec & (1 << j)) == 0) 4610 j++; 4611
4612 c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | 4613 FW_CMD_REQUEST_F | FW_CMD_READ_F | 4614 FW_PORT_CMD_PORTID_V(j)); 4615 c.action_to_len16 = htonl( 4616 FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | 4617 FW_LEN16(c)); 4618 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 4619 if (ret) 4620 return ret; 4621
4622 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); 4623 if (ret < 0) 4624 return ret; 4625
4626 p->viid = ret; 4627 p->tx_chan = j; 4628 p->lport = j; 4629 p->rss_size = rss_size; 4630 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); 4631 adap->port[i]->dev_port =
j; 4632 4633 ret = ntohl(c.u.info.lstatus_to_modtype); 4634 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ? 4635 FW_PORT_CMD_MDIOADDR_G(ret) : -1; 4636 p->port_type = FW_PORT_CMD_PTYPE_G(ret); 4637 p->mod_type = FW_PORT_MOD_TYPE_NA; 4638 4639 rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | 4640 FW_CMD_REQUEST_F | FW_CMD_READ_F | 4641 FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); 4642 rvc.retval_len16 = htonl(FW_LEN16(rvc)); 4643 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); 4644 if (ret) 4645 return ret; 4646 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen); 4647 4648 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap)); 4649 j++; 4650 } 4651 return 0; 4652 } 4653 4654 /** 4655 * t4_read_cimq_cfg - read CIM queue configuration 4656 * @adap: the adapter 4657 * @base: holds the queue base addresses in bytes 4658 * @size: holds the queue sizes in bytes 4659 * @thres: holds the queue full thresholds in bytes 4660 * 4661 * Returns the current configuration of the CIM queues, starting with 4662 * the IBQs, then the OBQs. 4663 */ 4664 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres) 4665 { 4666 unsigned int i, v; 4667 int cim_num_obq = is_t4(adap->params.chip) ? 4668 CIM_NUM_OBQ : CIM_NUM_OBQ_T5; 4669 4670 for (i = 0; i < CIM_NUM_IBQ; i++) { 4671 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F | 4672 QUENUMSELECT_V(i)); 4673 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A); 4674 /* value is in 256-byte units */ 4675 *base++ = CIMQBASE_G(v) * 256; 4676 *size++ = CIMQSIZE_G(v) * 256; 4677 *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */ 4678 } 4679 for (i = 0; i < cim_num_obq; i++) { 4680 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F | 4681 QUENUMSELECT_V(i)); 4682 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A); 4683 /* value is in 256-byte units */ 4684 *base++ = CIMQBASE_G(v) * 256; 4685 *size++ = CIMQSIZE_G(v) * 256; 4686 } 4687 } 4688 4689 /** 4690 * t4_read_cim_ibq - read the contents of a CIM inbound queue 4691 * @adap: the adapter 4692 * @qid: the queue index 4693 * @data: where to store the queue contents 4694 * @n: capacity of @data in 32-bit words 4695 * 4696 * Reads the contents of the selected CIM queue starting at address 0 up 4697 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on 4698 * error and the number of 32-bit words actually read on success. 4699 */ 4700 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) 4701 { 4702 int i, err, attempts; 4703 unsigned int addr; 4704 const unsigned int nwords = CIM_IBQ_SIZE * 4; 4705 4706 if (qid > 5 || (n & 3)) 4707 return -EINVAL; 4708 4709 addr = qid * nwords; 4710 if (n > nwords) 4711 n = nwords; 4712 4713 /* It might take 3-10ms before the IBQ debug read access is allowed. 4714 * Wait for 1 Sec with a delay of 1 usec. 
4715 */ 4716 attempts = 1000000; 4717
4718 for (i = 0; i < n; i++, addr++) { 4719 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) | 4720 IBQDBGEN_F); 4721 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0, 4722 attempts, 1); 4723 if (err) 4724 return err; 4725 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A); 4726 } 4727 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0); 4728 return i; 4729 } 4730
4731 /** 4732 * t4_read_cim_obq - read the contents of a CIM outbound queue 4733 * @adap: the adapter 4734 * @qid: the queue index 4735 * @data: where to store the queue contents 4736 * @n: capacity of @data in 32-bit words 4737 * 4738 * Reads the contents of the selected CIM queue starting at address 0 up 4739 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on 4740 * error and the number of 32-bit words actually read on success. 4741 */ 4742 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) 4743 { 4744 int i, err; 4745 unsigned int addr, v, nwords; 4746 int cim_num_obq = is_t4(adap->params.chip) ? 4747 CIM_NUM_OBQ : CIM_NUM_OBQ_T5; 4748
4749 if ((qid > (cim_num_obq - 1)) || (n & 3)) 4750 return -EINVAL; 4751
4752 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F | 4753 QUENUMSELECT_V(qid)); 4754 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A); 4755
4756 addr = CIMQBASE_G(v) * 64; /* multiple of 256 -> multiple of 4 */ 4757 nwords = CIMQSIZE_G(v) * 64; /* same */ 4758 if (n > nwords) 4759 n = nwords; 4760
4761 for (i = 0; i < n; i++, addr++) { 4762 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) | 4763 OBQDBGEN_F); 4764 err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0, 4765 2, 1); 4766 if (err) 4767 return err; 4768 *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A); 4769 } 4770 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0); 4771 return i; 4772 } 4773
4774 /** 4775 * t4_cim_read - read a block from CIM internal address space 4776 * @adap: the adapter 4777 * @addr: the start address within the CIM address space 4778 * @n: number of words to read 4779 * @valp: where to store the result 4780 * 4781 * Reads a block of 4-byte words from the CIM internal address space. 4782 */ 4783 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n, 4784 unsigned int *valp) 4785 { 4786 int ret = 0; 4787
4788 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F) 4789 return -EBUSY; 4790
4791 for ( ; !ret && n--; addr += 4) { 4792 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr); 4793 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F, 4794 0, 5, 2); 4795 if (!ret) 4796 *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A); 4797 } 4798 return ret; 4799 } 4800
4801 /** 4802 * t4_cim_write - write a block into CIM internal address space 4803 * @adap: the adapter 4804 * @addr: the start address within the CIM address space 4805 * @n: number of words to write 4806 * @valp: set of values to write 4807 * 4808 * Writes a block of 4-byte words into the CIM internal address space.
4809 */ 4810 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n, 4811 const unsigned int *valp) 4812 { 4813 int ret = 0; 4814 4815 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F) 4816 return -EBUSY; 4817 4818 for ( ; !ret && n--; addr += 4) { 4819 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++); 4820 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F); 4821 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F, 4822 0, 5, 2); 4823 } 4824 return ret; 4825 } 4826 4827 static int t4_cim_write1(struct adapter *adap, unsigned int addr, 4828 unsigned int val) 4829 { 4830 return t4_cim_write(adap, addr, 1, &val); 4831 } 4832 4833 /** 4834 * t4_cim_read_la - read CIM LA capture buffer 4835 * @adap: the adapter 4836 * @la_buf: where to store the LA data 4837 * @wrptr: the HW write pointer within the capture buffer 4838 * 4839 * Reads the contents of the CIM LA buffer with the most recent entry at 4840 * the end of the returned data and with the entry at @wrptr first. 4841 * We try to leave the LA in the running state we find it in. 4842 */ 4843 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr) 4844 { 4845 int i, ret; 4846 unsigned int cfg, val, idx; 4847 4848 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg); 4849 if (ret) 4850 return ret; 4851 4852 if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */ 4853 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0); 4854 if (ret) 4855 return ret; 4856 } 4857 4858 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val); 4859 if (ret) 4860 goto restart; 4861 4862 idx = UPDBGLAWRPTR_G(val); 4863 if (wrptr) 4864 *wrptr = idx; 4865 4866 for (i = 0; i < adap->params.cim_la_size; i++) { 4867 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 4868 UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F); 4869 if (ret) 4870 break; 4871 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val); 4872 if (ret) 4873 break; 4874 if (val & UPDBGLARDEN_F) { 4875 ret = -ETIMEDOUT; 4876 break; 4877 } 4878 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]); 4879 if (ret) 4880 break; 4881 idx = (idx + 1) & UPDBGLARDPTR_M; 4882 } 4883 restart: 4884 if (cfg & UPDBGLAEN_F) { 4885 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 4886 cfg & ~UPDBGLARDEN_F); 4887 if (!ret) 4888 ret = r; 4889 } 4890 return ret; 4891 } 4892 4893 /** 4894 * t4_tp_read_la - read TP LA capture buffer 4895 * @adap: the adapter 4896 * @la_buf: where to store the LA data 4897 * @wrptr: the HW write pointer within the capture buffer 4898 * 4899 * Reads the contents of the TP LA buffer with the most recent entry at 4900 * the end of the returned data and with the entry at @wrptr first. 4901 * We leave the LA in the running state we find it in. 
4902 */ 4903 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr) 4904 { 4905 bool last_incomplete; 4906 unsigned int i, cfg, val, idx; 4907 4908 cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff; 4909 if (cfg & DBGLAENABLE_F) /* freeze LA */ 4910 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, 4911 adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F)); 4912 4913 val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A); 4914 idx = DBGLAWPTR_G(val); 4915 last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0; 4916 if (last_incomplete) 4917 idx = (idx + 1) & DBGLARPTR_M; 4918 if (wrptr) 4919 *wrptr = idx; 4920 4921 val &= 0xffff; 4922 val &= ~DBGLARPTR_V(DBGLARPTR_M); 4923 val |= adap->params.tp.la_mask; 4924 4925 for (i = 0; i < TPLA_SIZE; i++) { 4926 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val); 4927 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A); 4928 idx = (idx + 1) & DBGLARPTR_M; 4929 } 4930 4931 /* Wipe out last entry if it isn't valid */ 4932 if (last_incomplete) 4933 la_buf[TPLA_SIZE - 1] = ~0ULL; 4934 4935 if (cfg & DBGLAENABLE_F) /* restore running state */ 4936 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, 4937 cfg | adap->params.tp.la_mask); 4938 } 4939