/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_values.h"
#include "../cxgb4/t4fw_api.h"

/*
 * Wait for the device to become ready (signified by our "who am I" register
 * returning a value other than all 1's).  Return an error if it doesn't
 * become ready ...
 */
int t4vf_wait_dev_ready(struct adapter *adapter)
{
	const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
	const u32 notready1 = 0xffffffff;
	const u32 notready2 = 0xeeeeeeee;
	u32 val;

	val = t4_read_reg(adapter, whoami);
	if (val != notready1 && val != notready2)
		return 0;
	msleep(500);
	val = t4_read_reg(adapter, whoami);
	if (val != notready1 && val != notready2)
		return 0;
	else
		return -EIO;
}

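/*
 * Usage sketch (illustrative only, not part of the driver): probe-time code
 * is expected to confirm device readiness before issuing any mailbox
 * commands.  "adap" is an assumed caller-provided adapter pointer.
 *
 *	int err = t4vf_wait_dev_ready(adap);
 *
 *	if (err)
 *		return err;	// device never came out of reset
 */
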
/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order
 * (since the firmware data structures are specified in a big-endian layout).
 */
static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
			 u32 mbox_data)
{
	for ( ; size; size -= 8, mbox_data += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
}

/**
 *	t4vf_record_mbox - record a Firmware Mailbox Command/Reply in the log
 *	@adapter: the adapter
 *	@cmd: the Firmware Mailbox Command or Reply
 *	@size: command length in bytes
 *	@access: the time (ms) needed to access the Firmware Mailbox
 *	@execute: the time (ms) the command spent being executed
 */
static void t4vf_record_mbox(struct adapter *adapter, const __be64 *cmd,
			     int size, int access, int execute)
{
	struct mbox_cmd_log *log = adapter->mbox_log;
	struct mbox_cmd *entry;
	int i;

	entry = mbox_cmd_log_entry(log, log->cursor++);
	if (log->cursor == log->size)
		log->cursor = 0;

	for (i = 0; i < size / 8; i++)
		entry->cmd[i] = be64_to_cpu(cmd[i]);
	while (i < MBOX_LEN / 8)
		entry->cmd[i++] = 0;
	entry->timestamp = jiffies;
	entry->seqno = log->seqno++;
	entry->access = access;
	entry->execute = execute;
}

/**
 *	t4vf_wr_mbox_core - send a command to FW through the mailbox
 *	@adapter: the adapter
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the mailbox and waits for the
 *	FW to execute the command.  If @rpl is not %NULL it is used to store
 *	the FW's reply to the command.  The command and its optional reply
 *	are of the same length.  FW can take up to 500 ms to respond.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff; otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
		      void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u16 access = 0, execute = 0;
	u32 v, mbox_data;
	int i, ms, delay_idx, ret;
	const __be64 *p;
	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
	u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi));
	__be64 cmd_rpl[MBOX_LEN / 8];
	struct mbox_list entry;

	/* In T6, mailbox size is changed to 128 bytes to avoid
	 * invalidating the entire prefetch buffer.
	 */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		mbox_data = T4VF_MBDATA_BASE_ADDR;
	else
		mbox_data = T6VF_MBDATA_BASE_ADDR;

	/*
	 * Commands must be multiples of 16 bytes in length and may not be
	 * larger than the size of the Mailbox Data register array.
	 */
	if ((size % 16) != 0 ||
	    size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
		return -EINVAL;

	/* Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	spin_lock(&adapter->mbox_lock);
	list_add_tail(&entry.list, &adapter->mlist.list);
	spin_unlock(&adapter->mbox_lock);

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; ; i += ms) {
		/* If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...
		 */
		if (i > FW_CMD_MAX_TIMEOUT) {
			spin_lock(&adapter->mbox_lock);
			list_del(&entry.list);
			spin_unlock(&adapter->mbox_lock);
			ret = -EBUSY;
			t4vf_record_mbox(adapter, cmd, size, access, ret);
			return ret;
		}

		/* If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (list_first_entry(&adapter->mlist.list, struct mbox_list,
				     list) == &entry)
			break;

		/* Delay for a bit before checking again ... */
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}
	}

	/*
	 * Loop trying to get ownership of the mailbox.  Return an error
	 * if we can't gain ownership.
	 */
	v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
	if (v != MBOX_OWNER_DRV) {
		spin_lock(&adapter->mbox_lock);
		list_del(&entry.list);
		spin_unlock(&adapter->mbox_lock);
		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
		t4vf_record_mbox(adapter, cmd, size, access, ret);
		return ret;
	}

	/*
	 * Write the command array into the Mailbox Data register array and
	 * transfer ownership of the mailbox to the firmware.
	 *
	 * For the VFs, the Mailbox Data "registers" are actually backed by
	 * T4's "MA" interface rather than PL Registers (as is the case for
	 * the PFs).  Because these are in different coherency domains, the
	 * write to the VF's PL-register-backed Mailbox Control can race in
	 * front of the writes to the MA-backed VF Mailbox Data "registers".
	 * So we need to do a read-back on at least one byte of the VF Mailbox
	 * Data registers before doing the write to the VF Mailbox Control
	 * register.
	 */
	if (cmd_op != FW_VI_STATS_CMD)
		t4vf_record_mbox(adapter, cmd, size, access, 0);
	for (i = 0, p = cmd; i < size; i += 8)
		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
	t4_read_reg(adapter, mbox_data);         /* flush write */

	t4_write_reg(adapter, mbox_ctl,
		     MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adapter, mbox_ctl);          /* flush write */

	/*
	 * Spin waiting for firmware to acknowledge processing our command.
	 */
	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		/*
		 * If we're the owner, see if this is the reply we wanted.
		 */
		v = t4_read_reg(adapter, mbox_ctl);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			/*
			 * If the Message Valid bit isn't on, revoke ownership
			 * of the mailbox and continue waiting for our reply.
			 */
			if ((v & MBMSGVALID_F) == 0) {
				t4_write_reg(adapter, mbox_ctl,
					     MBOWNER_V(MBOX_OWNER_NONE));
				continue;
			}

			/*
			 * We now have our reply.  Extract the command return
			 * value, copy the reply back to our caller's buffer
			 * (if specified) and revoke ownership of the mailbox.
			 * We return the (negated) firmware command return
			 * code (this depends on FW_SUCCESS == 0).
			 */
			get_mbox_rpl(adapter, cmd_rpl, size, mbox_data);

			/* return value in low-order little-endian word */
			v = be64_to_cpu(cmd_rpl[0]);

			if (rpl) {
				/* request bit in high-order BE word */
				WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
					 & FW_CMD_REQUEST_F) == 0);
				memcpy(rpl, cmd_rpl, size);
				WARN_ON((be32_to_cpu(*(__be32 *)rpl)
					 & FW_CMD_REQUEST_F) != 0);
			}
			t4_write_reg(adapter, mbox_ctl,
				     MBOWNER_V(MBOX_OWNER_NONE));
			execute = i + ms;
			if (cmd_op != FW_VI_STATS_CMD)
				t4vf_record_mbox(adapter, cmd_rpl, size, access,
						 execute);
			spin_lock(&adapter->mbox_lock);
			list_del(&entry.list);
			spin_unlock(&adapter->mbox_lock);
			return -FW_CMD_RETVAL_G(v);
		}
	}

	/* We timed out.  Return the error ... */
	ret = -ETIMEDOUT;
	t4vf_record_mbox(adapter, cmd, size, access, ret);
	spin_lock(&adapter->mbox_lock);
	list_del(&entry.list);
	spin_unlock(&adapter->mbox_lock);
	return ret;
}

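/*
 * Usage sketch (illustrative, not part of the driver): callers build a
 * firmware command structure, mark it as a request, and hand it to the
 * t4vf_wr_mbox() wrapper (declared in t4vf_common.h), which invokes
 * t4vf_wr_mbox_core() with sleeping allowed.  "adap" is an assumed
 * caller-provided adapter; the command below only shows the general
 * pattern used throughout this file.
 *
 *	struct fw_reset_cmd c;
 *	int err;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
 *				    FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
 *	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 *	err = t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
 *	if (err)
 *		dev_err(adap->pdev_dev, "reset command failed: %d\n", err);
 */
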
#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
		     FW_PORT_CAP32_ANEG)

/**
 *	fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
 *	@caps16: a 16-bit Port Capabilities value
 *
 *	Returns the equivalent 32-bit Port Capabilities value.
 */
static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
{
	fw_port_cap32_t caps32 = 0;

	#define CAP16_TO_CAP32(__cap) \
		do { \
			if (caps16 & FW_PORT_CAP_##__cap) \
				caps32 |= FW_PORT_CAP32_##__cap; \
		} while (0)

	CAP16_TO_CAP32(SPEED_100M);
	CAP16_TO_CAP32(SPEED_1G);
	CAP16_TO_CAP32(SPEED_25G);
	CAP16_TO_CAP32(SPEED_10G);
	CAP16_TO_CAP32(SPEED_40G);
	CAP16_TO_CAP32(SPEED_100G);
	CAP16_TO_CAP32(FC_RX);
	CAP16_TO_CAP32(FC_TX);
	CAP16_TO_CAP32(ANEG);
	CAP16_TO_CAP32(MDIX);
	CAP16_TO_CAP32(MDIAUTO);
	CAP16_TO_CAP32(FEC_RS);
	CAP16_TO_CAP32(FEC_BASER_RS);
	CAP16_TO_CAP32(802_3_PAUSE);
	CAP16_TO_CAP32(802_3_ASM_DIR);

	#undef CAP16_TO_CAP32

	return caps32;
}

/* Translate Firmware Pause specification to Common Code */
static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
{
	enum cc_pause cc_pause = 0;

	if (fw_pause & FW_PORT_CAP32_FC_RX)
		cc_pause |= PAUSE_RX;
	if (fw_pause & FW_PORT_CAP32_FC_TX)
		cc_pause |= PAUSE_TX;

	return cc_pause;
}

/* Translate Firmware Forward Error Correction specification to Common Code */
static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
{
	enum cc_fec cc_fec = 0;

	if (fw_fec & FW_PORT_CAP32_FEC_RS)
		cc_fec |= FEC_RS;
	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
		cc_fec |= FEC_BASER_RS;

	return cc_fec;
}

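/*
 * Illustrative sketch (assumption: a caller holding a 16-bit capabilities
 * word "caps16" from an old-firmware Port Information message): the helpers
 * above are meant to be chained so that the rest of the driver only ever
 * deals with the 32-bit format and the Common Code enums.
 *
 *	fw_port_cap32_t caps32 = fwcaps16_to_caps32(caps16);
 *	enum cc_pause pause = fwcap_to_cc_pause(caps32);
 *	enum cc_fec fec = fwcap_to_cc_fec(caps32);
 */
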
/**
 *	fwcap_to_speed - return the highest speed in a Port Capabilities value
 *	@caps: the Port Capabilities
 *
 *	Return the highest speed set in the port capabilities, in Mb/s.
 */
static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
{
	#define TEST_SPEED_RETURN(__caps_speed, __speed) \
		do { \
			if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
				return __speed; \
		} while (0)

	TEST_SPEED_RETURN(400G, 400000);
	TEST_SPEED_RETURN(200G, 200000);
	TEST_SPEED_RETURN(100G, 100000);
	TEST_SPEED_RETURN(50G,   50000);
	TEST_SPEED_RETURN(40G,   40000);
	TEST_SPEED_RETURN(25G,   25000);
	TEST_SPEED_RETURN(10G,   10000);
	TEST_SPEED_RETURN(1G,     1000);
	TEST_SPEED_RETURN(100M,    100);

	#undef TEST_SPEED_RETURN

	return 0;
}

/*
 *	init_link_config - initialize a link's SW state
 *	@lc: structure holding the link state
 *	@pcaps: link Port Capabilities
 *	@acaps: link current Advertised Port Capabilities
 *
 *	Initializes the SW state maintained for each link, including the link's
 *	capabilities and default speed/flow-control/autonegotiation settings.
 */
static void init_link_config(struct link_config *lc,
			     fw_port_cap32_t pcaps,
			     fw_port_cap32_t acaps)
{
	lc->pcaps = pcaps;
	lc->lpacaps = 0;
	lc->speed_caps = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;

	/* For Forward Error Correction, we default to whatever the Firmware
	 * tells us the Link is currently advertising.
	 */
	lc->auto_fec = fwcap_to_cc_fec(acaps);
	lc->requested_fec = FEC_AUTO;
	lc->fec = lc->auto_fec;

	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
		lc->acaps = acaps & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->acaps = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

/**
 *	t4vf_port_init - initialize port hardware/software state
 *	@adapter: the adapter
 *	@pidx: the adapter port index
 */
int t4vf_port_init(struct adapter *adapter, int pidx)
{
	struct port_info *pi = adap2pinfo(adapter, pidx);
	unsigned int fw_caps = adapter->params.fw_caps_support;
	struct fw_vi_cmd vi_cmd, vi_rpl;
	struct fw_port_cmd port_cmd, port_rpl;
	enum fw_port_type port_type;
	int mdio_addr;
	fw_port_cap32_t pcaps, acaps;
	int ret;

	/* If we haven't yet determined whether we're talking to Firmware
	 * which knows the new 32-bit Port Capabilities, it's time to find
	 * out now.  This will also tell new Firmware to send us Port Status
	 * Updates using the new 32-bit Port Capabilities version of the
	 * Port Information message.
	 */
	if (fw_caps == FW_CAPS_UNKNOWN) {
		u32 param, val;

		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
		val = 1;
		ret = t4vf_set_params(adapter, 1, &param, &val);
		fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
		adapter->params.fw_caps_support = fw_caps;
	}

	/*
	 * Execute a VI Read command to get our Virtual Interface information
	 * like MAC address, etc.
	 */
	memset(&vi_cmd, 0, sizeof(vi_cmd));
	vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				       FW_CMD_REQUEST_F |
				       FW_CMD_READ_F);
	vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
	vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid));
	ret = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
	if (ret != FW_SUCCESS)
		return ret;

	BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd));
	pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd));
	t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);

	/*
	 * If we don't have read access to our port information, we're done
	 * now.  Otherwise, execute a PORT Read command to get it ...
	 */
	if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
		return 0;

	memset(&port_cmd, 0, sizeof(port_cmd));
	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
					    FW_CMD_REQUEST_F |
					    FW_CMD_READ_F |
					    FW_PORT_CMD_PORTID_V(pi->port_id));
	port_cmd.action_to_len16 = cpu_to_be32(
		FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
				     ? FW_PORT_ACTION_GET_PORT_INFO
				     : FW_PORT_ACTION_GET_PORT_INFO32) |
		FW_LEN16(port_cmd));
	ret = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
	if (ret != FW_SUCCESS)
		return ret;

	/* Extract the various fields from the Port Information message. */
	if (fw_caps == FW_CAPS16) {
		u32 lstatus = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);

		port_type = FW_PORT_CMD_PTYPE_G(lstatus);
		mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F)
			     ? FW_PORT_CMD_MDIOADDR_G(lstatus)
			     : -1);
		pcaps = fwcaps16_to_caps32(be16_to_cpu(port_rpl.u.info.pcap));
		acaps = fwcaps16_to_caps32(be16_to_cpu(port_rpl.u.info.acap));
	} else {
		u32 lstatus32 =
			be32_to_cpu(port_rpl.u.info32.lstatus32_to_cbllen32);

		port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
		mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F)
			     ? FW_PORT_CMD_MDIOADDR32_G(lstatus32)
			     : -1);
		pcaps = be32_to_cpu(port_rpl.u.info32.pcaps32);
		acaps = be32_to_cpu(port_rpl.u.info32.acaps32);
	}

	pi->port_type = port_type;
	pi->mdio_addr = mdio_addr;
	pi->mod_type = FW_PORT_MOD_TYPE_NA;

	init_link_config(&pi->link_cfg, pcaps, acaps);
	return 0;
}

/**
 *	t4vf_fw_reset - issue a reset to FW
 *	@adapter: the adapter
 *
 *	Issues a reset command to FW.  For a Physical Function this would
 *	result in the Firmware resetting all of its state.  For a Virtual
 *	Function this just resets the state associated with the VF.
 */
int t4vf_fw_reset(struct adapter *adapter)
{
	struct fw_reset_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
				      FW_CMD_WRITE_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 *	t4vf_query_params - query FW or device parameters
 *	@adapter: the adapter
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@vals: the parameter values
 *
 *	Reads the values of firmware or device parameters.  Up to 7 parameters
 *	can be queried at once.
 */
static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
			     const u32 *params, u32 *vals)
{
	int i, ret;
	struct fw_params_cmd cmd, rpl;
	struct fw_params_param *p;
	size_t len16;

	if (nparams > 7)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_READ_F);
	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
				      param[nparams].mnem), 16);
	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
		p->mnem = htonl(*params++);

	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (ret == 0)
		for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
			*vals++ = be32_to_cpu(p->val);
	return ret;
}

/**
 *	t4vf_set_params - sets FW or device parameters
 *	@adapter: the adapter
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@vals: the parameter values
 *
 *	Sets the values of firmware or device parameters.  Up to 7 parameters
 *	can be specified at once.
 */
int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
		    const u32 *params, const u32 *vals)
{
	int i;
	struct fw_params_cmd cmd;
	struct fw_params_param *p;
	size_t len16;

	if (nparams > 7)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F);
	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
				      param[nparams]), 16);
	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
		p->mnem = cpu_to_be32(*params++);
		p->val = cpu_to_be32(*vals++);
	}

	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

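/*
 * Illustrative sketch of the parameter-query pattern used later in this
 * file (e.g. t4vf_get_dev_params()): parameter "names" are built from the
 * FW_PARAMS_MNEM/PARAM field macros and the decoded values come back in
 * host byte order.  "adap" is an assumed caller-provided adapter.
 *
 *	u32 param, val;
 *	int err;
 *
 *	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
 *		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV));
 *	err = t4vf_query_params(adap, 1, &param, &val);
 *	if (!err)
 *		dev_info(adap->pdev_dev, "firmware revision %#x\n", val);
 */
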
/**
 *	t4vf_fl_pkt_align - return the fl packet alignment
 *	@adapter: the adapter
 *
 *	T4 has a single field to specify the packing and padding boundary.
 *	T5 onwards has separate fields for this and hence the alignment for
 *	the next packet offset is the maximum of these two.  And T6 changes
 *	the Ingress Padding Boundary Shift, so it's all a mess and it's best
 *	if we put this in low-level Common Code ...
 *
 */
int t4vf_fl_pkt_align(struct adapter *adapter)
{
	u32 sge_control, sge_control2;
	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;

	sge_control = adapter->params.sge.sge_control;

	/* T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately.  The actual Ingress Packet Data alignment boundary
	 * within Packed Buffer Mode is the maximum of these two
	 * specifications.  (Note that it makes no real practical sense to
	 * have the Padding Boundary be larger than the Packing Boundary but
	 * you could set the chip up that way and, in fact, legacy T4 code
	 * would end up doing this because it would initialize the Padding
	 * Boundary and leave the Packing Boundary initialized to 0 (16
	 * bytes).)  Padding Boundary values in T6 start from 8B, whereas
	 * they are 32B for T4 and T5.
	 */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		ingpad_shift = INGPADBOUNDARY_SHIFT_X;
	else
		ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;

	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);

	fl_align = ingpadboundary;
	if (!is_t4(adapter->params.chip)) {
		/* T5 has a different interpretation of one of the PCIe Packing
		 * Boundary values.
		 */
		sge_control2 = adapter->params.sge.sge_control2;
		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
			ingpackboundary = 16;
		else
			ingpackboundary = 1 << (ingpackboundary +
						INGPACKBOUNDARY_SHIFT_X);

		fl_align = max(ingpadboundary, ingpackboundary);
	}
	return fl_align;
}

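/*
 * Worked example (illustrative; the concrete shift constants live in
 * t4_values.h): assuming INGPADBOUNDARY_SHIFT_X == 5 and
 * INGPACKBOUNDARY_SHIFT_X == 5, a T5 VF with INGPADBOUNDARY == 0 in
 * SGE_CONTROL and INGPACKBOUNDARY == 1 in SGE_CONTROL2 gets
 *
 *	ingpadboundary  = 1 << (0 + 5) = 32
 *	ingpackboundary = 1 << (1 + 5) = 64
 *	fl_align        = max(32, 64)  = 64
 *
 * so ingress packet data is aligned/packed on 64-byte boundaries.
 */
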
/**
 *	t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
 *	@adapter: the adapter
 *	@qid: the Queue ID
 *	@qtype: the Ingress or Egress type for @qid
 *	@pbar2_qoffset: BAR2 Queue Offset
 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 *	Returns the BAR2 SGE Queue Registers information associated with the
 *	indicated Absolute Queue ID.  These are passed back in return value
 *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 *	This may return an error which indicates that BAR2 SGE Queue
 *	registers aren't available.  If an error is not returned, then the
 *	following values are returned:
 *
 *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 *	require the "Inferred Queue ID" ability may be used.  E.g. the
 *	Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 *	then these "Inferred Queue ID" registers may not be used.
 */
int t4vf_bar2_sge_qregs(struct adapter *adapter,
			unsigned int qid,
			enum t4_bar2_qtype qtype,
			u64 *pbar2_qoffset,
			unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers.
	 */
	if (is_t4(adapter->params.chip))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.sge_vf_hps + 10;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.sge_vf_eq_qpp
		     : adapter->params.sge.sge_vf_iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}

unsigned int t4vf_get_pf_from_vf(struct adapter *adapter)
{
	u32 whoami;

	whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
	return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami));
}

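/*
 * Illustrative sketch (not part of the driver): queue-setup code is
 * expected to translate an Absolute Queue ID into a BAR2 doorbell address
 * roughly as below.  "adap", "qid" and "bar2_addr" are assumptions for the
 * example; the real setup lives in the SGE code.
 *
 *	u64 bar2_qoffset;
 *	unsigned int bar2_qid;
 *	int err;
 *
 *	err = t4vf_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
 *				  &bar2_qoffset, &bar2_qid);
 *	if (!err) {
 *		void __iomem *bar2_addr = adap->bar2 + bar2_qoffset;
 *
 *		// bar2_qid == 0 means the "Inferred Queue ID" registers
 *		// (e.g. the Write Combining Doorbell Buffer) may be used.
 *	}
 */
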
/**
 *	t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
 *	@adapter: the adapter
 *
 *	Retrieves various core SGE parameters in the form of hardware SGE
 *	register values.  The caller is responsible for decoding these as
 *	needed.  The SGE parameters are stored in @adapter->params.sge.
 */
int t4vf_get_sge_params(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	u32 params[7], vals[7];
	int v;

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
	params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
	params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
	params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
	params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
	params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
	v = t4vf_query_params(adapter, 7, params, vals);
	if (v)
		return v;
	sge_params->sge_control = vals[0];
	sge_params->sge_host_page_size = vals[1];
	sge_params->sge_fl_buffer_size[0] = vals[2];
	sge_params->sge_fl_buffer_size[1] = vals[3];
	sge_params->sge_timer_value_0_and_1 = vals[4];
	sge_params->sge_timer_value_2_and_3 = vals[5];
	sge_params->sge_timer_value_4_and_5 = vals[6];

	/* T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately with the Padding Boundary in SGE_CONTROL and the Packing
	 * Boundary in SGE_CONTROL2.  So for T5 and later we need to grab
	 * SGE_CONTROL2 in order to determine how ingress packet data will be
	 * laid out in Packed Buffer Mode.  Unfortunately, older versions of
	 * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
	 * failure grabbing it we throw an error since we can't figure out the
	 * right value.
	 */
	if (!is_t4(adapter->params.chip)) {
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A));
		v = t4vf_query_params(adapter, 1, params, vals);
		if (v != FW_SUCCESS) {
			dev_err(adapter->pdev_dev,
				"Unable to get SGE Control2; "
				"probably old firmware.\n");
			return v;
		}
		sge_params->sge_control2 = vals[0];
	}

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
	v = t4vf_query_params(adapter, 2, params, vals);
	if (v)
		return v;
	sge_params->sge_ingress_rx_threshold = vals[0];
	sge_params->sge_congestion_control = vals[1];

	/* For T5 and later we want to use the new BAR2 Doorbells.
	 * Unfortunately, older firmware didn't allow this register to be
	 * read.
	 */
	if (!is_t4(adapter->params.chip)) {
		unsigned int pf, s_hps, s_qpp;

		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(
				     SGE_EGRESS_QUEUES_PER_PAGE_VF_A));
		params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(
				     SGE_INGRESS_QUEUES_PER_PAGE_VF_A));
		v = t4vf_query_params(adapter, 2, params, vals);
		if (v != FW_SUCCESS) {
			dev_warn(adapter->pdev_dev,
				 "Unable to get VF SGE Queues/Page; "
				 "probably old firmware.\n");
			return v;
		}
		sge_params->sge_egress_queues_per_page = vals[0];
		sge_params->sge_ingress_queues_per_page = vals[1];

		/* We need the Queues/Page for our VF.  This is based on the
		 * PF from which we're instantiated and is indexed in the
		 * register we just read.  Do it once here so other code in
		 * the driver can just use it.
		 */
		pf = t4vf_get_pf_from_vf(adapter);
		s_hps = (HOSTPAGESIZEPF0_S +
			 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
		sge_params->sge_vf_hps =
			((sge_params->sge_host_page_size >> s_hps)
			 & HOSTPAGESIZEPF0_M);

		s_qpp = (QUEUESPERPAGEPF0_S +
			 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
		sge_params->sge_vf_eq_qpp =
			((sge_params->sge_egress_queues_per_page >> s_qpp)
			 & QUEUESPERPAGEPF0_M);
		sge_params->sge_vf_iq_qpp =
			((sge_params->sge_ingress_queues_per_page >> s_qpp)
			 & QUEUESPERPAGEPF0_M);
	}

	return 0;
}

/**
 *	t4vf_get_vpd_params - retrieve device VPD parameters
 *	@adapter: the adapter
 *
 *	Retrieves various device Vital Product Data parameters.  The parameters
 *	are stored in @adapter->params.vpd.
 */
int t4vf_get_vpd_params(struct adapter *adapter)
{
	struct vpd_params *vpd_params = &adapter->params.vpd;
	u32 params[7], vals[7];
	int v;

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
	v = t4vf_query_params(adapter, 1, params, vals);
	if (v)
		return v;
	vpd_params->cclk = vals[0];

	return 0;
}

/**
 *	t4vf_get_dev_params - retrieve device parameters
 *	@adapter: the adapter
 *
 *	Retrieves various device parameters.  The parameters are stored in
 *	@adapter->params.dev.
 */
int t4vf_get_dev_params(struct adapter *adapter)
{
	struct dev_params *dev_params = &adapter->params.dev;
	u32 params[7], vals[7];
	int v;

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV));
	v = t4vf_query_params(adapter, 2, params, vals);
	if (v)
		return v;
	dev_params->fwrev = vals[0];
	dev_params->tprev = vals[1];

	return 0;
}

/**
 *	t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
 *	@adapter: the adapter
 *
 *	Retrieves global RSS mode and parameters with which we have to live
 *	and stores them in the @adapter's RSS parameters.
 */
int t4vf_get_rss_glb_config(struct adapter *adapter)
{
	struct rss_params *rss = &adapter->params.rss;
	struct fw_rss_glb_config_cmd cmd, rpl;
	int v;

	/*
	 * Execute an RSS Global Configuration read command to retrieve
	 * our RSS configuration.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
				      FW_CMD_REQUEST_F |
				      FW_CMD_READ_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	/*
	 * Translate the big-endian RSS Global Configuration into our
	 * cpu-endian format based on the RSS mode.  We also do first level
	 * filtering at this point to weed out modes which don't support
	 * VF Drivers ...
	 */
	rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G(
			be32_to_cpu(rpl.u.manual.mode_pkd));
	switch (rss->mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = be32_to_cpu(
				rpl.u.basicvirtual.synmapen_to_hashtoeplitz);

		rss->u.basicvirtual.synmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0);
		rss->u.basicvirtual.syn4tupenipv6 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0);
		rss->u.basicvirtual.syn2tupenipv6 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0);
		rss->u.basicvirtual.syn4tupenipv4 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0);
		rss->u.basicvirtual.syn2tupenipv4 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0);

		rss->u.basicvirtual.ofdmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0);

		rss->u.basicvirtual.tnlmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0);
		rss->u.basicvirtual.tnlalllookup =
			((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0);

		rss->u.basicvirtual.hashtoeplitz =
			((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0);

		/* we need at least Tunnel Map Enable to be set */
		if (!rss->u.basicvirtual.tnlmapen)
			return -EINVAL;
		break;
	}

	default:
		/* all unknown/unsupported RSS modes result in an error */
		return -EINVAL;
	}

	return 0;
}

/**
 *	t4vf_get_vfres - retrieve VF resource limits
 *	@adapter: the adapter
 *
 *	Retrieves configured resource limits and capabilities for a virtual
 *	function.  The results are stored in @adapter->vfres.
 */
int t4vf_get_vfres(struct adapter *adapter)
{
	struct vf_resources *vfres = &adapter->params.vfres;
	struct fw_pfvf_cmd cmd, rpl;
	int v;
	u32 word;

	/*
	 * Execute PFVF Read command to get VF resource limits; bail out early
	 * with error on command failure.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_READ_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	/*
	 * Extract VF resource limits and return success.
	 */
	word = be32_to_cpu(rpl.niqflint_niq);
	vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
	vfres->niq = FW_PFVF_CMD_NIQ_G(word);

	word = be32_to_cpu(rpl.type_to_neq);
	vfres->neq = FW_PFVF_CMD_NEQ_G(word);
	vfres->pmask = FW_PFVF_CMD_PMASK_G(word);

	word = be32_to_cpu(rpl.tc_to_nexactf);
	vfres->tc = FW_PFVF_CMD_TC_G(word);
	vfres->nvi = FW_PFVF_CMD_NVI_G(word);
	vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);

	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
	vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
	vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
	vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);

	return 0;
}

/**
 *	t4vf_read_rss_vi_config - read a VI's RSS configuration
 *	@adapter: the adapter
 *	@viid: Virtual Interface ID
 *	@config: pointer to host-native VI RSS Configuration buffer
 *
 *	Reads the Virtual Interface's RSS configuration information and
 *	translates it into CPU-native format.
 */
int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
			    union rss_vi_config *config)
{
	struct fw_rss_vi_config_cmd cmd, rpl;
	int v;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_READ_F |
				     FW_RSS_VI_CONFIG_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	switch (adapter->params.rss.mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);

		config->basicvirtual.ip6fourtupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0);
		config->basicvirtual.ip6twotupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0);
		config->basicvirtual.ip4fourtupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0);
		config->basicvirtual.ip4twotupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0);
		config->basicvirtual.udpen =
			((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0);
		config->basicvirtual.defaultq =
			FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word);
		break;
	}

	default:
		return -EINVAL;
	}

	return 0;
}

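/*
 * Illustrative sketch (not part of the driver): the usual pattern is a
 * read-modify-write of the VI RSS configuration using the read routine
 * above together with t4vf_write_rss_vi_config() below.  "adap" and "viid"
 * are assumed to come from the caller.
 *
 *	union rss_vi_config config;
 *	int err;
 *
 *	err = t4vf_read_rss_vi_config(adap, viid, &config);
 *	if (!err) {
 *		config.basicvirtual.udpen = 1;
 *		err = t4vf_write_rss_vi_config(adap, viid, &config);
 *	}
 */
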
/**
 *	t4vf_write_rss_vi_config - write a VI's RSS configuration
 *	@adapter: the adapter
 *	@viid: Virtual Interface ID
 *	@config: pointer to host-native VI RSS Configuration buffer
 *
 *	Write the Virtual Interface's RSS configuration information
 *	(translating it into firmware-native format before writing).
 */
int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
			     union rss_vi_config *config)
{
	struct fw_rss_vi_config_cmd cmd, rpl;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_RSS_VI_CONFIG_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	switch (adapter->params.rss.mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = 0;

		if (config->basicvirtual.ip6fourtupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F;
		if (config->basicvirtual.ip6twotupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F;
		if (config->basicvirtual.ip4fourtupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F;
		if (config->basicvirtual.ip4twotupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F;
		if (config->basicvirtual.udpen)
			word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F;
		word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(
				config->basicvirtual.defaultq);
		cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
		break;
	}

	default:
		return -EINVAL;
	}

	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
}

/**
 *	t4vf_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@viid: Virtual Interface of RSS Table Slice
 *	@start: starting entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the "Response Queue" (Ingress Queue) lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
 *	until the full table range is populated.
 *
 *	The caller must ensure the values in @rspq are in the range 0..1023.
 */
int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
			  int start, int n, const u16 *rspq, int nrspq)
{
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	/*
	 * Initialize firmware command template to write the RSS table.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_RSS_IND_TBL_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		__be32 *qp = &cmd.iq0_to_iq2;
		int nq = min(n, 32);
		int ret;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf) {
				nqbuf--;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) |
					    FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) |
					    FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 *	t4vf_alloc_vi - allocate a virtual interface on a port
 *	@adapter: the adapter
 *	@port_id: physical port associated with the VI
 *
 *	Allocate a new Virtual Interface and bind it to the indicated
 *	physical port.  Return the new Virtual Interface Identifier on
 *	success, or a [negative] error number on failure.
 */
int t4vf_alloc_vi(struct adapter *adapter, int port_id)
{
	struct fw_vi_cmd cmd, rpl;
	int v;

	/*
	 * Execute a VI command to allocate Virtual Interface and return its
	 * VIID.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
					 FW_VI_CMD_ALLOC_F);
	cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id);
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid));
}

/**
 *	t4vf_free_vi -- free a virtual interface
 *	@adapter: the adapter
 *	@viid: the virtual interface identifier
 *
 *	Free a previously allocated Virtual Interface.  Return an error on
 *	failure.
 */
int t4vf_free_vi(struct adapter *adapter, int viid)
{
	struct fw_vi_cmd cmd;

	/*
	 * Execute a VI command to free the Virtual Interface.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
					 FW_VI_CMD_FREE_F);
	cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

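/*
 * Illustrative sketch of the expected VI lifecycle (not part of the
 * driver): a Virtual Interface is allocated against a port, enabled for
 * Rx/Tx while the interface is up, and eventually freed.  "adap" and
 * "port_id" are assumed to come from the caller.
 *
 *	int viid = t4vf_alloc_vi(adap, port_id);
 *
 *	if (viid < 0)
 *		return viid;
 *	t4vf_enable_vi(adap, viid, true, true);
 *	...
 *	t4vf_enable_vi(adap, viid, false, false);
 *	t4vf_free_vi(adap, viid);
 */
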
/**
 *	t4vf_enable_vi - enable/disable a virtual interface
 *	@adapter: the adapter
 *	@viid: the Virtual Interface ID
 *	@rx_en: 1=enable Rx, 0=disable Rx
 *	@tx_en: 1=enable Tx, 0=disable Tx
 *
 *	Enables/disables a virtual interface.
 */
int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
		   bool rx_en, bool tx_en)
{
	struct fw_vi_enable_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_EXEC_F |
				     FW_VI_ENABLE_CMD_VIID_V(viid));
	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
				       FW_VI_ENABLE_CMD_EEN_V(tx_en) |
				       FW_LEN16(cmd));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 *	t4vf_identify_port - identify a VI's port by blinking its LED
 *	@adapter: the adapter
 *	@viid: the Virtual Interface ID
 *	@nblinks: how many times to blink LED at 2.5 Hz
 *
 *	Identifies a VI's port by blinking its LED.
 */
int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
		       unsigned int nblinks)
{
	struct fw_vi_enable_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_EXEC_F |
				     FW_VI_ENABLE_CMD_VIID_V(viid));
	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F |
				       FW_LEN16(cmd));
	cmd.blinkdur = cpu_to_be16(nblinks);
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

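/*
 * Illustrative sketch (not part of the driver): t4vf_set_rxmode() below
 * treats -1 as "no change", so a typical ndo_set_rx_mode()-style caller
 * only passes concrete values for the fields it wants to update.  "adap",
 * "viid" and the chosen flag values are assumptions for the example.
 *
 *	// enable promiscuous and all-multi; leave MTU/broadcast/VLAN as-is
 *	t4vf_set_rxmode(adap, viid, -1, 1, 1, -1, -1, true);
 */
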
/**
 *	t4vf_set_rxmode - set Rx properties of a virtual interface
 *	@adapter: the adapter
 *	@viid: the VI id
 *	@mtu: the new MTU or -1 for no change
 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 *	@vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
 *		-1 no change
 *	@sleep_ok: call is allowed to sleep
 *
 *	Sets Rx properties of a virtual interface.
 */
int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
		    int mtu, int promisc, int all_multi, int bcast, int vlanex,
		    bool sleep_ok)
{
	struct fw_vi_rxmode_cmd cmd;

	/* convert to FW values */
	if (mtu < 0)
		mtu = FW_VI_RXMODE_CMD_MTU_M;
	if (promisc < 0)
		promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
	if (all_multi < 0)
		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
	if (bcast < 0)
		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
	if (vlanex < 0)
		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_VI_RXMODE_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	cmd.mtu_to_vlanexen =
		cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
			    FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
			    FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
			    FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
			    FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
	return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
}

/**
 *	t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
 *	@adapter: the adapter
 *	@viid: the Virtual Interface Identifier
 *	@free: if true any existing filters for this VI id are first removed
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@idx: where to store the index of each allocated filter
 *	@hash: pointer to hash address filter bitmap
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an exact-match filter for each of the supplied addresses and
 *	sets it to the corresponding address.  If @idx is not %NULL it should
 *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.  If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
			unsigned int naddr, const u8 **addr, u16 *idx,
			u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	unsigned nfilters = 0;
	unsigned int rem = naddr;
	struct fw_vi_mac_cmd cmd, rpl;
	unsigned int max_naddr = adapter->params.arch.mps_tcam_size;

	if (naddr > max_naddr)
		return -EINVAL;

	for (offset = 0; offset < naddr; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
					 ? rem
					 : ARRAY_SIZE(cmd.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
					     FW_CMD_REQUEST_F |
					     FW_CMD_WRITE_F |
					     (free ? FW_CMD_EXEC_F : 0) |
					     FW_VI_MAC_CMD_VIID_V(viid));
		cmd.freemacs_to_len16 =
			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
				    FW_CMD_LEN16_V(len16));

		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx = cpu_to_be16(
				FW_VI_MAC_CMD_VALID_F |
				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
					sleep_ok);
		if (ret && ret != -ENOMEM)
			break;

		for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
			u16 index = FW_VI_MAC_CMD_IDX_G(
				be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset+i] =
					(index >= max_naddr
					 ? 0xffff
					 : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/*
	 * If there were no errors or we merely ran out of room in our MAC
	 * address arena, return the number of filters actually written.
	 */
	if (ret == 0 || ret == -ENOMEM)
		ret = nfilters;
	return ret;
}

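/*
 * Illustrative sketch (not part of the driver): programming a small set of
 * secondary unicast addresses, collecting per-address filter indices and
 * falling back to the inexact hash for addresses that don't fit.  "adap",
 * "viid", "macs" and "n" are assumptions for the example.
 *
 *	u16 filt_idx[7];
 *	u64 hash = 0;
 *	int nalloc;
 *
 *	nalloc = t4vf_alloc_mac_filt(adap, viid, false, n, macs,
 *				     filt_idx, &hash, true);
 *	if (nalloc < 0)
 *		return nalloc;
 *	if (hash)
 *		t4vf_set_addr_hash(adap, viid, false, hash, true);
 */
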
/**
 *	t4vf_free_mac_filt - frees exact-match filters of given MAC addresses
 *	@adapter: the adapter
 *	@viid: the VI id
 *	@naddr: the number of MAC addresses to free filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@sleep_ok: call is allowed to sleep
 *
 *	Frees the exact-match filter for each of the supplied addresses.
 *
 *	Returns a negative error number or the number of filters freed.
 */
int t4vf_free_mac_filt(struct adapter *adapter, unsigned int viid,
		       unsigned int naddr, const u8 **addr, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd cmd;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	for (offset = 0; offset < (int)naddr; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) ?
					 rem : ARRAY_SIZE(cmd.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
					     FW_CMD_REQUEST_F |
					     FW_CMD_WRITE_F |
					     FW_CMD_EXEC_V(0) |
					     FW_VI_MAC_CMD_VIID_V(viid));
		cmd.freemacs_to_len16 =
			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
				    FW_CMD_LEN16_V(len16));

		for (i = 0, p = cmd.u.exact; i < (int)fw_naddr; i++, p++) {
			p->valid_to_idx = cpu_to_be16(
				FW_VI_MAC_CMD_VALID_F |
				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &cmd,
					sleep_ok);
		if (ret)
			break;

		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
			u16 index = FW_VI_MAC_CMD_IDX_G(
				be16_to_cpu(p->valid_to_idx));

			if (index < max_naddr)
				nfilters++;
		}

		offset += fw_naddr;
		rem -= fw_naddr;
	}

	if (ret == 0)
		ret = nfilters;
	return ret;
}

/**
 *	t4vf_change_mac - modifies the exact-match filter for a MAC address
 *	@adapter: the adapter
 *	@viid: the Virtual Interface ID
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: if idx < 0, the new MAC allocation should be persistent
 *
 *	Modifies an exact-match filter and sets it to the new MAC address.
 *	Note that in general it is not possible to modify the value of a given
 *	filter so the generic way to modify an address filter is to free the
 *	one being used by the old address value and allocate a new filter for
 *	the new address value.  @idx can be -1 if the address is a new
 *	addition.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.
 */
int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
		    int idx, const u8 *addr, bool persist)
{
	int ret;
	struct fw_vi_mac_cmd cmd, rpl;
	struct fw_vi_mac_exact *p = &cmd.u.exact[0];
	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
					     u.exact[1]), 16);
	unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;

	/*
	 * If this is a new allocation, determine whether it should be
	 * persistent (across a "freemacs" operation) or not.
	 */
	if (idx < 0)
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_VI_MAC_CMD_VIID_V(viid));
	cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
				      FW_VI_MAC_CMD_IDX_V(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (ret == 0) {
		p = &rpl.u.exact[0];
		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}

/**
 *	t4vf_set_addr_hash - program the MAC inexact-match hash filter
 *	@adapter: the adapter
 *	@viid: the Virtual Interface Identifier
 *	@ucast: whether the hash filter should also match unicast addresses
 *	@vec: the value to be written to the hash filter
 *	@sleep_ok: call is allowed to sleep
 *
 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
		       bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd cmd;
	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
					     u.exact[0]), 16);

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_VI_ENABLE_CMD_VIID_V(viid));
	cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
					    FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
					    FW_CMD_LEN16_V(len16));
	cmd.u.hash.hashvec = cpu_to_be64(vec);
	return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
}

/**
 *	t4vf_get_port_stats - collect "port" statistics
 *	@adapter: the adapter
 *	@pidx: the port index
 *	@s: the stats structure to fill
 *
 *	Collect statistics for the "port"'s Virtual Interface.
 */
int t4vf_get_port_stats(struct adapter *adapter, int pidx,
			struct t4vf_port_stats *s)
{
	struct port_info *pi = adap2pinfo(adapter, pidx);
	struct fw_vi_stats_vf fwstats;
	unsigned int rem = VI_VF_NUM_STATS;
	__be64 *fwsp = (__be64 *)&fwstats;

	/*
	 * Grab the Virtual Interface statistics a chunk at a time via mailbox
	 * commands.  We could use a Work Request and get all of them at once
	 * but that's an asynchronous interface which is awkward to use.
	 */
	while (rem) {
		unsigned int ix = VI_VF_NUM_STATS - rem;
		unsigned int nstats = min(6U, rem);
		struct fw_vi_stats_cmd cmd, rpl;
		size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
			      sizeof(struct fw_vi_stats_ctl));
		size_t len16 = DIV_ROUND_UP(len, 16);
		int ret;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) |
					     FW_VI_STATS_CMD_VIID_V(pi->viid) |
					     FW_CMD_REQUEST_F |
					     FW_CMD_READ_F);
		cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
		cmd.u.ctl.nstats_ix =
			cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) |
				    FW_VI_STATS_CMD_NSTATS_V(nstats));
		ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
		if (ret)
			return ret;

		memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);

		rem -= nstats;
		fwsp += nstats;
	}

	/*
	 * Translate firmware statistics into host native statistics.

/**
 * t4vf_iq_free - free an ingress queue and its free lists
 * @adapter: the adapter
 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 * @iqid: ingress queue ID
 * @fl0id: FL0 queue ID or 0xffff if no attached FL0
 * @fl1id: FL1 queue ID or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated free lists, if any.
 */
int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
		 unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F |
					 FW_LEN16(cmd));
	cmd.type_to_iqandstindex =
		cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));

	cmd.iqid = cpu_to_be16(iqid);
	cmd.fl0id = cpu_to_be16(fl0id);
	cmd.fl1id = cpu_to_be16(fl1id);
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_eth_eq_free - free an Ethernet egress queue
 * @adapter: the adapter
 * @eqid: egress queue ID
 *
 * Frees an Ethernet egress queue.
 */
int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
{
	struct fw_eq_eth_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F |
					 FW_LEN16(cmd));
	cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
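
/*
 * Illustrative teardown sketch (not part of the driver): an Ethernet "queue
 * set" is normally released by freeing the egress queue first and then the
 * ingress queue together with its free list, with 0xffff marking an unused
 * free list.  "txq" and "rxq" are hypothetical software queue-state
 * structures holding the firmware context IDs.
 *
 *	t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
 *	t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
 *		     rxq->rspq.cntxt_id, rxq->fl.cntxt_id, 0xffff);
 */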

/**
 * t4vf_link_down_rc_str - return a string for a Link Down Reason Code
 * @link_down_rc: Link Down Reason Code
 *
 * Returns a string representation of the Link Down Reason Code.
 */
static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
{
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};

	if (link_down_rc >= ARRAY_SIZE(reason))
		return "Bad Reason Code";

	return reason[link_down_rc];
}

/**
 * t4vf_handle_get_port_info - process a FW reply message
 * @pi: the port info
 * @cmd: start of the FW Port Information message
 *
 * Processes a GET_PORT_INFO FW reply message.
 */
static void t4vf_handle_get_port_info(struct port_info *pi,
				      const struct fw_port_cmd *cmd)
{
	int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
	struct adapter *adapter = pi->adapter;
	struct link_config *lc = &pi->link_cfg;
	int link_ok, linkdnrc;
	enum fw_port_type port_type;
	enum fw_port_module_type mod_type;
	unsigned int speed, fc, fec;
	fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;

	/* Extract the various fields from the Port Information message. */
	switch (action) {
	case FW_PORT_ACTION_GET_PORT_INFO: {
		u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);

		link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0;
		linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus);
		port_type = FW_PORT_CMD_PTYPE_G(lstatus);
		mod_type = FW_PORT_CMD_MODTYPE_G(lstatus);
		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
		lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));

		/* Unfortunately the format of the Link Status in the old
		 * 16-bit Port Information message isn't the same as the
		 * 16-bit Port Capabilities bitfield used everywhere else ...
		 */
		linkattr = 0;
		if (lstatus & FW_PORT_CMD_RXPAUSE_F)
			linkattr |= FW_PORT_CAP32_FC_RX;
		if (lstatus & FW_PORT_CMD_TXPAUSE_F)
			linkattr |= FW_PORT_CAP32_FC_TX;
		if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
			linkattr |= FW_PORT_CAP32_SPEED_100M;
		if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
			linkattr |= FW_PORT_CAP32_SPEED_1G;
		if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
			linkattr |= FW_PORT_CAP32_SPEED_10G;
		if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
			linkattr |= FW_PORT_CAP32_SPEED_25G;
		if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
			linkattr |= FW_PORT_CAP32_SPEED_40G;
		if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
			linkattr |= FW_PORT_CAP32_SPEED_100G;

		break;
	}

	case FW_PORT_ACTION_GET_PORT_INFO32: {
		u32 lstatus32;

		lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
		link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0;
		linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32);
		port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
		mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32);
		pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
		acaps = be32_to_cpu(cmd->u.info32.acaps32);
		lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
		linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
		break;
	}

	default:
		dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n",
			be32_to_cpu(cmd->action_to_len16));
		return;
	}

	fec = fwcap_to_cc_fec(acaps);
	fc = fwcap_to_cc_pause(linkattr);
	speed = fwcap_to_speed(linkattr);

	if (mod_type != pi->mod_type) {
		/* When a new Transceiver Module is inserted, the Firmware
		 * will examine any Forward Error Correction parameters
		 * present in the Transceiver Module i2c EPROM and determine
		 * the supported and recommended FEC settings from those
		 * based on IEEE 802.3 standards.  We always record the
		 * IEEE 802.3 recommended "automatic" settings.
		 */
		lc->auto_fec = fec;

		/* Some versions of the early T6 Firmware "cheated" when
		 * handling different Transceiver Modules by changing the
		 * underlying Port Type reported to the Host Drivers.  As
		 * such we need to capture whatever Port Type the Firmware
		 * sends us and record it in case it's different from what we
		 * were told earlier.  Unfortunately, since Firmware is
		 * forever, we'll need to keep this code here forever, but in
		 * later T6 Firmware it should just be an assignment of the
		 * same value already recorded.
		 */
		pi->port_type = port_type;

		pi->mod_type = mod_type;
		t4vf_os_portmod_changed(adapter, pi->pidx);
	}

	if (link_ok != lc->link_ok || speed != lc->speed ||
	    fc != lc->fc || fec != lc->fec) {	/* something changed */
		if (!link_ok && lc->link_ok) {
			lc->link_down_rc = linkdnrc;
			dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n",
				 pi->port_id, t4vf_link_down_rc_str(linkdnrc));
		}
		lc->link_ok = link_ok;
		lc->speed = speed;
		lc->fc = fc;
		lc->fec = fec;

		lc->pcaps = pcaps;
		lc->lpacaps = lpacaps;
		lc->acaps = acaps & ADVERT_MASK;

		if (lc->acaps & FW_PORT_CAP32_ANEG) {
			lc->autoneg = AUTONEG_ENABLE;
		} else {
			/* When Autoneg is disabled, user needs to set
			 * single speed.
			 * Similar to cxgb4_ethtool.c: set_link_ksettings
			 */
			lc->acaps = 0;
			lc->speed_caps = fwcap_to_speed(acaps);
			lc->autoneg = AUTONEG_DISABLE;
		}

		t4vf_os_link_changed(adapter, pi->pidx, link_ok);
	}
}

/**
 * t4vf_update_port_info - retrieve and update port information if changed
 * @pi: the port_info
 *
 * We issue a Get Port Information Command to the Firmware and, if
 * successful, we check to see if anything is different from what we
 * last recorded and update things accordingly.
 */
int t4vf_update_port_info(struct port_info *pi)
{
	unsigned int fw_caps = pi->adapter->params.fw_caps_support;
	struct fw_port_cmd port_cmd;
	int ret;

	memset(&port_cmd, 0, sizeof(port_cmd));
	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
					    FW_CMD_REQUEST_F | FW_CMD_READ_F |
					    FW_PORT_CMD_PORTID_V(pi->port_id));
	port_cmd.action_to_len16 = cpu_to_be32(
		FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
				     ? FW_PORT_ACTION_GET_PORT_INFO
				     : FW_PORT_ACTION_GET_PORT_INFO32) |
		FW_LEN16(port_cmd));
	ret = t4vf_wr_mbox(pi->adapter, &port_cmd, sizeof(port_cmd),
			   &port_cmd);
	if (ret)
		return ret;
	t4vf_handle_get_port_info(pi, &port_cmd);
	return 0;
}

/**
 * t4vf_handle_fw_rpl - process a firmware reply message
 * @adapter: the adapter
 * @rpl: start of the firmware message
 *
 * Processes a firmware message, such as link state change messages.
 */
int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
{
	const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
	u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi));

	switch (opcode) {
	case FW_PORT_CMD: {
		/*
		 * Link/module state change message.
		 */
		const struct fw_port_cmd *port_cmd =
			(const struct fw_port_cmd *)rpl;
		int action = FW_PORT_CMD_ACTION_G(
			be32_to_cpu(port_cmd->action_to_len16));
		int port_id, pidx;

		if (action != FW_PORT_ACTION_GET_PORT_INFO &&
		    action != FW_PORT_ACTION_GET_PORT_INFO32) {
			dev_err(adapter->pdev_dev,
				"Unknown firmware PORT reply action %x\n",
				action);
			break;
		}

		port_id = FW_PORT_CMD_PORTID_G(
			be32_to_cpu(port_cmd->op_to_portid));
		for_each_port(adapter, pidx) {
			struct port_info *pi = adap2pinfo(adapter, pidx);

			if (pi->port_id != port_id)
				continue;
			t4vf_handle_get_port_info(pi, port_cmd);
		}
		break;
	}

	default:
		dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
			opcode);
	}
	return 0;
}
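
/*
 * Illustrative sketch (not part of the driver): asynchronous firmware
 * messages arrive on the firmware event queue as CPL_FW6_MSG work requests;
 * a receive handler would hand the embedded 64-bit payload to
 * t4vf_handle_fw_rpl() for decoding.  "rsp" is a hypothetical pointer to the
 * received response descriptor.
 *
 *	const struct cpl_fw6_msg *cpl = (const void *)rsp;
 *
 *	t4vf_handle_fw_rpl(adapter, cpl->data);
 */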

/**
 * t4vf_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Wait for the device to become ready, set safe default parameters in
 * case the firmware can't be reached, and record which chip we're
 * running on along with the architectural parameters that depend on it.
 */
int t4vf_prep_adapter(struct adapter *adapter)
{
	int err;
	unsigned int chipid;

	/* Wait for the device to become ready before proceeding ...
	 */
	err = t4vf_wait_dev_ready(adapter);
	if (err)
		return err;

	/* Default port and clock for debugging in case we can't reach
	 * firmware.
	 */
	adapter->params.nports = 1;
	adapter->params.vfres.pmask = 1;
	adapter->params.vpd.cclk = 50000;

	adapter->params.chip = 0;
	switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
		adapter->params.arch.sge_fl_db = DBPRIO_F;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_CLS_SRAM_L_INSTANCES;
		break;

	case CHELSIO_T5:
		chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		break;

	case CHELSIO_T6:
		chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		break;
	}

	return 0;
}

/**
 * t4vf_get_vf_mac_acl - get the MAC address to be set to the VI of this VF
 * @adapter: the adapter
 * @pf: the PF associated with this VF
 * @naddr: the number of ACL MAC addresses returned in addr
 * @addr: placeholder for the MAC addresses
 *
 * Find the MAC address to be set to the VF's VI.  The requested MAC address
 * is from the host OS via callback in the PF driver.
 */
int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
			unsigned int *naddr, u8 *addr)
{
	struct fw_acl_mac_cmd cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_READ_F);
	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd);
	if (ret)
		return ret;

	if (cmd.nmac < *naddr)
		*naddr = cmd.nmac;

	switch (pf) {
	case 3:
		memcpy(addr, cmd.macaddr3, sizeof(cmd.macaddr3));
		break;
	case 2:
		memcpy(addr, cmd.macaddr2, sizeof(cmd.macaddr2));
		break;
	case 1:
		memcpy(addr, cmd.macaddr1, sizeof(cmd.macaddr1));
		break;
	case 0:
		memcpy(addr, cmd.macaddr0, sizeof(cmd.macaddr0));
		break;
	}

	return ret;
}
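
/*
 * Illustrative sketch (not part of the driver): fetching a host-provisioned
 * MAC address for this VF during probe and falling back to a random address
 * when the PF hasn't supplied one.  "pf" is the PF number learned from the
 * firmware, "netdev" a caller-side net_device; the address helpers named
 * here are generic kernel APIs, assumed for the sketch.
 *
 *	unsigned int naddr = 1;
 *	u8 mac[ETH_ALEN];
 *
 *	if (t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac) == 0 && naddr)
 *		eth_hw_addr_set(netdev, mac);
 *	else
 *		eth_hw_addr_random(netdev);
 */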