1 /* 2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet 3 * driver for Linux. 4 * 5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #include <linux/pci.h> 37 38 #include "t4vf_common.h" 39 #include "t4vf_defs.h" 40 41 #include "../cxgb4/t4_regs.h" 42 #include "../cxgb4/t4_values.h" 43 #include "../cxgb4/t4fw_api.h" 44 45 /* 46 * Wait for the device to become ready (signified by our "who am I" register 47 * returning a value other than all 1's). Return an error if it doesn't 48 * become ready ... 49 */ 50 int t4vf_wait_dev_ready(struct adapter *adapter) 51 { 52 const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI; 53 const u32 notready1 = 0xffffffff; 54 const u32 notready2 = 0xeeeeeeee; 55 u32 val; 56 57 val = t4_read_reg(adapter, whoami); 58 if (val != notready1 && val != notready2) 59 return 0; 60 msleep(500); 61 val = t4_read_reg(adapter, whoami); 62 if (val != notready1 && val != notready2) 63 return 0; 64 else 65 return -EIO; 66 } 67 68 /* 69 * Get the reply to a mailbox command and store it in @rpl in big-endian order 70 * (since the firmware data structures are specified in a big-endian layout). 
71 */ 72 static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size, 73 u32 mbox_data) 74 { 75 for ( ; size; size -= 8, mbox_data += 8) 76 *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data)); 77 } 78 79 /** 80 * t4vf_record_mbox - record a Firmware Mailbox Command/Reply in the log 81 * @adapter: the adapter 82 * @cmd: the Firmware Mailbox Command or Reply 83 * @size: command length in bytes 84 * @access: the time (ms) needed to access the Firmware Mailbox 85 * @execute: the time (ms) the command spent being executed 86 */ 87 static void t4vf_record_mbox(struct adapter *adapter, const __be64 *cmd, 88 int size, int access, int execute) 89 { 90 struct mbox_cmd_log *log = adapter->mbox_log; 91 struct mbox_cmd *entry; 92 int i; 93 94 entry = mbox_cmd_log_entry(log, log->cursor++); 95 if (log->cursor == log->size) 96 log->cursor = 0; 97 98 for (i = 0; i < size / 8; i++) 99 entry->cmd[i] = be64_to_cpu(cmd[i]); 100 while (i < MBOX_LEN / 8) 101 entry->cmd[i++] = 0; 102 entry->timestamp = jiffies; 103 entry->seqno = log->seqno++; 104 entry->access = access; 105 entry->execute = execute; 106 } 107 108 /** 109 * t4vf_wr_mbox_core - send a command to FW through the mailbox 110 * @adapter: the adapter 111 * @cmd: the command to write 112 * @size: command length in bytes 113 * @rpl: where to optionally store the reply 114 * @sleep_ok: if true we may sleep while awaiting command completion 115 * 116 * Sends the given command to FW through the mailbox and waits for the 117 * FW to execute the command. If @rpl is not %NULL it is used to store 118 * the FW's reply to the command. The command and its optional reply 119 * are of the same length. FW can take up to 500 ms to respond. 120 * @sleep_ok determines whether we may sleep while awaiting the response. 121 * If sleeping is allowed we use progressive backoff otherwise we spin. 122 * 123 * The return value is 0 on success or a negative errno on failure. A 124 * failure can happen either because we are not able to execute the 125 * command or FW executes it but signals an error. In the latter case 126 * the return value is the error code indicated by FW (negated). 127 */ 128 int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, 129 void *rpl, bool sleep_ok) 130 { 131 static const int delay[] = { 132 1, 1, 3, 5, 10, 10, 20, 50, 100 133 }; 134 135 u16 access = 0, execute = 0; 136 u32 v, mbox_data; 137 int i, ms, delay_idx, ret; 138 const __be64 *p; 139 u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL; 140 u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi)); 141 __be64 cmd_rpl[MBOX_LEN / 8]; 142 struct mbox_list entry; 143 144 /* In T6, mailbox size is changed to 128 bytes to avoid 145 * invalidating the entire prefetch buffer. 146 */ 147 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) 148 mbox_data = T4VF_MBDATA_BASE_ADDR; 149 else 150 mbox_data = T6VF_MBDATA_BASE_ADDR; 151 152 /* 153 * Commands must be multiples of 16 bytes in length and may not be 154 * larger than the size of the Mailbox Data register array. 155 */ 156 if ((size % 16) != 0 || 157 size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4) 158 return -EINVAL; 159 160 /* Queue ourselves onto the mailbox access list. When our entry is at 161 * the front of the list, we have rights to access the mailbox. So we 162 * wait [for a while] till we're at the front [or bail out with an 163 * EBUSY] ... 
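	 *
	 * As an illustration of the pacing used below (taken straight from
	 * the delay[] table above): with @sleep_ok set, successive waits are
	 * roughly 1, 1, 3, 5, 10, 10, 20, 50, 100, 100, ... ms, with the
	 * last entry repeating until FW_CMD_MAX_TIMEOUT is exceeded; without
	 * @sleep_ok the same schedule is spun with mdelay() instead.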
164 */ 165 spin_lock(&adapter->mbox_lock); 166 list_add_tail(&entry.list, &adapter->mlist.list); 167 spin_unlock(&adapter->mbox_lock); 168 169 delay_idx = 0; 170 ms = delay[0]; 171 172 for (i = 0; ; i += ms) { 173 /* If we've waited too long, return a busy indication. This 174 * really ought to be based on our initial position in the 175 * mailbox access list but this is a start. We very rearely 176 * contend on access to the mailbox ... 177 */ 178 if (i > FW_CMD_MAX_TIMEOUT) { 179 spin_lock(&adapter->mbox_lock); 180 list_del(&entry.list); 181 spin_unlock(&adapter->mbox_lock); 182 ret = -EBUSY; 183 t4vf_record_mbox(adapter, cmd, size, access, ret); 184 return ret; 185 } 186 187 /* If we're at the head, break out and start the mailbox 188 * protocol. 189 */ 190 if (list_first_entry(&adapter->mlist.list, struct mbox_list, 191 list) == &entry) 192 break; 193 194 /* Delay for a bit before checking again ... */ 195 if (sleep_ok) { 196 ms = delay[delay_idx]; /* last element may repeat */ 197 if (delay_idx < ARRAY_SIZE(delay) - 1) 198 delay_idx++; 199 msleep(ms); 200 } else { 201 mdelay(ms); 202 } 203 } 204 205 /* 206 * Loop trying to get ownership of the mailbox. Return an error 207 * if we can't gain ownership. 208 */ 209 v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl)); 210 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) 211 v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl)); 212 if (v != MBOX_OWNER_DRV) { 213 spin_lock(&adapter->mbox_lock); 214 list_del(&entry.list); 215 spin_unlock(&adapter->mbox_lock); 216 ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT; 217 t4vf_record_mbox(adapter, cmd, size, access, ret); 218 return ret; 219 } 220 221 /* 222 * Write the command array into the Mailbox Data register array and 223 * transfer ownership of the mailbox to the firmware. 224 * 225 * For the VFs, the Mailbox Data "registers" are actually backed by 226 * T4's "MA" interface rather than PL Registers (as is the case for 227 * the PFs). Because these are in different coherency domains, the 228 * write to the VF's PL-register-backed Mailbox Control can race in 229 * front of the writes to the MA-backed VF Mailbox Data "registers". 230 * So we need to do a read-back on at least one byte of the VF Mailbox 231 * Data registers before doing the write to the VF Mailbox Control 232 * register. 233 */ 234 if (cmd_op != FW_VI_STATS_CMD) 235 t4vf_record_mbox(adapter, cmd, size, access, 0); 236 for (i = 0, p = cmd; i < size; i += 8) 237 t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); 238 t4_read_reg(adapter, mbox_data); /* flush write */ 239 240 t4_write_reg(adapter, mbox_ctl, 241 MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW)); 242 t4_read_reg(adapter, mbox_ctl); /* flush write */ 243 244 /* 245 * Spin waiting for firmware to acknowledge processing our command. 246 */ 247 delay_idx = 0; 248 ms = delay[0]; 249 250 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { 251 if (sleep_ok) { 252 ms = delay[delay_idx]; 253 if (delay_idx < ARRAY_SIZE(delay) - 1) 254 delay_idx++; 255 msleep(ms); 256 } else 257 mdelay(ms); 258 259 /* 260 * If we're the owner, see if this is the reply we wanted. 261 */ 262 v = t4_read_reg(adapter, mbox_ctl); 263 if (MBOWNER_G(v) == MBOX_OWNER_DRV) { 264 /* 265 * If the Message Valid bit isn't on, revoke ownership 266 * of the mailbox and continue waiting for our reply. 267 */ 268 if ((v & MBMSGVALID_F) == 0) { 269 t4_write_reg(adapter, mbox_ctl, 270 MBOWNER_V(MBOX_OWNER_NONE)); 271 continue; 272 } 273 274 /* 275 * We now have our reply. 
Extract the command return 276 * value, copy the reply back to our caller's buffer 277 * (if specified) and revoke ownership of the mailbox. 278 * We return the (negated) firmware command return 279 * code (this depends on FW_SUCCESS == 0). 280 */ 281 get_mbox_rpl(adapter, cmd_rpl, size, mbox_data); 282 283 /* return value in low-order little-endian word */ 284 v = be64_to_cpu(cmd_rpl[0]); 285 286 if (rpl) { 287 /* request bit in high-order BE word */ 288 WARN_ON((be32_to_cpu(*(const __be32 *)cmd) 289 & FW_CMD_REQUEST_F) == 0); 290 memcpy(rpl, cmd_rpl, size); 291 WARN_ON((be32_to_cpu(*(__be32 *)rpl) 292 & FW_CMD_REQUEST_F) != 0); 293 } 294 t4_write_reg(adapter, mbox_ctl, 295 MBOWNER_V(MBOX_OWNER_NONE)); 296 execute = i + ms; 297 if (cmd_op != FW_VI_STATS_CMD) 298 t4vf_record_mbox(adapter, cmd_rpl, size, access, 299 execute); 300 spin_lock(&adapter->mbox_lock); 301 list_del(&entry.list); 302 spin_unlock(&adapter->mbox_lock); 303 return -FW_CMD_RETVAL_G(v); 304 } 305 } 306 307 /* We timed out. Return the error ... */ 308 ret = -ETIMEDOUT; 309 t4vf_record_mbox(adapter, cmd, size, access, ret); 310 spin_lock(&adapter->mbox_lock); 311 list_del(&entry.list); 312 spin_unlock(&adapter->mbox_lock); 313 return ret; 314 } 315 316 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ 317 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \ 318 FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \ 319 FW_PORT_CAP_ANEG) 320 321 /** 322 * init_link_config - initialize a link's SW state 323 * @lc: structure holding the link state 324 * @caps: link capabilities 325 * 326 * Initializes the SW state maintained for each link, including the link's 327 * capabilities and default speed/flow-control/autonegotiation settings. 328 */ 329 static void init_link_config(struct link_config *lc, unsigned int caps) 330 { 331 lc->supported = caps; 332 lc->lp_advertising = 0; 333 lc->requested_speed = 0; 334 lc->speed = 0; 335 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; 336 if (lc->supported & FW_PORT_CAP_ANEG) { 337 lc->advertising = lc->supported & ADVERT_MASK; 338 lc->autoneg = AUTONEG_ENABLE; 339 lc->requested_fc |= PAUSE_AUTONEG; 340 } else { 341 lc->advertising = 0; 342 lc->autoneg = AUTONEG_DISABLE; 343 } 344 } 345 346 /** 347 * t4vf_port_init - initialize port hardware/software state 348 * @adapter: the adapter 349 * @pidx: the adapter port index 350 */ 351 int t4vf_port_init(struct adapter *adapter, int pidx) 352 { 353 struct port_info *pi = adap2pinfo(adapter, pidx); 354 struct fw_vi_cmd vi_cmd, vi_rpl; 355 struct fw_port_cmd port_cmd, port_rpl; 356 int v; 357 358 /* 359 * Execute a VI Read command to get our Virtual Interface information 360 * like MAC address, etc. 361 */ 362 memset(&vi_cmd, 0, sizeof(vi_cmd)); 363 vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | 364 FW_CMD_REQUEST_F | 365 FW_CMD_READ_F); 366 vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd)); 367 vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid)); 368 v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl); 369 if (v) 370 return v; 371 372 BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd)); 373 pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd)); 374 t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac); 375 376 /* 377 * If we don't have read access to our port information, we're done 378 * now. Otherwise, execute a PORT Read command to get it ... 
379 */ 380 if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT)) 381 return 0; 382 383 memset(&port_cmd, 0, sizeof(port_cmd)); 384 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | 385 FW_CMD_REQUEST_F | 386 FW_CMD_READ_F | 387 FW_PORT_CMD_PORTID_V(pi->port_id)); 388 port_cmd.action_to_len16 = 389 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | 390 FW_LEN16(port_cmd)); 391 v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl); 392 if (v) 393 return v; 394 395 v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); 396 pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ? 397 FW_PORT_CMD_MDIOADDR_G(v) : -1; 398 pi->port_type = FW_PORT_CMD_PTYPE_G(v); 399 pi->mod_type = FW_PORT_MOD_TYPE_NA; 400 401 init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap)); 402 403 return 0; 404 } 405 406 /** 407 * t4vf_fw_reset - issue a reset to FW 408 * @adapter: the adapter 409 * 410 * Issues a reset command to FW. For a Physical Function this would 411 * result in the Firmware resetting all of its state. For a Virtual 412 * Function this just resets the state associated with the VF. 413 */ 414 int t4vf_fw_reset(struct adapter *adapter) 415 { 416 struct fw_reset_cmd cmd; 417 418 memset(&cmd, 0, sizeof(cmd)); 419 cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) | 420 FW_CMD_WRITE_F); 421 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 422 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); 423 } 424 425 /** 426 * t4vf_query_params - query FW or device parameters 427 * @adapter: the adapter 428 * @nparams: the number of parameters 429 * @params: the parameter names 430 * @vals: the parameter values 431 * 432 * Reads the values of firmware or device parameters. Up to 7 parameters 433 * can be queried at once. 434 */ 435 static int t4vf_query_params(struct adapter *adapter, unsigned int nparams, 436 const u32 *params, u32 *vals) 437 { 438 int i, ret; 439 struct fw_params_cmd cmd, rpl; 440 struct fw_params_param *p; 441 size_t len16; 442 443 if (nparams > 7) 444 return -EINVAL; 445 446 memset(&cmd, 0, sizeof(cmd)); 447 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | 448 FW_CMD_REQUEST_F | 449 FW_CMD_READ_F); 450 len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, 451 param[nparams].mnem), 16); 452 cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); 453 for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) 454 p->mnem = htonl(*params++); 455 456 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); 457 if (ret == 0) 458 for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++) 459 *vals++ = be32_to_cpu(p->val); 460 return ret; 461 } 462 463 /** 464 * t4vf_set_params - sets FW or device parameters 465 * @adapter: the adapter 466 * @nparams: the number of parameters 467 * @params: the parameter names 468 * @vals: the parameter values 469 * 470 * Sets the values of firmware or device parameters. Up to 7 parameters 471 * can be specified at once. 
 */
int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
		    const u32 *params, const u32 *vals)
{
	int i;
	struct fw_params_cmd cmd;
	struct fw_params_param *p;
	size_t len16;

	if (nparams > 7)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F);
	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
				      param[nparams]), 16);
	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
		p->mnem = cpu_to_be32(*params++);
		p->val = cpu_to_be32(*vals++);
	}

	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_fl_pkt_align - return the fl packet alignment
 * @adapter: the adapter
 *
 * T4 has a single field to specify the packing and padding boundary.
 * T5 onwards has separate fields for this and hence the alignment for
 * next packet offset is maximum of these two.  And T6 changes the
 * Ingress Padding Boundary Shift, so it's all a mess and it's best
 * if we put this in low-level Common Code ...
 */
int t4vf_fl_pkt_align(struct adapter *adapter)
{
	u32 sge_control, sge_control2;
	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;

	sge_control = adapter->params.sge.sge_control;

	/* T4 uses a single control field to specify both the PCIe Padding
	 * and Packing Boundary.  T5 introduced the ability to specify these
	 * separately.  The actual Ingress Packet Data alignment boundary
	 * within Packed Buffer Mode is the maximum of these two
	 * specifications.  (Note that it makes no real practical sense to
	 * have the Padding Boundary be larger than the Packing Boundary but
	 * you could set the chip up that way and, in fact, legacy T4 code
	 * would end up doing this because it would initialize the Padding
	 * Boundary and leave the Packing Boundary initialized to 0 (16
	 * bytes).)  Padding Boundary values start from 8B on T6, whereas
	 * they start from 32B on T4 and T5.
	 */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		ingpad_shift = INGPADBOUNDARY_SHIFT_X;
	else
		ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;

	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);

	fl_align = ingpadboundary;
	if (!is_t4(adapter->params.chip)) {
		/* T5 has a different interpretation of one of the PCIe
		 * Packing Boundary values.
		 */
		sge_control2 = adapter->params.sge.sge_control2;
		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
			ingpackboundary = 16;
		else
			ingpackboundary = 1 << (ingpackboundary +
						INGPACKBOUNDARY_SHIFT_X);

		fl_align = max(ingpadboundary, ingpackboundary);
	}
	return fl_align;
}
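
/*
 * Illustrative example (hypothetical register field values, not read from
 * any particular adapter): on a T5 with an Ingress Padding Boundary field
 * of 0 and an Ingress Packing Boundary field of 1, the computation above
 * gives
 *
 *	ingpadboundary  = 1 << (0 + INGPADBOUNDARY_SHIFT_X)  = 32 bytes
 *	ingpackboundary = 1 << (1 + INGPACKBOUNDARY_SHIFT_X) = 64 bytes
 *	fl_align        = max(32, 64)                        = 64 bytes
 *
 * so Free List buffers would need to be aligned on a 64-byte boundary.
 */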

/**
 * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
 * @adapter: the adapter
 * @qid: the Queue ID
 * @qtype: the Ingress or Egress type for @qid
 * @pbar2_qoffset: BAR2 Queue Offset
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 SGE Queue Registers information associated with the
 * indicated Absolute Queue ID.  These are passed back in return value
 * pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queues
 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 * This may return an error which indicates that BAR2 SGE Queue
 * registers aren't available.  If an error is not returned, then the
 * following values are returned:
 *
 *   *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *   *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 * require the "Inferred Queue ID" ability may be used.  E.g. the
 * Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 * then these "Inferred Queue ID" registers may not be used.
 */
int t4vf_bar2_sge_qregs(struct adapter *adapter,
			unsigned int qid,
			enum t4_bar2_qtype qtype,
			u64 *pbar2_qoffset,
			unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers.
	 */
	if (is_t4(adapter->params.chip))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.sge_vf_hps + 10;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.sge_vf_eq_qpp
		     : adapter->params.sge.sge_vf_iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes
	 * to the BAR2 Queue ID Offset within the BAR2 Page (and we need to
	 * use a BAR2 Queue ID of 0 for those writes).  Otherwise, we'll
	 * simply write to the first BAR2 SGE Queue Area within the BAR2 Page
	 * with the BAR2 Queue ID and the hardware will infer the Absolute
	 * Queue ID from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}

unsigned int t4vf_get_pf_from_vf(struct adapter *adapter)
{
	u32 whoami;

	whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
	return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami));
}
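
/*
 * Illustrative sketch (not part of the driver): a caller that wants a BAR2
 * doorbell address for an Egress Queue could combine the values returned by
 * t4vf_bar2_sge_qregs() with the adapter's mapped BAR2 region roughly as
 * follows (the txq/cntxt_id/bar2 names mirror the rest of the VF driver but
 * are assumptions here):
 *
 *	u64 bar2_qoffset;
 *	unsigned int bar2_qid;
 *	int ret;
 *
 *	ret = t4vf_bar2_sge_qregs(adapter, txq->q.cntxt_id,
 *				  T4_BAR2_QTYPE_EGRESS,
 *				  &bar2_qoffset, &bar2_qid);
 *	if (!ret)
 *		db = adapter->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL;
 *
 * A returned bar2_qid of 0 means the doorbell address alone identifies the
 * queue; otherwise bar2_qid must be written in the doorbell's QID field.
 */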

/**
 * t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
 * @adapter: the adapter
 *
 * Retrieves various core SGE parameters in the form of hardware SGE
 * register values.  The caller is responsible for decoding these as
 * needed.  The SGE parameters are stored in @adapter->params.sge.
 */
int t4vf_get_sge_params(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	u32 params[7], vals[7];
	int v;

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
	params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
	params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
	params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
	params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
	params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
	v = t4vf_query_params(adapter, 7, params, vals);
	if (v)
		return v;
	sge_params->sge_control = vals[0];
	sge_params->sge_host_page_size = vals[1];
	sge_params->sge_fl_buffer_size[0] = vals[2];
	sge_params->sge_fl_buffer_size[1] = vals[3];
	sge_params->sge_timer_value_0_and_1 = vals[4];
	sge_params->sge_timer_value_2_and_3 = vals[5];
	sge_params->sge_timer_value_4_and_5 = vals[6];

	/* T4 uses a single control field to specify both the PCIe Padding
	 * and Packing Boundary.  T5 introduced the ability to specify these
	 * separately with the Padding Boundary in SGE_CONTROL and the
	 * Packing Boundary in SGE_CONTROL2.  So for T5 and later we need to
	 * grab SGE_CONTROL2 in order to determine how ingress packet data
	 * will be laid out in Packed Buffer Mode.  Unfortunately, older
	 * versions of the firmware won't let us retrieve SGE_CONTROL2 so if
	 * we get a failure grabbing it we throw an error since we can't
	 * figure out the right value.
	 */
	if (!is_t4(adapter->params.chip)) {
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A));
		v = t4vf_query_params(adapter, 1, params, vals);
		if (v != FW_SUCCESS) {
			dev_err(adapter->pdev_dev,
				"Unable to get SGE Control2; "
				"probably old firmware.\n");
			return v;
		}
		sge_params->sge_control2 = vals[0];
	}

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
	v = t4vf_query_params(adapter, 2, params, vals);
	if (v)
		return v;
	sge_params->sge_ingress_rx_threshold = vals[0];
	sge_params->sge_congestion_control = vals[1];

	/* For T5 and later we want to use the new BAR2 Doorbells.
	 * Unfortunately, older firmware didn't allow these registers to be
	 * read.
	 */
	if (!is_t4(adapter->params.chip)) {
		unsigned int pf, s_hps, s_qpp;

		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(
				     SGE_EGRESS_QUEUES_PER_PAGE_VF_A));
		params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(
				     SGE_INGRESS_QUEUES_PER_PAGE_VF_A));
		v = t4vf_query_params(adapter, 2, params, vals);
		if (v != FW_SUCCESS) {
			dev_warn(adapter->pdev_dev,
				 "Unable to get VF SGE Queues/Page; "
				 "probably old firmware.\n");
			return v;
		}
		sge_params->sge_egress_queues_per_page = vals[0];
		sge_params->sge_ingress_queues_per_page = vals[1];

		/* We need the Queues/Page for our VF.  This is based on the
		 * PF from which we're instantiated and is indexed in the
		 * register we just read.  Do it once here so other code in
		 * the driver can just use it.
		 */
		pf = t4vf_get_pf_from_vf(adapter);
		s_hps = (HOSTPAGESIZEPF0_S +
			 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
		sge_params->sge_vf_hps =
			((sge_params->sge_host_page_size >> s_hps)
			 & HOSTPAGESIZEPF0_M);

		s_qpp = (QUEUESPERPAGEPF0_S +
			 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
		sge_params->sge_vf_eq_qpp =
			((sge_params->sge_egress_queues_per_page >> s_qpp)
			 & QUEUESPERPAGEPF0_M);
		sge_params->sge_vf_iq_qpp =
			((sge_params->sge_ingress_queues_per_page >> s_qpp)
			 & QUEUESPERPAGEPF0_M);
	}

	return 0;
}

/**
 * t4vf_get_vpd_params - retrieve device VPD parameters
 * @adapter: the adapter
 *
 * Retrieves various device Vital Product Data parameters.  The parameters
 * are stored in @adapter->params.vpd.
 */
int t4vf_get_vpd_params(struct adapter *adapter)
{
	struct vpd_params *vpd_params = &adapter->params.vpd;
	u32 params[7], vals[7];
	int v;

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
	v = t4vf_query_params(adapter, 1, params, vals);
	if (v)
		return v;
	vpd_params->cclk = vals[0];

	return 0;
}

/**
 * t4vf_get_dev_params - retrieve device parameters
 * @adapter: the adapter
 *
 * Retrieves various device parameters.  The parameters are stored in
 * @adapter->params.dev.
 */
int t4vf_get_dev_params(struct adapter *adapter)
{
	struct dev_params *dev_params = &adapter->params.dev;
	u32 params[7], vals[7];
	int v;

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV));
	v = t4vf_query_params(adapter, 2, params, vals);
	if (v)
		return v;
	dev_params->fwrev = vals[0];
	dev_params->tprev = vals[1];

	return 0;
}
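
/*
 * Illustrative note (not part of the driver): the firmware revision stored
 * above is a packed version word.  A hypothetical consumer could decode it
 * with the FW_HDR_FW_VER_*_G macros that the firmware API headers are
 * assumed to provide, e.g.
 *
 *	u32 fwrev = adapter->params.dev.fwrev;
 *
 *	pr_info("firmware %u.%u.%u.%u\n",
 *		FW_HDR_FW_VER_MAJOR_G(fwrev), FW_HDR_FW_VER_MINOR_G(fwrev),
 *		FW_HDR_FW_VER_MICRO_G(fwrev), FW_HDR_FW_VER_BUILD_G(fwrev));
 */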

/**
 * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
 * @adapter: the adapter
 *
 * Retrieves global RSS mode and parameters with which we have to live
 * and stores them in the @adapter's RSS parameters.
 */
int t4vf_get_rss_glb_config(struct adapter *adapter)
{
	struct rss_params *rss = &adapter->params.rss;
	struct fw_rss_glb_config_cmd cmd, rpl;
	int v;

	/*
	 * Execute an RSS Global Configuration read command to retrieve
	 * our RSS configuration.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
				      FW_CMD_REQUEST_F |
				      FW_CMD_READ_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	/*
	 * Translate the big-endian RSS Global Configuration into our
	 * cpu-endian format based on the RSS mode.  We also do first level
	 * filtering at this point to weed out modes which don't support
	 * VF Drivers ...
	 */
	rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G(
			be32_to_cpu(rpl.u.manual.mode_pkd));
	switch (rss->mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = be32_to_cpu(
				rpl.u.basicvirtual.synmapen_to_hashtoeplitz);

		rss->u.basicvirtual.synmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0);
		rss->u.basicvirtual.syn4tupenipv6 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0);
		rss->u.basicvirtual.syn2tupenipv6 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0);
		rss->u.basicvirtual.syn4tupenipv4 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0);
		rss->u.basicvirtual.syn2tupenipv4 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0);

		rss->u.basicvirtual.ofdmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0);

		rss->u.basicvirtual.tnlmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0);
		rss->u.basicvirtual.tnlalllookup =
			((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0);

		rss->u.basicvirtual.hashtoeplitz =
			((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0);

		/* we need at least Tunnel Map Enable to be set */
		if (!rss->u.basicvirtual.tnlmapen)
			return -EINVAL;
		break;
	}

	default:
		/* all unknown/unsupported RSS modes result in an error */
		return -EINVAL;
	}

	return 0;
}

/**
 * t4vf_get_vfres - retrieve VF resource limits
 * @adapter: the adapter
 *
 * Retrieves configured resource limits and capabilities for a virtual
 * function.  The results are stored in @adapter->vfres.
 */
int t4vf_get_vfres(struct adapter *adapter)
{
	struct vf_resources *vfres = &adapter->params.vfres;
	struct fw_pfvf_cmd cmd, rpl;
	int v;
	u32 word;

	/*
	 * Execute PFVF Read command to get VF resource limits; bail out early
	 * with error on command failure.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_READ_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	/*
	 * Extract VF resource limits and return success.
925 */ 926 word = be32_to_cpu(rpl.niqflint_niq); 927 vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word); 928 vfres->niq = FW_PFVF_CMD_NIQ_G(word); 929 930 word = be32_to_cpu(rpl.type_to_neq); 931 vfres->neq = FW_PFVF_CMD_NEQ_G(word); 932 vfres->pmask = FW_PFVF_CMD_PMASK_G(word); 933 934 word = be32_to_cpu(rpl.tc_to_nexactf); 935 vfres->tc = FW_PFVF_CMD_TC_G(word); 936 vfres->nvi = FW_PFVF_CMD_NVI_G(word); 937 vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word); 938 939 word = be32_to_cpu(rpl.r_caps_to_nethctrl); 940 vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word); 941 vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word); 942 vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word); 943 944 return 0; 945 } 946 947 /** 948 * t4vf_read_rss_vi_config - read a VI's RSS configuration 949 * @adapter: the adapter 950 * @viid: Virtual Interface ID 951 * @config: pointer to host-native VI RSS Configuration buffer 952 * 953 * Reads the Virtual Interface's RSS configuration information and 954 * translates it into CPU-native format. 955 */ 956 int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid, 957 union rss_vi_config *config) 958 { 959 struct fw_rss_vi_config_cmd cmd, rpl; 960 int v; 961 962 memset(&cmd, 0, sizeof(cmd)); 963 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | 964 FW_CMD_REQUEST_F | 965 FW_CMD_READ_F | 966 FW_RSS_VI_CONFIG_CMD_VIID(viid)); 967 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 968 v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); 969 if (v) 970 return v; 971 972 switch (adapter->params.rss.mode) { 973 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { 974 u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen); 975 976 config->basicvirtual.ip6fourtupen = 977 ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0); 978 config->basicvirtual.ip6twotupen = 979 ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0); 980 config->basicvirtual.ip4fourtupen = 981 ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0); 982 config->basicvirtual.ip4twotupen = 983 ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0); 984 config->basicvirtual.udpen = 985 ((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0); 986 config->basicvirtual.defaultq = 987 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word); 988 break; 989 } 990 991 default: 992 return -EINVAL; 993 } 994 995 return 0; 996 } 997 998 /** 999 * t4vf_write_rss_vi_config - write a VI's RSS configuration 1000 * @adapter: the adapter 1001 * @viid: Virtual Interface ID 1002 * @config: pointer to host-native VI RSS Configuration buffer 1003 * 1004 * Write the Virtual Interface's RSS configuration information 1005 * (translating it into firmware-native format before writing). 
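 *
 * For example (an illustrative sketch, not code used by this driver), a
 * caller wanting to enable UDP hashing on a VI while preserving its other
 * settings could read-modify-write the configuration:
 *
 *	union rss_vi_config config;
 *	int err;
 *
 *	err = t4vf_read_rss_vi_config(adapter, pi->viid, &config);
 *	if (!err) {
 *		config.basicvirtual.udpen = 1;
 *		err = t4vf_write_rss_vi_config(adapter, pi->viid, &config);
 *	}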
1006 */ 1007 int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid, 1008 union rss_vi_config *config) 1009 { 1010 struct fw_rss_vi_config_cmd cmd, rpl; 1011 1012 memset(&cmd, 0, sizeof(cmd)); 1013 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | 1014 FW_CMD_REQUEST_F | 1015 FW_CMD_WRITE_F | 1016 FW_RSS_VI_CONFIG_CMD_VIID(viid)); 1017 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 1018 switch (adapter->params.rss.mode) { 1019 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { 1020 u32 word = 0; 1021 1022 if (config->basicvirtual.ip6fourtupen) 1023 word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F; 1024 if (config->basicvirtual.ip6twotupen) 1025 word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F; 1026 if (config->basicvirtual.ip4fourtupen) 1027 word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F; 1028 if (config->basicvirtual.ip4twotupen) 1029 word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F; 1030 if (config->basicvirtual.udpen) 1031 word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F; 1032 word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V( 1033 config->basicvirtual.defaultq); 1034 cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word); 1035 break; 1036 } 1037 1038 default: 1039 return -EINVAL; 1040 } 1041 1042 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); 1043 } 1044 1045 /** 1046 * t4vf_config_rss_range - configure a portion of the RSS mapping table 1047 * @adapter: the adapter 1048 * @viid: Virtual Interface of RSS Table Slice 1049 * @start: starting entry in the table to write 1050 * @n: how many table entries to write 1051 * @rspq: values for the "Response Queue" (Ingress Queue) lookup table 1052 * @nrspq: number of values in @rspq 1053 * 1054 * Programs the selected part of the VI's RSS mapping table with the 1055 * provided values. If @nrspq < @n the supplied values are used repeatedly 1056 * until the full table range is populated. 1057 * 1058 * The caller must ensure the values in @rspq are in the range 0..1023. 1059 */ 1060 int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid, 1061 int start, int n, const u16 *rspq, int nrspq) 1062 { 1063 const u16 *rsp = rspq; 1064 const u16 *rsp_end = rspq+nrspq; 1065 struct fw_rss_ind_tbl_cmd cmd; 1066 1067 /* 1068 * Initialize firmware command template to write the RSS table. 1069 */ 1070 memset(&cmd, 0, sizeof(cmd)); 1071 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) | 1072 FW_CMD_REQUEST_F | 1073 FW_CMD_WRITE_F | 1074 FW_RSS_IND_TBL_CMD_VIID_V(viid)); 1075 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 1076 1077 /* 1078 * Each firmware RSS command can accommodate up to 32 RSS Ingress 1079 * Queue Identifiers. These Ingress Queue IDs are packed three to 1080 * a 32-bit word as 10-bit values with the upper remaining 2 bits 1081 * reserved. 1082 */ 1083 while (n > 0) { 1084 __be32 *qp = &cmd.iq0_to_iq2; 1085 int nq = min(n, 32); 1086 int ret; 1087 1088 /* 1089 * Set up the firmware RSS command header to send the next 1090 * "nq" Ingress Queue IDs to the firmware. 1091 */ 1092 cmd.niqid = cpu_to_be16(nq); 1093 cmd.startidx = cpu_to_be16(start); 1094 1095 /* 1096 * "nq" more done for the start of the next loop. 1097 */ 1098 start += nq; 1099 n -= nq; 1100 1101 /* 1102 * While there are still Ingress Queue IDs to stuff into the 1103 * current firmware RSS command, retrieve them from the 1104 * Ingress Queue ID array and insert them into the command. 
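		 *
		 * Worked example (illustrative values only): with nq = 7 and
		 * a two-entry @rspq = { A, B }, the loop below emits the
		 * 3-tuples (A, B, A), (B, A, B) and (A, 0, 0), wrapping
		 * around @rspq as needed and zero-filling the unused slots
		 * of the final word.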
1105 */ 1106 while (nq > 0) { 1107 /* 1108 * Grab up to the next 3 Ingress Queue IDs (wrapping 1109 * around the Ingress Queue ID array if necessary) and 1110 * insert them into the firmware RSS command at the 1111 * current 3-tuple position within the commad. 1112 */ 1113 u16 qbuf[3]; 1114 u16 *qbp = qbuf; 1115 int nqbuf = min(3, nq); 1116 1117 nq -= nqbuf; 1118 qbuf[0] = qbuf[1] = qbuf[2] = 0; 1119 while (nqbuf) { 1120 nqbuf--; 1121 *qbp++ = *rsp++; 1122 if (rsp >= rsp_end) 1123 rsp = rspq; 1124 } 1125 *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) | 1126 FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) | 1127 FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2])); 1128 } 1129 1130 /* 1131 * Send this portion of the RRS table update to the firmware; 1132 * bail out on any errors. 1133 */ 1134 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); 1135 if (ret) 1136 return ret; 1137 } 1138 return 0; 1139 } 1140 1141 /** 1142 * t4vf_alloc_vi - allocate a virtual interface on a port 1143 * @adapter: the adapter 1144 * @port_id: physical port associated with the VI 1145 * 1146 * Allocate a new Virtual Interface and bind it to the indicated 1147 * physical port. Return the new Virtual Interface Identifier on 1148 * success, or a [negative] error number on failure. 1149 */ 1150 int t4vf_alloc_vi(struct adapter *adapter, int port_id) 1151 { 1152 struct fw_vi_cmd cmd, rpl; 1153 int v; 1154 1155 /* 1156 * Execute a VI command to allocate Virtual Interface and return its 1157 * VIID. 1158 */ 1159 memset(&cmd, 0, sizeof(cmd)); 1160 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | 1161 FW_CMD_REQUEST_F | 1162 FW_CMD_WRITE_F | 1163 FW_CMD_EXEC_F); 1164 cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | 1165 FW_VI_CMD_ALLOC_F); 1166 cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id); 1167 v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); 1168 if (v) 1169 return v; 1170 1171 return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid)); 1172 } 1173 1174 /** 1175 * t4vf_free_vi -- free a virtual interface 1176 * @adapter: the adapter 1177 * @viid: the virtual interface identifier 1178 * 1179 * Free a previously allocated Virtual Interface. Return an error on 1180 * failure. 1181 */ 1182 int t4vf_free_vi(struct adapter *adapter, int viid) 1183 { 1184 struct fw_vi_cmd cmd; 1185 1186 /* 1187 * Execute a VI command to free the Virtual Interface. 1188 */ 1189 memset(&cmd, 0, sizeof(cmd)); 1190 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | 1191 FW_CMD_REQUEST_F | 1192 FW_CMD_EXEC_F); 1193 cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | 1194 FW_VI_CMD_FREE_F); 1195 cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid)); 1196 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); 1197 } 1198 1199 /** 1200 * t4vf_enable_vi - enable/disable a virtual interface 1201 * @adapter: the adapter 1202 * @viid: the Virtual Interface ID 1203 * @rx_en: 1=enable Rx, 0=disable Rx 1204 * @tx_en: 1=enable Tx, 0=disable Tx 1205 * 1206 * Enables/disables a virtual interface. 
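 *
 * For example (illustrative only), bringing a port up typically enables
 * both directions on its Virtual Interface:
 *
 *	t4vf_enable_vi(adapter, pi->viid, true, true);
 *
 * and passing false for both quiesces it again.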
1207 */ 1208 int t4vf_enable_vi(struct adapter *adapter, unsigned int viid, 1209 bool rx_en, bool tx_en) 1210 { 1211 struct fw_vi_enable_cmd cmd; 1212 1213 memset(&cmd, 0, sizeof(cmd)); 1214 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | 1215 FW_CMD_REQUEST_F | 1216 FW_CMD_EXEC_F | 1217 FW_VI_ENABLE_CMD_VIID_V(viid)); 1218 cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) | 1219 FW_VI_ENABLE_CMD_EEN_V(tx_en) | 1220 FW_LEN16(cmd)); 1221 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); 1222 } 1223 1224 /** 1225 * t4vf_identify_port - identify a VI's port by blinking its LED 1226 * @adapter: the adapter 1227 * @viid: the Virtual Interface ID 1228 * @nblinks: how many times to blink LED at 2.5 Hz 1229 * 1230 * Identifies a VI's port by blinking its LED. 1231 */ 1232 int t4vf_identify_port(struct adapter *adapter, unsigned int viid, 1233 unsigned int nblinks) 1234 { 1235 struct fw_vi_enable_cmd cmd; 1236 1237 memset(&cmd, 0, sizeof(cmd)); 1238 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | 1239 FW_CMD_REQUEST_F | 1240 FW_CMD_EXEC_F | 1241 FW_VI_ENABLE_CMD_VIID_V(viid)); 1242 cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | 1243 FW_LEN16(cmd)); 1244 cmd.blinkdur = cpu_to_be16(nblinks); 1245 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); 1246 } 1247 1248 /** 1249 * t4vf_set_rxmode - set Rx properties of a virtual interface 1250 * @adapter: the adapter 1251 * @viid: the VI id 1252 * @mtu: the new MTU or -1 for no change 1253 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change 1254 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change 1255 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change 1256 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it, 1257 * -1 no change 1258 * 1259 * Sets Rx properties of a virtual interface. 
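 *
 * For example (an illustrative sketch, not a call made here), turning on
 * promiscuous mode while leaving the MTU and all other Rx properties
 * untouched would be:
 *
 *	t4vf_set_rxmode(adapter, pi->viid, -1, 1, -1, -1, -1, true);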
1260 */ 1261 int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid, 1262 int mtu, int promisc, int all_multi, int bcast, int vlanex, 1263 bool sleep_ok) 1264 { 1265 struct fw_vi_rxmode_cmd cmd; 1266 1267 /* convert to FW values */ 1268 if (mtu < 0) 1269 mtu = FW_VI_RXMODE_CMD_MTU_M; 1270 if (promisc < 0) 1271 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M; 1272 if (all_multi < 0) 1273 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M; 1274 if (bcast < 0) 1275 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M; 1276 if (vlanex < 0) 1277 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M; 1278 1279 memset(&cmd, 0, sizeof(cmd)); 1280 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | 1281 FW_CMD_REQUEST_F | 1282 FW_CMD_WRITE_F | 1283 FW_VI_RXMODE_CMD_VIID_V(viid)); 1284 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 1285 cmd.mtu_to_vlanexen = 1286 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) | 1287 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) | 1288 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) | 1289 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) | 1290 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex)); 1291 return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); 1292 } 1293 1294 /** 1295 * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses 1296 * @adapter: the adapter 1297 * @viid: the Virtual Interface Identifier 1298 * @free: if true any existing filters for this VI id are first removed 1299 * @naddr: the number of MAC addresses to allocate filters for (up to 7) 1300 * @addr: the MAC address(es) 1301 * @idx: where to store the index of each allocated filter 1302 * @hash: pointer to hash address filter bitmap 1303 * @sleep_ok: call is allowed to sleep 1304 * 1305 * Allocates an exact-match filter for each of the supplied addresses and 1306 * sets it to the corresponding address. If @idx is not %NULL it should 1307 * have at least @naddr entries, each of which will be set to the index of 1308 * the filter allocated for the corresponding MAC address. If a filter 1309 * could not be allocated for an address its index is set to 0xffff. 1310 * If @hash is not %NULL addresses that fail to allocate an exact filter 1311 * are hashed and update the hash filter bitmap pointed at by @hash. 1312 * 1313 * Returns a negative error number or the number of filters allocated. 1314 */ 1315 int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, 1316 unsigned int naddr, const u8 **addr, u16 *idx, 1317 u64 *hash, bool sleep_ok) 1318 { 1319 int offset, ret = 0; 1320 unsigned nfilters = 0; 1321 unsigned int rem = naddr; 1322 struct fw_vi_mac_cmd cmd, rpl; 1323 unsigned int max_naddr = adapter->params.arch.mps_tcam_size; 1324 1325 if (naddr > max_naddr) 1326 return -EINVAL; 1327 1328 for (offset = 0; offset < naddr; /**/) { 1329 unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) 1330 ? rem 1331 : ARRAY_SIZE(cmd.u.exact)); 1332 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 1333 u.exact[fw_naddr]), 16); 1334 struct fw_vi_mac_exact *p; 1335 int i; 1336 1337 memset(&cmd, 0, sizeof(cmd)); 1338 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | 1339 FW_CMD_REQUEST_F | 1340 FW_CMD_WRITE_F | 1341 (free ? 
FW_CMD_EXEC_F : 0) | 1342 FW_VI_MAC_CMD_VIID_V(viid)); 1343 cmd.freemacs_to_len16 = 1344 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) | 1345 FW_CMD_LEN16_V(len16)); 1346 1347 for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) { 1348 p->valid_to_idx = cpu_to_be16( 1349 FW_VI_MAC_CMD_VALID_F | 1350 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC)); 1351 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); 1352 } 1353 1354 1355 ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, 1356 sleep_ok); 1357 if (ret && ret != -ENOMEM) 1358 break; 1359 1360 for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) { 1361 u16 index = FW_VI_MAC_CMD_IDX_G( 1362 be16_to_cpu(p->valid_to_idx)); 1363 1364 if (idx) 1365 idx[offset+i] = 1366 (index >= max_naddr 1367 ? 0xffff 1368 : index); 1369 if (index < max_naddr) 1370 nfilters++; 1371 else if (hash) 1372 *hash |= (1ULL << hash_mac_addr(addr[offset+i])); 1373 } 1374 1375 free = false; 1376 offset += fw_naddr; 1377 rem -= fw_naddr; 1378 } 1379 1380 /* 1381 * If there were no errors or we merely ran out of room in our MAC 1382 * address arena, return the number of filters actually written. 1383 */ 1384 if (ret == 0 || ret == -ENOMEM) 1385 ret = nfilters; 1386 return ret; 1387 } 1388 1389 /** 1390 * t4vf_free_mac_filt - frees exact-match filters of given MAC addresses 1391 * @adapter: the adapter 1392 * @viid: the VI id 1393 * @naddr: the number of MAC addresses to allocate filters for (up to 7) 1394 * @addr: the MAC address(es) 1395 * @sleep_ok: call is allowed to sleep 1396 * 1397 * Frees the exact-match filter for each of the supplied addresses 1398 * 1399 * Returns a negative error number or the number of filters freed. 1400 */ 1401 int t4vf_free_mac_filt(struct adapter *adapter, unsigned int viid, 1402 unsigned int naddr, const u8 **addr, bool sleep_ok) 1403 { 1404 int offset, ret = 0; 1405 struct fw_vi_mac_cmd cmd; 1406 unsigned int nfilters = 0; 1407 unsigned int max_naddr = adapter->params.arch.mps_tcam_size; 1408 unsigned int rem = naddr; 1409 1410 if (naddr > max_naddr) 1411 return -EINVAL; 1412 1413 for (offset = 0; offset < (int)naddr ; /**/) { 1414 unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) ? 
1415 rem : ARRAY_SIZE(cmd.u.exact)); 1416 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 1417 u.exact[fw_naddr]), 16); 1418 struct fw_vi_mac_exact *p; 1419 int i; 1420 1421 memset(&cmd, 0, sizeof(cmd)); 1422 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | 1423 FW_CMD_REQUEST_F | 1424 FW_CMD_WRITE_F | 1425 FW_CMD_EXEC_V(0) | 1426 FW_VI_MAC_CMD_VIID_V(viid)); 1427 cmd.freemacs_to_len16 = 1428 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) | 1429 FW_CMD_LEN16_V(len16)); 1430 1431 for (i = 0, p = cmd.u.exact; i < (int)fw_naddr; i++, p++) { 1432 p->valid_to_idx = cpu_to_be16( 1433 FW_VI_MAC_CMD_VALID_F | 1434 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE)); 1435 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); 1436 } 1437 1438 ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &cmd, 1439 sleep_ok); 1440 if (ret) 1441 break; 1442 1443 for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) { 1444 u16 index = FW_VI_MAC_CMD_IDX_G( 1445 be16_to_cpu(p->valid_to_idx)); 1446 1447 if (index < max_naddr) 1448 nfilters++; 1449 } 1450 1451 offset += fw_naddr; 1452 rem -= fw_naddr; 1453 } 1454 1455 if (ret == 0) 1456 ret = nfilters; 1457 return ret; 1458 } 1459 1460 /** 1461 * t4vf_change_mac - modifies the exact-match filter for a MAC address 1462 * @adapter: the adapter 1463 * @viid: the Virtual Interface ID 1464 * @idx: index of existing filter for old value of MAC address, or -1 1465 * @addr: the new MAC address value 1466 * @persist: if idx < 0, the new MAC allocation should be persistent 1467 * 1468 * Modifies an exact-match filter and sets it to the new MAC address. 1469 * Note that in general it is not possible to modify the value of a given 1470 * filter so the generic way to modify an address filter is to free the 1471 * one being used by the old address value and allocate a new filter for 1472 * the new address value. @idx can be -1 if the address is a new 1473 * addition. 1474 * 1475 * Returns a negative error number or the index of the filter with the new 1476 * MAC value. 1477 */ 1478 int t4vf_change_mac(struct adapter *adapter, unsigned int viid, 1479 int idx, const u8 *addr, bool persist) 1480 { 1481 int ret; 1482 struct fw_vi_mac_cmd cmd, rpl; 1483 struct fw_vi_mac_exact *p = &cmd.u.exact[0]; 1484 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 1485 u.exact[1]), 16); 1486 unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size; 1487 1488 /* 1489 * If this is a new allocation, determine whether it should be 1490 * persistent (across a "freemacs" operation) or not. 1491 */ 1492 if (idx < 0) 1493 idx = persist ? 
FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; 1494 1495 memset(&cmd, 0, sizeof(cmd)); 1496 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | 1497 FW_CMD_REQUEST_F | 1498 FW_CMD_WRITE_F | 1499 FW_VI_MAC_CMD_VIID_V(viid)); 1500 cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); 1501 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F | 1502 FW_VI_MAC_CMD_IDX_V(idx)); 1503 memcpy(p->macaddr, addr, sizeof(p->macaddr)); 1504 1505 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); 1506 if (ret == 0) { 1507 p = &rpl.u.exact[0]; 1508 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx)); 1509 if (ret >= max_mac_addr) 1510 ret = -ENOMEM; 1511 } 1512 return ret; 1513 } 1514 1515 /** 1516 * t4vf_set_addr_hash - program the MAC inexact-match hash filter 1517 * @adapter: the adapter 1518 * @viid: the Virtual Interface Identifier 1519 * @ucast: whether the hash filter should also match unicast addresses 1520 * @vec: the value to be written to the hash filter 1521 * @sleep_ok: call is allowed to sleep 1522 * 1523 * Sets the 64-bit inexact-match hash filter for a virtual interface. 1524 */ 1525 int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid, 1526 bool ucast, u64 vec, bool sleep_ok) 1527 { 1528 struct fw_vi_mac_cmd cmd; 1529 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 1530 u.exact[0]), 16); 1531 1532 memset(&cmd, 0, sizeof(cmd)); 1533 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | 1534 FW_CMD_REQUEST_F | 1535 FW_CMD_WRITE_F | 1536 FW_VI_ENABLE_CMD_VIID_V(viid)); 1537 cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F | 1538 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) | 1539 FW_CMD_LEN16_V(len16)); 1540 cmd.u.hash.hashvec = cpu_to_be64(vec); 1541 return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); 1542 } 1543 1544 /** 1545 * t4vf_get_port_stats - collect "port" statistics 1546 * @adapter: the adapter 1547 * @pidx: the port index 1548 * @s: the stats structure to fill 1549 * 1550 * Collect statistics for the "port"'s Virtual Interface. 1551 */ 1552 int t4vf_get_port_stats(struct adapter *adapter, int pidx, 1553 struct t4vf_port_stats *s) 1554 { 1555 struct port_info *pi = adap2pinfo(adapter, pidx); 1556 struct fw_vi_stats_vf fwstats; 1557 unsigned int rem = VI_VF_NUM_STATS; 1558 __be64 *fwsp = (__be64 *)&fwstats; 1559 1560 /* 1561 * Grab the Virtual Interface statistics a chunk at a time via mailbox 1562 * commands. We could use a Work Request and get all of them at once 1563 * but that's an asynchronous interface which is awkward to use. 1564 */ 1565 while (rem) { 1566 unsigned int ix = VI_VF_NUM_STATS - rem; 1567 unsigned int nstats = min(6U, rem); 1568 struct fw_vi_stats_cmd cmd, rpl; 1569 size_t len = (offsetof(struct fw_vi_stats_cmd, u) + 1570 sizeof(struct fw_vi_stats_ctl)); 1571 size_t len16 = DIV_ROUND_UP(len, 16); 1572 int ret; 1573 1574 memset(&cmd, 0, sizeof(cmd)); 1575 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) | 1576 FW_VI_STATS_CMD_VIID_V(pi->viid) | 1577 FW_CMD_REQUEST_F | 1578 FW_CMD_READ_F); 1579 cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); 1580 cmd.u.ctl.nstats_ix = 1581 cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) | 1582 FW_VI_STATS_CMD_NSTATS_V(nstats)); 1583 ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl); 1584 if (ret) 1585 return ret; 1586 1587 memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats); 1588 1589 rem -= nstats; 1590 fwsp += nstats; 1591 } 1592 1593 /* 1594 * Translate firmware statistics into host native statistics. 
1595 */ 1596 s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes); 1597 s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames); 1598 s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes); 1599 s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames); 1600 s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes); 1601 s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames); 1602 s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames); 1603 s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes); 1604 s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames); 1605 1606 s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes); 1607 s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames); 1608 s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes); 1609 s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames); 1610 s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes); 1611 s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames); 1612 1613 s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames); 1614 1615 return 0; 1616 } 1617 1618 /** 1619 * t4vf_iq_free - free an ingress queue and its free lists 1620 * @adapter: the adapter 1621 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 1622 * @iqid: ingress queue ID 1623 * @fl0id: FL0 queue ID or 0xffff if no attached FL0 1624 * @fl1id: FL1 queue ID or 0xffff if no attached FL1 1625 * 1626 * Frees an ingress queue and its associated free lists, if any. 1627 */ 1628 int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype, 1629 unsigned int iqid, unsigned int fl0id, unsigned int fl1id) 1630 { 1631 struct fw_iq_cmd cmd; 1632 1633 memset(&cmd, 0, sizeof(cmd)); 1634 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | 1635 FW_CMD_REQUEST_F | 1636 FW_CMD_EXEC_F); 1637 cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | 1638 FW_LEN16(cmd)); 1639 cmd.type_to_iqandstindex = 1640 cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype)); 1641 1642 cmd.iqid = cpu_to_be16(iqid); 1643 cmd.fl0id = cpu_to_be16(fl0id); 1644 cmd.fl1id = cpu_to_be16(fl1id); 1645 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); 1646 } 1647 1648 /** 1649 * t4vf_eth_eq_free - free an Ethernet egress queue 1650 * @adapter: the adapter 1651 * @eqid: egress queue ID 1652 * 1653 * Frees an Ethernet egress queue. 1654 */ 1655 int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid) 1656 { 1657 struct fw_eq_eth_cmd cmd; 1658 1659 memset(&cmd, 0, sizeof(cmd)); 1660 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) | 1661 FW_CMD_REQUEST_F | 1662 FW_CMD_EXEC_F); 1663 cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | 1664 FW_LEN16(cmd)); 1665 cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid)); 1666 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); 1667 } 1668 1669 /** 1670 * t4vf_handle_fw_rpl - process a firmware reply message 1671 * @adapter: the adapter 1672 * @rpl: start of the firmware message 1673 * 1674 * Processes a firmware message, such as link state change messages. 1675 */ 1676 int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) 1677 { 1678 const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl; 1679 u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi)); 1680 1681 switch (opcode) { 1682 case FW_PORT_CMD: { 1683 /* 1684 * Link/module state change message. 1685 */ 1686 const struct fw_port_cmd *port_cmd = 1687 (const struct fw_port_cmd *)rpl; 1688 u32 stat, mod; 1689 int action, port_id, link_ok, speed, fc, pidx; 1690 1691 /* 1692 * Extract various fields from port status change message. 
1693 */ 1694 action = FW_PORT_CMD_ACTION_G( 1695 be32_to_cpu(port_cmd->action_to_len16)); 1696 if (action != FW_PORT_ACTION_GET_PORT_INFO) { 1697 dev_err(adapter->pdev_dev, 1698 "Unknown firmware PORT reply action %x\n", 1699 action); 1700 break; 1701 } 1702 1703 port_id = FW_PORT_CMD_PORTID_G( 1704 be32_to_cpu(port_cmd->op_to_portid)); 1705 1706 stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype); 1707 link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; 1708 speed = 0; 1709 fc = 0; 1710 if (stat & FW_PORT_CMD_RXPAUSE_F) 1711 fc |= PAUSE_RX; 1712 if (stat & FW_PORT_CMD_TXPAUSE_F) 1713 fc |= PAUSE_TX; 1714 if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) 1715 speed = 100; 1716 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) 1717 speed = 1000; 1718 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) 1719 speed = 10000; 1720 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) 1721 speed = 25000; 1722 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) 1723 speed = 40000; 1724 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) 1725 speed = 100000; 1726 1727 /* 1728 * Scan all of our "ports" (Virtual Interfaces) looking for 1729 * those bound to the physical port which has changed. If 1730 * our recorded state doesn't match the current state, 1731 * signal that change to the OS code. 1732 */ 1733 for_each_port(adapter, pidx) { 1734 struct port_info *pi = adap2pinfo(adapter, pidx); 1735 struct link_config *lc; 1736 1737 if (pi->port_id != port_id) 1738 continue; 1739 1740 lc = &pi->link_cfg; 1741 1742 mod = FW_PORT_CMD_MODTYPE_G(stat); 1743 if (mod != pi->mod_type) { 1744 pi->mod_type = mod; 1745 t4vf_os_portmod_changed(adapter, pidx); 1746 } 1747 1748 if (link_ok != lc->link_ok || speed != lc->speed || 1749 fc != lc->fc) { 1750 /* something changed */ 1751 lc->link_ok = link_ok; 1752 lc->speed = speed; 1753 lc->fc = fc; 1754 lc->supported = 1755 be16_to_cpu(port_cmd->u.info.pcap); 1756 lc->lp_advertising = 1757 be16_to_cpu(port_cmd->u.info.lpacap); 1758 t4vf_os_link_changed(adapter, pidx, link_ok); 1759 } 1760 } 1761 break; 1762 } 1763 1764 default: 1765 dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n", 1766 opcode); 1767 } 1768 return 0; 1769 } 1770 1771 /** 1772 */ 1773 int t4vf_prep_adapter(struct adapter *adapter) 1774 { 1775 int err; 1776 unsigned int chipid; 1777 1778 /* Wait for the device to become ready before proceeding ... 1779 */ 1780 err = t4vf_wait_dev_ready(adapter); 1781 if (err) 1782 return err; 1783 1784 /* Default port and clock for debugging in case we can't reach 1785 * firmware. 
1786 */ 1787 adapter->params.nports = 1; 1788 adapter->params.vfres.pmask = 1; 1789 adapter->params.vpd.cclk = 50000; 1790 1791 adapter->params.chip = 0; 1792 switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) { 1793 case CHELSIO_T4: 1794 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0); 1795 adapter->params.arch.sge_fl_db = DBPRIO_F; 1796 adapter->params.arch.mps_tcam_size = 1797 NUM_MPS_CLS_SRAM_L_INSTANCES; 1798 break; 1799 1800 case CHELSIO_T5: 1801 chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A)); 1802 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); 1803 adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F; 1804 adapter->params.arch.mps_tcam_size = 1805 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 1806 break; 1807 1808 case CHELSIO_T6: 1809 chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A)); 1810 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid); 1811 adapter->params.arch.sge_fl_db = 0; 1812 adapter->params.arch.mps_tcam_size = 1813 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 1814 break; 1815 } 1816 1817 return 0; 1818 } 1819 1820 /** 1821 * t4vf_get_vf_mac_acl - Get the MAC address to be set to 1822 * the VI of this VF. 1823 * @adapter: The adapter 1824 * @pf: The pf associated with vf 1825 * @naddr: the number of ACL MAC addresses returned in addr 1826 * @addr: Placeholder for MAC addresses 1827 * 1828 * Find the MAC address to be set to the VF's VI. The requested MAC address 1829 * is from the host OS via callback in the PF driver. 1830 */ 1831 int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf, 1832 unsigned int *naddr, u8 *addr) 1833 { 1834 struct fw_acl_mac_cmd cmd; 1835 int ret; 1836 1837 memset(&cmd, 0, sizeof(cmd)); 1838 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) | 1839 FW_CMD_REQUEST_F | 1840 FW_CMD_READ_F); 1841 cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd)); 1842 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd); 1843 if (ret) 1844 return ret; 1845 1846 if (cmd.nmac < *naddr) 1847 *naddr = cmd.nmac; 1848 1849 switch (pf) { 1850 case 3: 1851 memcpy(addr, cmd.macaddr3, sizeof(cmd.macaddr3)); 1852 break; 1853 case 2: 1854 memcpy(addr, cmd.macaddr2, sizeof(cmd.macaddr2)); 1855 break; 1856 case 1: 1857 memcpy(addr, cmd.macaddr1, sizeof(cmd.macaddr1)); 1858 break; 1859 case 0: 1860 memcpy(addr, cmd.macaddr0, sizeof(cmd.macaddr0)); 1861 break; 1862 } 1863 1864 return ret; 1865 } 1866
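
/*
 * Usage sketch (illustrative only; the authoritative sequence lives in the
 * main VF driver's probe path and may differ in detail): the helpers above
 * are typically exercised roughly in this order when an adapter is brought
 * up, with each step checked for errors before the next is attempted:
 *
 *	t4vf_prep_adapter(adapter);         chip/arch parameters
 *	t4vf_fw_reset(adapter);             reset this VF's firmware state
 *	t4vf_get_dev_params(adapter);       firmware/TP revisions
 *	t4vf_get_vpd_params(adapter);       core clock, etc.
 *	t4vf_get_sge_params(adapter);       SGE register snapshot
 *	t4vf_get_rss_glb_config(adapter);   global RSS mode
 *	t4vf_get_vfres(adapter);            VF resource limits
 *	t4vf_port_init(adapter, pidx);      per-port/VI state
 */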