1 /* 2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet 3 * driver for Linux. 4 * 5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #include <linux/pci.h> 37 38 #include "t4vf_common.h" 39 #include "t4vf_defs.h" 40 41 #include "../cxgb4/t4_regs.h" 42 #include "../cxgb4/t4_values.h" 43 #include "../cxgb4/t4fw_api.h" 44 45 /* 46 * Wait for the device to become ready (signified by our "who am I" register 47 * returning a value other than all 1's). Return an error if it doesn't 48 * become ready ... 
49 */ 50 int t4vf_wait_dev_ready(struct adapter *adapter) 51 { 52 const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI; 53 const u32 notready1 = 0xffffffff; 54 const u32 notready2 = 0xeeeeeeee; 55 u32 val; 56 57 val = t4_read_reg(adapter, whoami); 58 if (val != notready1 && val != notready2) 59 return 0; 60 msleep(500); 61 val = t4_read_reg(adapter, whoami); 62 if (val != notready1 && val != notready2) 63 return 0; 64 else 65 return -EIO; 66 } 67 68 /* 69 * Get the reply to a mailbox command and store it in @rpl in big-endian order 70 * (since the firmware data structures are specified in a big-endian layout). 71 */ 72 static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size, 73 u32 mbox_data) 74 { 75 for ( ; size; size -= 8, mbox_data += 8) 76 *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data)); 77 } 78 79 /* 80 * Dump contents of mailbox with a leading tag. 81 */ 82 static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data) 83 { 84 dev_err(adapter->pdev_dev, 85 "mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag, 86 (unsigned long long)t4_read_reg64(adapter, mbox_data + 0), 87 (unsigned long long)t4_read_reg64(adapter, mbox_data + 8), 88 (unsigned long long)t4_read_reg64(adapter, mbox_data + 16), 89 (unsigned long long)t4_read_reg64(adapter, mbox_data + 24), 90 (unsigned long long)t4_read_reg64(adapter, mbox_data + 32), 91 (unsigned long long)t4_read_reg64(adapter, mbox_data + 40), 92 (unsigned long long)t4_read_reg64(adapter, mbox_data + 48), 93 (unsigned long long)t4_read_reg64(adapter, mbox_data + 56)); 94 } 95 96 /** 97 * t4vf_wr_mbox_core - send a command to FW through the mailbox 98 * @adapter: the adapter 99 * @cmd: the command to write 100 * @size: command length in bytes 101 * @rpl: where to optionally store the reply 102 * @sleep_ok: if true we may sleep while awaiting command completion 103 * 104 * Sends the given command to FW through the mailbox and waits for the 105 * FW to execute the command. 
If @rpl is not %NULL it is used to store
 * the FW's reply to the command.  The command and its optional reply
 * are of the same length.  FW can take up to 500 ms to respond.
 * @sleep_ok determines whether we may sleep while awaiting the response.
 * If sleeping is allowed we use progressive backoff otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
		      void *rpl, bool sleep_ok)
{
	/* Progressive backoff schedule (milliseconds) used between polls of
	 * the Mailbox Control register when sleeping is allowed.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v, mbox_data;
	int i, ms, delay_idx;
	const __be64 *p;
	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;

	/* In T6, mailbox size is changed to 128 bytes to avoid
	 * invalidating the entire prefetch buffer.
	 */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		mbox_data = T4VF_MBDATA_BASE_ADDR;
	else
		mbox_data = T6VF_MBDATA_BASE_ADDR;

	/*
	 * Commands must be multiples of 16 bytes in length and may not be
	 * larger than the size of the Mailbox Data register array.
	 */
	if ((size % 16) != 0 ||
	    size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
		return -EINVAL;

	/*
	 * Loop trying to get ownership of the mailbox.  Return an error
	 * if we can't gain ownership.  (Up to four reads of the ownership
	 * field before giving up.)
	 */
	v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
	if (v != MBOX_OWNER_DRV)
		return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;

	/*
	 * Write the command array into the Mailbox Data register array and
	 * transfer ownership of the mailbox to the firmware.
	 *
	 * For the VFs, the Mailbox Data "registers" are actually backed by
	 * T4's "MA" interface rather than PL Registers (as is the case for
	 * the PFs).  Because these are in different coherency domains, the
	 * write to the VF's PL-register-backed Mailbox Control can race in
	 * front of the writes to the MA-backed VF Mailbox Data "registers".
	 * So we need to do a read-back on at least one byte of the VF Mailbox
	 * Data registers before doing the write to the VF Mailbox Control
	 * register.
	 */
	for (i = 0, p = cmd; i < size; i += 8)
		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
	t4_read_reg(adapter, mbox_data);         /* flush write */

	t4_write_reg(adapter, mbox_ctl,
		     MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adapter, mbox_ctl);          /* flush write */

	/*
	 * Spin waiting for firmware to acknowledge processing our command.
	 */
	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		/*
		 * If we're the owner, see if this is the reply we wanted.
		 */
		v = t4_read_reg(adapter, mbox_ctl);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			/*
			 * If the Message Valid bit isn't on, revoke ownership
			 * of the mailbox and continue waiting for our reply.
			 */
			if ((v & MBMSGVALID_F) == 0) {
				t4_write_reg(adapter, mbox_ctl,
					     MBOWNER_V(MBOX_OWNER_NONE));
				continue;
			}

			/*
			 * We now have our reply.  Extract the command return
			 * value, copy the reply back to our caller's buffer
			 * (if specified) and revoke ownership of the mailbox.
			 * We return the (negated) firmware command return
			 * code (this depends on FW_SUCCESS == 0).
			 */

			/* return value in low-order little-endian word */
			v = t4_read_reg(adapter, mbox_data);
			if (FW_CMD_RETVAL_G(v))
				dump_mbox(adapter, "FW Error", mbox_data);

			if (rpl) {
				/* request bit in high-order BE word */
				WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
					 & FW_CMD_REQUEST_F) == 0);
				get_mbox_rpl(adapter, rpl, size, mbox_data);
				/* the reply must have the request bit clear */
				WARN_ON((be32_to_cpu(*(__be32 *)rpl)
					 & FW_CMD_REQUEST_F) != 0);
			}
			t4_write_reg(adapter, mbox_ctl,
				     MBOWNER_V(MBOX_OWNER_NONE));
			return -FW_CMD_RETVAL_G(v);
		}
	}

	/*
	 * We timed out.  Return the error ...
	 */
	dump_mbox(adapter, "FW Timeout", mbox_data);
	return -ETIMEDOUT;
}

/* Link speed/autonegotiation capabilities we pass through to the
 * Ethtool-visible "advertising" mask.
 */
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	/* Default to symmetric PAUSE; autoneg-managed flow control is added
	 * below only if the port advertises autonegotiation.
	 */
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

/**
 * t4vf_port_init - initialize port hardware/software state
 * @adapter: the adapter
 * @pidx: the adapter port index
 *
 * Reads the Virtual Interface's MAC address and RSS slice size via a
 * VI Read command and, if this VF has port read access, retrieves the
 * port's type/module and link capabilities via a PORT Read command.
 * Returns 0 on success or a negative errno from the mailbox layer.
 */
int t4vf_port_init(struct adapter *adapter, int pidx)
{
	struct port_info *pi = adap2pinfo(adapter, pidx);
	struct fw_vi_cmd vi_cmd, vi_rpl;
	struct fw_port_cmd port_cmd, port_rpl;
	int v;

	/*
	 * Execute a VI Read command to get our Virtual Interface information
	 * like MAC address, etc.
	 */
	memset(&vi_cmd, 0, sizeof(vi_cmd));
	vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				       FW_CMD_REQUEST_F |
				       FW_CMD_READ_F);
	vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
	vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid));
	v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
	if (v)
		return v;

	/* The firmware's notion of which port backs this VI must agree with
	 * ours; a mismatch indicates corrupted driver/firmware state.
	 */
	BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd));
	pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd));
	t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);

	/*
	 * If we don't have read access to our port information, we're done
	 * now.  Otherwise, execute a PORT Read command to get it ...
	 */
	if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
		return 0;

	memset(&port_cmd, 0, sizeof(port_cmd));
	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
					    FW_CMD_REQUEST_F |
					    FW_CMD_READ_F |
					    FW_PORT_CMD_PORTID_V(pi->port_id));
	port_cmd.action_to_len16 =
		cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
			    FW_LEN16(port_cmd));
	v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
	if (v)
		return v;

	/* Decode MDIO address (if MDIO-capable), port and module type. */
	v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
	pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
		FW_PORT_CMD_MDIOADDR_G(v) : -1;
	pi->port_type = FW_PORT_CMD_PTYPE_G(v);
	pi->mod_type = FW_PORT_MOD_TYPE_NA;

	init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));

	return 0;
}

/**
 * t4vf_fw_reset - issue a reset to FW
 * @adapter: the adapter
 *
 * Issues a reset command to FW.  For a Physical Function this would
 * result in the Firmware resetting all of its state.  For a Virtual
 * Function this just resets the state associated with the VF.
 */
int t4vf_fw_reset(struct adapter *adapter)
{
	struct fw_reset_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
				      FW_CMD_WRITE_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_query_params - query FW or device parameters
 * @adapter: the adapter
 * @nparams: the number of parameters
 * @params: the parameter names
 * @vals: the parameter values
 *
 * Reads the values of firmware or device parameters.  Up to 7 parameters
 * can be queried at once.
355 */ 356 static int t4vf_query_params(struct adapter *adapter, unsigned int nparams, 357 const u32 *params, u32 *vals) 358 { 359 int i, ret; 360 struct fw_params_cmd cmd, rpl; 361 struct fw_params_param *p; 362 size_t len16; 363 364 if (nparams > 7) 365 return -EINVAL; 366 367 memset(&cmd, 0, sizeof(cmd)); 368 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | 369 FW_CMD_REQUEST_F | 370 FW_CMD_READ_F); 371 len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, 372 param[nparams].mnem), 16); 373 cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); 374 for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) 375 p->mnem = htonl(*params++); 376 377 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); 378 if (ret == 0) 379 for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++) 380 *vals++ = be32_to_cpu(p->val); 381 return ret; 382 } 383 384 /** 385 * t4vf_set_params - sets FW or device parameters 386 * @adapter: the adapter 387 * @nparams: the number of parameters 388 * @params: the parameter names 389 * @vals: the parameter values 390 * 391 * Sets the values of firmware or device parameters. Up to 7 parameters 392 * can be specified at once. 
 */
int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
		    const u32 *params, const u32 *vals)
{
	int i;
	struct fw_params_cmd cmd;
	struct fw_params_param *p;
	size_t len16;

	/* The firmware PARAMS command has room for at most 7 entries. */
	if (nparams > 7)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F);
	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
				      param[nparams]), 16);
	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
		p->mnem = cpu_to_be32(*params++);
		p->val = cpu_to_be32(*vals++);
	}

	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_fl_pkt_align - return the fl packet alignment
 * @adapter: the adapter
 *
 * T4 has a single field to specify the packing and padding boundary.
 * T5 onwards has separate fields for this and hence the alignment for
 * next packet offset is maximum of these two.  And T6 changes the
 * Ingress Padding Boundary Shift, so it's all a mess and it's best
 * if we put this in low-level Common Code ...
 *
 */
int t4vf_fl_pkt_align(struct adapter *adapter)
{
	u32 sge_control, sge_control2;
	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;

	sge_control = adapter->params.sge.sge_control;

	/* T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately.  The actual Ingress Packet Data alignment boundary
	 * within Packed Buffer Mode is the maximum of these two
	 * specifications.  (Note that it makes no real practical sense to
	 * have the Padding Boundary be larger than the Packing Boundary but
	 * you could set the chip up that way and, in fact, legacy T4 code
	 * would end up doing this because it would initialize the Padding
	 * Boundary and leave the Packing Boundary initialized to 0 (16
	 * bytes).)  Padding Boundary values in T6 start from 8B, whereas
	 * it is 32B for T4 and T5.
	 */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		ingpad_shift = INGPADBOUNDARY_SHIFT_X;
	else
		ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;

	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);

	fl_align = ingpadboundary;
	if (!is_t4(adapter->params.chip)) {
		/* T5 has a different interpretation of one of the PCIe Packing
		 * Boundary values.
		 */
		sge_control2 = adapter->params.sge.sge_control2;
		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
			ingpackboundary = 16;
		else
			ingpackboundary = 1 << (ingpackboundary +
						INGPACKBOUNDARY_SHIFT_X);

		fl_align = max(ingpadboundary, ingpackboundary);
	}
	return fl_align;
}

/**
 * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
 * @adapter: the adapter
 * @qid: the Queue ID
 * @qtype: the Ingress or Egress type for @qid
 * @pbar2_qoffset: BAR2 Queue Offset
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 SGE Queue Registers information associated with the
 * indicated Absolute Queue ID.  These are passed back in return value
 * pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 * This may return an error which indicates that BAR2 SGE Queue
 * registers aren't available.
If an error is not returned, then the
 * following values are returned:
 *
 *   *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *   *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 * require the "Inferred Queue ID" ability may be used.  E.g. the
 * Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 * then these "Inferred Queue ID" register may not be used.
 */
int t4vf_bar2_sge_qregs(struct adapter *adapter,
			unsigned int qid,
			enum t4_bar2_qtype qtype,
			u64 *pbar2_qoffset,
			unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers.
	 */
	if (is_t4(adapter->params.chip))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 * (sge_vf_hps encodes the host page size as a shift relative to 1KB.)
	 */
	page_shift = adapter->params.sge.sge_vf_hps + 10;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.sge_vf_eq_qpp
		     : adapter->params.sge.sge_vf_iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}

/**
 * t4vf_get_sge_params - retrieve adapter Scatter gather Engine parameters
 * @adapter: the adapter
 *
 * Retrieves various core SGE parameters in the form of hardware SGE
 * register values.  The caller is responsible for decoding these as
 * needed.  The SGE parameters are stored in @adapter->params.sge.
 */
int t4vf_get_sge_params(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	u32 params[7], vals[7];
	int v;

	/* First batch: the seven core SGE registers the VF driver needs,
	 * fetched via the firmware register-read parameter interface.
	 */
	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
	params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
	params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
	params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
	params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
	params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
	v = t4vf_query_params(adapter, 7, params, vals);
	if (v)
		return v;
	sge_params->sge_control = vals[0];
	sge_params->sge_host_page_size = vals[1];
	sge_params->sge_fl_buffer_size[0] = vals[2];
	sge_params->sge_fl_buffer_size[1] = vals[3];
	sge_params->sge_timer_value_0_and_1 = vals[4];
	sge_params->sge_timer_value_2_and_3 = vals[5];
	sge_params->sge_timer_value_4_and_5 = vals[6];

	/* T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately with the Padding Boundary in SGE_CONTROL and the Packing
	 * Boundary in SGE_CONTROL2.  So for T5 and later we need to grab
	 * SGE_CONTROL in order to determine how ingress packet data will be
	 * laid out in Packed Buffer Mode.  Unfortunately, older versions of
	 * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
	 * failure grabbing it we throw an error since we can't figure out the
	 * right value.
	 */
	if (!is_t4(adapter->params.chip)) {
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A));
		v = t4vf_query_params(adapter, 1, params, vals);
		if (v != FW_SUCCESS) {
			dev_err(adapter->pdev_dev,
				"Unable to get SGE Control2; "
				"probably old firmware.\n");
			return v;
		}
		sge_params->sge_control2 = vals[0];
	}

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
	v = t4vf_query_params(adapter, 2, params, vals);
	if (v)
		return v;
	sge_params->sge_ingress_rx_threshold = vals[0];
	sge_params->sge_congestion_control = vals[1];

	/* For T5 and later we want to use the new BAR2 Doorbells.
	 * Unfortunately, older firmware didn't allow this register to be
	 * read.
	 */
	if (!is_t4(adapter->params.chip)) {
		u32 whoami;
		unsigned int pf, s_hps, s_qpp;

		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(
				     SGE_EGRESS_QUEUES_PER_PAGE_VF_A));
		params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(
				     SGE_INGRESS_QUEUES_PER_PAGE_VF_A));
		v = t4vf_query_params(adapter, 2, params, vals);
		if (v != FW_SUCCESS) {
			dev_warn(adapter->pdev_dev,
				 "Unable to get VF SGE Queues/Page; "
				 "probably old firmware.\n");
			return v;
		}
		sge_params->sge_egress_queues_per_page = vals[0];
		sge_params->sge_ingress_queues_per_page = vals[1];

		/* We need the Queues/Page for our VF.  This is based on the
		 * PF from which we're instantiated and is indexed in the
		 * register we just read.  Do it once here so other code in
		 * the driver can just use it.
		 */
		whoami = t4_read_reg(adapter,
				     T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
		/* T6 moved the SOURCEPF field within the WHOAMI register. */
		pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);

		s_hps = (HOSTPAGESIZEPF0_S +
			 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
		sge_params->sge_vf_hps =
			((sge_params->sge_host_page_size >> s_hps)
			 & HOSTPAGESIZEPF0_M);

		s_qpp = (QUEUESPERPAGEPF0_S +
			 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
		sge_params->sge_vf_eq_qpp =
			((sge_params->sge_egress_queues_per_page >> s_qpp)
			 & QUEUESPERPAGEPF0_M);
		sge_params->sge_vf_iq_qpp =
			((sge_params->sge_ingress_queues_per_page >> s_qpp)
			 & QUEUESPERPAGEPF0_M);
	}

	return 0;
}

/**
 * t4vf_get_vpd_params - retrieve device VPD parameters
 * @adapter: the adapter
 *
 * Retrieves various device Vital Product Data parameters.  The parameters
 * are stored in @adapter->params.vpd.
 */
int t4vf_get_vpd_params(struct adapter *adapter)
{
	struct vpd_params *vpd_params = &adapter->params.vpd;
	u32 params[7], vals[7];
	int v;

	/* Core clock frequency is the only VPD parameter the VF needs. */
	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
	v = t4vf_query_params(adapter, 1, params, vals);
	if (v)
		return v;
	vpd_params->cclk = vals[0];

	return 0;
}

/**
 * t4vf_get_dev_params - retrieve device parameters
 * @adapter: the adapter
 *
 * Retrieves various device parameters.  The parameters are stored in
 * @adapter->params.dev.
 */
int t4vf_get_dev_params(struct adapter *adapter)
{
	struct dev_params *dev_params = &adapter->params.dev;
	u32 params[7], vals[7];
	int v;

	/* Fetch firmware and TP microcode revisions in one command. */
	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV));
	v = t4vf_query_params(adapter, 2, params, vals);
	if (v)
		return v;
	dev_params->fwrev = vals[0];
	dev_params->tprev = vals[1];

	return 0;
}

/**
 * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
 * @adapter: the adapter
 *
 * Retrieves global RSS mode and parameters with which we have to live
 * and stores them in the @adapter's RSS parameters.
 */
int t4vf_get_rss_glb_config(struct adapter *adapter)
{
	struct rss_params *rss = &adapter->params.rss;
	struct fw_rss_glb_config_cmd cmd, rpl;
	int v;

	/*
	 * Execute an RSS Global Configuration read command to retrieve
	 * our RSS configuration.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
				      FW_CMD_REQUEST_F |
				      FW_CMD_READ_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	/*
	 * Translate the big-endian RSS Global Configuration into our
	 * cpu-endian format based on the RSS mode.  We also do first level
	 * filtering at this point to weed out modes which don't support
	 * VF Drivers ...
	 */
	rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G(
			be32_to_cpu(rpl.u.manual.mode_pkd));
	switch (rss->mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = be32_to_cpu(
				rpl.u.basicvirtual.synmapen_to_hashtoeplitz);

		rss->u.basicvirtual.synmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0);
		rss->u.basicvirtual.syn4tupenipv6 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0);
		rss->u.basicvirtual.syn2tupenipv6 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0);
		rss->u.basicvirtual.syn4tupenipv4 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0);
		rss->u.basicvirtual.syn2tupenipv4 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0);

		rss->u.basicvirtual.ofdmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0);

		rss->u.basicvirtual.tnlmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0);
		rss->u.basicvirtual.tnlalllookup =
			((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0);

		rss->u.basicvirtual.hashtoeplitz =
			((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0);

		/* we need at least Tunnel Map Enable to be set */
		if (!rss->u.basicvirtual.tnlmapen)
			return -EINVAL;
		break;
	}

	default:
		/* all unknown/unsupported RSS modes result in an error */
		return -EINVAL;
	}

	return 0;
}

/**
 * t4vf_get_vfres - retrieve VF resource limits
 * @adapter: the adapter
 *
 * Retrieves configured resource limits and capabilities for a virtual
 * function.  The results are stored in @adapter->vfres.
 */
int t4vf_get_vfres(struct adapter *adapter)
{
	struct vf_resources *vfres = &adapter->params.vfres;
	struct fw_pfvf_cmd cmd, rpl;
	int v;
	u32 word;

	/*
	 * Execute PFVF Read command to get VF resource limits; bail out early
	 * with error on command failure.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_READ_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	/*
	 * Extract VF resource limits and return success.
	 */
	word = be32_to_cpu(rpl.niqflint_niq);
	vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
	vfres->niq = FW_PFVF_CMD_NIQ_G(word);

	word = be32_to_cpu(rpl.type_to_neq);
	vfres->neq = FW_PFVF_CMD_NEQ_G(word);
	vfres->pmask = FW_PFVF_CMD_PMASK_G(word);

	word = be32_to_cpu(rpl.tc_to_nexactf);
	vfres->tc = FW_PFVF_CMD_TC_G(word);
	vfres->nvi = FW_PFVF_CMD_NVI_G(word);
	vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);

	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
	vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
	vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
	vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);

	return 0;
}

/**
 * t4vf_read_rss_vi_config - read a VI's RSS configuration
 * @adapter: the adapter
 * @viid: Virtual Interface ID
 * @config: pointer to host-native VI RSS Configuration buffer
 *
 * Reads the Virtual Interface's RSS configuration information and
 * translates it into CPU-native format.
872 */ 873 int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid, 874 union rss_vi_config *config) 875 { 876 struct fw_rss_vi_config_cmd cmd, rpl; 877 int v; 878 879 memset(&cmd, 0, sizeof(cmd)); 880 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | 881 FW_CMD_REQUEST_F | 882 FW_CMD_READ_F | 883 FW_RSS_VI_CONFIG_CMD_VIID(viid)); 884 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 885 v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); 886 if (v) 887 return v; 888 889 switch (adapter->params.rss.mode) { 890 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { 891 u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen); 892 893 config->basicvirtual.ip6fourtupen = 894 ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0); 895 config->basicvirtual.ip6twotupen = 896 ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0); 897 config->basicvirtual.ip4fourtupen = 898 ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0); 899 config->basicvirtual.ip4twotupen = 900 ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0); 901 config->basicvirtual.udpen = 902 ((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0); 903 config->basicvirtual.defaultq = 904 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word); 905 break; 906 } 907 908 default: 909 return -EINVAL; 910 } 911 912 return 0; 913 } 914 915 /** 916 * t4vf_write_rss_vi_config - write a VI's RSS configuration 917 * @adapter: the adapter 918 * @viid: Virtual Interface ID 919 * @config: pointer to host-native VI RSS Configuration buffer 920 * 921 * Write the Virtual Interface's RSS configuration information 922 * (translating it into firmware-native format before writing). 
923 */ 924 int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid, 925 union rss_vi_config *config) 926 { 927 struct fw_rss_vi_config_cmd cmd, rpl; 928 929 memset(&cmd, 0, sizeof(cmd)); 930 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | 931 FW_CMD_REQUEST_F | 932 FW_CMD_WRITE_F | 933 FW_RSS_VI_CONFIG_CMD_VIID(viid)); 934 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 935 switch (adapter->params.rss.mode) { 936 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { 937 u32 word = 0; 938 939 if (config->basicvirtual.ip6fourtupen) 940 word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F; 941 if (config->basicvirtual.ip6twotupen) 942 word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F; 943 if (config->basicvirtual.ip4fourtupen) 944 word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F; 945 if (config->basicvirtual.ip4twotupen) 946 word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F; 947 if (config->basicvirtual.udpen) 948 word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F; 949 word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V( 950 config->basicvirtual.defaultq); 951 cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word); 952 break; 953 } 954 955 default: 956 return -EINVAL; 957 } 958 959 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); 960 } 961 962 /** 963 * t4vf_config_rss_range - configure a portion of the RSS mapping table 964 * @adapter: the adapter 965 * @viid: Virtual Interface of RSS Table Slice 966 * @start: starting entry in the table to write 967 * @n: how many table entries to write 968 * @rspq: values for the "Response Queue" (Ingress Queue) lookup table 969 * @nrspq: number of values in @rspq 970 * 971 * Programs the selected part of the VI's RSS mapping table with the 972 * provided values. If @nrspq < @n the supplied values are used repeatedly 973 * until the full table range is populated. 974 * 975 * The caller must ensure the values in @rspq are in the range 0..1023. 
 */
int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
			  int start, int n, const u16 *rspq, int nrspq)
{
	/* cursor into @rspq; wraps back to the start when @nrspq < @n */
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq+nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	/*
	 * Initialize firmware command template to write the RSS table.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_RSS_IND_TBL_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		__be32 *qp = &cmd.iq0_to_iq2;
		int nq = min(n, 32);
		int ret;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * Account for the "nq" entries of this command now so the
		 * next loop iteration starts at the right table index.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			/* unused slots of the 3-tuple are zeroed */
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf) {
				nqbuf--;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) |
					    FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) |
					    FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * t4vf_alloc_vi - allocate a virtual interface on a port
 * @adapter: the adapter
 * @port_id: physical port associated with the VI
 *
 * Allocate a new Virtual Interface and bind it to the indicated
 * physical port.  Return the new Virtual Interface Identifier on
 * success, or a [negative] error number on failure.
 */
int t4vf_alloc_vi(struct adapter *adapter, int port_id)
{
	struct fw_vi_cmd cmd, rpl;
	int v;

	/*
	 * Execute a VI command to allocate Virtual Interface and return its
	 * VIID.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
					 FW_VI_CMD_ALLOC_F);
	cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id);
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	/* the new VI's identifier comes back in the reply */
	return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid));
}

/**
 * t4vf_free_vi -- free a virtual interface
 * @adapter: the adapter
 * @viid: the virtual interface identifier
 *
 * Free a previously allocated Virtual Interface.  Return an error on
 * failure.
1098 */ 1099 int t4vf_free_vi(struct adapter *adapter, int viid) 1100 { 1101 struct fw_vi_cmd cmd; 1102 1103 /* 1104 * Execute a VI command to free the Virtual Interface. 1105 */ 1106 memset(&cmd, 0, sizeof(cmd)); 1107 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | 1108 FW_CMD_REQUEST_F | 1109 FW_CMD_EXEC_F); 1110 cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | 1111 FW_VI_CMD_FREE_F); 1112 cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid)); 1113 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); 1114 } 1115 1116 /** 1117 * t4vf_enable_vi - enable/disable a virtual interface 1118 * @adapter: the adapter 1119 * @viid: the Virtual Interface ID 1120 * @rx_en: 1=enable Rx, 0=disable Rx 1121 * @tx_en: 1=enable Tx, 0=disable Tx 1122 * 1123 * Enables/disables a virtual interface. 1124 */ 1125 int t4vf_enable_vi(struct adapter *adapter, unsigned int viid, 1126 bool rx_en, bool tx_en) 1127 { 1128 struct fw_vi_enable_cmd cmd; 1129 1130 memset(&cmd, 0, sizeof(cmd)); 1131 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | 1132 FW_CMD_REQUEST_F | 1133 FW_CMD_EXEC_F | 1134 FW_VI_ENABLE_CMD_VIID_V(viid)); 1135 cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) | 1136 FW_VI_ENABLE_CMD_EEN_V(tx_en) | 1137 FW_LEN16(cmd)); 1138 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); 1139 } 1140 1141 /** 1142 * t4vf_identify_port - identify a VI's port by blinking its LED 1143 * @adapter: the adapter 1144 * @viid: the Virtual Interface ID 1145 * @nblinks: how many times to blink LED at 2.5 Hz 1146 * 1147 * Identifies a VI's port by blinking its LED. 
1148 */ 1149 int t4vf_identify_port(struct adapter *adapter, unsigned int viid, 1150 unsigned int nblinks) 1151 { 1152 struct fw_vi_enable_cmd cmd; 1153 1154 memset(&cmd, 0, sizeof(cmd)); 1155 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | 1156 FW_CMD_REQUEST_F | 1157 FW_CMD_EXEC_F | 1158 FW_VI_ENABLE_CMD_VIID_V(viid)); 1159 cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | 1160 FW_LEN16(cmd)); 1161 cmd.blinkdur = cpu_to_be16(nblinks); 1162 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); 1163 } 1164 1165 /** 1166 * t4vf_set_rxmode - set Rx properties of a virtual interface 1167 * @adapter: the adapter 1168 * @viid: the VI id 1169 * @mtu: the new MTU or -1 for no change 1170 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change 1171 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change 1172 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change 1173 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it, 1174 * -1 no change 1175 * 1176 * Sets Rx properties of a virtual interface. 
1177 */ 1178 int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid, 1179 int mtu, int promisc, int all_multi, int bcast, int vlanex, 1180 bool sleep_ok) 1181 { 1182 struct fw_vi_rxmode_cmd cmd; 1183 1184 /* convert to FW values */ 1185 if (mtu < 0) 1186 mtu = FW_VI_RXMODE_CMD_MTU_M; 1187 if (promisc < 0) 1188 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M; 1189 if (all_multi < 0) 1190 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M; 1191 if (bcast < 0) 1192 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M; 1193 if (vlanex < 0) 1194 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M; 1195 1196 memset(&cmd, 0, sizeof(cmd)); 1197 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | 1198 FW_CMD_REQUEST_F | 1199 FW_CMD_WRITE_F | 1200 FW_VI_RXMODE_CMD_VIID_V(viid)); 1201 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 1202 cmd.mtu_to_vlanexen = 1203 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) | 1204 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) | 1205 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) | 1206 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) | 1207 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex)); 1208 return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); 1209 } 1210 1211 /** 1212 * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses 1213 * @adapter: the adapter 1214 * @viid: the Virtual Interface Identifier 1215 * @free: if true any existing filters for this VI id are first removed 1216 * @naddr: the number of MAC addresses to allocate filters for (up to 7) 1217 * @addr: the MAC address(es) 1218 * @idx: where to store the index of each allocated filter 1219 * @hash: pointer to hash address filter bitmap 1220 * @sleep_ok: call is allowed to sleep 1221 * 1222 * Allocates an exact-match filter for each of the supplied addresses and 1223 * sets it to the corresponding address. If @idx is not %NULL it should 1224 * have at least @naddr entries, each of which will be set to the index of 1225 * the filter allocated for the corresponding MAC address. 
If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL addresses that fail to allocate an exact filter
 * are hashed and update the hash filter bitmap pointed at by @hash.
 *
 * Returns a negative error number or the number of filters allocated.
 */
int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
			unsigned int naddr, const u8 **addr, u16 *idx,
			u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	unsigned nfilters = 0;		/* exact-match filters actually written */
	unsigned int rem = naddr;	/* addresses not yet sent to firmware */
	struct fw_vi_mac_cmd cmd, rpl;
	unsigned int max_naddr = adapter->params.arch.mps_tcam_size;

	if (naddr > max_naddr)
		return -EINVAL;

	/*
	 * A single FW_VI_MAC_CMD carries at most ARRAY_SIZE(cmd.u.exact)
	 * addresses, so walk the caller's list in command-sized chunks.
	 */
	for (offset = 0; offset < naddr; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
					 ? rem
					 : ARRAY_SIZE(cmd.u.exact));
		/* command length covers only the exact-match entries used */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		/*
		 * On the first iteration only (see "free = false" below),
		 * optionally ask the firmware to first free any existing
		 * filters for this VI (FW_CMD_EXEC + FREEMACS).
		 */
		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
					     FW_CMD_REQUEST_F |
					     FW_CMD_WRITE_F |
					     (free ? FW_CMD_EXEC_F : 0) |
					     FW_VI_MAC_CMD_VIID_V(viid));
		cmd.freemacs_to_len16 =
			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
				    FW_CMD_LEN16_V(len16));

		/* each entry asks the firmware to pick a free filter slot */
		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx = cpu_to_be16(
				FW_VI_MAC_CMD_VALID_F |
				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		/*
		 * -ENOMEM means the firmware ran out of exact-match slots;
		 * we still parse the reply so hashed fallbacks can be noted.
		 */
		ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
					sleep_ok);
		if (ret && ret != -ENOMEM)
			break;

		/*
		 * The reply echoes each entry with the filter index the
		 * firmware assigned; an out-of-range index means no exact
		 * filter could be allocated for that address.
		 */
		for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
			u16 index = FW_VI_MAC_CMD_IDX_G(
				be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset+i] =
					(index >= max_naddr
					 ? 0xffff
					 : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		/* only free pre-existing filters on the first command */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/*
	 * If there were no errors or we merely ran out of room in our MAC
	 * address arena, return the number of filters actually written.
	 */
	if (ret == 0 || ret == -ENOMEM)
		ret = nfilters;
	return ret;
}

/**
 * t4vf_free_mac_filt - frees exact-match filters of given MAC addresses
 * @adapter: the adapter
 * @viid: the VI id
 * @naddr: the number of MAC addresses to free filters for (up to 7)
 * @addr: the MAC address(es)
 * @sleep_ok: call is allowed to sleep
 *
 * Frees the exact-match filter for each of the supplied addresses
 *
 * Returns a negative error number or the number of filters freed.
 */
int t4vf_free_mac_filt(struct adapter *adapter, unsigned int viid,
		       unsigned int naddr, const u8 **addr, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd cmd;
	unsigned int nfilters = 0;	/* filters confirmed freed by firmware */
	unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
	unsigned int rem = naddr;	/* addresses not yet sent to firmware */

	if (naddr > max_naddr)
		return -EINVAL;

	/* walk the address list in command-sized chunks, as above */
	for (offset = 0; offset < (int)naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) ?
					 rem : ARRAY_SIZE(cmd.u.exact));
		/* command length covers only the exact-match entries used */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_CMD_EXEC_V(0) |
				     FW_VI_MAC_CMD_VIID_V(viid));
		cmd.freemacs_to_len16 =
				cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
					    FW_CMD_LEN16_V(len16));

		/* MAC_BASED_FREE tells the firmware to free by address */
		for (i = 0, p = cmd.u.exact; i < (int)fw_naddr; i++, p++) {
			p->valid_to_idx = cpu_to_be16(
				FW_VI_MAC_CMD_VALID_F |
				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		/* note: the command buffer doubles as the reply buffer here */
		ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &cmd,
					sleep_ok);
		if (ret)
			break;

		/* count entries the firmware reports as actually freed */
		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
			u16 index = FW_VI_MAC_CMD_IDX_G(
				be16_to_cpu(p->valid_to_idx));

			if (index < max_naddr)
				nfilters++;
		}

		offset += fw_naddr;
		rem -= fw_naddr;
	}

	if (ret == 0)
		ret = nfilters;
	return ret;
}

/**
 * t4vf_change_mac - modifies the exact-match filter for a MAC address
 * @adapter: the adapter
 * @viid: the Virtual Interface ID
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: if idx < 0, the new MAC allocation should be persistent
 *
 * Modifies an exact-match filter and sets it to the new MAC address.
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the
 * one being used by the old address value and allocate a new filter for
 * the new address value.  @idx can be -1 if the address is a new
 * addition.
1391 * 1392 * Returns a negative error number or the index of the filter with the new 1393 * MAC value. 1394 */ 1395 int t4vf_change_mac(struct adapter *adapter, unsigned int viid, 1396 int idx, const u8 *addr, bool persist) 1397 { 1398 int ret; 1399 struct fw_vi_mac_cmd cmd, rpl; 1400 struct fw_vi_mac_exact *p = &cmd.u.exact[0]; 1401 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 1402 u.exact[1]), 16); 1403 unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size; 1404 1405 /* 1406 * If this is a new allocation, determine whether it should be 1407 * persistent (across a "freemacs" operation) or not. 1408 */ 1409 if (idx < 0) 1410 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; 1411 1412 memset(&cmd, 0, sizeof(cmd)); 1413 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | 1414 FW_CMD_REQUEST_F | 1415 FW_CMD_WRITE_F | 1416 FW_VI_MAC_CMD_VIID_V(viid)); 1417 cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); 1418 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F | 1419 FW_VI_MAC_CMD_IDX_V(idx)); 1420 memcpy(p->macaddr, addr, sizeof(p->macaddr)); 1421 1422 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); 1423 if (ret == 0) { 1424 p = &rpl.u.exact[0]; 1425 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx)); 1426 if (ret >= max_mac_addr) 1427 ret = -ENOMEM; 1428 } 1429 return ret; 1430 } 1431 1432 /** 1433 * t4vf_set_addr_hash - program the MAC inexact-match hash filter 1434 * @adapter: the adapter 1435 * @viid: the Virtual Interface Identifier 1436 * @ucast: whether the hash filter should also match unicast addresses 1437 * @vec: the value to be written to the hash filter 1438 * @sleep_ok: call is allowed to sleep 1439 * 1440 * Sets the 64-bit inexact-match hash filter for a virtual interface. 
1441 */ 1442 int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid, 1443 bool ucast, u64 vec, bool sleep_ok) 1444 { 1445 struct fw_vi_mac_cmd cmd; 1446 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 1447 u.exact[0]), 16); 1448 1449 memset(&cmd, 0, sizeof(cmd)); 1450 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | 1451 FW_CMD_REQUEST_F | 1452 FW_CMD_WRITE_F | 1453 FW_VI_ENABLE_CMD_VIID_V(viid)); 1454 cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F | 1455 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) | 1456 FW_CMD_LEN16_V(len16)); 1457 cmd.u.hash.hashvec = cpu_to_be64(vec); 1458 return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); 1459 } 1460 1461 /** 1462 * t4vf_get_port_stats - collect "port" statistics 1463 * @adapter: the adapter 1464 * @pidx: the port index 1465 * @s: the stats structure to fill 1466 * 1467 * Collect statistics for the "port"'s Virtual Interface. 1468 */ 1469 int t4vf_get_port_stats(struct adapter *adapter, int pidx, 1470 struct t4vf_port_stats *s) 1471 { 1472 struct port_info *pi = adap2pinfo(adapter, pidx); 1473 struct fw_vi_stats_vf fwstats; 1474 unsigned int rem = VI_VF_NUM_STATS; 1475 __be64 *fwsp = (__be64 *)&fwstats; 1476 1477 /* 1478 * Grab the Virtual Interface statistics a chunk at a time via mailbox 1479 * commands. We could use a Work Request and get all of them at once 1480 * but that's an asynchronous interface which is awkward to use. 
1481 */ 1482 while (rem) { 1483 unsigned int ix = VI_VF_NUM_STATS - rem; 1484 unsigned int nstats = min(6U, rem); 1485 struct fw_vi_stats_cmd cmd, rpl; 1486 size_t len = (offsetof(struct fw_vi_stats_cmd, u) + 1487 sizeof(struct fw_vi_stats_ctl)); 1488 size_t len16 = DIV_ROUND_UP(len, 16); 1489 int ret; 1490 1491 memset(&cmd, 0, sizeof(cmd)); 1492 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) | 1493 FW_VI_STATS_CMD_VIID_V(pi->viid) | 1494 FW_CMD_REQUEST_F | 1495 FW_CMD_READ_F); 1496 cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); 1497 cmd.u.ctl.nstats_ix = 1498 cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) | 1499 FW_VI_STATS_CMD_NSTATS_V(nstats)); 1500 ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl); 1501 if (ret) 1502 return ret; 1503 1504 memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats); 1505 1506 rem -= nstats; 1507 fwsp += nstats; 1508 } 1509 1510 /* 1511 * Translate firmware statistics into host native statistics. 1512 */ 1513 s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes); 1514 s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames); 1515 s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes); 1516 s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames); 1517 s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes); 1518 s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames); 1519 s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames); 1520 s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes); 1521 s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames); 1522 1523 s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes); 1524 s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames); 1525 s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes); 1526 s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames); 1527 s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes); 1528 s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames); 1529 1530 s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames); 1531 1532 return 0; 
1533 } 1534 1535 /** 1536 * t4vf_iq_free - free an ingress queue and its free lists 1537 * @adapter: the adapter 1538 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 1539 * @iqid: ingress queue ID 1540 * @fl0id: FL0 queue ID or 0xffff if no attached FL0 1541 * @fl1id: FL1 queue ID or 0xffff if no attached FL1 1542 * 1543 * Frees an ingress queue and its associated free lists, if any. 1544 */ 1545 int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype, 1546 unsigned int iqid, unsigned int fl0id, unsigned int fl1id) 1547 { 1548 struct fw_iq_cmd cmd; 1549 1550 memset(&cmd, 0, sizeof(cmd)); 1551 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | 1552 FW_CMD_REQUEST_F | 1553 FW_CMD_EXEC_F); 1554 cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | 1555 FW_LEN16(cmd)); 1556 cmd.type_to_iqandstindex = 1557 cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype)); 1558 1559 cmd.iqid = cpu_to_be16(iqid); 1560 cmd.fl0id = cpu_to_be16(fl0id); 1561 cmd.fl1id = cpu_to_be16(fl1id); 1562 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); 1563 } 1564 1565 /** 1566 * t4vf_eth_eq_free - free an Ethernet egress queue 1567 * @adapter: the adapter 1568 * @eqid: egress queue ID 1569 * 1570 * Frees an Ethernet egress queue. 1571 */ 1572 int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid) 1573 { 1574 struct fw_eq_eth_cmd cmd; 1575 1576 memset(&cmd, 0, sizeof(cmd)); 1577 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) | 1578 FW_CMD_REQUEST_F | 1579 FW_CMD_EXEC_F); 1580 cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | 1581 FW_LEN16(cmd)); 1582 cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid)); 1583 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); 1584 } 1585 1586 /** 1587 * t4vf_handle_fw_rpl - process a firmware reply message 1588 * @adapter: the adapter 1589 * @rpl: start of the firmware message 1590 * 1591 * Processes a firmware message, such as link state change messages. 
 */
int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
{
	const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
	u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi));

	switch (opcode) {
	case FW_PORT_CMD: {
		/*
		 * Link/module state change message.
		 */
		const struct fw_port_cmd *port_cmd =
			(const struct fw_port_cmd *)rpl;
		u32 stat, mod;
		int action, port_id, link_ok, speed, fc, pidx;

		/*
		 * Extract various fields from port status change message.
		 */
		action = FW_PORT_CMD_ACTION_G(
			be32_to_cpu(port_cmd->action_to_len16));
		if (action != FW_PORT_ACTION_GET_PORT_INFO) {
			dev_err(adapter->pdev_dev,
				"Unknown firmware PORT reply action %x\n",
				action);
			break;
		}

		port_id = FW_PORT_CMD_PORTID_G(
			be32_to_cpu(port_cmd->op_to_portid));

		/* decode link status, pause settings and link speed */
		stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
		link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
		speed = 0;
		fc = 0;
		if (stat & FW_PORT_CMD_RXPAUSE_F)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE_F)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
			speed = 100;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
			speed = 1000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
			speed = 10000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
			speed = 40000;

		/*
		 * Scan all of our "ports" (Virtual Interfaces) looking for
		 * those bound to the physical port which has changed.  If
		 * our recorded state doesn't match the current state,
		 * signal that change to the OS code.
		 */
		for_each_port(adapter, pidx) {
			struct port_info *pi = adap2pinfo(adapter, pidx);
			struct link_config *lc;

			if (pi->port_id != port_id)
				continue;

			lc = &pi->link_cfg;

			/* notify the OS of any module (e.g. SFP) change */
			mod = FW_PORT_CMD_MODTYPE_G(stat);
			if (mod != pi->mod_type) {
				pi->mod_type = mod;
				t4vf_os_portmod_changed(adapter, pidx);
			}

			if (link_ok != lc->link_ok || speed != lc->speed ||
			    fc != lc->fc) {
				/* something changed */
				lc->link_ok = link_ok;
				lc->speed = speed;
				lc->fc = fc;
				lc->supported =
					be16_to_cpu(port_cmd->u.info.pcap);
				t4vf_os_link_changed(adapter, pidx, link_ok);
			}
		}
		break;
	}

	default:
		dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
			opcode);
	}
	return 0;
}

/**
 * t4vf_prep_adapter - prepare the adapter for operation
 * @adapter: the adapter
 *
 * Waits for the device to become ready, establishes default parameter
 * values in case the firmware can't be reached, and records the chip
 * type/revision and the associated per-architecture parameters derived
 * from the PCI device ID.  Returns 0 on success or a negative error.
 */
int t4vf_prep_adapter(struct adapter *adapter)
{
	int err;
	unsigned int chipid;

	/* Wait for the device to become ready before proceeding ...
	 */
	err = t4vf_wait_dev_ready(adapter);
	if (err)
		return err;

	/* Default port and clock for debugging in case we can't reach
	 * firmware.
	 */
	adapter->params.nports = 1;
	adapter->params.vfres.pmask = 1;
	adapter->params.vpd.cclk = 50000;

	/*
	 * Derive the chip generation/revision and the architecture
	 * parameters that depend on it from the PCI device ID.
	 * NOTE(review): devices with an unrecognized ID fall through the
	 * switch with chip left at 0 and still return success -- confirm
	 * this is the intended behavior for unknown parts.
	 */
	adapter->params.chip = 0;
	switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
		adapter->params.arch.sge_fl_db = DBPRIO_F;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_CLS_SRAM_L_INSTANCES;
		break;

	case CHELSIO_T5:
		chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		break;

	case CHELSIO_T6:
		chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		break;
	}

	return 0;
}