/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4fw_api.h"

/*
 * Wait for the device to become ready (signified by our "who am I" register
 * returning a value other than all 1's).  Return an error if it doesn't
 * become ready ...
 */
int t4vf_wait_dev_ready(struct adapter *adapter)
{
	const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
	const u32 notready1 = 0xffffffff;
	const u32 notready2 = 0xeeeeeeee;
	u32 val;

	val = t4_read_reg(adapter, whoami);
	if (val != notready1 && val != notready2)
		return 0;
	msleep(500);
	val = t4_read_reg(adapter, whoami);
	if (val != notready1 && val != notready2)
		return 0;
	else
		return -EIO;
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order
 * (since the firmware data structures are specified in a big-endian layout).
 */
static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
			 u32 mbox_data)
{
	for ( ; size; size -= 8, mbox_data += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
}

/*
 * Dump contents of mailbox with a leading tag.
 */
static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
{
	dev_err(adapter->pdev_dev,
		"mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag,
		(unsigned long long)t4_read_reg64(adapter, mbox_data +  0),
		(unsigned long long)t4_read_reg64(adapter, mbox_data +  8),
		(unsigned long long)t4_read_reg64(adapter, mbox_data + 16),
		(unsigned long long)t4_read_reg64(adapter, mbox_data + 24),
		(unsigned long long)t4_read_reg64(adapter, mbox_data + 32),
		(unsigned long long)t4_read_reg64(adapter, mbox_data + 40),
		(unsigned long long)t4_read_reg64(adapter, mbox_data + 48),
		(unsigned long long)t4_read_reg64(adapter, mbox_data + 56));
}

/**
 * t4vf_wr_mbox_core - send a command to FW through the mailbox
 * @adapter: the adapter
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the mailbox and waits for the
 * FW to execute the command.  If @rpl is not %NULL it is used to store
 * the FW's reply to the command.  The command and its optional reply
 * are of the same length.  FW can take up to 500 ms to respond.
 * @sleep_ok determines whether we may sleep while awaiting the response.
 * If sleeping is allowed we use progressive backoff otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
		      void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	int i, ms, delay_idx;
	const __be64 *p;
	u32 mbox_data = T4VF_MBDATA_BASE_ADDR;
	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;

	/*
	 * Commands must be multiples of 16 bytes in length and may not be
	 * larger than the size of the Mailbox Data register array.
	 */
	if ((size % 16) != 0 ||
	    size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
		return -EINVAL;

	/*
	 * Loop trying to get ownership of the mailbox.  Return an error
	 * if we can't gain ownership.
	 */
	v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
	if (v != MBOX_OWNER_DRV)
		return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;

	/*
	 * Write the command array into the Mailbox Data register array and
	 * transfer ownership of the mailbox to the firmware.
	 *
	 * For the VFs, the Mailbox Data "registers" are actually backed by
	 * T4's "MA" interface rather than PL Registers (as is the case for
	 * the PFs).  Because these are in different coherency domains, the
	 * write to the VF's PL-register-backed Mailbox Control can race in
	 * front of the writes to the MA-backed VF Mailbox Data "registers".
	 * So we need to do a read-back on at least one byte of the VF Mailbox
	 * Data registers before doing the write to the VF Mailbox Control
	 * register.
	 */
	for (i = 0, p = cmd; i < size; i += 8)
		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
	t4_read_reg(adapter, mbox_data);         /* flush write */

	t4_write_reg(adapter, mbox_ctl,
		     MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adapter, mbox_ctl);          /* flush write */

	/*
	 * Spin waiting for firmware to acknowledge processing our command.
	 */
	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		/*
		 * If we're the owner, see if this is the reply we wanted.
		 */
		v = t4_read_reg(adapter, mbox_ctl);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			/*
			 * If the Message Valid bit isn't on, revoke ownership
			 * of the mailbox and continue waiting for our reply.
			 */
			if ((v & MBMSGVALID) == 0) {
				t4_write_reg(adapter, mbox_ctl,
					     MBOWNER(MBOX_OWNER_NONE));
				continue;
			}

			/*
			 * We now have our reply.  Extract the command return
			 * value, copy the reply back to our caller's buffer
			 * (if specified) and revoke ownership of the mailbox.
			 * We return the (negated) firmware command return
			 * code (this depends on FW_SUCCESS == 0).
			 */

			/* return value in low-order little-endian word */
			v = t4_read_reg(adapter, mbox_data);
			if (FW_CMD_RETVAL_G(v))
				dump_mbox(adapter, "FW Error", mbox_data);

			if (rpl) {
				/* request bit in high-order BE word */
				WARN_ON((be32_to_cpu(*(const u32 *)cmd)
					 & FW_CMD_REQUEST_F) == 0);
				get_mbox_rpl(adapter, rpl, size, mbox_data);
				WARN_ON((be32_to_cpu(*(u32 *)rpl)
					 & FW_CMD_REQUEST_F) != 0);
			}
			t4_write_reg(adapter, mbox_ctl,
				     MBOWNER(MBOX_OWNER_NONE));
			return -FW_CMD_RETVAL_G(v);
		}
	}

	/*
	 * We timed out.  Return the error ...
	 */
	dump_mbox(adapter, "FW Timeout", mbox_data);
	return -ETIMEDOUT;
}
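
/*
 * Note: the rest of this file issues commands through the t4vf_wr_mbox()
 * and t4vf_wr_mbox_ns() helpers (see t4vf_common.h), which are assumed
 * here to be thin wrappers around t4vf_wr_mbox_core() with @sleep_ok set
 * to true and false respectively.  A minimal usage sketch:
 *
 *	struct fw_reset_cmd cmd;
 *	int ret;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	... fill in the command as in t4vf_fw_reset() below ...
 *	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
 *	if (ret)
 *		return ret;	(negated FW error, -EBUSY or -ETIMEDOUT)
 */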

/**
 * hash_mac_addr - return the hash value of a MAC address
 * @addr: the 48-bit Ethernet MAC address
 *
 * Hashes a MAC address according to the hash function used by hardware
 * inexact (hash) address matching.
 */
static int hash_mac_addr(const u8 *addr)
{
	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];

	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;
}
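
/*
 * Worked example (illustrative, following the arithmetic above): for the
 * MAC address 00:07:43:00:00:01 the two 24-bit halves are 0x000743 and
 * 0x000001, which XOR to 0x000742; the two fold steps give 0x75f and the
 * final mask with 0x3f selects hash bucket 31.  The result is used below
 * as a bit number in the 64-bit inexact-match hash vector (see
 * t4vf_alloc_mac_filt() and t4vf_set_addr_hash()).
 */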

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

/**
 * t4vf_port_init - initialize port hardware/software state
 * @adapter: the adapter
 * @pidx: the adapter port index
 */
int t4vf_port_init(struct adapter *adapter, int pidx)
{
	struct port_info *pi = adap2pinfo(adapter, pidx);
	struct fw_vi_cmd vi_cmd, vi_rpl;
	struct fw_port_cmd port_cmd, port_rpl;
	int v;

	/*
	 * Execute a VI Read command to get our Virtual Interface information
	 * like MAC address, etc.
	 */
	memset(&vi_cmd, 0, sizeof(vi_cmd));
	vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				       FW_CMD_REQUEST_F |
				       FW_CMD_READ_F);
	vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
	vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid));
	v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
	if (v)
		return v;

	BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd));
	pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd));
	t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);

	/*
	 * If we don't have read access to our port information, we're done
	 * now.  Otherwise, execute a PORT Read command to get it ...
	 */
	if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
		return 0;

	memset(&port_cmd, 0, sizeof(port_cmd));
	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
					    FW_CMD_REQUEST_F |
					    FW_CMD_READ_F |
					    FW_PORT_CMD_PORTID_V(pi->port_id));
	port_cmd.action_to_len16 =
		cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
			    FW_LEN16(port_cmd));
	v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
	if (v)
		return v;

	v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
	pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
			FW_PORT_CMD_MDIOADDR_G(v) : -1;
	pi->port_type = FW_PORT_CMD_PTYPE_G(v);
	pi->mod_type = FW_PORT_MOD_TYPE_NA;

	init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));

	return 0;
}

/**
 * t4vf_fw_reset - issue a reset to FW
 * @adapter: the adapter
 *
 * Issues a reset command to FW.  For a Physical Function this would
 * result in the Firmware resetting all of its state.  For a Virtual
 * Function this just resets the state associated with the VF.
 */
int t4vf_fw_reset(struct adapter *adapter)
{
	struct fw_reset_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
				      FW_CMD_WRITE_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_query_params - query FW or device parameters
 * @adapter: the adapter
 * @nparams: the number of parameters
 * @params: the parameter names
 * @vals: the parameter values
 *
 * Reads the values of firmware or device parameters.  Up to 7 parameters
 * can be queried at once.
 */
static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
			     const u32 *params, u32 *vals)
{
	int i, ret;
	struct fw_params_cmd cmd, rpl;
	struct fw_params_param *p;
	size_t len16;

	if (nparams > 7)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_READ_F);
	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
				      param[nparams].mnem), 16);
	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
		p->mnem = cpu_to_be32(*params++);

	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (ret == 0)
		for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
			*vals++ = be32_to_cpu(p->val);
	return ret;
}

/**
 * t4vf_set_params - sets FW or device parameters
 * @adapter: the adapter
 * @nparams: the number of parameters
 * @params: the parameter names
 * @vals: the parameter values
 *
 * Sets the values of firmware or device parameters.  Up to 7 parameters
 * can be specified at once.
 */
int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
		    const u32 *params, const u32 *vals)
{
	int i;
	struct fw_params_cmd cmd;
	struct fw_params_param *p;
	size_t len16;

	if (nparams > 7)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F);
	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
				      param[nparams]), 16);
	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
		p->mnem = cpu_to_be32(*params++);
		p->val = cpu_to_be32(*vals++);
	}

	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
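
/*
 * A note on the length calculation above (a sketch based on the usual
 * layout of struct fw_params_cmd: an 8-byte command header followed by
 * 8-byte {mnem, val} parameter slots): firmware command lengths are
 * expressed in 16-byte units, so a query of two parameters covers
 * 8 + 2 * 8 = 24 bytes and DIV_ROUND_UP(24, 16) yields a length field
 * of 2, i.e. 32 bytes on the wire.
 */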

/**
 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 * @adapter: the adapter
 * @qid: the Queue ID
 * @qtype: the Ingress or Egress type for @qid
 * @pbar2_qoffset: BAR2 Queue Offset
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 SGE Queue Registers information associated with the
 * indicated Absolute Queue ID.  These are passed back in return value
 * pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 * This may return an error which indicates that BAR2 SGE Queue
 * registers aren't available.  If an error is not returned, then the
 * following values are returned:
 *
 *   *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *   *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 * require the "Inferred Queue ID" ability may be used.  E.g. the
 * Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 * then these "Inferred Queue ID" registers may not be used.
 */
int t4_bar2_sge_qregs(struct adapter *adapter,
		      unsigned int qid,
		      enum t4_bar2_qtype qtype,
		      u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers.
	 */
	if (is_t4(adapter->params.chip))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.sge_vf_hps + 10;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.sge_vf_eq_qpp
		     : adapter->params.sge.sge_vf_iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}
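
/*
 * Worked example for the calculation above (a sketch; it assumes the usual
 * 128-byte user doorbell region per queue, SGE_UDB_SIZE): with a 4KB SGE
 * page (page_shift = 12) and 8 egress queues per page (qpp_shift = 3),
 * queue ID 20 lands in BAR2 page 20 >> 3 = 2 (byte offset 0x2000) as
 * bar2_qid = 20 & 7 = 4 within that page.  Its doorbell area starts at
 * 4 * 128 = 0x200, which is less than the page size, so the returned
 * offset is 0x2200 and the returned BAR2 Queue ID is 0 (the hardware can
 * infer the Absolute Queue ID from the write address alone).
 */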

/**
 * t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
 * @adapter: the adapter
 *
 * Retrieves various core SGE parameters in the form of hardware SGE
 * register values.  The caller is responsible for decoding these as
 * needed.  The SGE parameters are stored in @adapter->params.sge.
 */
int t4vf_get_sge_params(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	u32 params[7], vals[7];
	int v;

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE));
	params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0));
	params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1));
	params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1));
	params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3));
	params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5));
	v = t4vf_query_params(adapter, 7, params, vals);
	if (v)
		return v;
	sge_params->sge_control = vals[0];
	sge_params->sge_host_page_size = vals[1];
	sge_params->sge_fl_buffer_size[0] = vals[2];
	sge_params->sge_fl_buffer_size[1] = vals[3];
	sge_params->sge_timer_value_0_and_1 = vals[4];
	sge_params->sge_timer_value_2_and_3 = vals[5];
	sge_params->sge_timer_value_4_and_5 = vals[6];

	/* T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately with the Padding Boundary in SGE_CONTROL and the Packing
	 * Boundary in SGE_CONTROL2.  So for T5 and later we need to grab
	 * SGE_CONTROL2 in order to determine how ingress packet data will be
	 * laid out in Packed Buffer Mode.  Unfortunately, older versions of
	 * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
	 * failure grabbing it we throw an error since we can't figure out the
	 * right value.
	 */
	if (!is_t4(adapter->params.chip)) {
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A));
		v = t4vf_query_params(adapter, 1, params, vals);
		if (v != FW_SUCCESS) {
			dev_err(adapter->pdev_dev,
				"Unable to get SGE Control2; "
				"probably old firmware.\n");
			return v;
		}
		sge_params->sge_control2 = vals[0];
	}

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL));
	v = t4vf_query_params(adapter, 2, params, vals);
	if (v)
		return v;
	sge_params->sge_ingress_rx_threshold = vals[0];
	sge_params->sge_congestion_control = vals[1];

	/* For T5 and later we want to use the new BAR2 Doorbells.
	 * Unfortunately, older firmware didn't allow these registers to be
	 * read.
	 */
	if (!is_t4(adapter->params.chip)) {
		u32 whoami;
		unsigned int pf, s_hps, s_qpp;

		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(
				     SGE_EGRESS_QUEUES_PER_PAGE_VF_A));
		params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(
				     SGE_INGRESS_QUEUES_PER_PAGE_VF_A));
		v = t4vf_query_params(adapter, 2, params, vals);
		if (v != FW_SUCCESS) {
			dev_warn(adapter->pdev_dev,
				 "Unable to get VF SGE Queues/Page; "
				 "probably old firmware.\n");
			return v;
		}
		sge_params->sge_egress_queues_per_page = vals[0];
		sge_params->sge_ingress_queues_per_page = vals[1];

		/* We need the Queues/Page for our VF.  This is based on the
		 * PF from which we're instantiated and is indexed in the
		 * register we just read.  Do it once here so other code in
		 * the driver can just use it.
		 */
		whoami = t4_read_reg(adapter,
				     T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI);
		pf = SOURCEPF_GET(whoami);

		s_hps = (HOSTPAGESIZEPF0_S +
			 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
		sge_params->sge_vf_hps =
			((sge_params->sge_host_page_size >> s_hps)
			 & HOSTPAGESIZEPF0_M);

		s_qpp = (QUEUESPERPAGEPF0_S +
			 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
		sge_params->sge_vf_eq_qpp =
			((sge_params->sge_egress_queues_per_page >> s_qpp)
			 & QUEUESPERPAGEPF0_MASK);
		sge_params->sge_vf_iq_qpp =
			((sge_params->sge_ingress_queues_per_page >> s_qpp)
			 & QUEUESPERPAGEPF0_MASK);
	}

	return 0;
}
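
/*
 * The per-PF field extraction above works as follows (a sketch assuming
 * the usual layout where each PF gets a 4-bit field in the Host Page Size
 * and Queues/Page registers): the field for Physical Function "pf" starts
 * at bit HOSTPAGESIZEPF0_S + (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf,
 * e.g. bits 8..11 for PF2; shifting right by that amount and masking with
 * the PF0 mask yields this VF's value.  The sge_vf_hps and sge_vf_*_qpp
 * values cached here are what t4_bar2_sge_qregs() uses to locate a queue's
 * BAR2 doorbell area.
 */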

/**
 * t4vf_get_vpd_params - retrieve device VPD parameters
 * @adapter: the adapter
 *
 * Retrieves various device Vital Product Data parameters.  The parameters
 * are stored in @adapter->params.vpd.
 */
int t4vf_get_vpd_params(struct adapter *adapter)
{
	struct vpd_params *vpd_params = &adapter->params.vpd;
	u32 params[7], vals[7];
	int v;

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
	v = t4vf_query_params(adapter, 1, params, vals);
	if (v)
		return v;
	vpd_params->cclk = vals[0];

	return 0;
}

/**
 * t4vf_get_dev_params - retrieve device parameters
 * @adapter: the adapter
 *
 * Retrieves various device parameters.  The parameters are stored in
 * @adapter->params.dev.
 */
int t4vf_get_dev_params(struct adapter *adapter)
{
	struct dev_params *dev_params = &adapter->params.dev;
	u32 params[7], vals[7];
	int v;

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV));
	v = t4vf_query_params(adapter, 2, params, vals);
	if (v)
		return v;
	dev_params->fwrev = vals[0];
	dev_params->tprev = vals[1];

	return 0;
}

/**
 * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
 * @adapter: the adapter
 *
 * Retrieves global RSS mode and parameters with which we have to live
 * and stores them in the @adapter's RSS parameters.
 */
int t4vf_get_rss_glb_config(struct adapter *adapter)
{
	struct rss_params *rss = &adapter->params.rss;
	struct fw_rss_glb_config_cmd cmd, rpl;
	int v;

	/*
	 * Execute an RSS Global Configuration read command to retrieve
	 * our RSS configuration.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
				      FW_CMD_REQUEST_F |
				      FW_CMD_READ_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	/*
	 * Translate the big-endian RSS Global Configuration into our
	 * cpu-endian format based on the RSS mode.  We also do first level
	 * filtering at this point to weed out modes which don't support
	 * VF Drivers ...
	 */
	rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G(
			be32_to_cpu(rpl.u.manual.mode_pkd));
	switch (rss->mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = be32_to_cpu(
				rpl.u.basicvirtual.synmapen_to_hashtoeplitz);

		rss->u.basicvirtual.synmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0);
		rss->u.basicvirtual.syn4tupenipv6 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0);
		rss->u.basicvirtual.syn2tupenipv6 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0);
		rss->u.basicvirtual.syn4tupenipv4 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0);
		rss->u.basicvirtual.syn2tupenipv4 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0);

		rss->u.basicvirtual.ofdmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0);

		rss->u.basicvirtual.tnlmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0);
		rss->u.basicvirtual.tnlalllookup =
			((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0);

		rss->u.basicvirtual.hashtoeplitz =
			((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0);

		/* we need at least Tunnel Map Enable to be set */
		if (!rss->u.basicvirtual.tnlmapen)
			return -EINVAL;
		break;
	}

	default:
		/* all unknown/unsupported RSS modes result in an error */
		return -EINVAL;
	}

	return 0;
}

/**
 * t4vf_get_vfres - retrieve VF resource limits
 * @adapter: the adapter
 *
 * Retrieves configured resource limits and capabilities for a virtual
 * function.  The results are stored in @adapter->vfres.
 */
int t4vf_get_vfres(struct adapter *adapter)
{
	struct vf_resources *vfres = &adapter->params.vfres;
	struct fw_pfvf_cmd cmd, rpl;
	int v;
	u32 word;

	/*
	 * Execute PFVF Read command to get VF resource limits; bail out early
	 * with error on command failure.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_READ_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	/*
	 * Extract VF resource limits and return success.
	 */
	word = be32_to_cpu(rpl.niqflint_niq);
	vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
	vfres->niq = FW_PFVF_CMD_NIQ_G(word);

	word = be32_to_cpu(rpl.type_to_neq);
	vfres->neq = FW_PFVF_CMD_NEQ_G(word);
	vfres->pmask = FW_PFVF_CMD_PMASK_G(word);

	word = be32_to_cpu(rpl.tc_to_nexactf);
	vfres->tc = FW_PFVF_CMD_TC_G(word);
	vfres->nvi = FW_PFVF_CMD_NVI_G(word);
	vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);

	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
	vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
	vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
	vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);

	return 0;
}

/**
 * t4vf_read_rss_vi_config - read a VI's RSS configuration
 * @adapter: the adapter
 * @viid: Virtual Interface ID
 * @config: pointer to host-native VI RSS Configuration buffer
 *
 * Reads the Virtual Interface's RSS configuration information and
 * translates it into CPU-native format.
 */
int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
			    union rss_vi_config *config)
{
	struct fw_rss_vi_config_cmd cmd, rpl;
	int v;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_READ_F |
				     FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	switch (adapter->params.rss.mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);

		config->basicvirtual.ip6fourtupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0);
		config->basicvirtual.ip6twotupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0);
		config->basicvirtual.ip4fourtupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0);
		config->basicvirtual.ip4twotupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0);
		config->basicvirtual.udpen =
			((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0);
		config->basicvirtual.defaultq =
			FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word);
		break;
	}

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * t4vf_write_rss_vi_config - write a VI's RSS configuration
 * @adapter: the adapter
 * @viid: Virtual Interface ID
 * @config: pointer to host-native VI RSS Configuration buffer
 *
 * Write the Virtual Interface's RSS configuration information
 * (translating it into firmware-native format before writing).
 */
int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
			     union rss_vi_config *config)
{
	struct fw_rss_vi_config_cmd cmd, rpl;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	switch (adapter->params.rss.mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = 0;

		if (config->basicvirtual.ip6fourtupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F;
		if (config->basicvirtual.ip6twotupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F;
		if (config->basicvirtual.ip4fourtupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F;
		if (config->basicvirtual.ip4twotupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F;
		if (config->basicvirtual.udpen)
			word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F;
		word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(
				config->basicvirtual.defaultq);
		cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
		break;
	}

	default:
		return -EINVAL;
	}

	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
}

/**
 * t4vf_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @viid: Virtual Interface of RSS Table Slice
 * @start: starting entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the "Response Queue" (Ingress Queue) lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values.  If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range 0..1023.
 */
int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
			  int start, int n, const u16 *rspq, int nrspq)
{
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	/*
	 * Initialize firmware command template to write the RSS table.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_RSS_IND_TBL_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		__be32 *qp = &cmd.iq0_to_iq2;
		int nq = min(n, 32);
		int ret;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * Account for the "nq" entries we're about to write before
		 * the start of the next loop iteration.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
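			/*
			 * For example, queue IDs 17, 18 and 19 end up packed
			 * into a single 32-bit word below as
			 * FW_RSS_IND_TBL_CMD_IQ0_V(17) |
			 * FW_RSS_IND_TBL_CMD_IQ1_V(18) |
			 * FW_RSS_IND_TBL_CMD_IQ2_V(19), with the two
			 * remaining (reserved) bits left zero; unused slots
			 * in a final partial 3-tuple simply stay 0.
			 */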
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf) {
				nqbuf--;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) |
					    FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) |
					    FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * t4vf_alloc_vi - allocate a virtual interface on a port
 * @adapter: the adapter
 * @port_id: physical port associated with the VI
 *
 * Allocate a new Virtual Interface and bind it to the indicated
 * physical port.  Return the new Virtual Interface Identifier on
 * success, or a [negative] error number on failure.
 */
int t4vf_alloc_vi(struct adapter *adapter, int port_id)
{
	struct fw_vi_cmd cmd, rpl;
	int v;

	/*
	 * Execute a VI command to allocate Virtual Interface and return its
	 * VIID.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
					 FW_VI_CMD_ALLOC_F);
	cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id);
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid));
}

/**
 * t4vf_free_vi -- free a virtual interface
 * @adapter: the adapter
 * @viid: the virtual interface identifier
 *
 * Free a previously allocated Virtual Interface.  Return an error on
 * failure.
 */
int t4vf_free_vi(struct adapter *adapter, int viid)
{
	struct fw_vi_cmd cmd;

	/*
	 * Execute a VI command to free the Virtual Interface.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
					 FW_VI_CMD_FREE_F);
	cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_enable_vi - enable/disable a virtual interface
 * @adapter: the adapter
 * @viid: the Virtual Interface ID
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface.
 */
int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
		   bool rx_en, bool tx_en)
{
	struct fw_vi_enable_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_EXEC_F |
				     FW_VI_ENABLE_CMD_VIID_V(viid));
	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
				       FW_VI_ENABLE_CMD_EEN_V(tx_en) |
				       FW_LEN16(cmd));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_identify_port - identify a VI's port by blinking its LED
 * @adapter: the adapter
 * @viid: the Virtual Interface ID
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
		       unsigned int nblinks)
{
	struct fw_vi_enable_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_EXEC_F |
				     FW_VI_ENABLE_CMD_VIID_V(viid));
	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F |
				       FW_LEN16(cmd));
	cmd.blinkdur = cpu_to_be16(nblinks);
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_set_rxmode - set Rx properties of a virtual interface
 * @adapter: the adapter
 * @viid: the VI id
 * @mtu: the new MTU or -1 for no change
 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
 *	-1 no change
 * @sleep_ok: call is allowed to sleep
 *
 * Sets Rx properties of a virtual interface.
 */
int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
		    int mtu, int promisc, int all_multi, int bcast, int vlanex,
		    bool sleep_ok)
{
	struct fw_vi_rxmode_cmd cmd;

	/* convert to FW values */
	if (mtu < 0)
		mtu = FW_VI_RXMODE_CMD_MTU_M;
	if (promisc < 0)
		promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
	if (all_multi < 0)
		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
	if (bcast < 0)
		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
	if (vlanex < 0)
		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_VI_RXMODE_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	cmd.mtu_to_vlanexen =
		cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
			    FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
			    FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
			    FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
			    FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
	return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
}

/**
 * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
 * @adapter: the adapter
 * @viid: the Virtual Interface Identifier
 * @free: if true any existing filters for this VI id are first removed
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @idx: where to store the index of each allocated filter
 * @hash: pointer to hash address filter bitmap
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an exact-match filter for each of the supplied addresses and
 * sets it to the corresponding address.  If @idx is not %NULL it should
 * have at least @naddr entries, each of which will be set to the index of
 * the filter allocated for the corresponding MAC address.  If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL addresses that fail to allocate an exact filter
 * are hashed and update the hash filter bitmap pointed at by @hash.
 *
 * Returns a negative error number or the number of filters allocated.
 */
int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
			unsigned int naddr, const u8 **addr, u16 *idx,
			u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	unsigned nfilters = 0;
	unsigned int rem = naddr;
	struct fw_vi_mac_cmd cmd, rpl;
	unsigned int max_naddr = is_t4(adapter->params.chip) ?
				 NUM_MPS_CLS_SRAM_L_INSTANCES :
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;

	if (naddr > max_naddr)
		return -EINVAL;

	for (offset = 0; offset < naddr; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
					 ? rem
					 : ARRAY_SIZE(cmd.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
					     FW_CMD_REQUEST_F |
					     FW_CMD_WRITE_F |
					     (free ? FW_CMD_EXEC_F : 0) |
					     FW_VI_MAC_CMD_VIID_V(viid));
		cmd.freemacs_to_len16 =
			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
				    FW_CMD_LEN16_V(len16));

		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx = cpu_to_be16(
				FW_VI_MAC_CMD_VALID_F |
				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
					sleep_ok);
		if (ret && ret != -ENOMEM)
			break;

		for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
			u16 index = FW_VI_MAC_CMD_IDX_G(
				be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset+i] = (index >= max_naddr
						 ? 0xffff
						 : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/*
	 * If there were no errors or we merely ran out of room in our MAC
	 * address arena, return the number of filters actually written.
	 */
	if (ret == 0 || ret == -ENOMEM)
		ret = nfilters;
	return ret;
}

/**
 * t4vf_change_mac - modifies the exact-match filter for a MAC address
 * @adapter: the adapter
 * @viid: the Virtual Interface ID
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: if idx < 0, the new MAC allocation should be persistent
 *
 * Modifies an exact-match filter and sets it to the new MAC address.
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the
 * one being used by the old address value and allocate a new filter for
 * the new address value.  @idx can be -1 if the address is a new
 * addition.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.
 */
int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
		    int idx, const u8 *addr, bool persist)
{
	int ret;
	struct fw_vi_mac_cmd cmd, rpl;
	struct fw_vi_mac_exact *p = &cmd.u.exact[0];
	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
					     u.exact[1]), 16);
	unsigned int max_naddr = is_t4(adapter->params.chip) ?
				 NUM_MPS_CLS_SRAM_L_INSTANCES :
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;

	/*
	 * If this is a new allocation, determine whether it should be
	 * persistent (across a "freemacs" operation) or not.
	 */
	if (idx < 0)
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_VI_MAC_CMD_VIID_V(viid));
	cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
				      FW_VI_MAC_CMD_IDX_V(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (ret == 0) {
		p = &rpl.u.exact[0];
		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
		if (ret >= max_naddr)
			ret = -ENOMEM;
	}
	return ret;
}

/**
 * t4vf_set_addr_hash - program the MAC inexact-match hash filter
 * @adapter: the adapter
 * @viid: the Virtual Interface Identifier
 * @ucast: whether the hash filter should also match unicast addresses
 * @vec: the value to be written to the hash filter
 * @sleep_ok: call is allowed to sleep
 *
 * Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
		       bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd cmd;
	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
					     u.exact[0]), 16);

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_VI_MAC_CMD_VIID_V(viid));
	cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
					    FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
					    FW_CMD_LEN16_V(len16));
	cmd.u.hash.hashvec = cpu_to_be64(vec);
	return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
}

/**
 * t4vf_get_port_stats - collect "port" statistics
 * @adapter: the adapter
 * @pidx: the port index
 * @s: the stats structure to fill
 *
 * Collect statistics for the "port"'s Virtual Interface.
 */
int t4vf_get_port_stats(struct adapter *adapter, int pidx,
			struct t4vf_port_stats *s)
{
	struct port_info *pi = adap2pinfo(adapter, pidx);
	struct fw_vi_stats_vf fwstats;
	unsigned int rem = VI_VF_NUM_STATS;
	__be64 *fwsp = (__be64 *)&fwstats;

	/*
	 * Grab the Virtual Interface statistics a chunk at a time via mailbox
	 * commands.  We could use a Work Request and get all of them at once
	 * but that's an asynchronous interface which is awkward to use.
	 */
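	/*
	 * The counters are fetched at most six at a time, so with the
	 * sixteen counters translated below this loop normally issues three
	 * mailbox commands (6 + 6 + 4), assuming VI_VF_NUM_STATS matches
	 * the layout of struct fw_vi_stats_vf.
	 */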
	while (rem) {
		unsigned int ix = VI_VF_NUM_STATS - rem;
		unsigned int nstats = min(6U, rem);
		struct fw_vi_stats_cmd cmd, rpl;
		size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
			      sizeof(struct fw_vi_stats_ctl));
		size_t len16 = DIV_ROUND_UP(len, 16);
		int ret;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) |
					     FW_VI_STATS_CMD_VIID_V(pi->viid) |
					     FW_CMD_REQUEST_F |
					     FW_CMD_READ_F);
		cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
		cmd.u.ctl.nstats_ix =
			cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) |
				    FW_VI_STATS_CMD_NSTATS_V(nstats));
		ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
		if (ret)
			return ret;

		memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);

		rem -= nstats;
		fwsp += nstats;
	}

	/*
	 * Translate firmware statistics into host native statistics.
	 */
	s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes);
	s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
	s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes);
	s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
	s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes);
	s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
	s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames);
	s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes);
	s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames);

	s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes);
	s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
	s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes);
	s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
	s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes);
	s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);

	s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames);

	return 0;
}

/**
 * t4vf_iq_free - free an ingress queue and its free lists
 * @adapter: the adapter
 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 * @iqid: ingress queue ID
 * @fl0id: FL0 queue ID or 0xffff if no attached FL0
 * @fl1id: FL1 queue ID or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated free lists, if any.
 */
int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
		 unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F |
					 FW_LEN16(cmd));
	cmd.type_to_iqandstindex =
		cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));

	cmd.iqid = cpu_to_be16(iqid);
	cmd.fl0id = cpu_to_be16(fl0id);
	cmd.fl1id = cpu_to_be16(fl1id);
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_eth_eq_free - free an Ethernet egress queue
 * @adapter: the adapter
 * @eqid: egress queue ID
 *
 * Frees an Ethernet egress queue.
 */
int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
{
	struct fw_eq_eth_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F |
					 FW_LEN16(cmd));
	cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_handle_fw_rpl - process a firmware reply message
 * @adapter: the adapter
 * @rpl: start of the firmware message
 *
 * Processes a firmware message, such as link state change messages.
 */
int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
{
	const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
	u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi));

	switch (opcode) {
	case FW_PORT_CMD: {
		/*
		 * Link/module state change message.
		 */
		const struct fw_port_cmd *port_cmd =
			(const struct fw_port_cmd *)rpl;
		u32 stat, mod;
		int action, port_id, link_ok, speed, fc, pidx;

		/*
		 * Extract various fields from port status change message.
		 */
		action = FW_PORT_CMD_ACTION_G(
			be32_to_cpu(port_cmd->action_to_len16));
		if (action != FW_PORT_ACTION_GET_PORT_INFO) {
			dev_err(adapter->pdev_dev,
				"Unknown firmware PORT reply action %x\n",
				action);
			break;
		}

		port_id = FW_PORT_CMD_PORTID_G(
			be32_to_cpu(port_cmd->op_to_portid));

		stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
		link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
		speed = 0;
		fc = 0;
		if (stat & FW_PORT_CMD_RXPAUSE_F)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE_F)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
			speed = 100;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
			speed = 1000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
			speed = 10000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
			speed = 40000;

		/*
		 * Scan all of our "ports" (Virtual Interfaces) looking for
		 * those bound to the physical port which has changed.  If
		 * our recorded state doesn't match the current state,
		 * signal that change to the OS code.
		 */
		for_each_port(adapter, pidx) {
			struct port_info *pi = adap2pinfo(adapter, pidx);
			struct link_config *lc;

			if (pi->port_id != port_id)
				continue;

			lc = &pi->link_cfg;

			mod = FW_PORT_CMD_MODTYPE_G(stat);
			if (mod != pi->mod_type) {
				pi->mod_type = mod;
				t4vf_os_portmod_changed(adapter, pidx);
			}

			if (link_ok != lc->link_ok || speed != lc->speed ||
			    fc != lc->fc) {
				/* something changed */
				lc->link_ok = link_ok;
				lc->speed = speed;
				lc->fc = fc;
				lc->supported =
					be16_to_cpu(port_cmd->u.info.pcap);
				t4vf_os_link_changed(adapter, pidx, link_ok);
			}
		}
		break;
	}

	default:
		dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
			opcode);
	}
	return 0;
}

/**
 * t4vf_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables and determine the adapter chip type.
 */
int t4vf_prep_adapter(struct adapter *adapter)
{
	int err;
	unsigned int chipid;

	/* Wait for the device to become ready before proceeding ...
	 */
	err = t4vf_wait_dev_ready(adapter);
	if (err)
		return err;

	/* Default port and clock for debugging in case we can't reach
	 * firmware.
	 */
	adapter->params.nports = 1;
	adapter->params.vfres.pmask = 1;
	adapter->params.vpd.cclk = 50000;

	adapter->params.chip = 0;
	switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
		break;

	case CHELSIO_T5:
		chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
		break;
	}

	return 0;
}