/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_values.h"
#include "../cxgb4/t4fw_api.h"

/*
 * Wait for the device to become ready (signified by our "who am I" register
 * returning a value other than all 1's).  Return an error if it doesn't
 * become ready ...
 */
int t4vf_wait_dev_ready(struct adapter *adapter)
{
	const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
	const u32 notready1 = 0xffffffff;
	const u32 notready2 = 0xeeeeeeee;
	u32 val;

	val = t4_read_reg(adapter, whoami);
	if (val != notready1 && val != notready2)
		return 0;
	msleep(500);
	val = t4_read_reg(adapter, whoami);
	if (val != notready1 && val != notready2)
		return 0;
	else
		return -EIO;
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order
 * (since the firmware data structures are specified in a big-endian layout).
 */
static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
			 u32 mbox_data)
{
	for ( ; size; size -= 8, mbox_data += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
}

/*
 * Dump contents of mailbox with a leading tag.
 */
static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
{
	dev_err(adapter->pdev_dev,
		"mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag,
		(unsigned long long)t4_read_reg64(adapter, mbox_data + 0),
		(unsigned long long)t4_read_reg64(adapter, mbox_data + 8),
		(unsigned long long)t4_read_reg64(adapter, mbox_data + 16),
		(unsigned long long)t4_read_reg64(adapter, mbox_data + 24),
		(unsigned long long)t4_read_reg64(adapter, mbox_data + 32),
		(unsigned long long)t4_read_reg64(adapter, mbox_data + 40),
		(unsigned long long)t4_read_reg64(adapter, mbox_data + 48),
		(unsigned long long)t4_read_reg64(adapter, mbox_data + 56));
}

/**
 * t4vf_wr_mbox_core - send a command to FW through the mailbox
 * @adapter: the adapter
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the mailbox and waits for the
 * FW to execute the command.  If @rpl is not %NULL it is used to store
 * the FW's reply to the command.  The command and its optional reply
 * are of the same length.  FW can take up to 500 ms to respond.
 * @sleep_ok determines whether we may sleep while awaiting the response.
 * If sleeping is allowed we use progressive backoff otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
		      void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	int i, ms, delay_idx;
	const __be64 *p;
	u32 mbox_data = T4VF_MBDATA_BASE_ADDR;
	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;

	/*
	 * Commands must be multiples of 16 bytes in length and may not be
	 * larger than the size of the Mailbox Data register array.
	 */
	if ((size % 16) != 0 ||
	    size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
		return -EINVAL;

	/*
	 * Loop trying to get ownership of the mailbox.  Return an error
	 * if we can't gain ownership.
	 */
	v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
	if (v != MBOX_OWNER_DRV)
		return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;

	/*
	 * Write the command array into the Mailbox Data register array and
	 * transfer ownership of the mailbox to the firmware.
	 *
	 * For the VFs, the Mailbox Data "registers" are actually backed by
	 * T4's "MA" interface rather than PL Registers (as is the case for
	 * the PFs).  Because these are in different coherency domains, the
	 * write to the VF's PL-register-backed Mailbox Control can race in
	 * front of the writes to the MA-backed VF Mailbox Data "registers".
	 * So we need to do a read-back on at least one byte of the VF Mailbox
	 * Data registers before doing the write to the VF Mailbox Control
	 * register.
	 */
	for (i = 0, p = cmd; i < size; i += 8)
		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
	t4_read_reg(adapter, mbox_data);	/* flush write */

	t4_write_reg(adapter, mbox_ctl,
		     MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adapter, mbox_ctl);		/* flush write */

	/*
	 * Spin waiting for firmware to acknowledge processing our command.
	 */
	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		/*
		 * If we're the owner, see if this is the reply we wanted.
		 */
		v = t4_read_reg(adapter, mbox_ctl);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			/*
			 * If the Message Valid bit isn't on, revoke ownership
			 * of the mailbox and continue waiting for our reply.
			 */
			if ((v & MBMSGVALID_F) == 0) {
				t4_write_reg(adapter, mbox_ctl,
					     MBOWNER_V(MBOX_OWNER_NONE));
				continue;
			}

			/*
			 * We now have our reply.  Extract the command return
			 * value, copy the reply back to our caller's buffer
			 * (if specified) and revoke ownership of the mailbox.
			 * We return the (negated) firmware command return
			 * code (this depends on FW_SUCCESS == 0).
			 */

			/* return value in low-order little-endian word */
			v = t4_read_reg(adapter, mbox_data);
			if (FW_CMD_RETVAL_G(v))
				dump_mbox(adapter, "FW Error", mbox_data);

			if (rpl) {
				/* request bit in high-order BE word */
				WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
					 & FW_CMD_REQUEST_F) == 0);
				get_mbox_rpl(adapter, rpl, size, mbox_data);
				WARN_ON((be32_to_cpu(*(__be32 *)rpl)
					 & FW_CMD_REQUEST_F) != 0);
			}
			t4_write_reg(adapter, mbox_ctl,
				     MBOWNER_V(MBOX_OWNER_NONE));
			return -FW_CMD_RETVAL_G(v);
		}
	}

	/*
	 * We timed out.  Return the error ...
	 */
	dump_mbox(adapter, "FW Timeout", mbox_data);
	return -ETIMEDOUT;
}
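
/*
 * Illustrative only (not part of the driver logic): the calling pattern
 * used throughout the rest of this file.  A caller zeroes a firmware
 * command structure, fills in its big-endian fields and hands it (plus an
 * optional reply buffer) to the mailbox, e.g.:
 *
 *	struct fw_reset_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
 *				      FW_CMD_WRITE_F);
 *	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
 *	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
 *
 * t4vf_wr_mbox() and t4vf_wr_mbox_ns() (see t4vf_common.h) are thin
 * wrappers around t4vf_wr_mbox_core() which pass sleep_ok as true and
 * false respectively.
 */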

/**
 * hash_mac_addr - return the hash value of a MAC address
 * @addr: the 48-bit Ethernet MAC address
 *
 * Hashes a MAC address according to the hash function used by the
 * hardware's inexact (hash) address matching.
 */
static int hash_mac_addr(const u8 *addr)
{
	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];

	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;
}
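
/*
 * Worked example (illustrative): for the MAC address 00:07:43:12:34:56,
 *
 *	a = 0x000743, b = 0x123456
 *	a ^= b;		a == 0x123315
 *	a ^= (a >> 12);	a == 0x123236
 *	a ^= (a >> 6);	a == 0x127afe
 *	a & 0x3f	-> 0x3e
 *
 * so this address selects bit 62 of the 64-bit inexact-match hash vector
 * programmed via t4vf_set_addr_hash() below.
 */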

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

/**
 * t4vf_port_init - initialize port hardware/software state
 * @adapter: the adapter
 * @pidx: the adapter port index
 */
int t4vf_port_init(struct adapter *adapter, int pidx)
{
	struct port_info *pi = adap2pinfo(adapter, pidx);
	struct fw_vi_cmd vi_cmd, vi_rpl;
	struct fw_port_cmd port_cmd, port_rpl;
	int v;

	/*
	 * Execute a VI Read command to get our Virtual Interface information
	 * like MAC address, etc.
	 */
	memset(&vi_cmd, 0, sizeof(vi_cmd));
	vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				       FW_CMD_REQUEST_F |
				       FW_CMD_READ_F);
	vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
	vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid));
	v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
	if (v)
		return v;

	BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd));
	pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd));
	t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);

	/*
	 * If we don't have read access to our port information, we're done
	 * now.  Otherwise, execute a PORT Read command to get it ...
	 */
	if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
		return 0;

	memset(&port_cmd, 0, sizeof(port_cmd));
	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
					    FW_CMD_REQUEST_F |
					    FW_CMD_READ_F |
					    FW_PORT_CMD_PORTID_V(pi->port_id));
	port_cmd.action_to_len16 =
		cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
			    FW_LEN16(port_cmd));
	v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
	if (v)
		return v;

	v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
	pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
			FW_PORT_CMD_MDIOADDR_G(v) : -1;
	pi->port_type = FW_PORT_CMD_PTYPE_G(v);
	pi->mod_type = FW_PORT_MOD_TYPE_NA;

	init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));

	return 0;
}

/**
 * t4vf_fw_reset - issue a reset to FW
 * @adapter: the adapter
 *
 * Issues a reset command to FW.  For a Physical Function this would
 * result in the Firmware resetting all of its state.  For a Virtual
 * Function this just resets the state associated with the VF.
 */
int t4vf_fw_reset(struct adapter *adapter)
{
	struct fw_reset_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
				      FW_CMD_WRITE_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_query_params - query FW or device parameters
 * @adapter: the adapter
 * @nparams: the number of parameters
 * @params: the parameter names
 * @vals: the parameter values
 *
 * Reads the values of firmware or device parameters.  Up to 7 parameters
 * can be queried at once.
 */
static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
			     const u32 *params, u32 *vals)
{
	int i, ret;
	struct fw_params_cmd cmd, rpl;
	struct fw_params_param *p;
	size_t len16;

	if (nparams > 7)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_READ_F);
	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
				      param[nparams].mnem), 16);
	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
		p->mnem = htonl(*params++);

	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (ret == 0)
		for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
			*vals++ = be32_to_cpu(p->val);
	return ret;
}
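
/*
 * Illustrative sketch of how the helpers below use t4vf_query_params():
 * a parameter "name" is a packed mnemonic; for example, reading the core
 * clock device parameter looks like
 *
 *	u32 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
 *		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK);
 *	u32 val;
 *	int ret = t4vf_query_params(adapter, 1, &param, &val);
 *
 * which is exactly the pattern used by t4vf_get_vpd_params() et al.
 */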

/**
 * t4vf_set_params - sets FW or device parameters
 * @adapter: the adapter
 * @nparams: the number of parameters
 * @params: the parameter names
 * @vals: the parameter values
 *
 * Sets the values of firmware or device parameters.  Up to 7 parameters
 * can be specified at once.
 */
int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
		    const u32 *params, const u32 *vals)
{
	int i;
	struct fw_params_cmd cmd;
	struct fw_params_param *p;
	size_t len16;

	if (nparams > 7)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F);
	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
				      param[nparams]), 16);
	cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
		p->mnem = cpu_to_be32(*params++);
		p->val = cpu_to_be32(*vals++);
	}

	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
 * @adapter: the adapter
 * @qid: the Queue ID
 * @qtype: the Ingress or Egress type for @qid
 * @pbar2_qoffset: BAR2 Queue Offset
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 SGE Queue Registers information associated with the
 * indicated Absolute Queue ID.  These are passed back in return value
 * pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queues
 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 * This may return an error which indicates that BAR2 SGE Queue
 * registers aren't available.  If an error is not returned, then the
 * following values are returned:
 *
 *   *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *   *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 * require the "Inferred Queue ID" ability may be used.  E.g. the
 * Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 * then these "Inferred Queue ID" registers may not be used.
 */
int t4vf_bar2_sge_qregs(struct adapter *adapter,
			unsigned int qid,
			enum t4_bar2_qtype qtype,
			u64 *pbar2_qoffset,
			unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers.
	 */
	if (is_t4(adapter->params.chip))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.sge_vf_hps + 10;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.sge_vf_eq_qpp
		     : adapter->params.sge.sge_vf_iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}
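
/*
 * Worked example for t4vf_bar2_sge_qregs() (illustrative numbers only):
 * assume an Egress Queue with Absolute Queue ID 73, sge_vf_hps == 2
 * (page_shift == 12, i.e. 4KB SGE pages) and sge_vf_eq_qpp == 3
 * (8 Egress Queues per page).  Then
 *
 *	bar2_page_offset = (73 >> 3) << 12 = 0x9000
 *	bar2_qid         =  73 & 7         = 1
 *	bar2_qid_offset  =  1 * SGE_UDB_SIZE
 *
 * Since the per-queue doorbell area (SGE_UDB_SIZE) is smaller than the
 * 4KB page, the Absolute Queue ID can be inferred from the write address,
 * so the function returns *pbar2_qoffset = 0x9000 + SGE_UDB_SIZE and
 * *pbar2_qid = 0.
 */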

/**
 * t4vf_get_sge_params - retrieve adapter Scatter-Gather Engine parameters
 * @adapter: the adapter
 *
 * Retrieves various core SGE parameters in the form of hardware SGE
 * register values.  The caller is responsible for decoding these as
 * needed.  The SGE parameters are stored in @adapter->params.sge.
 */
int t4vf_get_sge_params(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	u32 params[7], vals[7];
	int v;

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
	params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
	params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
	params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
	params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
	params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
	v = t4vf_query_params(adapter, 7, params, vals);
	if (v)
		return v;
	sge_params->sge_control = vals[0];
	sge_params->sge_host_page_size = vals[1];
	sge_params->sge_fl_buffer_size[0] = vals[2];
	sge_params->sge_fl_buffer_size[1] = vals[3];
	sge_params->sge_timer_value_0_and_1 = vals[4];
	sge_params->sge_timer_value_2_and_3 = vals[5];
	sge_params->sge_timer_value_4_and_5 = vals[6];

	/* T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately with the Padding Boundary in SGE_CONTROL and the Packing
	 * Boundary in SGE_CONTROL2.  So for T5 and later we need to grab
	 * SGE_CONTROL2 in order to determine how ingress packet data will be
	 * laid out in Packed Buffer Mode.  Unfortunately, older versions of
	 * the firmware won't let us retrieve SGE_CONTROL2, so if we get a
	 * failure grabbing it we throw an error since we can't figure out the
	 * right value.
	 */
	if (!is_t4(adapter->params.chip)) {
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A));
		v = t4vf_query_params(adapter, 1, params, vals);
		if (v != FW_SUCCESS) {
			dev_err(adapter->pdev_dev,
				"Unable to get SGE Control2; "
				"probably old firmware.\n");
			return v;
		}
		sge_params->sge_control2 = vals[0];
	}

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
	v = t4vf_query_params(adapter, 2, params, vals);
	if (v)
		return v;
	sge_params->sge_ingress_rx_threshold = vals[0];
	sge_params->sge_congestion_control = vals[1];

	/* For T5 and later we want to use the new BAR2 Doorbells.
	 * Unfortunately, older firmware didn't allow these registers to be
	 * read.
	 */
	if (!is_t4(adapter->params.chip)) {
		u32 whoami;
		unsigned int pf, s_hps, s_qpp;

		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(
				     SGE_EGRESS_QUEUES_PER_PAGE_VF_A));
		params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(
				     SGE_INGRESS_QUEUES_PER_PAGE_VF_A));
		v = t4vf_query_params(adapter, 2, params, vals);
		if (v != FW_SUCCESS) {
			dev_warn(adapter->pdev_dev,
				 "Unable to get VF SGE Queues/Page; "
				 "probably old firmware.\n");
			return v;
		}
		sge_params->sge_egress_queues_per_page = vals[0];
		sge_params->sge_ingress_queues_per_page = vals[1];

		/* We need the Queues/Page for our VF.  This is based on the
		 * PF from which we're instantiated and is indexed in the
		 * register we just read.  Do it once here so other code in
		 * the driver can just use it.
		 */
		whoami = t4_read_reg(adapter,
				     T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
		pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);

		s_hps = (HOSTPAGESIZEPF0_S +
			 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
		sge_params->sge_vf_hps =
			((sge_params->sge_host_page_size >> s_hps)
			 & HOSTPAGESIZEPF0_M);

		s_qpp = (QUEUESPERPAGEPF0_S +
			 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
		sge_params->sge_vf_eq_qpp =
			((sge_params->sge_egress_queues_per_page >> s_qpp)
			 & QUEUESPERPAGEPF0_M);
		sge_params->sge_vf_iq_qpp =
			((sge_params->sge_ingress_queues_per_page >> s_qpp)
			 & QUEUESPERPAGEPF0_M);
	}

	return 0;
}
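
/*
 * Illustrative decode of the packed per-PF fields above (hypothetical
 * values): if this VF is instantiated from PF 2 and the per-PF fields are
 * 4 bits apart (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S == 4), then
 * s_hps == 8 and
 *
 *	sge_vf_hps = (sge_host_page_size >> 8) & HOSTPAGESIZEPF0_M;
 *
 * A resulting value of 2 means 4KB SGE pages, since t4vf_bar2_sge_qregs()
 * above computes page_shift = sge_vf_hps + 10.  The Egress/Ingress
 * Queues-Per-Page values are extracted the same way using the
 * QUEUESPERPAGEPF shift/mask constants.
 */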

/**
 * t4vf_get_vpd_params - retrieve device VPD parameters
 * @adapter: the adapter
 *
 * Retrieves various device Vital Product Data parameters.  The parameters
 * are stored in @adapter->params.vpd.
 */
int t4vf_get_vpd_params(struct adapter *adapter)
{
	struct vpd_params *vpd_params = &adapter->params.vpd;
	u32 params[7], vals[7];
	int v;

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
	v = t4vf_query_params(adapter, 1, params, vals);
	if (v)
		return v;
	vpd_params->cclk = vals[0];

	return 0;
}

/**
 * t4vf_get_dev_params - retrieve device parameters
 * @adapter: the adapter
 *
 * Retrieves various device parameters.  The parameters are stored in
 * @adapter->params.dev.
 */
int t4vf_get_dev_params(struct adapter *adapter)
{
	struct dev_params *dev_params = &adapter->params.dev;
	u32 params[7], vals[7];
	int v;

	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV));
	v = t4vf_query_params(adapter, 2, params, vals);
	if (v)
		return v;
	dev_params->fwrev = vals[0];
	dev_params->tprev = vals[1];

	return 0;
}

/**
 * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
 * @adapter: the adapter
 *
 * Retrieves global RSS mode and parameters with which we have to live
 * and stores them in the @adapter's RSS parameters.
 */
int t4vf_get_rss_glb_config(struct adapter *adapter)
{
	struct rss_params *rss = &adapter->params.rss;
	struct fw_rss_glb_config_cmd cmd, rpl;
	int v;

	/*
	 * Execute an RSS Global Configuration read command to retrieve
	 * our RSS configuration.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
				      FW_CMD_REQUEST_F |
				      FW_CMD_READ_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	/*
	 * Translate the big-endian RSS Global Configuration into our
	 * cpu-endian format based on the RSS mode.  We also do first level
	 * filtering at this point to weed out modes which don't support
	 * VF Drivers ...
	 */
	rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G(
			be32_to_cpu(rpl.u.manual.mode_pkd));
	switch (rss->mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = be32_to_cpu(
				rpl.u.basicvirtual.synmapen_to_hashtoeplitz);

		rss->u.basicvirtual.synmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0);
		rss->u.basicvirtual.syn4tupenipv6 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0);
		rss->u.basicvirtual.syn2tupenipv6 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0);
		rss->u.basicvirtual.syn4tupenipv4 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0);
		rss->u.basicvirtual.syn2tupenipv4 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0);

		rss->u.basicvirtual.ofdmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0);

		rss->u.basicvirtual.tnlmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0);
		rss->u.basicvirtual.tnlalllookup =
			((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0);

		rss->u.basicvirtual.hashtoeplitz =
			((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0);

		/* we need at least Tunnel Map Enable to be set */
		if (!rss->u.basicvirtual.tnlmapen)
			return -EINVAL;
		break;
	}

	default:
		/* all unknown/unsupported RSS modes result in an error */
		return -EINVAL;
	}

	return 0;
}

/**
 * t4vf_get_vfres - retrieve VF resource limits
 * @adapter: the adapter
 *
 * Retrieves configured resource limits and capabilities for a virtual
 * function.  The results are stored in @adapter->vfres.
 */
int t4vf_get_vfres(struct adapter *adapter)
{
	struct vf_resources *vfres = &adapter->params.vfres;
	struct fw_pfvf_cmd cmd, rpl;
	int v;
	u32 word;

	/*
	 * Execute PFVF Read command to get VF resource limits; bail out early
	 * with error on command failure.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_READ_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	/*
	 * Extract VF resource limits and return success.
	 */
	word = be32_to_cpu(rpl.niqflint_niq);
	vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
	vfres->niq = FW_PFVF_CMD_NIQ_G(word);

	word = be32_to_cpu(rpl.type_to_neq);
	vfres->neq = FW_PFVF_CMD_NEQ_G(word);
	vfres->pmask = FW_PFVF_CMD_PMASK_G(word);

	word = be32_to_cpu(rpl.tc_to_nexactf);
	vfres->tc = FW_PFVF_CMD_TC_G(word);
	vfres->nvi = FW_PFVF_CMD_NVI_G(word);
	vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);

	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
	vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
	vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
	vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);

	return 0;
}

/**
 * t4vf_read_rss_vi_config - read a VI's RSS configuration
 * @adapter: the adapter
 * @viid: Virtual Interface ID
 * @config: pointer to host-native VI RSS Configuration buffer
 *
 * Reads the Virtual Interface's RSS configuration information and
 * translates it into CPU-native format.
 */
int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
			    union rss_vi_config *config)
{
	struct fw_rss_vi_config_cmd cmd, rpl;
	int v;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_READ_F |
				     FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	switch (adapter->params.rss.mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);

		config->basicvirtual.ip6fourtupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0);
		config->basicvirtual.ip6twotupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0);
		config->basicvirtual.ip4fourtupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0);
		config->basicvirtual.ip4twotupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0);
		config->basicvirtual.udpen =
			((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0);
		config->basicvirtual.defaultq =
			FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word);
		break;
	}

	default:
		return -EINVAL;
	}

	return 0;
}
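
/*
 * Illustrative read-modify-write sketch: to enable UDP hashing on a VI
 * while leaving the other Basic Virtual RSS settings untouched, a caller
 * would typically pair this routine with t4vf_write_rss_vi_config() below:
 *
 *	union rss_vi_config config;
 *
 *	ret = t4vf_read_rss_vi_config(adapter, viid, &config);
 *	if (ret == 0) {
 *		config.basicvirtual.udpen = 1;
 *		ret = t4vf_write_rss_vi_config(adapter, viid, &config);
 *	}
 */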

/**
 * t4vf_write_rss_vi_config - write a VI's RSS configuration
 * @adapter: the adapter
 * @viid: Virtual Interface ID
 * @config: pointer to host-native VI RSS Configuration buffer
 *
 * Write the Virtual Interface's RSS configuration information
 * (translating it into firmware-native format before writing).
 */
int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
			     union rss_vi_config *config)
{
	struct fw_rss_vi_config_cmd cmd, rpl;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	switch (adapter->params.rss.mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = 0;

		if (config->basicvirtual.ip6fourtupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F;
		if (config->basicvirtual.ip6twotupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F;
		if (config->basicvirtual.ip4fourtupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F;
		if (config->basicvirtual.ip4twotupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F;
		if (config->basicvirtual.udpen)
			word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F;
		word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(
				config->basicvirtual.defaultq);
		cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
		break;
	}

	default:
		return -EINVAL;
	}

	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
}

/**
 * t4vf_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @viid: Virtual Interface of RSS Table Slice
 * @start: starting entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the "Response Queue" (Ingress Queue) lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values.  If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range 0..1023.
 */
int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
			  int start, int n, const u16 *rspq, int nrspq)
{
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	/*
	 * Initialize firmware command template to write the RSS table.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_RSS_IND_TBL_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		__be32 *qp = &cmd.iq0_to_iq2;
		int nq = min(n, 32);
		int ret;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * Account for the "nq" entries this command will write
		 * before the next loop iteration.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf) {
				nqbuf--;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) |
					    FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) |
					    FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
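
/*
 * Packing example (illustrative): with Ingress Queue IDs 17, 18 and 19 in
 * one 3-tuple, and assuming IQ0 occupies the most-significant of the three
 * 10-bit fields, the 32-bit word written above is
 *
 *	FW_RSS_IND_TBL_CMD_IQ0_V(17) |
 *	FW_RSS_IND_TBL_CMD_IQ1_V(18) |
 *	FW_RSS_IND_TBL_CMD_IQ2_V(19)	== (17 << 20) | (18 << 10) | 19
 *
 * with the top two bits of the word left as zero (reserved).
 */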

/**
 * t4vf_alloc_vi - allocate a virtual interface on a port
 * @adapter: the adapter
 * @port_id: physical port associated with the VI
 *
 * Allocate a new Virtual Interface and bind it to the indicated
 * physical port.  Return the new Virtual Interface Identifier on
 * success, or a [negative] error number on failure.
 */
int t4vf_alloc_vi(struct adapter *adapter, int port_id)
{
	struct fw_vi_cmd cmd, rpl;
	int v;

	/*
	 * Execute a VI command to allocate Virtual Interface and return its
	 * VIID.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
					 FW_VI_CMD_ALLOC_F);
	cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id);
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid));
}

/**
 * t4vf_free_vi -- free a virtual interface
 * @adapter: the adapter
 * @viid: the virtual interface identifier
 *
 * Free a previously allocated Virtual Interface.  Return an error on
 * failure.
 */
int t4vf_free_vi(struct adapter *adapter, int viid)
{
	struct fw_vi_cmd cmd;

	/*
	 * Execute a VI command to free the Virtual Interface.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
					 FW_VI_CMD_FREE_F);
	cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_enable_vi - enable/disable a virtual interface
 * @adapter: the adapter
 * @viid: the Virtual Interface ID
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface.
 */
int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
		   bool rx_en, bool tx_en)
{
	struct fw_vi_enable_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_EXEC_F |
				     FW_VI_ENABLE_CMD_VIID_V(viid));
	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
				       FW_VI_ENABLE_CMD_EEN_V(tx_en) |
				       FW_LEN16(cmd));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_identify_port - identify a VI's port by blinking its LED
 * @adapter: the adapter
 * @viid: the Virtual Interface ID
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
		       unsigned int nblinks)
{
	struct fw_vi_enable_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_EXEC_F |
				     FW_VI_ENABLE_CMD_VIID_V(viid));
	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F |
				       FW_LEN16(cmd));
	cmd.blinkdur = cpu_to_be16(nblinks);
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_set_rxmode - set Rx properties of a virtual interface
 * @adapter: the adapter
 * @viid: the VI id
 * @mtu: the new MTU or -1 for no change
 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
 *	-1 no change
 * @sleep_ok: call is allowed to sleep
 *
 * Sets Rx properties of a virtual interface.
 */
int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
		    int mtu, int promisc, int all_multi, int bcast, int vlanex,
		    bool sleep_ok)
{
	struct fw_vi_rxmode_cmd cmd;

	/* convert to FW values */
	if (mtu < 0)
		mtu = FW_VI_RXMODE_CMD_MTU_M;
	if (promisc < 0)
		promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
	if (all_multi < 0)
		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
	if (bcast < 0)
		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
	if (vlanex < 0)
		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_VI_RXMODE_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	cmd.mtu_to_vlanexen =
		cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
			    FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
			    FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
			    FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
			    FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
	return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
}
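
/*
 * Illustrative call: enable promiscuous mode on a VI while leaving the
 * MTU, all-multi, broadcast and VLAN-extraction settings unchanged, using
 * the -1 "no change" convention documented above:
 *
 *	ret = t4vf_set_rxmode(adapter, viid, -1, 1, -1, -1, -1, true);
 */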

/**
 * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
 * @adapter: the adapter
 * @viid: the Virtual Interface Identifier
 * @free: if true any existing filters for this VI id are first removed
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @idx: where to store the index of each allocated filter
 * @hash: pointer to hash address filter bitmap
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an exact-match filter for each of the supplied addresses and
 * sets it to the corresponding address.  If @idx is not %NULL it should
 * have at least @naddr entries, each of which will be set to the index of
 * the filter allocated for the corresponding MAC address.  If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL, addresses that fail to allocate an exact filter
 * are hashed and the hash filter bitmap pointed to by @hash is updated.
 *
 * Returns a negative error number or the number of filters allocated.
 */
int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
			unsigned int naddr, const u8 **addr, u16 *idx,
			u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	unsigned nfilters = 0;
	unsigned int rem = naddr;
	struct fw_vi_mac_cmd cmd, rpl;
	unsigned int max_naddr = adapter->params.arch.mps_tcam_size;

	if (naddr > max_naddr)
		return -EINVAL;

	for (offset = 0; offset < naddr; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
					 ? rem
					 : ARRAY_SIZE(cmd.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
					     FW_CMD_REQUEST_F |
					     FW_CMD_WRITE_F |
					     (free ? FW_CMD_EXEC_F : 0) |
					     FW_VI_MAC_CMD_VIID_V(viid));
		cmd.freemacs_to_len16 =
			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
				    FW_CMD_LEN16_V(len16));

		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx = cpu_to_be16(
				FW_VI_MAC_CMD_VALID_F |
				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
					sleep_ok);
		if (ret && ret != -ENOMEM)
			break;

		for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
			u16 index = FW_VI_MAC_CMD_IDX_G(
				be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset+i] =
					(index >= max_naddr
					 ? 0xffff
					 : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/*
	 * If there were no errors or we merely ran out of room in our MAC
	 * address arena, return the number of filters actually written.
	 */
	if (ret == 0 || ret == -ENOMEM)
		ret = nfilters;
	return ret;
}
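
/*
 * Illustrative call (the "macs" array below is hypothetical): program two
 * unicast addresses, record the exact-match filter index assigned to each
 * and collect any overflow into a hash bitmap:
 *
 *	const u8 *macs[2] = { addr0, addr1 };
 *	u16 filt_idx[2];
 *	u64 hash = 0;
 *
 *	ret = t4vf_alloc_mac_filt(adapter, viid, false, 2, macs,
 *				  filt_idx, &hash, true);
 *
 * A returned index of 0xffff means that address fell back to the hash
 * filter, which can then be programmed with t4vf_set_addr_hash() below.
 */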

/**
 * t4vf_change_mac - modifies the exact-match filter for a MAC address
 * @adapter: the adapter
 * @viid: the Virtual Interface ID
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: if idx < 0, the new MAC allocation should be persistent
 *
 * Modifies an exact-match filter and sets it to the new MAC address.
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the
 * one being used by the old address value and allocate a new filter for
 * the new address value.  @idx can be -1 if the address is a new
 * addition.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.
 */
int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
		    int idx, const u8 *addr, bool persist)
{
	int ret;
	struct fw_vi_mac_cmd cmd, rpl;
	struct fw_vi_mac_exact *p = &cmd.u.exact[0];
	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
					     u.exact[1]), 16);
	unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;

	/*
	 * If this is a new allocation, determine whether it should be
	 * persistent (across a "freemacs" operation) or not.
	 */
	if (idx < 0)
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_VI_MAC_CMD_VIID_V(viid));
	cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
				      FW_VI_MAC_CMD_IDX_V(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (ret == 0) {
		p = &rpl.u.exact[0];
		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}

/**
 * t4vf_set_addr_hash - program the MAC inexact-match hash filter
 * @adapter: the adapter
 * @viid: the Virtual Interface Identifier
 * @ucast: whether the hash filter should also match unicast addresses
 * @vec: the value to be written to the hash filter
 * @sleep_ok: call is allowed to sleep
 *
 * Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
		       bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd cmd;
	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
					     u.exact[0]), 16);

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_VI_ENABLE_CMD_VIID_V(viid));
	cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
					    FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
					    FW_CMD_LEN16_V(len16));
	cmd.u.hash.hashvec = cpu_to_be64(vec);
	return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
}
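
/*
 * Illustrative call: program the hash filter to match one extra multicast
 * address, using hash_mac_addr() above to select the bit (this mirrors
 * what t4vf_alloc_mac_filt() does for overflow addresses):
 *
 *	u64 vec = 1ULL << hash_mac_addr(addr);
 *
 *	ret = t4vf_set_addr_hash(adapter, viid, false, vec, true);
 */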

/**
 * t4vf_get_port_stats - collect "port" statistics
 * @adapter: the adapter
 * @pidx: the port index
 * @s: the stats structure to fill
 *
 * Collect statistics for the "port"'s Virtual Interface.
 */
int t4vf_get_port_stats(struct adapter *adapter, int pidx,
			struct t4vf_port_stats *s)
{
	struct port_info *pi = adap2pinfo(adapter, pidx);
	struct fw_vi_stats_vf fwstats;
	unsigned int rem = VI_VF_NUM_STATS;
	__be64 *fwsp = (__be64 *)&fwstats;

	/*
	 * Grab the Virtual Interface statistics a chunk at a time via mailbox
	 * commands.  We could use a Work Request and get all of them at once
	 * but that's an asynchronous interface which is awkward to use.
	 */
	while (rem) {
		unsigned int ix = VI_VF_NUM_STATS - rem;
		unsigned int nstats = min(6U, rem);
		struct fw_vi_stats_cmd cmd, rpl;
		size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
			      sizeof(struct fw_vi_stats_ctl));
		size_t len16 = DIV_ROUND_UP(len, 16);
		int ret;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) |
					     FW_VI_STATS_CMD_VIID_V(pi->viid) |
					     FW_CMD_REQUEST_F |
					     FW_CMD_READ_F);
		cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
		cmd.u.ctl.nstats_ix =
			cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) |
				    FW_VI_STATS_CMD_NSTATS_V(nstats));
		ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
		if (ret)
			return ret;

		memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);

		rem -= nstats;
		fwsp += nstats;
	}

	/*
	 * Translate firmware statistics into host native statistics.
	 */
	s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes);
	s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
	s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes);
	s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
	s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes);
	s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
	s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames);
	s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes);
	s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames);

	s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes);
	s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
	s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes);
	s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
	s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes);
	s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);

	s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames);

	return 0;
}

/**
 * t4vf_iq_free - free an ingress queue and its free lists
 * @adapter: the adapter
 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 * @iqid: ingress queue ID
 * @fl0id: FL0 queue ID or 0xffff if no attached FL0
 * @fl1id: FL1 queue ID or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated free lists, if any.
 */
int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
		 unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F |
					 FW_LEN16(cmd));
	cmd.type_to_iqandstindex =
		cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));

	cmd.iqid = cpu_to_be16(iqid);
	cmd.fl0id = cpu_to_be16(fl0id);
	cmd.fl1id = cpu_to_be16(fl1id);
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_eth_eq_free - free an Ethernet egress queue
 * @adapter: the adapter
 * @eqid: egress queue ID
 *
 * Frees an Ethernet egress queue.
 */
int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
{
	struct fw_eq_eth_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F |
					 FW_LEN16(cmd));
	cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_handle_fw_rpl - process a firmware reply message
 * @adapter: the adapter
 * @rpl: start of the firmware message
 *
 * Processes a firmware message, such as link state change messages.
 */
int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
{
	const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
	u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi));

	switch (opcode) {
	case FW_PORT_CMD: {
		/*
		 * Link/module state change message.
		 */
		const struct fw_port_cmd *port_cmd =
			(const struct fw_port_cmd *)rpl;
		u32 stat, mod;
		int action, port_id, link_ok, speed, fc, pidx;

		/*
		 * Extract various fields from port status change message.
		 */
		action = FW_PORT_CMD_ACTION_G(
			be32_to_cpu(port_cmd->action_to_len16));
		if (action != FW_PORT_ACTION_GET_PORT_INFO) {
			dev_err(adapter->pdev_dev,
				"Unknown firmware PORT reply action %x\n",
				action);
			break;
		}

		port_id = FW_PORT_CMD_PORTID_G(
			be32_to_cpu(port_cmd->op_to_portid));

		stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
		link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
		speed = 0;
		fc = 0;
		if (stat & FW_PORT_CMD_RXPAUSE_F)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE_F)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
			speed = 100;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
			speed = 1000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
			speed = 10000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
			speed = 40000;

		/*
		 * Scan all of our "ports" (Virtual Interfaces) looking for
		 * those bound to the physical port which has changed.  If
		 * our recorded state doesn't match the current state,
		 * signal that change to the OS code.
		 */
		for_each_port(adapter, pidx) {
			struct port_info *pi = adap2pinfo(adapter, pidx);
			struct link_config *lc;

			if (pi->port_id != port_id)
				continue;

			lc = &pi->link_cfg;

			mod = FW_PORT_CMD_MODTYPE_G(stat);
			if (mod != pi->mod_type) {
				pi->mod_type = mod;
				t4vf_os_portmod_changed(adapter, pidx);
			}

			if (link_ok != lc->link_ok || speed != lc->speed ||
			    fc != lc->fc) {
				/* something changed */
				lc->link_ok = link_ok;
				lc->speed = speed;
				lc->fc = fc;
				lc->supported =
					be16_to_cpu(port_cmd->u.info.pcap);
				t4vf_os_link_changed(adapter, pidx, link_ok);
			}
		}
		break;
	}

	default:
		dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
			opcode);
	}
	return 0;
}

/**
 * t4vf_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Wait for the device to become ready, set a few safe default parameter
 * values and determine which chip (T4/T5/T6) we're running on.
 */
int t4vf_prep_adapter(struct adapter *adapter)
{
	int err;
	unsigned int chipid;

	/* Wait for the device to become ready before proceeding ...
	 */
	err = t4vf_wait_dev_ready(adapter);
	if (err)
		return err;

	/* Default port and clock for debugging in case we can't reach
	 * firmware.
	 */
	adapter->params.nports = 1;
	adapter->params.vfres.pmask = 1;
	adapter->params.vpd.cclk = 50000;

	adapter->params.chip = 0;
	switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
		adapter->params.arch.sge_fl_db = DBPRIO_F;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_CLS_SRAM_L_INSTANCES;
		break;

	case CHELSIO_T5:
		chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		break;

	case CHELSIO_T6:
		chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		break;
	}

	return 0;
}