/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

struct qed_l2_info {
	u32 queues;
	unsigned long **pp_qid_usage;

	/* The lock is meant to synchronize access to the qid usage */
	struct mutex lock;
};

int qed_l2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_l2_info *p_l2_info;
	unsigned long **pp_qids;
	u32 i;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		return 0;

	p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
	if (!p_l2_info)
		return -ENOMEM;
	p_hwfn->p_l2_info = p_l2_info;

	if (IS_PF(p_hwfn->cdev)) {
		p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
	} else {
		u8 rx = 0, tx = 0;

		qed_vf_get_num_rxqs(p_hwfn, &rx);
		qed_vf_get_num_txqs(p_hwfn, &tx);

		p_l2_info->queues = max_t(u8, rx, tx);
	}

	pp_qids = kzalloc(sizeof(unsigned long *) * p_l2_info->queues,
			  GFP_KERNEL);
	if (!pp_qids)
		return -ENOMEM;
	p_l2_info->pp_qid_usage = pp_qids;

	for (i = 0; i < p_l2_info->queues; i++) {
		pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
		if (!pp_qids[i])
			return -ENOMEM;
	}

	return 0;
}
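
/* Sizing note (illustrative): each queue-zone above gets a
 * MAX_QUEUES_PER_QZONE / 8 byte bitmap, i.e. one bit per CID that may be
 * opened on that zone. Assuming MAX_QUEUES_PER_QZONE were 32, every
 * pp_qid_usage[] entry would be a 4-byte map, and two queues started on
 * the same zone would claim bits 0 and 1.
 */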

void qed_l2_setup(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		return;

	mutex_init(&p_hwfn->p_l2_info->lock);
}

void qed_l2_free(struct qed_hwfn *p_hwfn)
{
	u32 i;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		return;

	if (!p_hwfn->p_l2_info)
		return;

	if (!p_hwfn->p_l2_info->pp_qid_usage)
		goto out_l2_info;

	/* Free until we hit the first uninitialized entry */
	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
		if (!p_hwfn->p_l2_info->pp_qid_usage[i])
			break;
		kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
	}

	kfree(p_hwfn->p_l2_info->pp_qid_usage);

out_l2_info:
	kfree(p_hwfn->p_l2_info);
	p_hwfn->p_l2_info = NULL;
}

static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
					struct qed_queue_cid *p_cid)
{
	struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
	u16 queue_id = p_cid->rel.queue_id;
	bool b_rc = true;
	u8 first;

	mutex_lock(&p_l2_info->lock);

	if (queue_id >= p_l2_info->queues) {
		DP_NOTICE(p_hwfn,
			  "Requested to increase usage for qzone %04x out of %08x\n",
			  queue_id, p_l2_info->queues);
		b_rc = false;
		goto out;
	}

	first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
					MAX_QUEUES_PER_QZONE);
	if (first >= MAX_QUEUES_PER_QZONE) {
		b_rc = false;
		goto out;
	}

	__set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
	p_cid->qid_usage_idx = first;

out:
	mutex_unlock(&p_l2_info->lock);
	return b_rc;
}

static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
					struct qed_queue_cid *p_cid)
{
	mutex_lock(&p_hwfn->p_l2_info->lock);

	clear_bit(p_cid->qid_usage_idx,
		  p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

	mutex_unlock(&p_hwfn->p_l2_info->lock);
}

void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
			       struct qed_queue_cid *p_cid)
{
	bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);

	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
		_qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);

	/* For PF's VFs we maintain the index inside queue-zone in IOV */
	if (p_cid->vfid == QED_QUEUE_CID_SELF)
		qed_eth_queue_qid_usage_del(p_hwfn, p_cid);

	vfree(p_cid);
}

/* This internal function is only meant to be directly called by PFs
 * initializing CIDs for their VFs.
 */
static struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		      u16 opaque_fid,
		      u32 cid,
		      struct qed_queue_start_common_params *p_params,
		      bool b_is_rx,
		      struct qed_queue_cid_vf_params *p_vf_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = vmalloc(sizeof(*p_cid));
	if (!p_cid)
		return NULL;
	memset(p_cid, 0, sizeof(*p_cid));

	p_cid->opaque_fid = opaque_fid;
	p_cid->cid = cid;
	p_cid->p_owner = p_hwfn;

	/* Fill in parameters */
	p_cid->rel.vport_id = p_params->vport_id;
	p_cid->rel.queue_id = p_params->queue_id;
	p_cid->rel.stats_id = p_params->stats_id;
	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
	p_cid->b_is_rx = b_is_rx;
	p_cid->sb_idx = p_params->sb_idx;

	/* Fill-in bits related to VFs' queues if information was provided */
	if (p_vf_params) {
		p_cid->vfid = p_vf_params->vfid;
		p_cid->vf_qid = p_vf_params->vf_qid;
		p_cid->vf_legacy = p_vf_params->vf_legacy;
	} else {
		p_cid->vfid = QED_QUEUE_CID_SELF;
	}

	/* Don't try calculating the absolute indices for VFs */
	if (IS_VF(p_hwfn->cdev)) {
		p_cid->abs = p_cid->rel;
		goto out;
	}

	/* Calculate the engine-absolute indices of the resources.
	 * This would guarantee they're valid later on.
	 * In some cases [SBs] we already have the right values.
	 */
	rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
	if (rc)
		goto fail;

	rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
	if (rc)
		goto fail;

	/* In case of a PF configuring its VF's queues, the stats-id is already
	 * absolute [since there's a single index that's suitable per-VF].
	 */
	if (p_cid->vfid == QED_QUEUE_CID_SELF) {
		rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
				  &p_cid->abs.stats_id);
		if (rc)
			goto fail;
	} else {
		p_cid->abs.stats_id = p_cid->rel.stats_id;
	}

out:
	/* VF-images have provided the qid_usage_idx on their own.
	 * Otherwise, we need to allocate a unique one.
	 */
	if (!p_vf_params) {
		if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
			goto fail;
	} else {
		p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
	}

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
		   p_cid->opaque_fid,
		   p_cid->cid,
		   p_cid->rel.vport_id,
		   p_cid->abs.vport_id,
		   p_cid->rel.queue_id,
		   p_cid->qid_usage_idx,
		   p_cid->abs.queue_id,
		   p_cid->rel.stats_id,
		   p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);

	return p_cid;

fail:
	vfree(p_cid);
	return NULL;
}
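
/* Note on qid_usage_idx (follows from the logic above): the PF's own
 * queues allocate it from the per-qzone bitmap, while a PF creating a
 * CID on behalf of a VF adopts the index supplied in
 * p_vf_params->qid_usage_idx.
 */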

struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		     u16 opaque_fid,
		     struct qed_queue_start_common_params *p_params,
		     bool b_is_rx,
		     struct qed_queue_cid_vf_params *p_vf_params)
{
	struct qed_queue_cid *p_cid;
	u8 vfid = QED_CXT_PF_CID;
	bool b_legacy_vf = false;
	u32 cid = 0;

	/* In case of legacy VFs, the CID can be derived from the additional
	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
	 * use the vf_qid for this purpose as well.
	 */
	if (p_vf_params) {
		vfid = p_vf_params->vfid;

		if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
			b_legacy_vf = true;
			cid = p_vf_params->vf_qid;
		}
	}

	/* Get a unique firmware CID for this queue, in case it's a PF.
	 * VFs don't need a CID as the queue configuration will be done
	 * by PF.
	 */
	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
		if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
					 &cid, vfid)) {
			DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
			return NULL;
		}
	}

	p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
				      p_params, b_is_rx, p_vf_params);
	if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
		_qed_cxt_release_cid(p_hwfn, cid, vfid);

	return p_cid;
}

static struct qed_queue_cid *
qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			bool b_is_rx,
			struct qed_queue_start_common_params *p_params)
{
	return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
				    NULL);
}
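
/* Usage sketch (hypothetical caller): the helpers above are meant to be
 * paired, e.g.
 *
 *	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, &params);
 *	if (!p_cid)
 *		return -ENOMEM;
 *	...
 *	qed_eth_queue_cid_release(p_hwfn, p_cid);
 *
 * On an internal failure qed_eth_queue_to_cid() releases any firmware
 * CID it acquired before returning NULL, so callers don't leak CIDs.
 */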

int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;
	u16 rx_mode = 0;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = cpu_to_le16(p_params->mtu);
	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));

	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case QED_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;

	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
						  p_params->concrete_fid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					     p_params->mtu,
					     p_params->remove_inner_vlan,
					     p_params->tpa_mode,
					     p_params->max_buffers_per_cqe,
					     p_params->only_untagged);
	}

	return qed_sp_eth_vport_start(p_hwfn, p_params);
}
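
/* The RSS helper below fills the firmware indirection table with
 * engine-absolute queue IDs taken from the queue-cid handles; a NULL
 * entry in rss_ind_table is treated as a configuration error (-EINVAL).
 */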

static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_rss)
{
	struct eth_vport_rss_config *p_config;
	u16 capabilities = 0;
	int i, table_size;
	int rc = 0;

	if (!p_rss) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}
	p_config = &p_ramrod->rss_config;

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
	if (rc)
		return rc;

	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
	p_config->update_rss_key = p_rss->update_rss_key;

	p_config->rss_mode = p_rss->rss_enable ?
			     ETH_VPORT_RSS_MODE_REGULAR :
			     ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
	p_config->tbl_size = p_rss->rss_table_size_log;

	p_config->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   p_config->rss_mode,
		   p_config->update_rss_capabilities,
		   p_config->capabilities,
		   p_config->update_rss_ind_table, p_config->update_rss_key);

	table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
			   1 << p_config->tbl_size);
	for (i = 0; i < table_size; i++) {
		struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];

		if (!p_queue)
			return -EINVAL;

		p_config->indirection_table[i] =
		    cpu_to_le16(p_queue->abs.queue_id);
	}

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "Configured RSS indirection table [%d entries]:\n",
		   table_size);
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFUP,
			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
			   le16_to_cpu(p_config->indirection_table[i]),
			   le16_to_cpu(p_config->indirection_table[i + 1]),
			   le16_to_cpu(p_config->indirection_table[i + 2]),
			   le16_to_cpu(p_config->indirection_table[i + 3]),
			   le16_to_cpu(p_config->indirection_table[i + 4]),
			   le16_to_cpu(p_config->indirection_table[i + 5]),
			   le16_to_cpu(p_config->indirection_table[i + 6]),
			   le16_to_cpu(p_config->indirection_table[i + 7]),
			   le16_to_cpu(p_config->indirection_table[i + 8]),
			   le16_to_cpu(p_config->indirection_table[i + 9]),
			   le16_to_cpu(p_config->indirection_table[i + 10]),
			   le16_to_cpu(p_config->indirection_table[i + 11]),
			   le16_to_cpu(p_config->indirection_table[i + 12]),
			   le16_to_cpu(p_config->indirection_table[i + 13]),
			   le16_to_cpu(p_config->indirection_table[i + 14]),
			   le16_to_cpu(p_config->indirection_table[i + 15]));
	}

	for (i = 0; i < 10; i++)
		p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);

	return rc;
}

static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;

	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}
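
/* Worked example (values follow from the SET_FIELD logic above): an Rx
 * filter of QED_ACCEPT_UCAST_MATCHED | QED_ACCEPT_BCAST clears
 * UCAST_DROP_ALL, leaves UCAST_ACCEPT_UNMATCHED at zero, keeps
 * MCAST_DROP_ALL set and sets BCAST_ACCEPT_ALL; MCAST_ACCEPT_ALL would
 * only be set if both matched and unmatched multicast were accepted.
 */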

static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct qed_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}

static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)
		return;

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = (u32 *)p_params->bins;

		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
	}
}

int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0, val;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc) {
		/* Return spq entry which is taken in qed_sp_init_request() */
		qed_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_vport_stop(p_hwfn);

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
		       struct qed_filter_accept_flags *p_accept_flags)
{
	struct qed_sp_vport_update_params s_params;

	memset(&s_params, 0, sizeof(s_params));
	memcpy(&s_params.accept_flags, p_accept_flags,
	       sizeof(struct qed_filter_accept_flags));

	return qed_vf_pf_vport_update(p_hwfn, &s_params);
}

static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 u8 vport,
				 struct qed_filter_accept_flags accept_flags,
				 u8 update_accept_any_vlan,
				 u8 accept_any_vlan,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc)
				return rc;
			continue;
		}

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
		if (rc) {
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, QED_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return 0;
}
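
/* Usage sketch (hypothetical values): putting a vport into Rx
 * promiscuous mode via the helper above boils down to something like
 *
 *	struct qed_filter_accept_flags flags = {
 *		.update_rx_mode_config = 1,
 *		.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
 *				    QED_ACCEPT_UCAST_UNMATCHED |
 *				    QED_ACCEPT_MCAST_MATCHED |
 *				    QED_ACCEPT_BCAST,
 *	};
 *
 *	qed_filter_accept_cmd(cdev, 0, flags, 0, 0,
 *			      QED_SPQ_MODE_EBLOCK, NULL);
 */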

int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			     struct qed_queue_cid *p_cid,
			     u16 bd_max_bytes,
			     dma_addr_t bd_chain_phys_addr,
			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   p_cid->opaque_fid, p_cid->cid,
		   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_cid->vfid != QED_QUEUE_CID_SELF) {
		bool b_legacy_vf = !!(p_cid->vf_legacy &
				      QED_QCID_LEGACY_VF_RX_PROD);

		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Queue%s is meant for VF rxq[%02x]\n",
			   b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size, void __iomem **pp_prod)
{
	u32 init_prod_val = 0;

	*pp_prod = p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
					bd_max_bytes,
					bd_chain_phys_addr,
					cqe_pbl_addr, cqe_pbl_size);
}

static int
qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u16 bd_max_bytes,
		       dma_addr_t bd_chain_phys_addr,
		       dma_addr_t cqe_pbl_addr,
		       u16 cqe_pbl_size,
		       struct qed_rxq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	/* Allocate a CID for the queue */
	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
	if (!p_cid)
		return -ENOMEM;

	if (IS_PF(p_hwfn->cdev)) {
		rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
					       bd_max_bytes,
					       bd_chain_phys_addr,
					       cqe_pbl_addr, cqe_pbl_size,
					       &p_ret_params->p_prod);
	} else {
		rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr,
					 cqe_pbl_size, &p_ret_params->p_prod);
	}

	/* Provide the caller with a reference to the queue handle */
	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}
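
/* Note (illustrative): the *pp_prod address handed back above maps this
 * queue's MSTORM Rx producer slot in internal RAM; at runtime the L2
 * client (e.g. qede) advances the Rx producer by writing the new value
 * through that pointer, in the same way the zero-initialization above
 * uses __internal_ram_wr().
 */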

int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
				void **pp_rxq_handles,
				u8 num_rxqs,
				u8 complete_cqe_flg,
				u8 complete_event_flg,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_queue_cid *p_cid;
	int rc = -EINVAL;
	u8 i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];

		/* Get SPQ entry */
		init_data.cid = p_cid->cid;
		init_data.opaque_fid = p_cid->opaque_fid;

		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 ETH_RAMROD_RX_QUEUE_UPDATE,
					 PROTOCOLID_ETH, &init_data);
		if (rc)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;
		p_ramrod->vport_id = p_cid->abs.vport_id;

		p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = qed_spq_post(p_hwfn, p_ent, NULL);
		if (rc)
			return rc;
	}

	return rc;
}

static int
qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 bool b_eq_completion_only, bool b_cqe_completion)
{
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
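	/* Spelled out (follows from the expressions below): a PF stopping
	 * its own queue completes on the CQE ring unless
	 * b_eq_completion_only is set (b_cqe_completion forces a CQE
	 * completion regardless); a queue stopped on behalf of a VF always
	 * completes via the event queue, with an additional CQE completion
	 * only if b_cqe_completion is requested.
	 */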
	p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
				      !b_eq_completion_only) ||
				     b_cqe_completion;
	p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
				       b_eq_completion_only;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
			  void *p_rxq,
			  bool eq_completion_only, bool cqe_completion)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
	int rc = -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
					      eq_completion_only,
					      cqe_completion);
	else
		rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
{
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = p_cid->abs.vport_id;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;

	p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);

	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u8 tc,
			  dma_addr_t pbl_addr,
			  u16 pbl_size, void __iomem **pp_doorbell)
{
	int rc;

	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
				      pbl_addr, pbl_size,
				      qed_get_cm_pq_idx_mcos(p_hwfn, tc));
	if (rc)
		return rc;

	/* Provide the caller with the necessary return values */
	*pp_doorbell = p_hwfn->doorbells +
		       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);

	return 0;
}

static int
qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u8 tc,
		       dma_addr_t pbl_addr,
		       u16 pbl_size,
		       struct qed_txq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
	if (!p_cid)
		return -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
					       pbl_addr, pbl_size,
					       &p_ret_params->p_doorbell);
	else
		rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
					 pbl_addr, pbl_size,
					 &p_ret_params->p_doorbell);

	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}
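
/* Note (illustrative): *pp_doorbell above points at this queue's legacy
 * DEMS doorbell within the doorbell BAR; the L2 client rings it with the
 * new Tx PBL producer to hand descriptors to the device. The doorbell
 * data format itself is defined by the consuming driver (e.g. qede), not
 * by this file.
 */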

static int
qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
	int rc;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
	else
		rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}

static void qed_set_fw_mac_addr(__le16 *fw_msb,
				__le16 *fw_mid,
				__le16 *fw_lsb,
				u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}
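
/* Example (illustrative): for mac = 00:11:22:33:44:55 the bytes land as
 * fw_msb = {0x11, 0x00}, fw_mid = {0x33, 0x22}, fw_lsb = {0x55, 0x44},
 * so each __le16, read as a little-endian word, yields the big-endian
 * pairs 0x0011, 0x2233 and 0x4455.
 */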

static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct qed_sp_init_data init_data;
	enum eth_filter_action action;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			  &vport_to_add_to);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

	switch (p_filter_cmd->opcode) {
	case QED_FILTER_REPLACE:
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		memcpy(p_second_filter, p_first_filter,
		       sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return -EINVAL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :
					   vport_to_add_to;
	}

	return 0;
}

int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct qed_filter_ucast *p_filter_cmd,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct eth_filter_cmd_header *p_header;
	int rc;

	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				     &p_ramrod, &p_ent,
				     comp_mode, p_comp_data);
	if (rc) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0],
		   p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2],
		   p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4],
		   p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return 0;
}

/*******************************************************************************
 * Description:
 *	Calculates CRC32 on a buffer
 *	Note: crc32_length MUST be aligned to 8
 * Return:
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
			   u32 crc32_length, u32 crc32_seed, u8 complement)
{
	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
	u8 msb = 0, current_byte = 0;

	if ((!crc32_packet) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0))
		return crc32_result;

	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /*crc32_result[0] = 1;*/
			}
		}
	}
	return crc32_result;
}

static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
{
	u32 packet_buf[2] = { 0 };

	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}

u8 qed_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				mac, ETH_ALEN);

	return crc & 0xff;
}
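
/* How the approximate multicast filter works (derived from the code
 * above): each MAC is hashed with CRC32C down to one of 256 bins
 * (crc & 0xff), and qed_sp_eth_filter_mcast() below sets the matching
 * bit in approx_mcast.bins. Hash collisions can let unrequested
 * multicast frames through, which is why the filter is only approximate.
 */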

static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_mcast *p_filter_cmd,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc, i;

	if (p_filter_cmd->opcode == QED_FILTER_ADD)
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
				  &abs_vport_id);
	else
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
				  &abs_vport_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));
	memset(bins, 0, sizeof(unsigned long) *
	       ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* The filter ADD op is an explicit set op and removes
	 * any existing filters for the vport.
	 */
	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, bins);
		}

		/* Convert to correct endianness */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			struct vport_update_ramrod_mcast *p_ramrod_bins;
			u32 *p_bins = (u32 *)bins;

			p_ramrod_bins = &p_ramrod->approx_mcast;
			p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_filter_mcast_cmd(struct qed_dev *cdev,
				struct qed_filter_mcast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	/* only ADD and REMOVE operations are supported for multicast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     p_filter_cmd->opcode != QED_FILTER_REMOVE) ||
	    p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS)
		return -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
	}
	return rc;
}
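
/* Note: the loop above issues the multicast update to every hwfn, but
 * only the last hwfn's return code is propagated, and the return value
 * of qed_vf_pf_filter_mcast() is ignored entirely for VFs.
 */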

static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
		if (rc)
			break;
	}

	return rc;
}

/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
			  PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
	}
}

static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
				       statistics_bin);

	memset(&pstats, 0, sizeof(pstats));
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->common.tx_ucast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->common.tx_mcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->common.tx_bcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->common.tx_ucast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->common.tx_mcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->common.tx_bcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->common.tx_err_drop_pkts +=
	    HILO_64_REGPAIR(pstats.error_drop_pkts);
}

static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->cdev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
			      TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	memset(&tstats, 0, sizeof(tstats));
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->common.mftag_filter_discards +=
	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->common.mac_filter_discards +=
	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}

static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
			  USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
	}
}
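
/* All four *_addrlen helpers in this section follow the same pattern: a
 * PF computes its statistics bin's storm RAM offset directly, while a VF
 * uses the address/length the PF advertised in the acquire response TLV
 * (pfdev_info.stats_info).
 */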

static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
				       statistics_bin);

	memset(&ustats, 0, sizeof(ustats));
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->common.rx_ucast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->common.rx_mcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->common.rx_bcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_MSDM_RAM +
			  MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
	}
}

static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
				       statistics_bin);

	memset(&mstats, 0, sizeof(mstats));
	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

	p_stats->common.no_buff_discards +=
	    HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->common.packet_too_big_discard +=
	    HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->common.tpa_coalesced_pkts +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->common.tpa_coalesced_events +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->common.tpa_aborts_num +=
	    HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->common.tpa_coalesced_bytes +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
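
/* The storm counters in this section are added into the caller's
 * qed_eth_stats rather than assigned, so a multi-hwfn device accumulates
 * totals across its engines into a single struct; qed_get_vport_stats()
 * below then subtracts the reset_stats baseline.
 */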

static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_eth_stats *p_stats)
{
	struct qed_eth_stats_common *p_common = &p_stats->common;
	struct port_stats port_stats;
	int j;

	memset(&port_stats, 0, sizeof(port_stats));

	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			p_hwfn->mcp_info->port_addr +
			offsetof(struct public_port, stats),
			sizeof(port_stats));

	p_common->rx_64_byte_packets += port_stats.eth.r64;
	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_common->rx_crc_errors += port_stats.eth.rfcs;
	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_common->rx_pause_frames += port_stats.eth.rxpf;
	p_common->rx_pfc_frames += port_stats.eth.rxpp;
	p_common->rx_align_errors += port_stats.eth.raln;
	p_common->rx_carrier_errors += port_stats.eth.rfcr;
	p_common->rx_oversize_packets += port_stats.eth.rovr;
	p_common->rx_jabbers += port_stats.eth.rjbr;
	p_common->rx_undersize_packets += port_stats.eth.rund;
	p_common->rx_fragments += port_stats.eth.rfrg;
	p_common->tx_64_byte_packets += port_stats.eth.t64;
	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_common->tx_pause_frames += port_stats.eth.txpf;
	p_common->tx_pfc_frames += port_stats.eth.txpp;
	p_common->rx_mac_bytes += port_stats.eth.rbyte;
	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_common->tx_mac_bytes += port_stats.eth.tbyte;
	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
	for (j = 0; j < 8; j++) {
		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
		p_common->brb_discards += port_stats.brb.brb_discard[j];
	}

	if (QED_IS_BB(p_hwfn->cdev)) {
		struct qed_eth_stats_bb *p_bb = &p_stats->bb;

		p_bb->rx_1519_to_1522_byte_packets +=
		    port_stats.eth.u0.bb0.r1522;
		p_bb->rx_1519_to_2047_byte_packets +=
		    port_stats.eth.u0.bb0.r2047;
		p_bb->rx_2048_to_4095_byte_packets +=
		    port_stats.eth.u0.bb0.r4095;
		p_bb->rx_4096_to_9216_byte_packets +=
		    port_stats.eth.u0.bb0.r9216;
		p_bb->rx_9217_to_16383_byte_packets +=
		    port_stats.eth.u0.bb0.r16383;
		p_bb->tx_1519_to_2047_byte_packets +=
		    port_stats.eth.u1.bb1.t2047;
		p_bb->tx_2048_to_4095_byte_packets +=
		    port_stats.eth.u1.bb1.t4095;
		p_bb->tx_4096_to_9216_byte_packets +=
		    port_stats.eth.u1.bb1.t9216;
		p_bb->tx_9217_to_16383_byte_packets +=
		    port_stats.eth.u1.bb1.t16383;
		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
	} else {
		struct qed_eth_stats_ah *p_ah = &p_stats->ah;

		p_ah->rx_1519_to_max_byte_packets +=
		    port_stats.eth.u0.ah0.r1519_to_max;
		/* Accumulate like every other counter here; a plain
		 * assignment would discard totals already gathered from
		 * other hw-functions.
		 */
		p_ah->tx_1519_to_max_byte_packets +=
		    port_stats.eth.u1.ah1.t1519_to_max;
	}
}

static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_eth_stats *stats,
				  u16 statistics_bin, bool b_get_port_stats)
{
	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

	if (b_get_port_stats && p_hwfn->mcp_info)
		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}
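
/* Gather the statistics of every hw-function into a single qed_eth_stats.
 * A PF needs a PTT window for the register reads; a VF reads through the
 * PF channel and simply passes a NULL PTT.
 */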

static void _qed_get_vport_stats(struct qed_dev *cdev,
				 struct qed_eth_stats *stats)
{
	u8 fw_vport = 0;
	int i;

	memset(stats, 0, sizeof(*stats));

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;

		if (IS_PF(cdev)) {
			/* The main vport index is relative first */
			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
				DP_ERR(p_hwfn, "No vport available!\n");
				goto out;
			}
		}

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
				      IS_PF(cdev));

out:
		if (IS_PF(cdev) && p_ptt)
			qed_ptt_release(p_hwfn, p_ptt);
	}
}

void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
	u32 i;

	if (!cdev) {
		memset(stats, 0, sizeof(*stats));
		return;
	}

	_qed_get_vport_stats(cdev, stats);

	if (!cdev->reset_stats)
		return;

	/* Reduce the statistics baseline */
	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
}

/* Zeroes the V-PORT specific portion of stats (port stats remain untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;
		u32 addr = 0, len = 0;

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		memset(&mstats, 0, sizeof(mstats));
		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		memset(&ustats, 0, sizeof(ustats));
		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		memset(&pstats, 0, sizeof(pstats));
		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		if (IS_PF(cdev))
			qed_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 */
	if (!cdev->reset_stats)
		DP_INFO(cdev, "Reset stats not allocated\n");
	else
		_qed_get_vport_stats(cdev, cdev->reset_stats);
}

static void
qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			struct qed_arfs_config_params *p_cfg_params)
{
	if (p_cfg_params->arfs_enable) {
		qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
					p_cfg_params->tcp, p_cfg_params->udp,
					p_cfg_params->ipv4,
					p_cfg_params->ipv6);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s\n",
			   p_cfg_params->tcp ? "Enable" : "Disable",
			   p_cfg_params->udp ? "Enable" : "Disable",
			   p_cfg_params->ipv4 ? "Enable" : "Disable",
			   p_cfg_params->ipv6 ? "Enable" : "Disable");
	} else {
		qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode: %s\n",
		   p_cfg_params->arfs_enable ? "Enable" : "Disable");
}

static int
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
				struct qed_spq_comp_cb *p_cb,
				dma_addr_t p_addr, u16 length, u16 qid,
				u8 vport_id, bool b_is_add)
{
	struct rx_update_gft_filter_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 abs_rx_q_id = 0;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc)
		return rc;

	rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);

	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;

	if (p_cb) {
		init_data.comp_mode = QED_SPQ_MODE_CB;
		init_data.p_comp_data = p_cb;
	} else {
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	}

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_GFT_UPDATE_FILTER,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_update_gft;
	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
	p_ramrod->pkt_hdr_length = cpu_to_le16(length);
	p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
	p_ramrod->vport_id = abs_vport_id;
	p_ramrod->filter_type = RFS_FILTER_TYPE;
	p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
		   abs_vport_id, abs_rx_q_id,
		   b_is_add ? "Adding" : "Removing", (u64)p_addr, length);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_fill_eth_dev_info(struct qed_dev *cdev,
				 struct qed_dev_eth_info *info)
{
	int i;

	memset(info, 0, sizeof(*info));

	info->num_tc = 1;

	if (IS_PF(cdev)) {
		int max_vf_vlan_filters = 0;
		int max_vf_mac_filters = 0;

		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
			u16 num_queues = 0;

			/* Since the feature controls only queue-zones,
			 * make sure we have the contexts [rx, tx, xdp] to
			 * match.
			 */
			for_each_hwfn(cdev, i) {
				struct qed_hwfn *hwfn = &cdev->hwfns[i];
				u16 l2_queues = (u16)FEAT_NUM(hwfn,
							      QED_PF_L2_QUE);
				u16 cids;

				cids = hwfn->pf_params.eth_pf_params.num_cons;
				num_queues += min_t(u16, l2_queues, cids / 3);
			}

			/* queues might theoretically be >256, but interrupts'
			 * upper-limit guarantees that it would fit in a u8.
			 */
			if (cdev->int_params.fp_msix_cnt) {
				u8 irqs = cdev->int_params.fp_msix_cnt;

				info->num_queues = (u8)min_t(u16,
							     num_queues, irqs);
			}
		} else {
			info->num_queues = cdev->num_hwfns;
		}

		if (IS_QED_SRIOV(cdev)) {
			max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
					      QED_ETH_VF_NUM_VLAN_FILTERS;
			max_vf_mac_filters = cdev->p_iov_info->total_vfs *
					     QED_ETH_VF_NUM_MAC_FILTERS;
		}
		info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						  QED_VLAN) -
					 max_vf_vlan_filters;
		info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						 QED_MAC) -
					max_vf_mac_filters;

		ether_addr_copy(info->port_mac,
				cdev->hwfns[0].hw_info.hw_mac_addr);

		info->xdp_supported = true;
	} else {
		u16 total_cids = 0;

		/* Determine queues & XDP support */
		for_each_hwfn(cdev, i) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
			u8 queues, cids;

			qed_vf_get_num_cids(p_hwfn, &cids);
			qed_vf_get_num_rxqs(p_hwfn, &queues);
			info->num_queues += queues;
			total_cids += cids;
		}

		/* Enable VF XDP if the PF guarantees sufficient connections */
		if (total_cids >= info->num_queues * 3)
			info->xdp_supported = true;

		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
					    (u8 *)&info->num_vlan_filters);
		qed_vf_get_num_mac_filters(&cdev->hwfns[0],
					   (u8 *)&info->num_mac_filters);
		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);

		info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
	}

	qed_fill_dev_info(cdev, &info->common);

	if (IS_VF(cdev))
		eth_zero_addr(info->common.hw_mac);

	return 0;
}

static void qed_register_eth_ops(struct qed_dev *cdev,
				 struct qed_eth_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.eth = ops;
	cdev->ops_cookie = cookie;

	/* For VF, we start bulletin reading */
	if (IS_VF(cdev))
		qed_vf_start_iov_wq(cdev);
}

static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
{
	if (IS_PF(cdev))
		return true;

	return qed_vf_check_mac(&cdev->hwfns[0], mac);
}
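
/* Caller-side sketch (illustrative only; the values are hypothetical) of
 * how a protocol driver such as qede would bring up its vport through the
 * exported ops:
 *
 *	struct qed_start_vport_params start = { 0 };
 *
 *	start.vport_id = 0;
 *	start.mtu = 1500;
 *	start.gro_enable = true;
 *	start.clear_stats = true;
 *	rc = ops->vport_start(cdev, &start);
 */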

static int qed_start_vport(struct qed_dev *cdev,
			   struct qed_start_vport_params *params)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_sp_vport_start_params start = { 0 };
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
						      QED_TPA_MODE_NONE;
		start.remove_inner_vlan = params->remove_inner_vlan;
		start.only_untagged = true;	/* untagged only */
		start.drop_ttl0 = params->drop_ttl0;
		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
		start.handle_ptp_pkts = params->handle_ptp_pkts;
		start.vport_id = params->vport_id;
		start.max_buffers_per_cqe = 16;
		start.mtu = params->mtu;

		rc = qed_sp_vport_start(p_hwfn, &start);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT\n");
			return rc;
		}

		rc = qed_hw_start_fastpath(p_hwfn);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT fastpath\n");
			return rc;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Started V-PORT %d with MTU %d\n",
			   start.vport_id, start.mtu);
	}

	if (params->clear_stats)
		qed_reset_vport_stats(cdev);

	return 0;
}

static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_sp_vport_stop(p_hwfn,
				       p_hwfn->hw_info.opaque_fid, vport_id);

		if (rc) {
			DP_ERR(cdev, "Failed to stop VPORT\n");
			return rc;
		}
	}
	return 0;
}
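
/* With CMT (two hw-functions behind a single device) one logical RSS
 * configuration has to be split into one qed_rss_params per hwfn: the
 * 128-entry indirection table (size log 7) is dealt round-robin into two
 * 64-entry per-engine tables (size log 6).
 */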

static int qed_update_vport_rss(struct qed_dev *cdev,
				struct qed_update_vport_rss_params *input,
				struct qed_rss_params *rss)
{
	int i, fn;

	/* Update configuration with what's correct regardless of CMT */
	rss->update_rss_config = 1;
	rss->rss_enable = 1;
	rss->update_rss_capabilities = 1;
	rss->update_rss_ind_table = 1;
	rss->update_rss_key = 1;
	rss->rss_caps = input->rss_caps;
	memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));

	/* In a regular scenario, we'd simply need to take the input handlers.
	 * But in CMT, we'd have to split the handlers according to the
	 * engine they were configured on. We'd then have to understand
	 * whether RSS is really required, since 2-queues on CMT doesn't
	 * require RSS.
	 */
	if (cdev->num_hwfns == 1) {
		memcpy(rss->rss_ind_table,
		       input->rss_ind_table,
		       QED_RSS_IND_TABLE_SIZE * sizeof(void *));
		rss->rss_table_size_log = 7;
		return 0;
	}

	/* Start by copying the non-specific information to the 2nd copy */
	memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));

	/* CMT should be round-robin */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		struct qed_queue_cid *cid = input->rss_ind_table[i];
		struct qed_rss_params *t_rss;

		if (cid->p_owner == QED_LEADING_HWFN(cdev))
			t_rss = &rss[0];
		else
			t_rss = &rss[1];

		t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
	}

	/* Make sure RSS is actually required */
	for_each_hwfn(cdev, fn) {
		for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
			if (rss[fn].rss_ind_table[i] !=
			    rss[fn].rss_ind_table[0])
				break;
		}
		if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
			DP_VERBOSE(cdev, NETIF_MSG_IFUP,
				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
			return -EINVAL;
		}
		rss[fn].rss_table_size_log = 6;
	}

	return 0;
}

static int qed_update_vport(struct qed_dev *cdev,
			    struct qed_update_vport_params *params)
{
	struct qed_sp_vport_update_params sp_params;
	struct qed_rss_params *rss;
	int rc = 0, i;

	if (!cdev)
		return -ENODEV;

	rss = vzalloc(sizeof(*rss) * cdev->num_hwfns);
	if (!rss)
		return -ENOMEM;

	memset(&sp_params, 0, sizeof(sp_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;
	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
	sp_params.tx_switching_flg = params->tx_switching_flg;
	sp_params.accept_any_vlan = params->accept_any_vlan;
	sp_params.update_accept_any_vlan_flg =
	    params->update_accept_any_vlan_flg;

	/* Prepare the RSS configuration */
	if (params->update_rss_flg)
		if (qed_update_vport_rss(cdev, &params->rss_params, rss))
			params->update_rss_flg = 0;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (params->update_rss_flg)
			sp_params.rss_params = &rss[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = qed_sp_vport_update(p_hwfn, &sp_params,
					 QED_SPQ_MODE_EBLOCK,
					 NULL);
		if (rc) {
			DP_ERR(cdev, "Failed to update VPORT\n");
			goto out;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

out:
	vfree(rss);
	return rc;
}
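
/* A global queue index is spread round-robin over the hw-functions.
 * For example (hypothetical CMT device with two hwfns): global rx-queue
 * 5 maps to hwfn 5 % 2 = 1, where it becomes local queue 5 / 2 = 2.
 */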

static int qed_start_rxq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct qed_rxq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_rx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params,
				    bd_max_bytes,
				    bd_chain_phys_addr,
				    cqe_pbl_addr, cqe_pbl_size, ret_params);
	if (rc) {
		DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->p_sb->igu_sb_id);

	return 0;
}

static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
	if (rc) {
		DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
		return rc;
	}

	return 0;
}

static int qed_start_txq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 struct qed_txq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_tx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params, 0,
				    pbl_addr, pbl_size, ret_params);

	if (rc) {
		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->p_sb->igu_sb_id);

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
	int rc;

	rc = qed_hw_stop_fastpath(cdev);
	if (rc) {
		DP_ERR(cdev, "Failed to stop Fastpath\n");
		return rc;
	}

	return 0;
}

static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_eth_tx_queue_stop(p_hwfn, handle);
	if (rc) {
		DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
		return rc;
	}

	return 0;
}
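
/* Tunnel (VXLAN/GENEVE) UDP port updates are applied on every
 * hw-function; an SR-IOV PF additionally publishes the new ports to its
 * VFs through their bulletin boards.
 */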

static int qed_tunn_configure(struct qed_dev *cdev,
			      struct qed_tunn_params *tunn_params)
{
	struct qed_tunnel_info tunn_info;
	int i, rc;

	memset(&tunn_info, 0, sizeof(tunn_info));
	if (tunn_params->update_vxlan_port) {
		tunn_info.vxlan_port.b_update_port = true;
		tunn_info.vxlan_port.port = tunn_params->vxlan_port;
	}

	if (tunn_params->update_geneve_port) {
		tunn_info.geneve_port.b_update_port = true;
		tunn_info.geneve_port.port = tunn_params->geneve_port;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;
		struct qed_tunnel_info *tun;

		tun = &hwfn->cdev->tunnel;
		if (IS_PF(cdev)) {
			p_ptt = qed_ptt_acquire(hwfn);
			if (!p_ptt)
				return -EAGAIN;
		} else {
			p_ptt = NULL;
		}

		rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
					       QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			if (IS_PF(cdev))
				qed_ptt_release(hwfn, p_ptt);
			return rc;
		}

		if (IS_PF_SRIOV(hwfn)) {
			u16 vxlan_port, geneve_port;
			int j;

			vxlan_port = tun->vxlan_port.port;
			geneve_port = tun->geneve_port.port;

			qed_for_each_vf(hwfn, j) {
				qed_iov_bulletin_set_udp_ports(hwfn, j,
							       vxlan_port,
							       geneve_port);
			}

			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		}
		if (IS_PF(cdev))
			qed_ptt_release(hwfn, p_ptt);
	}

	return 0;
}

static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
					enum qed_filter_rx_mode_type type)
{
	struct qed_filter_accept_flags accept_flags;

	memset(&accept_flags, 0, sizeof(accept_flags));

	accept_flags.update_rx_mode_config = 1;
	accept_flags.update_tx_mode_config = 1;
	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;
	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
	}

	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
				     QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_ucast(struct qed_dev *cdev,
				      struct qed_filter_ucast_params *params)
{
	struct qed_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = QED_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = QED_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
		/* Don't fall through with a zeroed opcode */
		return -EINVAL;
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = QED_FILTER_MAC_VLAN;
		ether_addr_copy(ucast.mac, params->mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = QED_FILTER_MAC;
		ether_addr_copy(ucast.mac, params->mac);
	} else {
		ucast.type = QED_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}
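
/* Caller-side sketch (illustrative only; the VLAN value and dev_mac are
 * hypothetical) of adding a unicast MAC+VLAN filter through the generic
 * filter entry point:
 *
 *	struct qed_filter_params params = { 0 };
 *
 *	params.type = QED_FILTER_TYPE_UCAST;
 *	params.filter.ucast.type = QED_FILTER_XCAST_TYPE_ADD;
 *	params.filter.ucast.mac_valid = true;
 *	params.filter.ucast.vlan_valid = true;
 *	ether_addr_copy(params.filter.ucast.mac, dev_mac);
 *	params.filter.ucast.vlan = 100;
 *	rc = ops->filter_config(cdev, &params);
 */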

static int qed_configure_filter_mcast(struct qed_dev *cdev,
				      struct qed_filter_mcast_params *params)
{
	struct qed_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = QED_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
		/* Don't fall through with a zeroed opcode */
		return -EINVAL;
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy(mcast.mac[i], params->mac[i]);

	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter(struct qed_dev *cdev,
				struct qed_filter_params *params)
{
	enum qed_filter_rx_mode_type accept_flags;

	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		accept_flags = params->filter.accept_flags;
		return qed_configure_filter_rx_mode(cdev, accept_flags);
	default:
		DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
		return -EINVAL;
	}
}

static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_arfs_config_params arfs_config_params;

	memset(&arfs_config_params, 0, sizeof(arfs_config_params));
	arfs_config_params.tcp = true;
	arfs_config_params.udp = true;
	arfs_config_params.ipv4 = true;
	arfs_config_params.ipv6 = true;
	arfs_config_params.arfs_enable = en_searcher;

	qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
				&arfs_config_params);
	return 0;
}

static void
qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
			     void *cookie, union event_ring_data *data,
			     u8 fw_return_code)
{
	struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
	void *dev = p_hwfn->cdev->ops_cookie;

	op->arfs_filter_op(dev, cookie, fw_return_code);
}

static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
					 dma_addr_t mapping, u16 length,
					 u16 vport_id, u16 rx_queue_id,
					 bool add_filter)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_spq_comp_cb cb;
	int rc = -EINVAL;

	cb.function = qed_arfs_sp_response_handler;
	cb.cookie = cookie;

	rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
					     &cb, mapping, length, rx_queue_id,
					     vport_id, add_filter);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to issue a-RFS filter configuration\n");
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
			   "Successfully issued a-RFS filter configuration\n");

	return rc;
}

static int qed_fp_cqe_completion(struct qed_dev *dev,
				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
				      cqe);
}
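
/* Everything above is exported to protocol drivers through the ops table
 * below; e.g. qede obtains it once at probe time:
 *
 *	const struct qed_eth_ops *qed_ops = qed_get_eth_ops();
 *
 * and then interacts with the device exclusively via these callbacks.
 */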

#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif

#ifdef CONFIG_DCB
extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
#endif

extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;

static const struct qed_eth_ops qed_eth_ops_pass = {
	.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
	.iov = &qed_iov_ops_pass,
#endif
#ifdef CONFIG_DCB
	.dcb = &qed_dcbnl_ops_pass,
#endif
	.ptp = &qed_ptp_ops_pass,
	.fill_dev_info = &qed_fill_eth_dev_info,
	.register_ops = &qed_register_eth_ops,
	.check_mac = &qed_check_mac,
	.vport_start = &qed_start_vport,
	.vport_stop = &qed_stop_vport,
	.vport_update = &qed_update_vport,
	.q_rx_start = &qed_start_rxq,
	.q_rx_stop = &qed_stop_rxq,
	.q_tx_start = &qed_start_txq,
	.q_tx_stop = &qed_stop_txq,
	.filter_config = &qed_configure_filter,
	.fastpath_stop = &qed_fastpath_stop,
	.eth_cqe_completion = &qed_fp_cqe_completion,
	.get_vport_stats = &qed_get_vport_stats,
	.tunn_config = &qed_tunn_configure,
	.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
	.configure_arfs_searcher = &qed_configure_arfs_searcher,
};

const struct qed_eth_ops *qed_get_eth_ops(void)
{
	return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

void qed_put_eth_ops(void)
{
	/* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);