1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015 QLogic Corporation 3 * 4 * This software is available under the terms of the GNU General Public License 5 * (GPL) Version 2, available from the file COPYING in the main directory of 6 * this source tree. 7 */ 8 9 #include <linux/types.h> 10 #include <asm/byteorder.h> 11 #include <asm/param.h> 12 #include <linux/delay.h> 13 #include <linux/dma-mapping.h> 14 #include <linux/etherdevice.h> 15 #include <linux/interrupt.h> 16 #include <linux/kernel.h> 17 #include <linux/module.h> 18 #include <linux/pci.h> 19 #include <linux/slab.h> 20 #include <linux/stddef.h> 21 #include <linux/string.h> 22 #include <linux/version.h> 23 #include <linux/workqueue.h> 24 #include <linux/bitops.h> 25 #include <linux/bug.h> 26 #include "qed.h" 27 #include <linux/qed/qed_chain.h> 28 #include "qed_cxt.h" 29 #include "qed_dev_api.h" 30 #include <linux/qed/qed_eth_if.h> 31 #include "qed_hsi.h" 32 #include "qed_hw.h" 33 #include "qed_int.h" 34 #include "qed_l2.h" 35 #include "qed_mcp.h" 36 #include "qed_reg_addr.h" 37 #include "qed_sp.h" 38 #include "qed_sriov.h" 39 40 41 #define QED_MAX_SGES_NUM 16 42 #define CRC32_POLY 0x1edc6f41 43 44 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, 45 struct qed_sp_vport_start_params *p_params) 46 { 47 struct vport_start_ramrod_data *p_ramrod = NULL; 48 struct qed_spq_entry *p_ent = NULL; 49 struct qed_sp_init_data init_data; 50 u8 abs_vport_id = 0; 51 int rc = -EINVAL; 52 u16 rx_mode = 0; 53 54 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 55 if (rc) 56 return rc; 57 58 memset(&init_data, 0, sizeof(init_data)); 59 init_data.cid = qed_spq_get_cid(p_hwfn); 60 init_data.opaque_fid = p_params->opaque_fid; 61 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 62 63 rc = qed_sp_init_request(p_hwfn, &p_ent, 64 ETH_RAMROD_VPORT_START, 65 PROTOCOLID_ETH, &init_data); 66 if (rc) 67 return rc; 68 69 p_ramrod = &p_ent->ramrod.vport_start; 70 p_ramrod->vport_id = abs_vport_id; 71 72 p_ramrod->mtu = cpu_to_le16(p_params->mtu); 73 p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan; 74 p_ramrod->drop_ttl0_en = p_params->drop_ttl0; 75 p_ramrod->untagged = p_params->only_untagged; 76 77 SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1); 78 SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1); 79 80 p_ramrod->rx_mode.state = cpu_to_le16(rx_mode); 81 82 /* TPA related fields */ 83 memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param)); 84 85 p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe; 86 87 switch (p_params->tpa_mode) { 88 case QED_TPA_MODE_GRO: 89 p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 90 p_ramrod->tpa_param.tpa_max_size = (u16)-1; 91 p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2; 92 p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2; 93 p_ramrod->tpa_param.tpa_ipv4_en_flg = 1; 94 p_ramrod->tpa_param.tpa_ipv6_en_flg = 1; 95 p_ramrod->tpa_param.tpa_pkt_split_flg = 1; 96 p_ramrod->tpa_param.tpa_gro_consistent_flg = 1; 97 break; 98 default: 99 break; 100 } 101 102 p_ramrod->tx_switching_en = p_params->tx_switching; 103 104 p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac; 105 p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype; 106 107 /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ 108 p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev, 109 p_params->concrete_fid); 110 111 return qed_spq_post(p_hwfn, p_ent, NULL); 112 } 113 114 static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, 115 struct 
qed_sp_vport_start_params *p_params) 116 { 117 if (IS_VF(p_hwfn->cdev)) { 118 return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id, 119 p_params->mtu, 120 p_params->remove_inner_vlan, 121 p_params->tpa_mode, 122 p_params->max_buffers_per_cqe, 123 p_params->only_untagged); 124 } 125 126 return qed_sp_eth_vport_start(p_hwfn, p_params); 127 } 128 129 static int 130 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn, 131 struct vport_update_ramrod_data *p_ramrod, 132 struct qed_rss_params *p_params) 133 { 134 struct eth_vport_rss_config *rss = &p_ramrod->rss_config; 135 u16 abs_l2_queue = 0, capabilities = 0; 136 int rc = 0, i; 137 138 if (!p_params) { 139 p_ramrod->common.update_rss_flg = 0; 140 return rc; 141 } 142 143 BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != 144 ETH_RSS_IND_TABLE_ENTRIES_NUM); 145 146 rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id); 147 if (rc) 148 return rc; 149 150 p_ramrod->common.update_rss_flg = p_params->update_rss_config; 151 rss->update_rss_capabilities = p_params->update_rss_capabilities; 152 rss->update_rss_ind_table = p_params->update_rss_ind_table; 153 rss->update_rss_key = p_params->update_rss_key; 154 155 rss->rss_mode = p_params->rss_enable ? 156 ETH_VPORT_RSS_MODE_REGULAR : 157 ETH_VPORT_RSS_MODE_DISABLED; 158 159 SET_FIELD(capabilities, 160 ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, 161 !!(p_params->rss_caps & QED_RSS_IPV4)); 162 SET_FIELD(capabilities, 163 ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY, 164 !!(p_params->rss_caps & QED_RSS_IPV6)); 165 SET_FIELD(capabilities, 166 ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, 167 !!(p_params->rss_caps & QED_RSS_IPV4_TCP)); 168 SET_FIELD(capabilities, 169 ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY, 170 !!(p_params->rss_caps & QED_RSS_IPV6_TCP)); 171 SET_FIELD(capabilities, 172 ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY, 173 !!(p_params->rss_caps & QED_RSS_IPV4_UDP)); 174 SET_FIELD(capabilities, 175 ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY, 176 !!(p_params->rss_caps & QED_RSS_IPV6_UDP)); 177 rss->tbl_size = p_params->rss_table_size_log; 178 179 rss->capabilities = cpu_to_le16(capabilities); 180 181 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, 182 "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n", 183 p_ramrod->common.update_rss_flg, 184 rss->rss_mode, rss->update_rss_capabilities, 185 capabilities, rss->update_rss_ind_table, 186 rss->update_rss_key); 187 188 for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { 189 rc = qed_fw_l2_queue(p_hwfn, 190 (u8)p_params->rss_ind_table[i], 191 &abs_l2_queue); 192 if (rc) 193 return rc; 194 195 rss->indirection_table[i] = cpu_to_le16(abs_l2_queue); 196 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n", 197 i, rss->indirection_table[i]); 198 } 199 200 for (i = 0; i < 10; i++) 201 rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]); 202 203 return rc; 204 } 205 206 static void 207 qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, 208 struct vport_update_ramrod_data *p_ramrod, 209 struct qed_filter_accept_flags accept_flags) 210 { 211 p_ramrod->common.update_rx_mode_flg = 212 accept_flags.update_rx_mode_config; 213 214 p_ramrod->common.update_tx_mode_flg = 215 accept_flags.update_tx_mode_config; 216 217 /* Set Rx mode accept flags */ 218 if (p_ramrod->common.update_rx_mode_flg) { 219 u8 accept_filter = accept_flags.rx_accept_filter; 220 u16 state = 0; 221 222 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 223 !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) || 224 !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); 225 226 
		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}

static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct qed_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}

static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)
		return;

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = (u32 *)p_params->bins;

		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
	}
}

int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
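	/* VFs cannot post ramrods themselves - the request is forwarded to
	 * the PF over the VF->PF channel below, while a PF builds and posts
	 * the vport-update ramrod directly.
	 */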
327 struct qed_rss_params *p_rss_params = p_params->rss_params; 328 struct vport_update_ramrod_data_cmn *p_cmn; 329 struct qed_sp_init_data init_data; 330 struct vport_update_ramrod_data *p_ramrod = NULL; 331 struct qed_spq_entry *p_ent = NULL; 332 u8 abs_vport_id = 0, val; 333 int rc = -EINVAL; 334 335 if (IS_VF(p_hwfn->cdev)) { 336 rc = qed_vf_pf_vport_update(p_hwfn, p_params); 337 return rc; 338 } 339 340 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 341 if (rc) 342 return rc; 343 344 memset(&init_data, 0, sizeof(init_data)); 345 init_data.cid = qed_spq_get_cid(p_hwfn); 346 init_data.opaque_fid = p_params->opaque_fid; 347 init_data.comp_mode = comp_mode; 348 init_data.p_comp_data = p_comp_data; 349 350 rc = qed_sp_init_request(p_hwfn, &p_ent, 351 ETH_RAMROD_VPORT_UPDATE, 352 PROTOCOLID_ETH, &init_data); 353 if (rc) 354 return rc; 355 356 /* Copy input params to ramrod according to FW struct */ 357 p_ramrod = &p_ent->ramrod.vport_update; 358 p_cmn = &p_ramrod->common; 359 360 p_cmn->vport_id = abs_vport_id; 361 p_cmn->rx_active_flg = p_params->vport_active_rx_flg; 362 p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg; 363 p_cmn->tx_active_flg = p_params->vport_active_tx_flg; 364 p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg; 365 p_cmn->accept_any_vlan = p_params->accept_any_vlan; 366 val = p_params->update_accept_any_vlan_flg; 367 p_cmn->update_accept_any_vlan_flg = val; 368 369 p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg; 370 val = p_params->update_inner_vlan_removal_flg; 371 p_cmn->update_inner_vlan_removal_en_flg = val; 372 373 p_cmn->default_vlan_en = p_params->default_vlan_enable_flg; 374 val = p_params->update_default_vlan_enable_flg; 375 p_cmn->update_default_vlan_en_flg = val; 376 377 p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan); 378 p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg; 379 380 p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg; 381 382 p_ramrod->common.tx_switching_en = p_params->tx_switching_flg; 383 p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg; 384 385 p_cmn->anti_spoofing_en = p_params->anti_spoofing_en; 386 val = p_params->update_anti_spoofing_en_flg; 387 p_ramrod->common.update_anti_spoofing_en_flg = val; 388 389 rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); 390 if (rc) { 391 /* Return spq entry which is taken in qed_sp_init_request()*/ 392 qed_spq_return_entry(p_hwfn, p_ent); 393 return rc; 394 } 395 396 /* Update mcast bins for VFs, PF doesn't use this functionality */ 397 qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); 398 399 qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags); 400 qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params); 401 return qed_spq_post(p_hwfn, p_ent, NULL); 402 } 403 404 int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id) 405 { 406 struct vport_stop_ramrod_data *p_ramrod; 407 struct qed_sp_init_data init_data; 408 struct qed_spq_entry *p_ent; 409 u8 abs_vport_id = 0; 410 int rc; 411 412 if (IS_VF(p_hwfn->cdev)) 413 return qed_vf_pf_vport_stop(p_hwfn); 414 415 rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); 416 if (rc) 417 return rc; 418 419 memset(&init_data, 0, sizeof(init_data)); 420 init_data.cid = qed_spq_get_cid(p_hwfn); 421 init_data.opaque_fid = opaque_fid; 422 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 423 424 rc = qed_sp_init_request(p_hwfn, &p_ent, 425 ETH_RAMROD_VPORT_STOP, 426 PROTOCOLID_ETH, 
&init_data); 427 if (rc) 428 return rc; 429 430 p_ramrod = &p_ent->ramrod.vport_stop; 431 p_ramrod->vport_id = abs_vport_id; 432 433 return qed_spq_post(p_hwfn, p_ent, NULL); 434 } 435 436 static int 437 qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn, 438 struct qed_filter_accept_flags *p_accept_flags) 439 { 440 struct qed_sp_vport_update_params s_params; 441 442 memset(&s_params, 0, sizeof(s_params)); 443 memcpy(&s_params.accept_flags, p_accept_flags, 444 sizeof(struct qed_filter_accept_flags)); 445 446 return qed_vf_pf_vport_update(p_hwfn, &s_params); 447 } 448 449 static int qed_filter_accept_cmd(struct qed_dev *cdev, 450 u8 vport, 451 struct qed_filter_accept_flags accept_flags, 452 u8 update_accept_any_vlan, 453 u8 accept_any_vlan, 454 enum spq_mode comp_mode, 455 struct qed_spq_comp_cb *p_comp_data) 456 { 457 struct qed_sp_vport_update_params vport_update_params; 458 int i, rc; 459 460 /* Prepare and send the vport rx_mode change */ 461 memset(&vport_update_params, 0, sizeof(vport_update_params)); 462 vport_update_params.vport_id = vport; 463 vport_update_params.accept_flags = accept_flags; 464 vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan; 465 vport_update_params.accept_any_vlan = accept_any_vlan; 466 467 for_each_hwfn(cdev, i) { 468 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 469 470 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 471 472 if (IS_VF(cdev)) { 473 rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags); 474 if (rc) 475 return rc; 476 continue; 477 } 478 479 rc = qed_sp_vport_update(p_hwfn, &vport_update_params, 480 comp_mode, p_comp_data); 481 if (rc) { 482 DP_ERR(cdev, "Update rx_mode failed %d\n", rc); 483 return rc; 484 } 485 486 DP_VERBOSE(p_hwfn, QED_MSG_SP, 487 "Accept filter configured, flags = [Rx]%x [Tx]%x\n", 488 accept_flags.rx_accept_filter, 489 accept_flags.tx_accept_filter); 490 if (update_accept_any_vlan) 491 DP_VERBOSE(p_hwfn, QED_MSG_SP, 492 "accept_any_vlan=%d configured\n", 493 accept_any_vlan); 494 } 495 496 return 0; 497 } 498 499 static int qed_sp_release_queue_cid( 500 struct qed_hwfn *p_hwfn, 501 struct qed_hw_cid_data *p_cid_data) 502 { 503 if (!p_cid_data->b_cid_allocated) 504 return 0; 505 506 qed_cxt_release_cid(p_hwfn, p_cid_data->cid); 507 508 p_cid_data->b_cid_allocated = false; 509 510 return 0; 511 } 512 513 int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, 514 u16 opaque_fid, 515 u32 cid, 516 struct qed_queue_start_common_params *p_params, 517 u8 stats_id, 518 u16 bd_max_bytes, 519 dma_addr_t bd_chain_phys_addr, 520 dma_addr_t cqe_pbl_addr, 521 u16 cqe_pbl_size, bool b_use_zone_a_prod) 522 { 523 struct rx_queue_start_ramrod_data *p_ramrod = NULL; 524 struct qed_spq_entry *p_ent = NULL; 525 struct qed_sp_init_data init_data; 526 struct qed_hw_cid_data *p_rx_cid; 527 u16 abs_rx_q_id = 0; 528 u8 abs_vport_id = 0; 529 int rc = -EINVAL; 530 531 /* Store information for the stop */ 532 p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id]; 533 p_rx_cid->cid = cid; 534 p_rx_cid->opaque_fid = opaque_fid; 535 p_rx_cid->vport_id = p_params->vport_id; 536 537 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 538 if (rc) 539 return rc; 540 541 rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id); 542 if (rc) 543 return rc; 544 545 DP_VERBOSE(p_hwfn, QED_MSG_SP, 546 "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n", 547 opaque_fid, 548 cid, p_params->queue_id, p_params->vport_id, p_params->sb); 549 550 /* Get SPQ entry */ 551 memset(&init_data, 0, sizeof(init_data)); 552 
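	/* The ramrod is posted on the queue's own CID; QED_SPQ_MODE_EBLOCK
	 * makes the post below wait for the firmware completion.
	 */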
init_data.cid = cid; 553 init_data.opaque_fid = opaque_fid; 554 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 555 556 rc = qed_sp_init_request(p_hwfn, &p_ent, 557 ETH_RAMROD_RX_QUEUE_START, 558 PROTOCOLID_ETH, &init_data); 559 if (rc) 560 return rc; 561 562 p_ramrod = &p_ent->ramrod.rx_queue_start; 563 564 p_ramrod->sb_id = cpu_to_le16(p_params->sb); 565 p_ramrod->sb_index = p_params->sb_idx; 566 p_ramrod->vport_id = abs_vport_id; 567 p_ramrod->stats_counter_id = stats_id; 568 p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); 569 p_ramrod->complete_cqe_flg = 0; 570 p_ramrod->complete_event_flg = 1; 571 572 p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes); 573 DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr); 574 575 p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); 576 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); 577 578 if (p_params->vf_qid || b_use_zone_a_prod) { 579 p_ramrod->vf_rx_prod_index = p_params->vf_qid; 580 DP_VERBOSE(p_hwfn, QED_MSG_SP, 581 "Queue%s is meant for VF rxq[%02x]\n", 582 b_use_zone_a_prod ? " [legacy]" : "", 583 p_params->vf_qid); 584 p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod; 585 } 586 587 return qed_spq_post(p_hwfn, p_ent, NULL); 588 } 589 590 static int 591 qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, 592 u16 opaque_fid, 593 struct qed_queue_start_common_params *p_params, 594 u16 bd_max_bytes, 595 dma_addr_t bd_chain_phys_addr, 596 dma_addr_t cqe_pbl_addr, 597 u16 cqe_pbl_size, void __iomem **pp_prod) 598 { 599 struct qed_hw_cid_data *p_rx_cid; 600 u32 init_prod_val = 0; 601 u16 abs_l2_queue = 0; 602 u8 abs_stats_id = 0; 603 int rc; 604 605 if (IS_VF(p_hwfn->cdev)) { 606 return qed_vf_pf_rxq_start(p_hwfn, 607 p_params->queue_id, 608 p_params->sb, 609 (u8)p_params->sb_idx, 610 bd_max_bytes, 611 bd_chain_phys_addr, 612 cqe_pbl_addr, cqe_pbl_size, pp_prod); 613 } 614 615 rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue); 616 if (rc) 617 return rc; 618 619 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id); 620 if (rc) 621 return rc; 622 623 *pp_prod = (u8 __iomem *)p_hwfn->regview + 624 GTT_BAR0_MAP_REG_MSDM_RAM + 625 MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue); 626 627 /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ 628 __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), 629 (u32 *)(&init_prod_val)); 630 631 /* Allocate a CID for the queue */ 632 p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id]; 633 rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid); 634 if (rc) { 635 DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); 636 return rc; 637 } 638 p_rx_cid->b_cid_allocated = true; 639 640 rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, 641 opaque_fid, 642 p_rx_cid->cid, 643 p_params, 644 abs_stats_id, 645 bd_max_bytes, 646 bd_chain_phys_addr, 647 cqe_pbl_addr, cqe_pbl_size, false); 648 649 if (rc) 650 qed_sp_release_queue_cid(p_hwfn, p_rx_cid); 651 652 return rc; 653 } 654 655 int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, 656 u16 rx_queue_id, 657 u8 num_rxqs, 658 u8 complete_cqe_flg, 659 u8 complete_event_flg, 660 enum spq_mode comp_mode, 661 struct qed_spq_comp_cb *p_comp_data) 662 { 663 struct rx_queue_update_ramrod_data *p_ramrod = NULL; 664 struct qed_spq_entry *p_ent = NULL; 665 struct qed_sp_init_data init_data; 666 struct qed_hw_cid_data *p_rx_cid; 667 u16 qid, abs_rx_q_id = 0; 668 int rc = -EINVAL; 669 u8 i; 670 671 memset(&init_data, 0, sizeof(init_data)); 672 init_data.comp_mode = comp_mode; 673 init_data.p_comp_data = p_comp_data; 674 675 for (i = 0; i < num_rxqs; i++) { 676 
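		/* A separate update ramrod is posted for each queue in the
		 * contiguous [rx_queue_id, rx_queue_id + num_rxqs) range.
		 */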
qid = rx_queue_id + i; 677 p_rx_cid = &p_hwfn->p_rx_cids[qid]; 678 679 /* Get SPQ entry */ 680 init_data.cid = p_rx_cid->cid; 681 init_data.opaque_fid = p_rx_cid->opaque_fid; 682 683 rc = qed_sp_init_request(p_hwfn, &p_ent, 684 ETH_RAMROD_RX_QUEUE_UPDATE, 685 PROTOCOLID_ETH, &init_data); 686 if (rc) 687 return rc; 688 689 p_ramrod = &p_ent->ramrod.rx_queue_update; 690 691 qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id); 692 qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id); 693 p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); 694 p_ramrod->complete_cqe_flg = complete_cqe_flg; 695 p_ramrod->complete_event_flg = complete_event_flg; 696 697 rc = qed_spq_post(p_hwfn, p_ent, NULL); 698 if (rc) 699 return rc; 700 } 701 702 return rc; 703 } 704 705 int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, 706 u16 rx_queue_id, 707 bool eq_completion_only, bool cqe_completion) 708 { 709 struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id]; 710 struct rx_queue_stop_ramrod_data *p_ramrod = NULL; 711 struct qed_spq_entry *p_ent = NULL; 712 struct qed_sp_init_data init_data; 713 u16 abs_rx_q_id = 0; 714 int rc = -EINVAL; 715 716 if (IS_VF(p_hwfn->cdev)) 717 return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion); 718 719 /* Get SPQ entry */ 720 memset(&init_data, 0, sizeof(init_data)); 721 init_data.cid = p_rx_cid->cid; 722 init_data.opaque_fid = p_rx_cid->opaque_fid; 723 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 724 725 rc = qed_sp_init_request(p_hwfn, &p_ent, 726 ETH_RAMROD_RX_QUEUE_STOP, 727 PROTOCOLID_ETH, &init_data); 728 if (rc) 729 return rc; 730 731 p_ramrod = &p_ent->ramrod.rx_queue_stop; 732 733 qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id); 734 qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id); 735 p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); 736 737 /* Cleaning the queue requires the completion to arrive there. 738 * In addition, VFs require the answer to come as eqe to PF. 
739 */ 740 p_ramrod->complete_cqe_flg = 741 (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) && 742 !eq_completion_only) || cqe_completion; 743 p_ramrod->complete_event_flg = 744 !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) || 745 eq_completion_only; 746 747 rc = qed_spq_post(p_hwfn, p_ent, NULL); 748 if (rc) 749 return rc; 750 751 return qed_sp_release_queue_cid(p_hwfn, p_rx_cid); 752 } 753 754 int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, 755 u16 opaque_fid, 756 u32 cid, 757 struct qed_queue_start_common_params *p_params, 758 u8 stats_id, 759 dma_addr_t pbl_addr, 760 u16 pbl_size, 761 union qed_qm_pq_params *p_pq_params) 762 { 763 struct tx_queue_start_ramrod_data *p_ramrod = NULL; 764 struct qed_spq_entry *p_ent = NULL; 765 struct qed_sp_init_data init_data; 766 struct qed_hw_cid_data *p_tx_cid; 767 u16 pq_id, abs_tx_q_id = 0; 768 int rc = -EINVAL; 769 u8 abs_vport_id; 770 771 /* Store information for the stop */ 772 p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; 773 p_tx_cid->cid = cid; 774 p_tx_cid->opaque_fid = opaque_fid; 775 776 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 777 if (rc) 778 return rc; 779 780 rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id); 781 if (rc) 782 return rc; 783 784 /* Get SPQ entry */ 785 memset(&init_data, 0, sizeof(init_data)); 786 init_data.cid = cid; 787 init_data.opaque_fid = opaque_fid; 788 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 789 790 rc = qed_sp_init_request(p_hwfn, &p_ent, 791 ETH_RAMROD_TX_QUEUE_START, 792 PROTOCOLID_ETH, &init_data); 793 if (rc) 794 return rc; 795 796 p_ramrod = &p_ent->ramrod.tx_queue_start; 797 p_ramrod->vport_id = abs_vport_id; 798 799 p_ramrod->sb_id = cpu_to_le16(p_params->sb); 800 p_ramrod->sb_index = p_params->sb_idx; 801 p_ramrod->stats_counter_id = stats_id; 802 803 p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id); 804 805 p_ramrod->pbl_size = cpu_to_le16(pbl_size); 806 DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); 807 808 pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params); 809 p_ramrod->qm_pq_id = cpu_to_le16(pq_id); 810 811 return qed_spq_post(p_hwfn, p_ent, NULL); 812 } 813 814 static int 815 qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn, 816 u16 opaque_fid, 817 struct qed_queue_start_common_params *p_params, 818 dma_addr_t pbl_addr, 819 u16 pbl_size, void __iomem **pp_doorbell) 820 { 821 struct qed_hw_cid_data *p_tx_cid; 822 union qed_qm_pq_params pq_params; 823 u8 abs_stats_id = 0; 824 int rc; 825 826 if (IS_VF(p_hwfn->cdev)) { 827 return qed_vf_pf_txq_start(p_hwfn, 828 p_params->queue_id, 829 p_params->sb, 830 p_params->sb_idx, 831 pbl_addr, pbl_size, pp_doorbell); 832 } 833 834 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id); 835 if (rc) 836 return rc; 837 838 p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; 839 memset(p_tx_cid, 0, sizeof(*p_tx_cid)); 840 memset(&pq_params, 0, sizeof(pq_params)); 841 842 /* Allocate a CID for the queue */ 843 rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid); 844 if (rc) { 845 DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); 846 return rc; 847 } 848 p_tx_cid->b_cid_allocated = true; 849 850 DP_VERBOSE(p_hwfn, QED_MSG_SP, 851 "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n", 852 opaque_fid, p_tx_cid->cid, 853 p_params->queue_id, p_params->vport_id, p_params->sb); 854 855 rc = qed_sp_eth_txq_start_ramrod(p_hwfn, 856 opaque_fid, 857 p_tx_cid->cid, 858 p_params, 859 abs_stats_id, 860 pbl_addr, 861 pbl_size, 862 &pq_params); 863 864 *pp_doorbell = (u8 
__iomem *)p_hwfn->doorbells + 865 qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY); 866 867 if (rc) 868 qed_sp_release_queue_cid(p_hwfn, p_tx_cid); 869 870 return rc; 871 } 872 873 int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id) 874 { 875 struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id]; 876 struct qed_spq_entry *p_ent = NULL; 877 struct qed_sp_init_data init_data; 878 int rc = -EINVAL; 879 880 if (IS_VF(p_hwfn->cdev)) 881 return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id); 882 883 /* Get SPQ entry */ 884 memset(&init_data, 0, sizeof(init_data)); 885 init_data.cid = p_tx_cid->cid; 886 init_data.opaque_fid = p_tx_cid->opaque_fid; 887 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 888 889 rc = qed_sp_init_request(p_hwfn, &p_ent, 890 ETH_RAMROD_TX_QUEUE_STOP, 891 PROTOCOLID_ETH, &init_data); 892 if (rc) 893 return rc; 894 895 rc = qed_spq_post(p_hwfn, p_ent, NULL); 896 if (rc) 897 return rc; 898 899 return qed_sp_release_queue_cid(p_hwfn, p_tx_cid); 900 } 901 902 static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode) 903 { 904 enum eth_filter_action action = MAX_ETH_FILTER_ACTION; 905 906 switch (opcode) { 907 case QED_FILTER_ADD: 908 action = ETH_FILTER_ACTION_ADD; 909 break; 910 case QED_FILTER_REMOVE: 911 action = ETH_FILTER_ACTION_REMOVE; 912 break; 913 case QED_FILTER_FLUSH: 914 action = ETH_FILTER_ACTION_REMOVE_ALL; 915 break; 916 default: 917 action = MAX_ETH_FILTER_ACTION; 918 } 919 920 return action; 921 } 922 923 static void qed_set_fw_mac_addr(__le16 *fw_msb, 924 __le16 *fw_mid, 925 __le16 *fw_lsb, 926 u8 *mac) 927 { 928 ((u8 *)fw_msb)[0] = mac[1]; 929 ((u8 *)fw_msb)[1] = mac[0]; 930 ((u8 *)fw_mid)[0] = mac[3]; 931 ((u8 *)fw_mid)[1] = mac[2]; 932 ((u8 *)fw_lsb)[0] = mac[5]; 933 ((u8 *)fw_lsb)[1] = mac[4]; 934 } 935 936 static int 937 qed_filter_ucast_common(struct qed_hwfn *p_hwfn, 938 u16 opaque_fid, 939 struct qed_filter_ucast *p_filter_cmd, 940 struct vport_filter_update_ramrod_data **pp_ramrod, 941 struct qed_spq_entry **pp_ent, 942 enum spq_mode comp_mode, 943 struct qed_spq_comp_cb *p_comp_data) 944 { 945 u8 vport_to_add_to = 0, vport_to_remove_from = 0; 946 struct vport_filter_update_ramrod_data *p_ramrod; 947 struct eth_filter_cmd *p_first_filter; 948 struct eth_filter_cmd *p_second_filter; 949 struct qed_sp_init_data init_data; 950 enum eth_filter_action action; 951 int rc; 952 953 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, 954 &vport_to_remove_from); 955 if (rc) 956 return rc; 957 958 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, 959 &vport_to_add_to); 960 if (rc) 961 return rc; 962 963 /* Get SPQ entry */ 964 memset(&init_data, 0, sizeof(init_data)); 965 init_data.cid = qed_spq_get_cid(p_hwfn); 966 init_data.opaque_fid = opaque_fid; 967 init_data.comp_mode = comp_mode; 968 init_data.p_comp_data = p_comp_data; 969 970 rc = qed_sp_init_request(p_hwfn, pp_ent, 971 ETH_RAMROD_FILTERS_UPDATE, 972 PROTOCOLID_ETH, &init_data); 973 if (rc) 974 return rc; 975 976 *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update; 977 p_ramrod = *pp_ramrod; 978 p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0; 979 p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 
1 : 0; 980 981 switch (p_filter_cmd->opcode) { 982 case QED_FILTER_REPLACE: 983 case QED_FILTER_MOVE: 984 p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break; 985 default: 986 p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break; 987 } 988 989 p_first_filter = &p_ramrod->filter_cmds[0]; 990 p_second_filter = &p_ramrod->filter_cmds[1]; 991 992 switch (p_filter_cmd->type) { 993 case QED_FILTER_MAC: 994 p_first_filter->type = ETH_FILTER_TYPE_MAC; break; 995 case QED_FILTER_VLAN: 996 p_first_filter->type = ETH_FILTER_TYPE_VLAN; break; 997 case QED_FILTER_MAC_VLAN: 998 p_first_filter->type = ETH_FILTER_TYPE_PAIR; break; 999 case QED_FILTER_INNER_MAC: 1000 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break; 1001 case QED_FILTER_INNER_VLAN: 1002 p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break; 1003 case QED_FILTER_INNER_PAIR: 1004 p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break; 1005 case QED_FILTER_INNER_MAC_VNI_PAIR: 1006 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR; 1007 break; 1008 case QED_FILTER_MAC_VNI_PAIR: 1009 p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break; 1010 case QED_FILTER_VNI: 1011 p_first_filter->type = ETH_FILTER_TYPE_VNI; break; 1012 } 1013 1014 if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) || 1015 (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || 1016 (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) || 1017 (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) || 1018 (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || 1019 (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) { 1020 qed_set_fw_mac_addr(&p_first_filter->mac_msb, 1021 &p_first_filter->mac_mid, 1022 &p_first_filter->mac_lsb, 1023 (u8 *)p_filter_cmd->mac); 1024 } 1025 1026 if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) || 1027 (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || 1028 (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) || 1029 (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR)) 1030 p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan); 1031 1032 if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || 1033 (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) || 1034 (p_first_filter->type == ETH_FILTER_TYPE_VNI)) 1035 p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni); 1036 1037 if (p_filter_cmd->opcode == QED_FILTER_MOVE) { 1038 p_second_filter->type = p_first_filter->type; 1039 p_second_filter->mac_msb = p_first_filter->mac_msb; 1040 p_second_filter->mac_mid = p_first_filter->mac_mid; 1041 p_second_filter->mac_lsb = p_first_filter->mac_lsb; 1042 p_second_filter->vlan_id = p_first_filter->vlan_id; 1043 p_second_filter->vni = p_first_filter->vni; 1044 1045 p_first_filter->action = ETH_FILTER_ACTION_REMOVE; 1046 1047 p_first_filter->vport_id = vport_to_remove_from; 1048 1049 p_second_filter->action = ETH_FILTER_ACTION_ADD; 1050 p_second_filter->vport_id = vport_to_add_to; 1051 } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) { 1052 p_first_filter->vport_id = vport_to_add_to; 1053 memcpy(p_second_filter, p_first_filter, 1054 sizeof(*p_second_filter)); 1055 p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL; 1056 p_second_filter->action = ETH_FILTER_ACTION_ADD; 1057 } else { 1058 action = qed_filter_action(p_filter_cmd->opcode); 1059 1060 if (action == MAX_ETH_FILTER_ACTION) { 1061 DP_NOTICE(p_hwfn, 1062 "%d is not supported yet\n", 1063 p_filter_cmd->opcode); 1064 return -EINVAL; 1065 } 1066 1067 p_first_filter->action = action; 1068 p_first_filter->vport_id = (p_filter_cmd->opcode == 1069 QED_FILTER_REMOVE) ? 
1070 vport_to_remove_from : 1071 vport_to_add_to; 1072 } 1073 1074 return 0; 1075 } 1076 1077 int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, 1078 u16 opaque_fid, 1079 struct qed_filter_ucast *p_filter_cmd, 1080 enum spq_mode comp_mode, 1081 struct qed_spq_comp_cb *p_comp_data) 1082 { 1083 struct vport_filter_update_ramrod_data *p_ramrod = NULL; 1084 struct qed_spq_entry *p_ent = NULL; 1085 struct eth_filter_cmd_header *p_header; 1086 int rc; 1087 1088 rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd, 1089 &p_ramrod, &p_ent, 1090 comp_mode, p_comp_data); 1091 if (rc) { 1092 DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc); 1093 return rc; 1094 } 1095 p_header = &p_ramrod->filter_cmd_hdr; 1096 p_header->assert_on_error = p_filter_cmd->assert_on_error; 1097 1098 rc = qed_spq_post(p_hwfn, p_ent, NULL); 1099 if (rc) { 1100 DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc); 1101 return rc; 1102 } 1103 1104 DP_VERBOSE(p_hwfn, QED_MSG_SP, 1105 "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n", 1106 (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" : 1107 ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ? 1108 "REMOVE" : 1109 ((p_filter_cmd->opcode == QED_FILTER_MOVE) ? 1110 "MOVE" : "REPLACE")), 1111 (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" : 1112 ((p_filter_cmd->type == QED_FILTER_VLAN) ? 1113 "VLAN" : "MAC & VLAN"), 1114 p_ramrod->filter_cmd_hdr.cmd_cnt, 1115 p_filter_cmd->is_rx_filter, 1116 p_filter_cmd->is_tx_filter); 1117 DP_VERBOSE(p_hwfn, QED_MSG_SP, 1118 "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n", 1119 p_filter_cmd->vport_to_add_to, 1120 p_filter_cmd->vport_to_remove_from, 1121 p_filter_cmd->mac[0], 1122 p_filter_cmd->mac[1], 1123 p_filter_cmd->mac[2], 1124 p_filter_cmd->mac[3], 1125 p_filter_cmd->mac[4], 1126 p_filter_cmd->mac[5], 1127 p_filter_cmd->vlan); 1128 1129 return 0; 1130 } 1131 1132 /******************************************************************************* 1133 * Description: 1134 * Calculates crc 32 on a buffer 1135 * Note: crc32_length MUST be aligned to 8 1136 * Return: 1137 ******************************************************************************/ 1138 static u32 qed_calc_crc32c(u8 *crc32_packet, 1139 u32 crc32_length, u32 crc32_seed, u8 complement) 1140 { 1141 u32 byte = 0, bit = 0, crc32_result = crc32_seed; 1142 u8 msb = 0, current_byte = 0; 1143 1144 if ((!crc32_packet) || 1145 (crc32_length == 0) || 1146 ((crc32_length % 8) != 0)) 1147 return crc32_result; 1148 for (byte = 0; byte < crc32_length; byte++) { 1149 current_byte = crc32_packet[byte]; 1150 for (bit = 0; bit < 8; bit++) { 1151 msb = (u8)(crc32_result >> 31); 1152 crc32_result = crc32_result << 1; 1153 if (msb != (0x1 & (current_byte >> bit))) { 1154 crc32_result = crc32_result ^ CRC32_POLY; 1155 crc32_result |= 1; /*crc32_result[0] = 1;*/ 1156 } 1157 } 1158 } 1159 return crc32_result; 1160 } 1161 1162 static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len) 1163 { 1164 u32 packet_buf[2] = { 0 }; 1165 1166 memcpy((u8 *)(&packet_buf[0]), &mac[0], 6); 1167 return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0); 1168 } 1169 1170 u8 qed_mcast_bin_from_mac(u8 *mac) 1171 { 1172 u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, 1173 mac, ETH_ALEN); 1174 1175 return crc & 0xff; 1176 } 1177 1178 static int 1179 qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, 1180 u16 opaque_fid, 1181 struct qed_filter_mcast *p_filter_cmd, 1182 enum spq_mode comp_mode, 1183 struct 
qed_spq_comp_cb *p_comp_data) 1184 { 1185 unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; 1186 struct vport_update_ramrod_data *p_ramrod = NULL; 1187 struct qed_spq_entry *p_ent = NULL; 1188 struct qed_sp_init_data init_data; 1189 u8 abs_vport_id = 0; 1190 int rc, i; 1191 1192 if (p_filter_cmd->opcode == QED_FILTER_ADD) 1193 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, 1194 &abs_vport_id); 1195 else 1196 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, 1197 &abs_vport_id); 1198 if (rc) 1199 return rc; 1200 1201 /* Get SPQ entry */ 1202 memset(&init_data, 0, sizeof(init_data)); 1203 init_data.cid = qed_spq_get_cid(p_hwfn); 1204 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1205 init_data.comp_mode = comp_mode; 1206 init_data.p_comp_data = p_comp_data; 1207 1208 rc = qed_sp_init_request(p_hwfn, &p_ent, 1209 ETH_RAMROD_VPORT_UPDATE, 1210 PROTOCOLID_ETH, &init_data); 1211 if (rc) { 1212 DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); 1213 return rc; 1214 } 1215 1216 p_ramrod = &p_ent->ramrod.vport_update; 1217 p_ramrod->common.update_approx_mcast_flg = 1; 1218 1219 /* explicitly clear out the entire vector */ 1220 memset(&p_ramrod->approx_mcast.bins, 0, 1221 sizeof(p_ramrod->approx_mcast.bins)); 1222 memset(bins, 0, sizeof(unsigned long) * 1223 ETH_MULTICAST_MAC_BINS_IN_REGS); 1224 /* filter ADD op is explicit set op and it removes 1225 * any existing filters for the vport 1226 */ 1227 if (p_filter_cmd->opcode == QED_FILTER_ADD) { 1228 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { 1229 u32 bit; 1230 1231 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); 1232 __set_bit(bit, bins); 1233 } 1234 1235 /* Convert to correct endianity */ 1236 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { 1237 struct vport_update_ramrod_mcast *p_ramrod_bins; 1238 u32 *p_bins = (u32 *)bins; 1239 1240 p_ramrod_bins = &p_ramrod->approx_mcast; 1241 p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]); 1242 } 1243 } 1244 1245 p_ramrod->common.vport_id = abs_vport_id; 1246 1247 return qed_spq_post(p_hwfn, p_ent, NULL); 1248 } 1249 1250 static int qed_filter_mcast_cmd(struct qed_dev *cdev, 1251 struct qed_filter_mcast *p_filter_cmd, 1252 enum spq_mode comp_mode, 1253 struct qed_spq_comp_cb *p_comp_data) 1254 { 1255 int rc = 0; 1256 int i; 1257 1258 /* only ADD and REMOVE operations are supported for multi-cast */ 1259 if ((p_filter_cmd->opcode != QED_FILTER_ADD && 1260 (p_filter_cmd->opcode != QED_FILTER_REMOVE)) || 1261 (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS)) 1262 return -EINVAL; 1263 1264 for_each_hwfn(cdev, i) { 1265 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1266 1267 u16 opaque_fid; 1268 1269 if (IS_VF(cdev)) { 1270 qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd); 1271 continue; 1272 } 1273 1274 opaque_fid = p_hwfn->hw_info.opaque_fid; 1275 1276 rc = qed_sp_eth_filter_mcast(p_hwfn, 1277 opaque_fid, 1278 p_filter_cmd, 1279 comp_mode, p_comp_data); 1280 } 1281 return rc; 1282 } 1283 1284 static int qed_filter_ucast_cmd(struct qed_dev *cdev, 1285 struct qed_filter_ucast *p_filter_cmd, 1286 enum spq_mode comp_mode, 1287 struct qed_spq_comp_cb *p_comp_data) 1288 { 1289 int rc = 0; 1290 int i; 1291 1292 for_each_hwfn(cdev, i) { 1293 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1294 u16 opaque_fid; 1295 1296 if (IS_VF(cdev)) { 1297 rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd); 1298 continue; 1299 } 1300 1301 opaque_fid = p_hwfn->hw_info.opaque_fid; 1302 1303 rc = qed_sp_eth_filter_ucast(p_hwfn, 1304 opaque_fid, 1305 p_filter_cmd, 1306 comp_mode, p_comp_data); 1307 if 
(rc) 1308 break; 1309 } 1310 1311 return rc; 1312 } 1313 1314 /* Statistics related code */ 1315 static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn, 1316 u32 *p_addr, 1317 u32 *p_len, u16 statistics_bin) 1318 { 1319 if (IS_PF(p_hwfn->cdev)) { 1320 *p_addr = BAR0_MAP_REG_PSDM_RAM + 1321 PSTORM_QUEUE_STAT_OFFSET(statistics_bin); 1322 *p_len = sizeof(struct eth_pstorm_per_queue_stat); 1323 } else { 1324 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; 1325 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1326 1327 *p_addr = p_resp->pfdev_info.stats_info.pstats.address; 1328 *p_len = p_resp->pfdev_info.stats_info.pstats.len; 1329 } 1330 } 1331 1332 static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, 1333 struct qed_ptt *p_ptt, 1334 struct qed_eth_stats *p_stats, 1335 u16 statistics_bin) 1336 { 1337 struct eth_pstorm_per_queue_stat pstats; 1338 u32 pstats_addr = 0, pstats_len = 0; 1339 1340 __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len, 1341 statistics_bin); 1342 1343 memset(&pstats, 0, sizeof(pstats)); 1344 qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len); 1345 1346 p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes); 1347 p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes); 1348 p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes); 1349 p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts); 1350 p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts); 1351 p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts); 1352 p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts); 1353 } 1354 1355 static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, 1356 struct qed_ptt *p_ptt, 1357 struct qed_eth_stats *p_stats, 1358 u16 statistics_bin) 1359 { 1360 struct tstorm_per_port_stat tstats; 1361 u32 tstats_addr, tstats_len; 1362 1363 if (IS_PF(p_hwfn->cdev)) { 1364 tstats_addr = BAR0_MAP_REG_TSDM_RAM + 1365 TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); 1366 tstats_len = sizeof(struct tstorm_per_port_stat); 1367 } else { 1368 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; 1369 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1370 1371 tstats_addr = p_resp->pfdev_info.stats_info.tstats.address; 1372 tstats_len = p_resp->pfdev_info.stats_info.tstats.len; 1373 } 1374 1375 memset(&tstats, 0, sizeof(tstats)); 1376 qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len); 1377 1378 p_stats->mftag_filter_discards += 1379 HILO_64_REGPAIR(tstats.mftag_filter_discard); 1380 p_stats->mac_filter_discards += 1381 HILO_64_REGPAIR(tstats.eth_mac_filter_discard); 1382 } 1383 1384 static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, 1385 u32 *p_addr, 1386 u32 *p_len, u16 statistics_bin) 1387 { 1388 if (IS_PF(p_hwfn->cdev)) { 1389 *p_addr = BAR0_MAP_REG_USDM_RAM + 1390 USTORM_QUEUE_STAT_OFFSET(statistics_bin); 1391 *p_len = sizeof(struct eth_ustorm_per_queue_stat); 1392 } else { 1393 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; 1394 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1395 1396 *p_addr = p_resp->pfdev_info.stats_info.ustats.address; 1397 *p_len = p_resp->pfdev_info.stats_info.ustats.len; 1398 } 1399 } 1400 1401 static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, 1402 struct qed_ptt *p_ptt, 1403 struct qed_eth_stats *p_stats, 1404 u16 statistics_bin) 1405 { 1406 struct eth_ustorm_per_queue_stat ustats; 1407 u32 ustats_addr = 0, ustats_len = 0; 1408 1409 __qed_get_vport_ustats_addrlen(p_hwfn, 
&ustats_addr, &ustats_len, 1410 statistics_bin); 1411 1412 memset(&ustats, 0, sizeof(ustats)); 1413 qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len); 1414 1415 p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes); 1416 p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes); 1417 p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes); 1418 p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); 1419 p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); 1420 p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); 1421 } 1422 1423 static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, 1424 u32 *p_addr, 1425 u32 *p_len, u16 statistics_bin) 1426 { 1427 if (IS_PF(p_hwfn->cdev)) { 1428 *p_addr = BAR0_MAP_REG_MSDM_RAM + 1429 MSTORM_QUEUE_STAT_OFFSET(statistics_bin); 1430 *p_len = sizeof(struct eth_mstorm_per_queue_stat); 1431 } else { 1432 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; 1433 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1434 1435 *p_addr = p_resp->pfdev_info.stats_info.mstats.address; 1436 *p_len = p_resp->pfdev_info.stats_info.mstats.len; 1437 } 1438 } 1439 1440 static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, 1441 struct qed_ptt *p_ptt, 1442 struct qed_eth_stats *p_stats, 1443 u16 statistics_bin) 1444 { 1445 struct eth_mstorm_per_queue_stat mstats; 1446 u32 mstats_addr = 0, mstats_len = 0; 1447 1448 __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len, 1449 statistics_bin); 1450 1451 memset(&mstats, 0, sizeof(mstats)); 1452 qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len); 1453 1454 p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard); 1455 p_stats->packet_too_big_discard += 1456 HILO_64_REGPAIR(mstats.packet_too_big_discard); 1457 p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard); 1458 p_stats->tpa_coalesced_pkts += 1459 HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); 1460 p_stats->tpa_coalesced_events += 1461 HILO_64_REGPAIR(mstats.tpa_coalesced_events); 1462 p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num); 1463 p_stats->tpa_coalesced_bytes += 1464 HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); 1465 } 1466 1467 static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, 1468 struct qed_ptt *p_ptt, 1469 struct qed_eth_stats *p_stats) 1470 { 1471 struct port_stats port_stats; 1472 int j; 1473 1474 memset(&port_stats, 0, sizeof(port_stats)); 1475 1476 qed_memcpy_from(p_hwfn, p_ptt, &port_stats, 1477 p_hwfn->mcp_info->port_addr + 1478 offsetof(struct public_port, stats), 1479 sizeof(port_stats)); 1480 1481 p_stats->rx_64_byte_packets += port_stats.eth.r64; 1482 p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127; 1483 p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255; 1484 p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511; 1485 p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023; 1486 p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; 1487 p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522; 1488 p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047; 1489 p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095; 1490 p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216; 1491 p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383; 1492 p_stats->rx_crc_errors += port_stats.eth.rfcs; 1493 p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf; 1494 p_stats->rx_pause_frames += port_stats.eth.rxpf; 1495 
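	/* Regular pause (rxpf/txpf) and per-priority PFC (rxpp/txpp) frames
	 * are accumulated into separate counters.
	 */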
p_stats->rx_pfc_frames += port_stats.eth.rxpp; 1496 p_stats->rx_align_errors += port_stats.eth.raln; 1497 p_stats->rx_carrier_errors += port_stats.eth.rfcr; 1498 p_stats->rx_oversize_packets += port_stats.eth.rovr; 1499 p_stats->rx_jabbers += port_stats.eth.rjbr; 1500 p_stats->rx_undersize_packets += port_stats.eth.rund; 1501 p_stats->rx_fragments += port_stats.eth.rfrg; 1502 p_stats->tx_64_byte_packets += port_stats.eth.t64; 1503 p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127; 1504 p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255; 1505 p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511; 1506 p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023; 1507 p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; 1508 p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047; 1509 p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095; 1510 p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216; 1511 p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383; 1512 p_stats->tx_pause_frames += port_stats.eth.txpf; 1513 p_stats->tx_pfc_frames += port_stats.eth.txpp; 1514 p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec; 1515 p_stats->tx_total_collisions += port_stats.eth.tncl; 1516 p_stats->rx_mac_bytes += port_stats.eth.rbyte; 1517 p_stats->rx_mac_uc_packets += port_stats.eth.rxuca; 1518 p_stats->rx_mac_mc_packets += port_stats.eth.rxmca; 1519 p_stats->rx_mac_bc_packets += port_stats.eth.rxbca; 1520 p_stats->rx_mac_frames_ok += port_stats.eth.rxpok; 1521 p_stats->tx_mac_bytes += port_stats.eth.tbyte; 1522 p_stats->tx_mac_uc_packets += port_stats.eth.txuca; 1523 p_stats->tx_mac_mc_packets += port_stats.eth.txmca; 1524 p_stats->tx_mac_bc_packets += port_stats.eth.txbca; 1525 p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf; 1526 for (j = 0; j < 8; j++) { 1527 p_stats->brb_truncates += port_stats.brb.brb_truncate[j]; 1528 p_stats->brb_discards += port_stats.brb.brb_discard[j]; 1529 } 1530 } 1531 1532 static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, 1533 struct qed_ptt *p_ptt, 1534 struct qed_eth_stats *stats, 1535 u16 statistics_bin, bool b_get_port_stats) 1536 { 1537 __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin); 1538 __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin); 1539 __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin); 1540 __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin); 1541 1542 if (b_get_port_stats && p_hwfn->mcp_info) 1543 __qed_get_vport_port_stats(p_hwfn, p_ptt, stats); 1544 } 1545 1546 static void _qed_get_vport_stats(struct qed_dev *cdev, 1547 struct qed_eth_stats *stats) 1548 { 1549 u8 fw_vport = 0; 1550 int i; 1551 1552 memset(stats, 0, sizeof(*stats)); 1553 1554 for_each_hwfn(cdev, i) { 1555 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1556 struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) 1557 : NULL; 1558 1559 if (IS_PF(cdev)) { 1560 /* The main vport index is relative first */ 1561 if (qed_fw_vport(p_hwfn, 0, &fw_vport)) { 1562 DP_ERR(p_hwfn, "No vport available!\n"); 1563 goto out; 1564 } 1565 } 1566 1567 if (IS_PF(cdev) && !p_ptt) { 1568 DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 1569 continue; 1570 } 1571 1572 __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport, 1573 IS_PF(cdev) ? 
true : false); 1574 1575 out: 1576 if (IS_PF(cdev) && p_ptt) 1577 qed_ptt_release(p_hwfn, p_ptt); 1578 } 1579 } 1580 1581 void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats) 1582 { 1583 u32 i; 1584 1585 if (!cdev) { 1586 memset(stats, 0, sizeof(*stats)); 1587 return; 1588 } 1589 1590 _qed_get_vport_stats(cdev, stats); 1591 1592 if (!cdev->reset_stats) 1593 return; 1594 1595 /* Reduce the statistics baseline */ 1596 for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++) 1597 ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i]; 1598 } 1599 1600 /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */ 1601 void qed_reset_vport_stats(struct qed_dev *cdev) 1602 { 1603 int i; 1604 1605 for_each_hwfn(cdev, i) { 1606 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1607 struct eth_mstorm_per_queue_stat mstats; 1608 struct eth_ustorm_per_queue_stat ustats; 1609 struct eth_pstorm_per_queue_stat pstats; 1610 struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) 1611 : NULL; 1612 u32 addr = 0, len = 0; 1613 1614 if (IS_PF(cdev) && !p_ptt) { 1615 DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 1616 continue; 1617 } 1618 1619 memset(&mstats, 0, sizeof(mstats)); 1620 __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0); 1621 qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len); 1622 1623 memset(&ustats, 0, sizeof(ustats)); 1624 __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0); 1625 qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len); 1626 1627 memset(&pstats, 0, sizeof(pstats)); 1628 __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0); 1629 qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len); 1630 1631 if (IS_PF(cdev)) 1632 qed_ptt_release(p_hwfn, p_ptt); 1633 } 1634 1635 /* PORT statistics are not necessarily reset, so we need to 1636 * read and create a baseline for future statistics. 
1637 */ 1638 if (!cdev->reset_stats) 1639 DP_INFO(cdev, "Reset stats not allocated\n"); 1640 else 1641 _qed_get_vport_stats(cdev, cdev->reset_stats); 1642 } 1643 1644 static int qed_fill_eth_dev_info(struct qed_dev *cdev, 1645 struct qed_dev_eth_info *info) 1646 { 1647 int i; 1648 1649 memset(info, 0, sizeof(*info)); 1650 1651 info->num_tc = 1; 1652 1653 if (IS_PF(cdev)) { 1654 int max_vf_vlan_filters = 0; 1655 1656 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 1657 for_each_hwfn(cdev, i) 1658 info->num_queues += 1659 FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); 1660 if (cdev->int_params.fp_msix_cnt) 1661 info->num_queues = 1662 min_t(u8, info->num_queues, 1663 cdev->int_params.fp_msix_cnt); 1664 } else { 1665 info->num_queues = cdev->num_hwfns; 1666 } 1667 1668 if (IS_QED_SRIOV(cdev)) 1669 max_vf_vlan_filters = cdev->p_iov_info->total_vfs * 1670 QED_ETH_VF_NUM_VLAN_FILTERS; 1671 info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN) - 1672 max_vf_vlan_filters; 1673 1674 ether_addr_copy(info->port_mac, 1675 cdev->hwfns[0].hw_info.hw_mac_addr); 1676 } else { 1677 qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues); 1678 if (cdev->num_hwfns > 1) { 1679 u8 queues = 0; 1680 1681 qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues); 1682 info->num_queues += queues; 1683 } 1684 1685 qed_vf_get_num_vlan_filters(&cdev->hwfns[0], 1686 &info->num_vlan_filters); 1687 qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac); 1688 1689 info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi; 1690 } 1691 1692 qed_fill_dev_info(cdev, &info->common); 1693 1694 if (IS_VF(cdev)) 1695 memset(info->common.hw_mac, 0, ETH_ALEN); 1696 1697 return 0; 1698 } 1699 1700 static void qed_register_eth_ops(struct qed_dev *cdev, 1701 struct qed_eth_cb_ops *ops, void *cookie) 1702 { 1703 cdev->protocol_ops.eth = ops; 1704 cdev->ops_cookie = cookie; 1705 1706 /* For VF, we start bulletin reading */ 1707 if (IS_VF(cdev)) 1708 qed_vf_start_iov_wq(cdev); 1709 } 1710 1711 static bool qed_check_mac(struct qed_dev *cdev, u8 *mac) 1712 { 1713 if (IS_PF(cdev)) 1714 return true; 1715 1716 return qed_vf_check_mac(&cdev->hwfns[0], mac); 1717 } 1718 1719 static int qed_start_vport(struct qed_dev *cdev, 1720 struct qed_start_vport_params *params) 1721 { 1722 int rc, i; 1723 1724 for_each_hwfn(cdev, i) { 1725 struct qed_sp_vport_start_params start = { 0 }; 1726 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1727 1728 start.tpa_mode = params->gro_enable ? 
QED_TPA_MODE_GRO : 1729 QED_TPA_MODE_NONE; 1730 start.remove_inner_vlan = params->remove_inner_vlan; 1731 start.only_untagged = true; /* untagged only */ 1732 start.drop_ttl0 = params->drop_ttl0; 1733 start.opaque_fid = p_hwfn->hw_info.opaque_fid; 1734 start.concrete_fid = p_hwfn->hw_info.concrete_fid; 1735 start.vport_id = params->vport_id; 1736 start.max_buffers_per_cqe = 16; 1737 start.mtu = params->mtu; 1738 1739 rc = qed_sp_vport_start(p_hwfn, &start); 1740 if (rc) { 1741 DP_ERR(cdev, "Failed to start VPORT\n"); 1742 return rc; 1743 } 1744 1745 qed_hw_start_fastpath(p_hwfn); 1746 1747 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), 1748 "Started V-PORT %d with MTU %d\n", 1749 start.vport_id, start.mtu); 1750 } 1751 1752 if (params->clear_stats) 1753 qed_reset_vport_stats(cdev); 1754 1755 return 0; 1756 } 1757 1758 static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id) 1759 { 1760 int rc, i; 1761 1762 for_each_hwfn(cdev, i) { 1763 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1764 1765 rc = qed_sp_vport_stop(p_hwfn, 1766 p_hwfn->hw_info.opaque_fid, vport_id); 1767 1768 if (rc) { 1769 DP_ERR(cdev, "Failed to stop VPORT\n"); 1770 return rc; 1771 } 1772 } 1773 return 0; 1774 } 1775 1776 static int qed_update_vport(struct qed_dev *cdev, 1777 struct qed_update_vport_params *params) 1778 { 1779 struct qed_sp_vport_update_params sp_params; 1780 struct qed_rss_params sp_rss_params; 1781 int rc, i; 1782 1783 if (!cdev) 1784 return -ENODEV; 1785 1786 memset(&sp_params, 0, sizeof(sp_params)); 1787 memset(&sp_rss_params, 0, sizeof(sp_rss_params)); 1788 1789 /* Translate protocol params into sp params */ 1790 sp_params.vport_id = params->vport_id; 1791 sp_params.update_vport_active_rx_flg = params->update_vport_active_flg; 1792 sp_params.update_vport_active_tx_flg = params->update_vport_active_flg; 1793 sp_params.vport_active_rx_flg = params->vport_active_flg; 1794 sp_params.vport_active_tx_flg = params->vport_active_flg; 1795 sp_params.update_tx_switching_flg = params->update_tx_switching_flg; 1796 sp_params.tx_switching_flg = params->tx_switching_flg; 1797 sp_params.accept_any_vlan = params->accept_any_vlan; 1798 sp_params.update_accept_any_vlan_flg = 1799 params->update_accept_any_vlan_flg; 1800 1801 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. 1802 * We need to re-fix the rss values per engine for CMT. 1803 */ 1804 if (cdev->num_hwfns > 1 && params->update_rss_flg) { 1805 struct qed_update_vport_rss_params *rss = ¶ms->rss_params; 1806 int k, max = 0; 1807 1808 /* Find largest entry, since it's possible RSS needs to 1809 * be disabled [in case only 1 queue per-hwfn] 1810 */ 1811 for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++) 1812 max = (max > rss->rss_ind_table[k]) ? 
1813 max : rss->rss_ind_table[k]; 1814 1815 /* Either fix RSS values or disable RSS */ 1816 if (cdev->num_hwfns < max + 1) { 1817 int divisor = (max + cdev->num_hwfns - 1) / 1818 cdev->num_hwfns; 1819 1820 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), 1821 "CMT - fixing RSS values (modulo %02x)\n", 1822 divisor); 1823 1824 for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++) 1825 rss->rss_ind_table[k] = 1826 rss->rss_ind_table[k] % divisor; 1827 } else { 1828 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), 1829 "CMT - 1 queue per-hwfn; Disabling RSS\n"); 1830 params->update_rss_flg = 0; 1831 } 1832 } 1833 1834 /* Now, update the RSS configuration for actual configuration */ 1835 if (params->update_rss_flg) { 1836 sp_rss_params.update_rss_config = 1; 1837 sp_rss_params.rss_enable = 1; 1838 sp_rss_params.update_rss_capabilities = 1; 1839 sp_rss_params.update_rss_ind_table = 1; 1840 sp_rss_params.update_rss_key = 1; 1841 sp_rss_params.rss_caps = params->rss_params.rss_caps; 1842 sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */ 1843 memcpy(sp_rss_params.rss_ind_table, 1844 params->rss_params.rss_ind_table, 1845 QED_RSS_IND_TABLE_SIZE * sizeof(u16)); 1846 memcpy(sp_rss_params.rss_key, params->rss_params.rss_key, 1847 QED_RSS_KEY_SIZE * sizeof(u32)); 1848 sp_params.rss_params = &sp_rss_params; 1849 } 1850 1851 for_each_hwfn(cdev, i) { 1852 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1853 1854 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 1855 rc = qed_sp_vport_update(p_hwfn, &sp_params, 1856 QED_SPQ_MODE_EBLOCK, 1857 NULL); 1858 if (rc) { 1859 DP_ERR(cdev, "Failed to update VPORT\n"); 1860 return rc; 1861 } 1862 1863 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), 1864 "Updated V-PORT %d: active_flag %d [update %d]\n", 1865 params->vport_id, params->vport_active_flg, 1866 params->update_vport_active_flg); 1867 } 1868 1869 return 0; 1870 } 1871 1872 static int qed_start_rxq(struct qed_dev *cdev, 1873 struct qed_queue_start_common_params *params, 1874 u16 bd_max_bytes, 1875 dma_addr_t bd_chain_phys_addr, 1876 dma_addr_t cqe_pbl_addr, 1877 u16 cqe_pbl_size, 1878 void __iomem **pp_prod) 1879 { 1880 struct qed_hwfn *p_hwfn; 1881 int rc, hwfn_index; 1882 1883 hwfn_index = params->rss_id % cdev->num_hwfns; 1884 p_hwfn = &cdev->hwfns[hwfn_index]; 1885 1886 /* Fix queue ID in 100g mode */ 1887 params->queue_id /= cdev->num_hwfns; 1888 1889 rc = qed_sp_eth_rx_queue_start(p_hwfn, 1890 p_hwfn->hw_info.opaque_fid, 1891 params, 1892 bd_max_bytes, 1893 bd_chain_phys_addr, 1894 cqe_pbl_addr, 1895 cqe_pbl_size, 1896 pp_prod); 1897 1898 if (rc) { 1899 DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id); 1900 return rc; 1901 } 1902 1903 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), 1904 "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n", 1905 params->queue_id, params->rss_id, params->vport_id, 1906 params->sb); 1907 1908 return 0; 1909 } 1910 1911 static int qed_stop_rxq(struct qed_dev *cdev, 1912 struct qed_stop_rxq_params *params) 1913 { 1914 int rc, hwfn_index; 1915 struct qed_hwfn *p_hwfn; 1916 1917 hwfn_index = params->rss_id % cdev->num_hwfns; 1918 p_hwfn = &cdev->hwfns[hwfn_index]; 1919 1920 rc = qed_sp_eth_rx_queue_stop(p_hwfn, 1921 params->rx_queue_id / cdev->num_hwfns, 1922 params->eq_completion_only, false); 1923 if (rc) { 1924 DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id); 1925 return rc; 1926 } 1927 1928 return 0; 1929 } 1930 1931 static int qed_start_txq(struct qed_dev *cdev, 1932 struct qed_queue_start_common_params *p_params, 1933 dma_addr_t pbl_addr, 1934 u16 
pbl_size, 1935 void __iomem **pp_doorbell) 1936 { 1937 struct qed_hwfn *p_hwfn; 1938 int rc, hwfn_index; 1939 1940 hwfn_index = p_params->rss_id % cdev->num_hwfns; 1941 p_hwfn = &cdev->hwfns[hwfn_index]; 1942 1943 /* Fix queue ID in 100g mode */ 1944 p_params->queue_id /= cdev->num_hwfns; 1945 1946 rc = qed_sp_eth_tx_queue_start(p_hwfn, 1947 p_hwfn->hw_info.opaque_fid, 1948 p_params, 1949 pbl_addr, 1950 pbl_size, 1951 pp_doorbell); 1952 1953 if (rc) { 1954 DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id); 1955 return rc; 1956 } 1957 1958 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), 1959 "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n", 1960 p_params->queue_id, p_params->rss_id, p_params->vport_id, 1961 p_params->sb); 1962 1963 return 0; 1964 } 1965 1966 #define QED_HW_STOP_RETRY_LIMIT (10) 1967 static int qed_fastpath_stop(struct qed_dev *cdev) 1968 { 1969 qed_hw_stop_fastpath(cdev); 1970 1971 return 0; 1972 } 1973 1974 static int qed_stop_txq(struct qed_dev *cdev, 1975 struct qed_stop_txq_params *params) 1976 { 1977 struct qed_hwfn *p_hwfn; 1978 int rc, hwfn_index; 1979 1980 hwfn_index = params->rss_id % cdev->num_hwfns; 1981 p_hwfn = &cdev->hwfns[hwfn_index]; 1982 1983 rc = qed_sp_eth_tx_queue_stop(p_hwfn, 1984 params->tx_queue_id / cdev->num_hwfns); 1985 if (rc) { 1986 DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id); 1987 return rc; 1988 } 1989 1990 return 0; 1991 } 1992 1993 static int qed_tunn_configure(struct qed_dev *cdev, 1994 struct qed_tunn_params *tunn_params) 1995 { 1996 struct qed_tunn_update_params tunn_info; 1997 int i, rc; 1998 1999 if (IS_VF(cdev)) 2000 return 0; 2001 2002 memset(&tunn_info, 0, sizeof(tunn_info)); 2003 if (tunn_params->update_vxlan_port == 1) { 2004 tunn_info.update_vxlan_udp_port = 1; 2005 tunn_info.vxlan_udp_port = tunn_params->vxlan_port; 2006 } 2007 2008 if (tunn_params->update_geneve_port == 1) { 2009 tunn_info.update_geneve_udp_port = 1; 2010 tunn_info.geneve_udp_port = tunn_params->geneve_port; 2011 } 2012 2013 for_each_hwfn(cdev, i) { 2014 struct qed_hwfn *hwfn = &cdev->hwfns[i]; 2015 2016 rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info, 2017 QED_SPQ_MODE_EBLOCK, NULL); 2018 2019 if (rc) 2020 return rc; 2021 } 2022 2023 return 0; 2024 } 2025 2026 static int qed_configure_filter_rx_mode(struct qed_dev *cdev, 2027 enum qed_filter_rx_mode_type type) 2028 { 2029 struct qed_filter_accept_flags accept_flags; 2030 2031 memset(&accept_flags, 0, sizeof(accept_flags)); 2032 2033 accept_flags.update_rx_mode_config = 1; 2034 accept_flags.update_tx_mode_config = 1; 2035 accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED | 2036 QED_ACCEPT_MCAST_MATCHED | 2037 QED_ACCEPT_BCAST; 2038 accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED | 2039 QED_ACCEPT_MCAST_MATCHED | 2040 QED_ACCEPT_BCAST; 2041 2042 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) 2043 accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | 2044 QED_ACCEPT_MCAST_UNMATCHED; 2045 else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) 2046 accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; 2047 2048 return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false, 2049 QED_SPQ_MODE_CB, NULL); 2050 } 2051 2052 static int qed_configure_filter_ucast(struct qed_dev *cdev, 2053 struct qed_filter_ucast_params *params) 2054 { 2055 struct qed_filter_ucast ucast; 2056 2057 if (!params->vlan_valid && !params->mac_valid) { 2058 DP_NOTICE(cdev, 2059 "Tried configuring a unicast filter, but both MAC and VLAN are not set\n"); 2060 return -EINVAL; 2061 } 2062 
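	/* Map the generic XCAST request onto a concrete unicast filter
	 * command below; the resulting filter is applied on both the Rx
	 * and Tx paths.
	 */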
2063 memset(&ucast, 0, sizeof(ucast)); 2064 switch (params->type) { 2065 case QED_FILTER_XCAST_TYPE_ADD: 2066 ucast.opcode = QED_FILTER_ADD; 2067 break; 2068 case QED_FILTER_XCAST_TYPE_DEL: 2069 ucast.opcode = QED_FILTER_REMOVE; 2070 break; 2071 case QED_FILTER_XCAST_TYPE_REPLACE: 2072 ucast.opcode = QED_FILTER_REPLACE; 2073 break; 2074 default: 2075 DP_NOTICE(cdev, "Unknown unicast filter type %d\n", 2076 params->type); return -EINVAL; 2077 } 2078 2079 if (params->vlan_valid && params->mac_valid) { 2080 ucast.type = QED_FILTER_MAC_VLAN; 2081 ether_addr_copy(ucast.mac, params->mac); 2082 ucast.vlan = params->vlan; 2083 } else if (params->mac_valid) { 2084 ucast.type = QED_FILTER_MAC; 2085 ether_addr_copy(ucast.mac, params->mac); 2086 } else { 2087 ucast.type = QED_FILTER_VLAN; 2088 ucast.vlan = params->vlan; 2089 } 2090 2091 ucast.is_rx_filter = true; 2092 ucast.is_tx_filter = true; 2093 2094 return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL); 2095 } 2096 2097 static int qed_configure_filter_mcast(struct qed_dev *cdev, 2098 struct qed_filter_mcast_params *params) 2099 { 2100 struct qed_filter_mcast mcast; 2101 int i; 2102 2103 memset(&mcast, 0, sizeof(mcast)); 2104 switch (params->type) { 2105 case QED_FILTER_XCAST_TYPE_ADD: 2106 mcast.opcode = QED_FILTER_ADD; 2107 break; 2108 case QED_FILTER_XCAST_TYPE_DEL: 2109 mcast.opcode = QED_FILTER_REMOVE; 2110 break; 2111 default: 2112 DP_NOTICE(cdev, "Unknown multicast filter type %d\n", 2113 params->type); return -EINVAL; 2114 } 2115 2116 mcast.num_mc_addrs = params->num; 2117 for (i = 0; i < mcast.num_mc_addrs; i++) 2118 ether_addr_copy(mcast.mac[i], params->mac[i]); 2119 2120 return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL); 2121 } 2122 2123 static int qed_configure_filter(struct qed_dev *cdev, 2124 struct qed_filter_params *params) 2125 { 2126 enum qed_filter_rx_mode_type accept_flags; 2127 2128 switch (params->type) { 2129 case QED_FILTER_TYPE_UCAST: 2130 return qed_configure_filter_ucast(cdev, &params->filter.ucast); 2131 case QED_FILTER_TYPE_MCAST: 2132 return qed_configure_filter_mcast(cdev, &params->filter.mcast); 2133 case QED_FILTER_TYPE_RX_MODE: 2134 accept_flags = params->filter.accept_flags; 2135 return qed_configure_filter_rx_mode(cdev, accept_flags); 2136 default: 2137 DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type); 2138 return -EINVAL; 2139 } 2140 } 2141 2142 static int qed_fp_cqe_completion(struct qed_dev *dev, 2143 u8 rss_id, struct eth_slow_path_rx_cqe *cqe) 2144 { 2145 return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns], 2146 cqe); 2147 } 2148 2149 #ifdef CONFIG_QED_SRIOV 2150 extern const struct qed_iov_hv_ops qed_iov_ops_pass; 2151 #endif 2152 2153 #ifdef CONFIG_DCB 2154 extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass; 2155 #endif 2156 2157 static const struct qed_eth_ops qed_eth_ops_pass = { 2158 .common = &qed_common_ops_pass, 2159 #ifdef CONFIG_QED_SRIOV 2160 .iov = &qed_iov_ops_pass, 2161 #endif 2162 #ifdef CONFIG_DCB 2163 .dcb = &qed_dcbnl_ops_pass, 2164 #endif 2165 .fill_dev_info = &qed_fill_eth_dev_info, 2166 .register_ops = &qed_register_eth_ops, 2167 .check_mac = &qed_check_mac, 2168 .vport_start = &qed_start_vport, 2169 .vport_stop = &qed_stop_vport, 2170 .vport_update = &qed_update_vport, 2171 .q_rx_start = &qed_start_rxq, 2172 .q_rx_stop = &qed_stop_rxq, 2173 .q_tx_start = &qed_start_txq, 2174 .q_tx_stop = &qed_stop_txq, 2175 .filter_config = &qed_configure_filter, 2176 .fastpath_stop = &qed_fastpath_stop, 2177 .eth_cqe_completion = &qed_fp_cqe_completion, 2178 .get_vport_stats =
&qed_get_vport_stats, 2179 .tunn_config = &qed_tunn_configure, 2180 }; 2181 2182 const struct qed_eth_ops *qed_get_eth_ops(void) 2183 { 2184 return &qed_eth_ops_pass; 2185 } 2186 EXPORT_SYMBOL(qed_get_eth_ops); 2187 2188 void qed_put_eth_ops(void) 2189 { 2190 /* TODO - reference count for module? */ 2191 } 2192 EXPORT_SYMBOL(qed_put_eth_ops); 2193
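
/* Illustrative sketch only (not part of the upstream driver): one way an
 * upper-layer protocol driver could consume the qed_eth_ops exported above.
 * The helper name and the literal values here are assumptions made for the
 * example; a real consumer (e.g. qede) obtains the ops once at probe time
 * and drives its vports and queues through them.
 */
static int __maybe_unused qed_eth_ops_usage_example(struct qed_dev *cdev)
{
	const struct qed_eth_ops *ops = qed_get_eth_ops();
	struct qed_start_vport_params start = { 0 };
	struct qed_dev_eth_info info;
	int rc;

	/* Query queue/MAC resources before bringing up a vport */
	rc = ops->fill_dev_info(cdev, &info);
	if (rc)
		return rc;

	/* Example values - a real caller derives these from its netdev */
	start.vport_id = 0;
	start.mtu = 1500;
	start.gro_enable = true;
	start.drop_ttl0 = true;
	start.remove_inner_vlan = true;
	start.clear_stats = true;

	return ops->vport_start(cdev, &start);
}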