1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) 2 /* QLogic qed NIC Driver 3 * Copyright (c) 2015-2017 QLogic Corporation 4 * Copyright (c) 2019-2020 Marvell International Ltd. 5 */ 6 7 #include <linux/types.h> 8 #include <asm/byteorder.h> 9 #include <asm/param.h> 10 #include <linux/delay.h> 11 #include <linux/dma-mapping.h> 12 #include <linux/etherdevice.h> 13 #include <linux/interrupt.h> 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/pci.h> 17 #include <linux/slab.h> 18 #include <linux/stddef.h> 19 #include <linux/string.h> 20 #include <linux/workqueue.h> 21 #include <linux/bitops.h> 22 #include <linux/bug.h> 23 #include <linux/vmalloc.h> 24 #include "qed.h" 25 #include <linux/qed/qed_chain.h> 26 #include "qed_cxt.h" 27 #include "qed_dcbx.h" 28 #include "qed_dev_api.h" 29 #include <linux/qed/qed_eth_if.h> 30 #include "qed_hsi.h" 31 #include "qed_iro_hsi.h" 32 #include "qed_hw.h" 33 #include "qed_int.h" 34 #include "qed_l2.h" 35 #include "qed_mcp.h" 36 #include "qed_ptp.h" 37 #include "qed_reg_addr.h" 38 #include "qed_sp.h" 39 #include "qed_sriov.h" 40 41 42 #define QED_MAX_SGES_NUM 16 43 #define CRC32_POLY 0x1edc6f41 44 45 struct qed_l2_info { 46 u32 queues; 47 unsigned long **pp_qid_usage; 48 49 /* The lock is meant to synchronize access to the qid usage */ 50 struct mutex lock; 51 }; 52 53 int qed_l2_alloc(struct qed_hwfn *p_hwfn) 54 { 55 struct qed_l2_info *p_l2_info; 56 unsigned long **pp_qids; 57 u32 i; 58 59 if (!QED_IS_L2_PERSONALITY(p_hwfn)) 60 return 0; 61 62 p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL); 63 if (!p_l2_info) 64 return -ENOMEM; 65 p_hwfn->p_l2_info = p_l2_info; 66 67 if (IS_PF(p_hwfn->cdev)) { 68 p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE); 69 } else { 70 u8 rx = 0, tx = 0; 71 72 qed_vf_get_num_rxqs(p_hwfn, &rx); 73 qed_vf_get_num_txqs(p_hwfn, &tx); 74 75 p_l2_info->queues = max_t(u8, rx, tx); 76 } 77 78 pp_qids = kcalloc(p_l2_info->queues, sizeof(unsigned long *), 79 GFP_KERNEL); 80 if (!pp_qids) 81 return -ENOMEM; 82 p_l2_info->pp_qid_usage = pp_qids; 83 84 for (i = 0; i < p_l2_info->queues; i++) { 85 pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL); 86 if (!pp_qids[i]) 87 return -ENOMEM; 88 } 89 90 return 0; 91 } 92 93 void qed_l2_setup(struct qed_hwfn *p_hwfn) 94 { 95 if (!QED_IS_L2_PERSONALITY(p_hwfn)) 96 return; 97 98 mutex_init(&p_hwfn->p_l2_info->lock); 99 } 100 101 void qed_l2_free(struct qed_hwfn *p_hwfn) 102 { 103 u32 i; 104 105 if (!QED_IS_L2_PERSONALITY(p_hwfn)) 106 return; 107 108 if (!p_hwfn->p_l2_info) 109 return; 110 111 if (!p_hwfn->p_l2_info->pp_qid_usage) 112 goto out_l2_info; 113 114 /* Free until hit first uninitialized entry */ 115 for (i = 0; i < p_hwfn->p_l2_info->queues; i++) { 116 if (!p_hwfn->p_l2_info->pp_qid_usage[i]) 117 break; 118 kfree(p_hwfn->p_l2_info->pp_qid_usage[i]); 119 } 120 121 kfree(p_hwfn->p_l2_info->pp_qid_usage); 122 123 out_l2_info: 124 kfree(p_hwfn->p_l2_info); 125 p_hwfn->p_l2_info = NULL; 126 } 127 128 static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn, 129 struct qed_queue_cid *p_cid) 130 { 131 struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info; 132 u16 queue_id = p_cid->rel.queue_id; 133 bool b_rc = true; 134 u8 first; 135 136 mutex_lock(&p_l2_info->lock); 137 138 if (queue_id >= p_l2_info->queues) { 139 DP_NOTICE(p_hwfn, 140 "Requested to increase usage for qzone %04x out of %08x\n", 141 queue_id, p_l2_info->queues); 142 b_rc = false; 143 goto out; 144 } 145 146 first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id], 147 MAX_QUEUES_PER_QZONE); 148 if (first >= MAX_QUEUES_PER_QZONE) { 149 b_rc = false; 150 goto out; 151 } 152 153 __set_bit(first, p_l2_info->pp_qid_usage[queue_id]); 154 p_cid->qid_usage_idx = first; 155 156 out: 157 mutex_unlock(&p_l2_info->lock); 158 return b_rc; 159 } 160 161 static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn, 162 struct qed_queue_cid *p_cid) 163 { 164 mutex_lock(&p_hwfn->p_l2_info->lock); 165 166 clear_bit(p_cid->qid_usage_idx, 167 p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]); 168 169 mutex_unlock(&p_hwfn->p_l2_info->lock); 170 } 171 172 void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn, 173 struct qed_queue_cid *p_cid) 174 { 175 bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID); 176 177 if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) 178 _qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid); 179 180 /* For PF's VFs we maintain the index inside queue-zone in IOV */ 181 if (p_cid->vfid == QED_QUEUE_CID_SELF) 182 qed_eth_queue_qid_usage_del(p_hwfn, p_cid); 183 184 vfree(p_cid); 185 } 186 187 /* The internal is only meant to be directly called by PFs initializeing CIDs 188 * for their VFs. 189 */ 190 static struct qed_queue_cid * 191 _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, 192 u16 opaque_fid, 193 u32 cid, 194 struct qed_queue_start_common_params *p_params, 195 bool b_is_rx, 196 struct qed_queue_cid_vf_params *p_vf_params) 197 { 198 struct qed_queue_cid *p_cid; 199 int rc; 200 201 p_cid = vzalloc(sizeof(*p_cid)); 202 if (!p_cid) 203 return NULL; 204 205 p_cid->opaque_fid = opaque_fid; 206 p_cid->cid = cid; 207 p_cid->p_owner = p_hwfn; 208 209 /* Fill in parameters */ 210 p_cid->rel.vport_id = p_params->vport_id; 211 p_cid->rel.queue_id = p_params->queue_id; 212 p_cid->rel.stats_id = p_params->stats_id; 213 p_cid->sb_igu_id = p_params->p_sb->igu_sb_id; 214 p_cid->b_is_rx = b_is_rx; 215 p_cid->sb_idx = p_params->sb_idx; 216 217 /* Fill-in bits related to VFs' queues if information was provided */ 218 if (p_vf_params) { 219 p_cid->vfid = p_vf_params->vfid; 220 p_cid->vf_qid = p_vf_params->vf_qid; 221 p_cid->vf_legacy = p_vf_params->vf_legacy; 222 } else { 223 p_cid->vfid = QED_QUEUE_CID_SELF; 224 } 225 226 /* Don't try calculating the absolute indices for VFs */ 227 if (IS_VF(p_hwfn->cdev)) { 228 p_cid->abs = p_cid->rel; 229 goto out; 230 } 231 232 /* Calculate the engine-absolute indices of the resources. 233 * This would guarantee they're valid later on. 234 * In some cases [SBs] we already have the right values. 235 */ 236 rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id); 237 if (rc) 238 goto fail; 239 240 rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id); 241 if (rc) 242 goto fail; 243 244 /* In case of a PF configuring its VF's queues, the stats-id is already 245 * absolute [since there's a single index that's suitable per-VF]. 246 */ 247 if (p_cid->vfid == QED_QUEUE_CID_SELF) { 248 rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id, 249 &p_cid->abs.stats_id); 250 if (rc) 251 goto fail; 252 } else { 253 p_cid->abs.stats_id = p_cid->rel.stats_id; 254 } 255 256 out: 257 /* VF-images have provided the qid_usage_idx on their own. 258 * Otherwise, we need to allocate a unique one. 259 */ 260 if (!p_vf_params) { 261 if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid)) 262 goto fail; 263 } else { 264 p_cid->qid_usage_idx = p_vf_params->qid_usage_idx; 265 } 266 267 DP_VERBOSE(p_hwfn, 268 QED_MSG_SP, 269 "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n", 270 p_cid->opaque_fid, 271 p_cid->cid, 272 p_cid->rel.vport_id, 273 p_cid->abs.vport_id, 274 p_cid->rel.queue_id, 275 p_cid->qid_usage_idx, 276 p_cid->abs.queue_id, 277 p_cid->rel.stats_id, 278 p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx); 279 280 return p_cid; 281 282 fail: 283 vfree(p_cid); 284 return NULL; 285 } 286 287 struct qed_queue_cid * 288 qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, 289 u16 opaque_fid, 290 struct qed_queue_start_common_params *p_params, 291 bool b_is_rx, 292 struct qed_queue_cid_vf_params *p_vf_params) 293 { 294 struct qed_queue_cid *p_cid; 295 u8 vfid = QED_CXT_PF_CID; 296 bool b_legacy_vf = false; 297 u32 cid = 0; 298 299 /* In case of legacy VFs, The CID can be derived from the additional 300 * VF parameters - the VF assumes queue X uses CID X, so we can simply 301 * use the vf_qid for this purpose as well. 302 */ 303 if (p_vf_params) { 304 vfid = p_vf_params->vfid; 305 306 if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) { 307 b_legacy_vf = true; 308 cid = p_vf_params->vf_qid; 309 } 310 } 311 312 /* Get a unique firmware CID for this queue, in case it's a PF. 313 * VF's don't need a CID as the queue configuration will be done 314 * by PF. 315 */ 316 if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) { 317 if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, 318 &cid, vfid)) { 319 DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); 320 return NULL; 321 } 322 } 323 324 p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 325 p_params, b_is_rx, p_vf_params); 326 if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf) 327 _qed_cxt_release_cid(p_hwfn, cid, vfid); 328 329 return p_cid; 330 } 331 332 static struct qed_queue_cid * 333 qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn, 334 u16 opaque_fid, 335 bool b_is_rx, 336 struct qed_queue_start_common_params *p_params) 337 { 338 return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx, 339 NULL); 340 } 341 342 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, 343 struct qed_sp_vport_start_params *p_params) 344 { 345 struct vport_start_ramrod_data *p_ramrod = NULL; 346 struct eth_vport_tpa_param *tpa_param; 347 struct qed_spq_entry *p_ent = NULL; 348 struct qed_sp_init_data init_data; 349 u16 min_size, rx_mode = 0; 350 u8 abs_vport_id = 0; 351 int rc; 352 353 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 354 if (rc) 355 return rc; 356 357 memset(&init_data, 0, sizeof(init_data)); 358 init_data.cid = qed_spq_get_cid(p_hwfn); 359 init_data.opaque_fid = p_params->opaque_fid; 360 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 361 362 rc = qed_sp_init_request(p_hwfn, &p_ent, 363 ETH_RAMROD_VPORT_START, 364 PROTOCOLID_ETH, &init_data); 365 if (rc) 366 return rc; 367 368 p_ramrod = &p_ent->ramrod.vport_start; 369 p_ramrod->vport_id = abs_vport_id; 370 371 p_ramrod->mtu = cpu_to_le16(p_params->mtu); 372 p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts; 373 p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan; 374 p_ramrod->drop_ttl0_en = p_params->drop_ttl0; 375 p_ramrod->untagged = p_params->only_untagged; 376 377 SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1); 378 SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1); 379 380 p_ramrod->rx_mode.state = cpu_to_le16(rx_mode); 381 382 /* TPA related fields */ 383 tpa_param = &p_ramrod->tpa_param; 384 memset(tpa_param, 0, sizeof(*tpa_param)); 385 386 tpa_param->max_buff_num = p_params->max_buffers_per_cqe; 387 388 switch (p_params->tpa_mode) { 389 case QED_TPA_MODE_GRO: 390 min_size = p_params->mtu / 2; 391 392 tpa_param->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 393 tpa_param->tpa_max_size = cpu_to_le16(U16_MAX); 394 tpa_param->tpa_min_size_to_cont = cpu_to_le16(min_size); 395 tpa_param->tpa_min_size_to_start = cpu_to_le16(min_size); 396 tpa_param->tpa_ipv4_en_flg = 1; 397 tpa_param->tpa_ipv6_en_flg = 1; 398 tpa_param->tpa_pkt_split_flg = 1; 399 tpa_param->tpa_gro_consistent_flg = 1; 400 break; 401 default: 402 break; 403 } 404 405 p_ramrod->tx_switching_en = p_params->tx_switching; 406 407 p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac; 408 p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype; 409 410 /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ 411 p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev, 412 p_params->concrete_fid); 413 414 return qed_spq_post(p_hwfn, p_ent, NULL); 415 } 416 417 static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, 418 struct qed_sp_vport_start_params *p_params) 419 { 420 if (IS_VF(p_hwfn->cdev)) { 421 return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id, 422 p_params->mtu, 423 p_params->remove_inner_vlan, 424 p_params->tpa_mode, 425 p_params->max_buffers_per_cqe, 426 p_params->only_untagged); 427 } 428 429 return qed_sp_eth_vport_start(p_hwfn, p_params); 430 } 431 432 static int 433 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn, 434 struct vport_update_ramrod_data *p_ramrod, 435 struct qed_rss_params *p_rss) 436 { 437 struct eth_vport_rss_config *p_config; 438 u16 capabilities = 0; 439 int i, table_size; 440 int rc = 0; 441 442 if (!p_rss) { 443 p_ramrod->common.update_rss_flg = 0; 444 return rc; 445 } 446 p_config = &p_ramrod->rss_config; 447 448 BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM); 449 450 rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id); 451 if (rc) 452 return rc; 453 454 p_ramrod->common.update_rss_flg = p_rss->update_rss_config; 455 p_config->update_rss_capabilities = p_rss->update_rss_capabilities; 456 p_config->update_rss_ind_table = p_rss->update_rss_ind_table; 457 p_config->update_rss_key = p_rss->update_rss_key; 458 459 p_config->rss_mode = p_rss->rss_enable ? 460 ETH_VPORT_RSS_MODE_REGULAR : 461 ETH_VPORT_RSS_MODE_DISABLED; 462 463 SET_FIELD(capabilities, 464 ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, 465 !!(p_rss->rss_caps & QED_RSS_IPV4)); 466 SET_FIELD(capabilities, 467 ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY, 468 !!(p_rss->rss_caps & QED_RSS_IPV6)); 469 SET_FIELD(capabilities, 470 ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, 471 !!(p_rss->rss_caps & QED_RSS_IPV4_TCP)); 472 SET_FIELD(capabilities, 473 ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY, 474 !!(p_rss->rss_caps & QED_RSS_IPV6_TCP)); 475 SET_FIELD(capabilities, 476 ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY, 477 !!(p_rss->rss_caps & QED_RSS_IPV4_UDP)); 478 SET_FIELD(capabilities, 479 ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY, 480 !!(p_rss->rss_caps & QED_RSS_IPV6_UDP)); 481 p_config->tbl_size = p_rss->rss_table_size_log; 482 483 p_config->capabilities = cpu_to_le16(capabilities); 484 485 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, 486 "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n", 487 p_ramrod->common.update_rss_flg, 488 p_config->rss_mode, 489 p_config->update_rss_capabilities, 490 p_config->capabilities, 491 p_config->update_rss_ind_table, p_config->update_rss_key); 492 493 table_size = min_t(int, QED_RSS_IND_TABLE_SIZE, 494 1 << p_config->tbl_size); 495 for (i = 0; i < table_size; i++) { 496 struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i]; 497 498 if (!p_queue) 499 return -EINVAL; 500 501 p_config->indirection_table[i] = 502 cpu_to_le16(p_queue->abs.queue_id); 503 } 504 505 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, 506 "Configured RSS indirection table [%d entries]:\n", 507 table_size); 508 for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) { 509 DP_VERBOSE(p_hwfn, 510 NETIF_MSG_IFUP, 511 "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n", 512 le16_to_cpu(p_config->indirection_table[i]), 513 le16_to_cpu(p_config->indirection_table[i + 1]), 514 le16_to_cpu(p_config->indirection_table[i + 2]), 515 le16_to_cpu(p_config->indirection_table[i + 3]), 516 le16_to_cpu(p_config->indirection_table[i + 4]), 517 le16_to_cpu(p_config->indirection_table[i + 5]), 518 le16_to_cpu(p_config->indirection_table[i + 6]), 519 le16_to_cpu(p_config->indirection_table[i + 7]), 520 le16_to_cpu(p_config->indirection_table[i + 8]), 521 le16_to_cpu(p_config->indirection_table[i + 9]), 522 le16_to_cpu(p_config->indirection_table[i + 10]), 523 le16_to_cpu(p_config->indirection_table[i + 11]), 524 le16_to_cpu(p_config->indirection_table[i + 12]), 525 le16_to_cpu(p_config->indirection_table[i + 13]), 526 le16_to_cpu(p_config->indirection_table[i + 14]), 527 le16_to_cpu(p_config->indirection_table[i + 15])); 528 } 529 530 for (i = 0; i < 10; i++) 531 p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]); 532 533 return rc; 534 } 535 536 static void 537 qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, 538 struct vport_update_ramrod_data *p_ramrod, 539 struct qed_filter_accept_flags accept_flags) 540 { 541 p_ramrod->common.update_rx_mode_flg = 542 accept_flags.update_rx_mode_config; 543 544 p_ramrod->common.update_tx_mode_flg = 545 accept_flags.update_tx_mode_config; 546 547 /* Set Rx mode accept flags */ 548 if (p_ramrod->common.update_rx_mode_flg) { 549 u8 accept_filter = accept_flags.rx_accept_filter; 550 u16 state = 0; 551 552 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 553 !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) || 554 !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); 555 556 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED, 557 !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)); 558 559 SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 560 !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) || 561 !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); 562 563 SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL, 564 (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && 565 !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); 566 567 SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, 568 !!(accept_filter & QED_ACCEPT_BCAST)); 569 570 SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI, 571 !!(accept_filter & QED_ACCEPT_ANY_VNI)); 572 573 p_ramrod->rx_mode.state = cpu_to_le16(state); 574 DP_VERBOSE(p_hwfn, QED_MSG_SP, 575 "p_ramrod->rx_mode.state = 0x%x\n", state); 576 } 577 578 /* Set Tx mode accept flags */ 579 if (p_ramrod->common.update_tx_mode_flg) { 580 u8 accept_filter = accept_flags.tx_accept_filter; 581 u16 state = 0; 582 583 SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL, 584 !!(accept_filter & QED_ACCEPT_NONE)); 585 586 SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL, 587 !!(accept_filter & QED_ACCEPT_NONE)); 588 589 SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL, 590 (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && 591 !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); 592 593 SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL, 594 (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) && 595 !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); 596 597 SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, 598 !!(accept_filter & QED_ACCEPT_BCAST)); 599 600 p_ramrod->tx_mode.state = cpu_to_le16(state); 601 DP_VERBOSE(p_hwfn, QED_MSG_SP, 602 "p_ramrod->tx_mode.state = 0x%x\n", state); 603 } 604 } 605 606 static void 607 qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn, 608 struct vport_update_ramrod_data *p_ramrod, 609 const struct qed_sge_tpa_params *param) 610 { 611 struct eth_vport_tpa_param *tpa; 612 613 if (!param) { 614 p_ramrod->common.update_tpa_param_flg = 0; 615 p_ramrod->common.update_tpa_en_flg = 0; 616 p_ramrod->common.update_tpa_param_flg = 0; 617 return; 618 } 619 620 p_ramrod->common.update_tpa_en_flg = param->update_tpa_en_flg; 621 tpa = &p_ramrod->tpa_param; 622 tpa->tpa_ipv4_en_flg = param->tpa_ipv4_en_flg; 623 tpa->tpa_ipv6_en_flg = param->tpa_ipv6_en_flg; 624 tpa->tpa_ipv4_tunn_en_flg = param->tpa_ipv4_tunn_en_flg; 625 tpa->tpa_ipv6_tunn_en_flg = param->tpa_ipv6_tunn_en_flg; 626 627 p_ramrod->common.update_tpa_param_flg = param->update_tpa_param_flg; 628 tpa->max_buff_num = param->max_buffers_per_cqe; 629 tpa->tpa_pkt_split_flg = param->tpa_pkt_split_flg; 630 tpa->tpa_hdr_data_split_flg = param->tpa_hdr_data_split_flg; 631 tpa->tpa_gro_consistent_flg = param->tpa_gro_consistent_flg; 632 tpa->tpa_max_aggs_num = param->tpa_max_aggs_num; 633 tpa->tpa_max_size = cpu_to_le16(param->tpa_max_size); 634 tpa->tpa_min_size_to_start = cpu_to_le16(param->tpa_min_size_to_start); 635 tpa->tpa_min_size_to_cont = cpu_to_le16(param->tpa_min_size_to_cont); 636 } 637 638 static void 639 qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, 640 struct vport_update_ramrod_data *p_ramrod, 641 struct qed_sp_vport_update_params *p_params) 642 { 643 int i; 644 645 memset(&p_ramrod->approx_mcast.bins, 0, 646 sizeof(p_ramrod->approx_mcast.bins)); 647 648 if (!p_params->update_approx_mcast_flg) 649 return; 650 651 p_ramrod->common.update_approx_mcast_flg = 1; 652 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { 653 u32 *p_bins = p_params->bins; 654 655 p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]); 656 } 657 } 658 659 int qed_sp_vport_update(struct qed_hwfn *p_hwfn, 660 struct qed_sp_vport_update_params *p_params, 661 enum spq_mode comp_mode, 662 struct qed_spq_comp_cb *p_comp_data) 663 { 664 struct qed_rss_params *p_rss_params = p_params->rss_params; 665 struct vport_update_ramrod_data_cmn *p_cmn; 666 struct qed_sp_init_data init_data; 667 struct vport_update_ramrod_data *p_ramrod = NULL; 668 struct qed_spq_entry *p_ent = NULL; 669 u8 abs_vport_id = 0, val; 670 int rc = -EINVAL; 671 672 if (IS_VF(p_hwfn->cdev)) { 673 rc = qed_vf_pf_vport_update(p_hwfn, p_params); 674 return rc; 675 } 676 677 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 678 if (rc) 679 return rc; 680 681 memset(&init_data, 0, sizeof(init_data)); 682 init_data.cid = qed_spq_get_cid(p_hwfn); 683 init_data.opaque_fid = p_params->opaque_fid; 684 init_data.comp_mode = comp_mode; 685 init_data.p_comp_data = p_comp_data; 686 687 rc = qed_sp_init_request(p_hwfn, &p_ent, 688 ETH_RAMROD_VPORT_UPDATE, 689 PROTOCOLID_ETH, &init_data); 690 if (rc) 691 return rc; 692 693 /* Copy input params to ramrod according to FW struct */ 694 p_ramrod = &p_ent->ramrod.vport_update; 695 p_cmn = &p_ramrod->common; 696 697 p_cmn->vport_id = abs_vport_id; 698 p_cmn->rx_active_flg = p_params->vport_active_rx_flg; 699 p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg; 700 p_cmn->tx_active_flg = p_params->vport_active_tx_flg; 701 p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg; 702 p_cmn->accept_any_vlan = p_params->accept_any_vlan; 703 val = p_params->update_accept_any_vlan_flg; 704 p_cmn->update_accept_any_vlan_flg = val; 705 706 p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg; 707 val = p_params->update_inner_vlan_removal_flg; 708 p_cmn->update_inner_vlan_removal_en_flg = val; 709 710 p_cmn->default_vlan_en = p_params->default_vlan_enable_flg; 711 val = p_params->update_default_vlan_enable_flg; 712 p_cmn->update_default_vlan_en_flg = val; 713 714 p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan); 715 p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg; 716 717 p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg; 718 719 p_ramrod->common.tx_switching_en = p_params->tx_switching_flg; 720 p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg; 721 722 p_cmn->anti_spoofing_en = p_params->anti_spoofing_en; 723 val = p_params->update_anti_spoofing_en_flg; 724 p_ramrod->common.update_anti_spoofing_en_flg = val; 725 726 rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); 727 if (rc) { 728 qed_sp_destroy_request(p_hwfn, p_ent); 729 return rc; 730 } 731 732 if (p_params->update_ctl_frame_check) { 733 p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en; 734 p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en; 735 } 736 737 /* Update mcast bins for VFs, PF doesn't use this functionality */ 738 qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); 739 740 qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags); 741 qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params); 742 return qed_spq_post(p_hwfn, p_ent, NULL); 743 } 744 745 int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id) 746 { 747 struct vport_stop_ramrod_data *p_ramrod; 748 struct qed_sp_init_data init_data; 749 struct qed_spq_entry *p_ent; 750 u8 abs_vport_id = 0; 751 int rc; 752 753 if (IS_VF(p_hwfn->cdev)) 754 return qed_vf_pf_vport_stop(p_hwfn); 755 756 rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); 757 if (rc) 758 return rc; 759 760 memset(&init_data, 0, sizeof(init_data)); 761 init_data.cid = qed_spq_get_cid(p_hwfn); 762 init_data.opaque_fid = opaque_fid; 763 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 764 765 rc = qed_sp_init_request(p_hwfn, &p_ent, 766 ETH_RAMROD_VPORT_STOP, 767 PROTOCOLID_ETH, &init_data); 768 if (rc) 769 return rc; 770 771 p_ramrod = &p_ent->ramrod.vport_stop; 772 p_ramrod->vport_id = abs_vport_id; 773 774 return qed_spq_post(p_hwfn, p_ent, NULL); 775 } 776 777 static int 778 qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn, 779 struct qed_filter_accept_flags *p_accept_flags) 780 { 781 struct qed_sp_vport_update_params s_params; 782 783 memset(&s_params, 0, sizeof(s_params)); 784 memcpy(&s_params.accept_flags, p_accept_flags, 785 sizeof(struct qed_filter_accept_flags)); 786 787 return qed_vf_pf_vport_update(p_hwfn, &s_params); 788 } 789 790 static int qed_filter_accept_cmd(struct qed_dev *cdev, 791 u8 vport, 792 struct qed_filter_accept_flags accept_flags, 793 u8 update_accept_any_vlan, 794 u8 accept_any_vlan, 795 enum spq_mode comp_mode, 796 struct qed_spq_comp_cb *p_comp_data) 797 { 798 struct qed_sp_vport_update_params vport_update_params; 799 int i, rc; 800 801 /* Prepare and send the vport rx_mode change */ 802 memset(&vport_update_params, 0, sizeof(vport_update_params)); 803 vport_update_params.vport_id = vport; 804 vport_update_params.accept_flags = accept_flags; 805 vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan; 806 vport_update_params.accept_any_vlan = accept_any_vlan; 807 808 for_each_hwfn(cdev, i) { 809 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 810 811 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 812 813 if (IS_VF(cdev)) { 814 rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags); 815 if (rc) 816 return rc; 817 continue; 818 } 819 820 rc = qed_sp_vport_update(p_hwfn, &vport_update_params, 821 comp_mode, p_comp_data); 822 if (rc) { 823 DP_ERR(cdev, "Update rx_mode failed %d\n", rc); 824 return rc; 825 } 826 827 DP_VERBOSE(p_hwfn, QED_MSG_SP, 828 "Accept filter configured, flags = [Rx]%x [Tx]%x\n", 829 accept_flags.rx_accept_filter, 830 accept_flags.tx_accept_filter); 831 if (update_accept_any_vlan) 832 DP_VERBOSE(p_hwfn, QED_MSG_SP, 833 "accept_any_vlan=%d configured\n", 834 accept_any_vlan); 835 } 836 837 return 0; 838 } 839 840 int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, 841 struct qed_queue_cid *p_cid, 842 u16 bd_max_bytes, 843 dma_addr_t bd_chain_phys_addr, 844 dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size) 845 { 846 struct rx_queue_start_ramrod_data *p_ramrod = NULL; 847 struct qed_spq_entry *p_ent = NULL; 848 struct qed_sp_init_data init_data; 849 int rc = -EINVAL; 850 851 DP_VERBOSE(p_hwfn, QED_MSG_SP, 852 "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n", 853 p_cid->opaque_fid, p_cid->cid, 854 p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id); 855 856 /* Get SPQ entry */ 857 memset(&init_data, 0, sizeof(init_data)); 858 init_data.cid = p_cid->cid; 859 init_data.opaque_fid = p_cid->opaque_fid; 860 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 861 862 rc = qed_sp_init_request(p_hwfn, &p_ent, 863 ETH_RAMROD_RX_QUEUE_START, 864 PROTOCOLID_ETH, &init_data); 865 if (rc) 866 return rc; 867 868 p_ramrod = &p_ent->ramrod.rx_queue_start; 869 870 p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id); 871 p_ramrod->sb_index = p_cid->sb_idx; 872 p_ramrod->vport_id = p_cid->abs.vport_id; 873 p_ramrod->stats_counter_id = p_cid->abs.stats_id; 874 p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); 875 p_ramrod->complete_cqe_flg = 0; 876 p_ramrod->complete_event_flg = 1; 877 878 p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes); 879 DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr); 880 881 p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); 882 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); 883 884 if (p_cid->vfid != QED_QUEUE_CID_SELF) { 885 bool b_legacy_vf = !!(p_cid->vf_legacy & 886 QED_QCID_LEGACY_VF_RX_PROD); 887 888 p_ramrod->vf_rx_prod_index = p_cid->vf_qid; 889 DP_VERBOSE(p_hwfn, QED_MSG_SP, 890 "Queue%s is meant for VF rxq[%02x]\n", 891 b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid); 892 p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf; 893 } 894 895 return qed_spq_post(p_hwfn, p_ent, NULL); 896 } 897 898 static int 899 qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn, 900 struct qed_queue_cid *p_cid, 901 u16 bd_max_bytes, 902 dma_addr_t bd_chain_phys_addr, 903 dma_addr_t cqe_pbl_addr, 904 u16 cqe_pbl_size, void __iomem **pp_prod) 905 { 906 u32 init_prod_val = 0; 907 908 *pp_prod = p_hwfn->regview + 909 GTT_BAR0_MAP_REG_MSDM_RAM + 910 MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id); 911 912 /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ 913 __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), 914 (u32 *)(&init_prod_val)); 915 916 return qed_eth_rxq_start_ramrod(p_hwfn, p_cid, 917 bd_max_bytes, 918 bd_chain_phys_addr, 919 cqe_pbl_addr, cqe_pbl_size); 920 } 921 922 static int 923 qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn, 924 u16 opaque_fid, 925 struct qed_queue_start_common_params *p_params, 926 u16 bd_max_bytes, 927 dma_addr_t bd_chain_phys_addr, 928 dma_addr_t cqe_pbl_addr, 929 u16 cqe_pbl_size, 930 struct qed_rxq_start_ret_params *p_ret_params) 931 { 932 struct qed_queue_cid *p_cid; 933 int rc; 934 935 /* Allocate a CID for the queue */ 936 p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params); 937 if (!p_cid) 938 return -ENOMEM; 939 940 if (IS_PF(p_hwfn->cdev)) { 941 rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid, 942 bd_max_bytes, 943 bd_chain_phys_addr, 944 cqe_pbl_addr, cqe_pbl_size, 945 &p_ret_params->p_prod); 946 } else { 947 rc = qed_vf_pf_rxq_start(p_hwfn, p_cid, 948 bd_max_bytes, 949 bd_chain_phys_addr, 950 cqe_pbl_addr, 951 cqe_pbl_size, &p_ret_params->p_prod); 952 } 953 954 /* Provide the caller with a reference to as handler */ 955 if (rc) 956 qed_eth_queue_cid_release(p_hwfn, p_cid); 957 else 958 p_ret_params->p_handle = (void *)p_cid; 959 960 return rc; 961 } 962 963 int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, 964 void **pp_rxq_handles, 965 u8 num_rxqs, 966 u8 complete_cqe_flg, 967 u8 complete_event_flg, 968 enum spq_mode comp_mode, 969 struct qed_spq_comp_cb *p_comp_data) 970 { 971 struct rx_queue_update_ramrod_data *p_ramrod = NULL; 972 struct qed_spq_entry *p_ent = NULL; 973 struct qed_sp_init_data init_data; 974 struct qed_queue_cid *p_cid; 975 int rc = -EINVAL; 976 u8 i; 977 978 memset(&init_data, 0, sizeof(init_data)); 979 init_data.comp_mode = comp_mode; 980 init_data.p_comp_data = p_comp_data; 981 982 for (i = 0; i < num_rxqs; i++) { 983 p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i]; 984 985 /* Get SPQ entry */ 986 init_data.cid = p_cid->cid; 987 init_data.opaque_fid = p_cid->opaque_fid; 988 989 rc = qed_sp_init_request(p_hwfn, &p_ent, 990 ETH_RAMROD_RX_QUEUE_UPDATE, 991 PROTOCOLID_ETH, &init_data); 992 if (rc) 993 return rc; 994 995 p_ramrod = &p_ent->ramrod.rx_queue_update; 996 p_ramrod->vport_id = p_cid->abs.vport_id; 997 998 p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); 999 p_ramrod->complete_cqe_flg = complete_cqe_flg; 1000 p_ramrod->complete_event_flg = complete_event_flg; 1001 1002 rc = qed_spq_post(p_hwfn, p_ent, NULL); 1003 if (rc) 1004 return rc; 1005 } 1006 1007 return rc; 1008 } 1009 1010 static int 1011 qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn, 1012 struct qed_queue_cid *p_cid, 1013 bool b_eq_completion_only, bool b_cqe_completion) 1014 { 1015 struct rx_queue_stop_ramrod_data *p_ramrod = NULL; 1016 struct qed_spq_entry *p_ent = NULL; 1017 struct qed_sp_init_data init_data; 1018 int rc; 1019 1020 memset(&init_data, 0, sizeof(init_data)); 1021 init_data.cid = p_cid->cid; 1022 init_data.opaque_fid = p_cid->opaque_fid; 1023 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1024 1025 rc = qed_sp_init_request(p_hwfn, &p_ent, 1026 ETH_RAMROD_RX_QUEUE_STOP, 1027 PROTOCOLID_ETH, &init_data); 1028 if (rc) 1029 return rc; 1030 1031 p_ramrod = &p_ent->ramrod.rx_queue_stop; 1032 p_ramrod->vport_id = p_cid->abs.vport_id; 1033 p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); 1034 1035 /* Cleaning the queue requires the completion to arrive there. 1036 * In addition, VFs require the answer to come as eqe to PF. 1037 */ 1038 p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) && 1039 !b_eq_completion_only) || 1040 b_cqe_completion; 1041 p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) || 1042 b_eq_completion_only; 1043 1044 return qed_spq_post(p_hwfn, p_ent, NULL); 1045 } 1046 1047 int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, 1048 void *p_rxq, 1049 bool eq_completion_only, bool cqe_completion) 1050 { 1051 struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq; 1052 int rc = -EINVAL; 1053 1054 if (IS_PF(p_hwfn->cdev)) 1055 rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid, 1056 eq_completion_only, 1057 cqe_completion); 1058 else 1059 rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion); 1060 1061 if (!rc) 1062 qed_eth_queue_cid_release(p_hwfn, p_cid); 1063 return rc; 1064 } 1065 1066 int 1067 qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, 1068 struct qed_queue_cid *p_cid, 1069 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id) 1070 { 1071 struct tx_queue_start_ramrod_data *p_ramrod = NULL; 1072 struct qed_spq_entry *p_ent = NULL; 1073 struct qed_sp_init_data init_data; 1074 int rc = -EINVAL; 1075 1076 /* Get SPQ entry */ 1077 memset(&init_data, 0, sizeof(init_data)); 1078 init_data.cid = p_cid->cid; 1079 init_data.opaque_fid = p_cid->opaque_fid; 1080 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1081 1082 rc = qed_sp_init_request(p_hwfn, &p_ent, 1083 ETH_RAMROD_TX_QUEUE_START, 1084 PROTOCOLID_ETH, &init_data); 1085 if (rc) 1086 return rc; 1087 1088 p_ramrod = &p_ent->ramrod.tx_queue_start; 1089 p_ramrod->vport_id = p_cid->abs.vport_id; 1090 1091 p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id); 1092 p_ramrod->sb_index = p_cid->sb_idx; 1093 p_ramrod->stats_counter_id = p_cid->abs.stats_id; 1094 1095 p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id); 1096 p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id); 1097 1098 p_ramrod->pbl_size = cpu_to_le16(pbl_size); 1099 DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); 1100 1101 p_ramrod->qm_pq_id = cpu_to_le16(pq_id); 1102 1103 return qed_spq_post(p_hwfn, p_ent, NULL); 1104 } 1105 1106 static int 1107 qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn, 1108 struct qed_queue_cid *p_cid, 1109 u8 tc, 1110 dma_addr_t pbl_addr, 1111 u16 pbl_size, void __iomem **pp_doorbell) 1112 { 1113 int rc; 1114 1115 1116 rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, 1117 pbl_addr, pbl_size, 1118 qed_get_cm_pq_idx_mcos(p_hwfn, tc)); 1119 if (rc) 1120 return rc; 1121 1122 /* Provide the caller with the necessary return values */ 1123 *pp_doorbell = p_hwfn->doorbells + 1124 qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY); 1125 1126 return 0; 1127 } 1128 1129 static int 1130 qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn, 1131 u16 opaque_fid, 1132 struct qed_queue_start_common_params *p_params, 1133 u8 tc, 1134 dma_addr_t pbl_addr, 1135 u16 pbl_size, 1136 struct qed_txq_start_ret_params *p_ret_params) 1137 { 1138 struct qed_queue_cid *p_cid; 1139 int rc; 1140 1141 p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params); 1142 if (!p_cid) 1143 return -EINVAL; 1144 1145 if (IS_PF(p_hwfn->cdev)) 1146 rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc, 1147 pbl_addr, pbl_size, 1148 &p_ret_params->p_doorbell); 1149 else 1150 rc = qed_vf_pf_txq_start(p_hwfn, p_cid, 1151 pbl_addr, pbl_size, 1152 &p_ret_params->p_doorbell); 1153 1154 if (rc) 1155 qed_eth_queue_cid_release(p_hwfn, p_cid); 1156 else 1157 p_ret_params->p_handle = (void *)p_cid; 1158 1159 return rc; 1160 } 1161 1162 static int 1163 qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) 1164 { 1165 struct qed_spq_entry *p_ent = NULL; 1166 struct qed_sp_init_data init_data; 1167 int rc; 1168 1169 memset(&init_data, 0, sizeof(init_data)); 1170 init_data.cid = p_cid->cid; 1171 init_data.opaque_fid = p_cid->opaque_fid; 1172 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1173 1174 rc = qed_sp_init_request(p_hwfn, &p_ent, 1175 ETH_RAMROD_TX_QUEUE_STOP, 1176 PROTOCOLID_ETH, &init_data); 1177 if (rc) 1178 return rc; 1179 1180 return qed_spq_post(p_hwfn, p_ent, NULL); 1181 } 1182 1183 int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle) 1184 { 1185 struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle; 1186 int rc; 1187 1188 if (IS_PF(p_hwfn->cdev)) 1189 rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid); 1190 else 1191 rc = qed_vf_pf_txq_stop(p_hwfn, p_cid); 1192 1193 if (!rc) 1194 qed_eth_queue_cid_release(p_hwfn, p_cid); 1195 return rc; 1196 } 1197 1198 static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode) 1199 { 1200 enum eth_filter_action action = MAX_ETH_FILTER_ACTION; 1201 1202 switch (opcode) { 1203 case QED_FILTER_ADD: 1204 action = ETH_FILTER_ACTION_ADD; 1205 break; 1206 case QED_FILTER_REMOVE: 1207 action = ETH_FILTER_ACTION_REMOVE; 1208 break; 1209 case QED_FILTER_FLUSH: 1210 action = ETH_FILTER_ACTION_REMOVE_ALL; 1211 break; 1212 default: 1213 action = MAX_ETH_FILTER_ACTION; 1214 } 1215 1216 return action; 1217 } 1218 1219 static int 1220 qed_filter_ucast_common(struct qed_hwfn *p_hwfn, 1221 u16 opaque_fid, 1222 struct qed_filter_ucast *p_filter_cmd, 1223 struct vport_filter_update_ramrod_data **pp_ramrod, 1224 struct qed_spq_entry **pp_ent, 1225 enum spq_mode comp_mode, 1226 struct qed_spq_comp_cb *p_comp_data) 1227 { 1228 u8 vport_to_add_to = 0, vport_to_remove_from = 0; 1229 struct vport_filter_update_ramrod_data *p_ramrod; 1230 struct eth_filter_cmd *p_first_filter; 1231 struct eth_filter_cmd *p_second_filter; 1232 struct qed_sp_init_data init_data; 1233 enum eth_filter_action action; 1234 int rc; 1235 1236 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, 1237 &vport_to_remove_from); 1238 if (rc) 1239 return rc; 1240 1241 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, 1242 &vport_to_add_to); 1243 if (rc) 1244 return rc; 1245 1246 /* Get SPQ entry */ 1247 memset(&init_data, 0, sizeof(init_data)); 1248 init_data.cid = qed_spq_get_cid(p_hwfn); 1249 init_data.opaque_fid = opaque_fid; 1250 init_data.comp_mode = comp_mode; 1251 init_data.p_comp_data = p_comp_data; 1252 1253 rc = qed_sp_init_request(p_hwfn, pp_ent, 1254 ETH_RAMROD_FILTERS_UPDATE, 1255 PROTOCOLID_ETH, &init_data); 1256 if (rc) 1257 return rc; 1258 1259 *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update; 1260 p_ramrod = *pp_ramrod; 1261 p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0; 1262 p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0; 1263 1264 switch (p_filter_cmd->opcode) { 1265 case QED_FILTER_REPLACE: 1266 case QED_FILTER_MOVE: 1267 p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break; 1268 default: 1269 p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break; 1270 } 1271 1272 p_first_filter = &p_ramrod->filter_cmds[0]; 1273 p_second_filter = &p_ramrod->filter_cmds[1]; 1274 1275 switch (p_filter_cmd->type) { 1276 case QED_FILTER_MAC: 1277 p_first_filter->type = ETH_FILTER_TYPE_MAC; break; 1278 case QED_FILTER_VLAN: 1279 p_first_filter->type = ETH_FILTER_TYPE_VLAN; break; 1280 case QED_FILTER_MAC_VLAN: 1281 p_first_filter->type = ETH_FILTER_TYPE_PAIR; break; 1282 case QED_FILTER_INNER_MAC: 1283 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break; 1284 case QED_FILTER_INNER_VLAN: 1285 p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break; 1286 case QED_FILTER_INNER_PAIR: 1287 p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break; 1288 case QED_FILTER_INNER_MAC_VNI_PAIR: 1289 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR; 1290 break; 1291 case QED_FILTER_MAC_VNI_PAIR: 1292 p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break; 1293 case QED_FILTER_VNI: 1294 p_first_filter->type = ETH_FILTER_TYPE_VNI; break; 1295 } 1296 1297 if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) || 1298 (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || 1299 (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) || 1300 (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) || 1301 (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || 1302 (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) { 1303 qed_set_fw_mac_addr(&p_first_filter->mac_msb, 1304 &p_first_filter->mac_mid, 1305 &p_first_filter->mac_lsb, 1306 (u8 *)p_filter_cmd->mac); 1307 } 1308 1309 if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) || 1310 (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || 1311 (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) || 1312 (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR)) 1313 p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan); 1314 1315 if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || 1316 (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) || 1317 (p_first_filter->type == ETH_FILTER_TYPE_VNI)) 1318 p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni); 1319 1320 if (p_filter_cmd->opcode == QED_FILTER_MOVE) { 1321 p_second_filter->type = p_first_filter->type; 1322 p_second_filter->mac_msb = p_first_filter->mac_msb; 1323 p_second_filter->mac_mid = p_first_filter->mac_mid; 1324 p_second_filter->mac_lsb = p_first_filter->mac_lsb; 1325 p_second_filter->vlan_id = p_first_filter->vlan_id; 1326 p_second_filter->vni = p_first_filter->vni; 1327 1328 p_first_filter->action = ETH_FILTER_ACTION_REMOVE; 1329 1330 p_first_filter->vport_id = vport_to_remove_from; 1331 1332 p_second_filter->action = ETH_FILTER_ACTION_ADD; 1333 p_second_filter->vport_id = vport_to_add_to; 1334 } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) { 1335 p_first_filter->vport_id = vport_to_add_to; 1336 memcpy(p_second_filter, p_first_filter, 1337 sizeof(*p_second_filter)); 1338 p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL; 1339 p_second_filter->action = ETH_FILTER_ACTION_ADD; 1340 } else { 1341 action = qed_filter_action(p_filter_cmd->opcode); 1342 1343 if (action == MAX_ETH_FILTER_ACTION) { 1344 DP_NOTICE(p_hwfn, 1345 "%d is not supported yet\n", 1346 p_filter_cmd->opcode); 1347 qed_sp_destroy_request(p_hwfn, *pp_ent); 1348 return -EINVAL; 1349 } 1350 1351 p_first_filter->action = action; 1352 p_first_filter->vport_id = (p_filter_cmd->opcode == 1353 QED_FILTER_REMOVE) ? 1354 vport_to_remove_from : 1355 vport_to_add_to; 1356 } 1357 1358 return 0; 1359 } 1360 1361 int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, 1362 u16 opaque_fid, 1363 struct qed_filter_ucast *p_filter_cmd, 1364 enum spq_mode comp_mode, 1365 struct qed_spq_comp_cb *p_comp_data) 1366 { 1367 struct vport_filter_update_ramrod_data *p_ramrod = NULL; 1368 struct qed_spq_entry *p_ent = NULL; 1369 struct eth_filter_cmd_header *p_header; 1370 int rc; 1371 1372 rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd, 1373 &p_ramrod, &p_ent, 1374 comp_mode, p_comp_data); 1375 if (rc) { 1376 DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc); 1377 return rc; 1378 } 1379 p_header = &p_ramrod->filter_cmd_hdr; 1380 p_header->assert_on_error = p_filter_cmd->assert_on_error; 1381 1382 rc = qed_spq_post(p_hwfn, p_ent, NULL); 1383 if (rc) { 1384 DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc); 1385 return rc; 1386 } 1387 1388 DP_VERBOSE(p_hwfn, QED_MSG_SP, 1389 "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n", 1390 (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" : 1391 ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ? 1392 "REMOVE" : 1393 ((p_filter_cmd->opcode == QED_FILTER_MOVE) ? 1394 "MOVE" : "REPLACE")), 1395 (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" : 1396 ((p_filter_cmd->type == QED_FILTER_VLAN) ? 1397 "VLAN" : "MAC & VLAN"), 1398 p_ramrod->filter_cmd_hdr.cmd_cnt, 1399 p_filter_cmd->is_rx_filter, 1400 p_filter_cmd->is_tx_filter); 1401 DP_VERBOSE(p_hwfn, QED_MSG_SP, 1402 "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n", 1403 p_filter_cmd->vport_to_add_to, 1404 p_filter_cmd->vport_to_remove_from, 1405 p_filter_cmd->mac[0], 1406 p_filter_cmd->mac[1], 1407 p_filter_cmd->mac[2], 1408 p_filter_cmd->mac[3], 1409 p_filter_cmd->mac[4], 1410 p_filter_cmd->mac[5], 1411 p_filter_cmd->vlan); 1412 1413 return 0; 1414 } 1415 1416 /******************************************************************************* 1417 * Description: 1418 * Calculates crc 32 on a buffer 1419 * Note: crc32_length MUST be aligned to 8 1420 * Return: 1421 ******************************************************************************/ 1422 static u32 qed_calc_crc32c(u8 *crc32_packet, 1423 u32 crc32_length, u32 crc32_seed, u8 complement) 1424 { 1425 u32 byte = 0, bit = 0, crc32_result = crc32_seed; 1426 u8 msb = 0, current_byte = 0; 1427 1428 if ((!crc32_packet) || 1429 (crc32_length == 0) || 1430 ((crc32_length % 8) != 0)) 1431 return crc32_result; 1432 for (byte = 0; byte < crc32_length; byte++) { 1433 current_byte = crc32_packet[byte]; 1434 for (bit = 0; bit < 8; bit++) { 1435 msb = (u8)(crc32_result >> 31); 1436 crc32_result = crc32_result << 1; 1437 if (msb != (0x1 & (current_byte >> bit))) { 1438 crc32_result = crc32_result ^ CRC32_POLY; 1439 crc32_result |= 1; /*crc32_result[0] = 1;*/ 1440 } 1441 } 1442 } 1443 return crc32_result; 1444 } 1445 1446 static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len) 1447 { 1448 u32 packet_buf[2] = { 0 }; 1449 1450 memcpy((u8 *)(&packet_buf[0]), &mac[0], 6); 1451 return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0); 1452 } 1453 1454 u8 qed_mcast_bin_from_mac(u8 *mac) 1455 { 1456 u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, 1457 mac, ETH_ALEN); 1458 1459 return crc & 0xff; 1460 } 1461 1462 static int 1463 qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, 1464 u16 opaque_fid, 1465 struct qed_filter_mcast *p_filter_cmd, 1466 enum spq_mode comp_mode, 1467 struct qed_spq_comp_cb *p_comp_data) 1468 { 1469 struct vport_update_ramrod_data *p_ramrod = NULL; 1470 u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; 1471 struct qed_spq_entry *p_ent = NULL; 1472 struct qed_sp_init_data init_data; 1473 u8 abs_vport_id = 0; 1474 int rc, i; 1475 1476 if (p_filter_cmd->opcode == QED_FILTER_ADD) 1477 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, 1478 &abs_vport_id); 1479 else 1480 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, 1481 &abs_vport_id); 1482 if (rc) 1483 return rc; 1484 1485 /* Get SPQ entry */ 1486 memset(&init_data, 0, sizeof(init_data)); 1487 init_data.cid = qed_spq_get_cid(p_hwfn); 1488 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1489 init_data.comp_mode = comp_mode; 1490 init_data.p_comp_data = p_comp_data; 1491 1492 rc = qed_sp_init_request(p_hwfn, &p_ent, 1493 ETH_RAMROD_VPORT_UPDATE, 1494 PROTOCOLID_ETH, &init_data); 1495 if (rc) { 1496 DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); 1497 return rc; 1498 } 1499 1500 p_ramrod = &p_ent->ramrod.vport_update; 1501 p_ramrod->common.update_approx_mcast_flg = 1; 1502 1503 /* explicitly clear out the entire vector */ 1504 memset(&p_ramrod->approx_mcast.bins, 0, 1505 sizeof(p_ramrod->approx_mcast.bins)); 1506 memset(bins, 0, sizeof(bins)); 1507 /* filter ADD op is explicit set op and it removes 1508 * any existing filters for the vport 1509 */ 1510 if (p_filter_cmd->opcode == QED_FILTER_ADD) { 1511 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { 1512 u32 bit, nbits; 1513 1514 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); 1515 nbits = sizeof(u32) * BITS_PER_BYTE; 1516 bins[bit / nbits] |= 1 << (bit % nbits); 1517 } 1518 1519 /* Convert to correct endianity */ 1520 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { 1521 struct vport_update_ramrod_mcast *p_ramrod_bins; 1522 1523 p_ramrod_bins = &p_ramrod->approx_mcast; 1524 p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]); 1525 } 1526 } 1527 1528 p_ramrod->common.vport_id = abs_vport_id; 1529 1530 return qed_spq_post(p_hwfn, p_ent, NULL); 1531 } 1532 1533 static int qed_filter_mcast_cmd(struct qed_dev *cdev, 1534 struct qed_filter_mcast *p_filter_cmd, 1535 enum spq_mode comp_mode, 1536 struct qed_spq_comp_cb *p_comp_data) 1537 { 1538 int rc = 0; 1539 int i; 1540 1541 /* only ADD and REMOVE operations are supported for multi-cast */ 1542 if ((p_filter_cmd->opcode != QED_FILTER_ADD && 1543 (p_filter_cmd->opcode != QED_FILTER_REMOVE)) || 1544 (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS)) 1545 return -EINVAL; 1546 1547 for_each_hwfn(cdev, i) { 1548 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1549 1550 u16 opaque_fid; 1551 1552 if (IS_VF(cdev)) { 1553 qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd); 1554 continue; 1555 } 1556 1557 opaque_fid = p_hwfn->hw_info.opaque_fid; 1558 1559 rc = qed_sp_eth_filter_mcast(p_hwfn, 1560 opaque_fid, 1561 p_filter_cmd, 1562 comp_mode, p_comp_data); 1563 } 1564 return rc; 1565 } 1566 1567 static int qed_filter_ucast_cmd(struct qed_dev *cdev, 1568 struct qed_filter_ucast *p_filter_cmd, 1569 enum spq_mode comp_mode, 1570 struct qed_spq_comp_cb *p_comp_data) 1571 { 1572 int rc = 0; 1573 int i; 1574 1575 for_each_hwfn(cdev, i) { 1576 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1577 u16 opaque_fid; 1578 1579 if (IS_VF(cdev)) { 1580 rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd); 1581 continue; 1582 } 1583 1584 opaque_fid = p_hwfn->hw_info.opaque_fid; 1585 1586 rc = qed_sp_eth_filter_ucast(p_hwfn, 1587 opaque_fid, 1588 p_filter_cmd, 1589 comp_mode, p_comp_data); 1590 if (rc) 1591 break; 1592 } 1593 1594 return rc; 1595 } 1596 1597 /* Statistics related code */ 1598 static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn, 1599 u32 *p_addr, 1600 u32 *p_len, u16 statistics_bin) 1601 { 1602 if (IS_PF(p_hwfn->cdev)) { 1603 *p_addr = BAR0_MAP_REG_PSDM_RAM + 1604 PSTORM_QUEUE_STAT_OFFSET(statistics_bin); 1605 *p_len = sizeof(struct eth_pstorm_per_queue_stat); 1606 } else { 1607 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; 1608 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1609 1610 *p_addr = p_resp->pfdev_info.stats_info.pstats.address; 1611 *p_len = p_resp->pfdev_info.stats_info.pstats.len; 1612 } 1613 } 1614 1615 static noinline_for_stack void 1616 __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 1617 struct qed_eth_stats *p_stats, u16 statistics_bin) 1618 { 1619 struct eth_pstorm_per_queue_stat pstats; 1620 u32 pstats_addr = 0, pstats_len = 0; 1621 1622 __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len, 1623 statistics_bin); 1624 1625 memset(&pstats, 0, sizeof(pstats)); 1626 qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len); 1627 1628 p_stats->common.tx_ucast_bytes += 1629 HILO_64_REGPAIR(pstats.sent_ucast_bytes); 1630 p_stats->common.tx_mcast_bytes += 1631 HILO_64_REGPAIR(pstats.sent_mcast_bytes); 1632 p_stats->common.tx_bcast_bytes += 1633 HILO_64_REGPAIR(pstats.sent_bcast_bytes); 1634 p_stats->common.tx_ucast_pkts += 1635 HILO_64_REGPAIR(pstats.sent_ucast_pkts); 1636 p_stats->common.tx_mcast_pkts += 1637 HILO_64_REGPAIR(pstats.sent_mcast_pkts); 1638 p_stats->common.tx_bcast_pkts += 1639 HILO_64_REGPAIR(pstats.sent_bcast_pkts); 1640 p_stats->common.tx_err_drop_pkts += 1641 HILO_64_REGPAIR(pstats.error_drop_pkts); 1642 } 1643 1644 static noinline_for_stack void 1645 __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 1646 struct qed_eth_stats *p_stats, u16 statistics_bin) 1647 { 1648 struct tstorm_per_port_stat tstats; 1649 u32 tstats_addr, tstats_len; 1650 1651 if (IS_PF(p_hwfn->cdev)) { 1652 tstats_addr = BAR0_MAP_REG_TSDM_RAM + 1653 TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); 1654 tstats_len = sizeof(struct tstorm_per_port_stat); 1655 } else { 1656 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; 1657 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1658 1659 tstats_addr = p_resp->pfdev_info.stats_info.tstats.address; 1660 tstats_len = p_resp->pfdev_info.stats_info.tstats.len; 1661 } 1662 1663 memset(&tstats, 0, sizeof(tstats)); 1664 qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len); 1665 1666 p_stats->common.mftag_filter_discards += 1667 HILO_64_REGPAIR(tstats.mftag_filter_discard); 1668 p_stats->common.mac_filter_discards += 1669 HILO_64_REGPAIR(tstats.eth_mac_filter_discard); 1670 p_stats->common.gft_filter_drop += 1671 HILO_64_REGPAIR(tstats.eth_gft_drop_pkt); 1672 } 1673 1674 static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, 1675 u32 *p_addr, 1676 u32 *p_len, u16 statistics_bin) 1677 { 1678 if (IS_PF(p_hwfn->cdev)) { 1679 *p_addr = BAR0_MAP_REG_USDM_RAM + 1680 USTORM_QUEUE_STAT_OFFSET(statistics_bin); 1681 *p_len = sizeof(struct eth_ustorm_per_queue_stat); 1682 } else { 1683 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; 1684 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1685 1686 *p_addr = p_resp->pfdev_info.stats_info.ustats.address; 1687 *p_len = p_resp->pfdev_info.stats_info.ustats.len; 1688 } 1689 } 1690 1691 static noinline_for_stack 1692 void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 1693 struct qed_eth_stats *p_stats, u16 statistics_bin) 1694 { 1695 struct eth_ustorm_per_queue_stat ustats; 1696 u32 ustats_addr = 0, ustats_len = 0; 1697 1698 __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len, 1699 statistics_bin); 1700 1701 memset(&ustats, 0, sizeof(ustats)); 1702 qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len); 1703 1704 p_stats->common.rx_ucast_bytes += 1705 HILO_64_REGPAIR(ustats.rcv_ucast_bytes); 1706 p_stats->common.rx_mcast_bytes += 1707 HILO_64_REGPAIR(ustats.rcv_mcast_bytes); 1708 p_stats->common.rx_bcast_bytes += 1709 HILO_64_REGPAIR(ustats.rcv_bcast_bytes); 1710 p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); 1711 p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); 1712 p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); 1713 } 1714 1715 static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, 1716 u32 *p_addr, 1717 u32 *p_len, u16 statistics_bin) 1718 { 1719 if (IS_PF(p_hwfn->cdev)) { 1720 *p_addr = BAR0_MAP_REG_MSDM_RAM + 1721 MSTORM_QUEUE_STAT_OFFSET(statistics_bin); 1722 *p_len = sizeof(struct eth_mstorm_per_queue_stat); 1723 } else { 1724 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; 1725 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1726 1727 *p_addr = p_resp->pfdev_info.stats_info.mstats.address; 1728 *p_len = p_resp->pfdev_info.stats_info.mstats.len; 1729 } 1730 } 1731 1732 static noinline_for_stack void 1733 __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 1734 struct qed_eth_stats *p_stats, u16 statistics_bin) 1735 { 1736 struct eth_mstorm_per_queue_stat mstats; 1737 u32 mstats_addr = 0, mstats_len = 0; 1738 1739 __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len, 1740 statistics_bin); 1741 1742 memset(&mstats, 0, sizeof(mstats)); 1743 qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len); 1744 1745 p_stats->common.no_buff_discards += 1746 HILO_64_REGPAIR(mstats.no_buff_discard); 1747 p_stats->common.packet_too_big_discard += 1748 HILO_64_REGPAIR(mstats.packet_too_big_discard); 1749 p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard); 1750 p_stats->common.tpa_coalesced_pkts += 1751 HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); 1752 p_stats->common.tpa_coalesced_events += 1753 HILO_64_REGPAIR(mstats.tpa_coalesced_events); 1754 p_stats->common.tpa_aborts_num += 1755 HILO_64_REGPAIR(mstats.tpa_aborts_num); 1756 p_stats->common.tpa_coalesced_bytes += 1757 HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); 1758 } 1759 1760 static noinline_for_stack void 1761 __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 1762 struct qed_eth_stats *p_stats) 1763 { 1764 struct qed_eth_stats_common *p_common = &p_stats->common; 1765 struct port_stats port_stats; 1766 int j; 1767 1768 memset(&port_stats, 0, sizeof(port_stats)); 1769 1770 qed_memcpy_from(p_hwfn, p_ptt, &port_stats, 1771 p_hwfn->mcp_info->port_addr + 1772 offsetof(struct public_port, stats), 1773 sizeof(port_stats)); 1774 1775 p_common->rx_64_byte_packets += port_stats.eth.r64; 1776 p_common->rx_65_to_127_byte_packets += port_stats.eth.r127; 1777 p_common->rx_128_to_255_byte_packets += port_stats.eth.r255; 1778 p_common->rx_256_to_511_byte_packets += port_stats.eth.r511; 1779 p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023; 1780 p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; 1781 p_common->rx_crc_errors += port_stats.eth.rfcs; 1782 p_common->rx_mac_crtl_frames += port_stats.eth.rxcf; 1783 p_common->rx_pause_frames += port_stats.eth.rxpf; 1784 p_common->rx_pfc_frames += port_stats.eth.rxpp; 1785 p_common->rx_align_errors += port_stats.eth.raln; 1786 p_common->rx_carrier_errors += port_stats.eth.rfcr; 1787 p_common->rx_oversize_packets += port_stats.eth.rovr; 1788 p_common->rx_jabbers += port_stats.eth.rjbr; 1789 p_common->rx_undersize_packets += port_stats.eth.rund; 1790 p_common->rx_fragments += port_stats.eth.rfrg; 1791 p_common->tx_64_byte_packets += port_stats.eth.t64; 1792 p_common->tx_65_to_127_byte_packets += port_stats.eth.t127; 1793 p_common->tx_128_to_255_byte_packets += port_stats.eth.t255; 1794 p_common->tx_256_to_511_byte_packets += port_stats.eth.t511; 1795 p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023; 1796 p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; 1797 p_common->tx_pause_frames += port_stats.eth.txpf; 1798 p_common->tx_pfc_frames += port_stats.eth.txpp; 1799 p_common->rx_mac_bytes += port_stats.eth.rbyte; 1800 p_common->rx_mac_uc_packets += port_stats.eth.rxuca; 1801 p_common->rx_mac_mc_packets += port_stats.eth.rxmca; 1802 p_common->rx_mac_bc_packets += port_stats.eth.rxbca; 1803 p_common->rx_mac_frames_ok += port_stats.eth.rxpok; 1804 p_common->tx_mac_bytes += port_stats.eth.tbyte; 1805 p_common->tx_mac_uc_packets += port_stats.eth.txuca; 1806 p_common->tx_mac_mc_packets += port_stats.eth.txmca; 1807 p_common->tx_mac_bc_packets += port_stats.eth.txbca; 1808 p_common->tx_mac_ctrl_frames += port_stats.eth.txcf; 1809 for (j = 0; j < 8; j++) { 1810 p_common->brb_truncates += port_stats.brb.brb_truncate[j]; 1811 p_common->brb_discards += port_stats.brb.brb_discard[j]; 1812 } 1813 1814 if (QED_IS_BB(p_hwfn->cdev)) { 1815 struct qed_eth_stats_bb *p_bb = &p_stats->bb; 1816 1817 p_bb->rx_1519_to_1522_byte_packets += 1818 port_stats.eth.u0.bb0.r1522; 1819 p_bb->rx_1519_to_2047_byte_packets += 1820 port_stats.eth.u0.bb0.r2047; 1821 p_bb->rx_2048_to_4095_byte_packets += 1822 port_stats.eth.u0.bb0.r4095; 1823 p_bb->rx_4096_to_9216_byte_packets += 1824 port_stats.eth.u0.bb0.r9216; 1825 p_bb->rx_9217_to_16383_byte_packets += 1826 port_stats.eth.u0.bb0.r16383; 1827 p_bb->tx_1519_to_2047_byte_packets += 1828 port_stats.eth.u1.bb1.t2047; 1829 p_bb->tx_2048_to_4095_byte_packets += 1830 port_stats.eth.u1.bb1.t4095; 1831 p_bb->tx_4096_to_9216_byte_packets += 1832 port_stats.eth.u1.bb1.t9216; 1833 p_bb->tx_9217_to_16383_byte_packets += 1834 port_stats.eth.u1.bb1.t16383; 1835 p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec; 1836 p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl; 1837 } else { 1838 struct qed_eth_stats_ah *p_ah = &p_stats->ah; 1839 1840 p_ah->rx_1519_to_max_byte_packets += 1841 port_stats.eth.u0.ah0.r1519_to_max; 1842 p_ah->tx_1519_to_max_byte_packets = 1843 port_stats.eth.u1.ah1.t1519_to_max; 1844 } 1845 1846 p_common->link_change_count = qed_rd(p_hwfn, p_ptt, 1847 p_hwfn->mcp_info->port_addr + 1848 offsetof(struct public_port, 1849 link_change_count)); 1850 } 1851 1852 static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, 1853 struct qed_ptt *p_ptt, 1854 struct qed_eth_stats *stats, 1855 u16 statistics_bin, bool b_get_port_stats) 1856 { 1857 __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin); 1858 __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin); 1859 __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin); 1860 __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin); 1861 1862 if (b_get_port_stats && p_hwfn->mcp_info) 1863 __qed_get_vport_port_stats(p_hwfn, p_ptt, stats); 1864 } 1865 1866 static void _qed_get_vport_stats(struct qed_dev *cdev, 1867 struct qed_eth_stats *stats) 1868 { 1869 u8 fw_vport = 0; 1870 int i; 1871 1872 memset(stats, 0, sizeof(*stats)); 1873 1874 for_each_hwfn(cdev, i) { 1875 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1876 struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) 1877 : NULL; 1878 bool b_get_port_stats; 1879 1880 if (IS_PF(cdev)) { 1881 /* The main vport index is relative first */ 1882 if (qed_fw_vport(p_hwfn, 0, &fw_vport)) { 1883 DP_ERR(p_hwfn, "No vport available!\n"); 1884 goto out; 1885 } 1886 } 1887 1888 if (IS_PF(cdev) && !p_ptt) { 1889 DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 1890 continue; 1891 } 1892 1893 b_get_port_stats = IS_PF(cdev) && IS_LEAD_HWFN(p_hwfn); 1894 __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport, 1895 b_get_port_stats); 1896 1897 out: 1898 if (IS_PF(cdev) && p_ptt) 1899 qed_ptt_release(p_hwfn, p_ptt); 1900 } 1901 } 1902 1903 void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats) 1904 { 1905 u32 i; 1906 1907 if (!cdev) { 1908 memset(stats, 0, sizeof(*stats)); 1909 return; 1910 } 1911 1912 _qed_get_vport_stats(cdev, stats); 1913 1914 if (!cdev->reset_stats) 1915 return; 1916 1917 /* Reduce the statistics baseline */ 1918 for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++) 1919 ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i]; 1920 } 1921 1922 /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */ 1923 void qed_reset_vport_stats(struct qed_dev *cdev) 1924 { 1925 int i; 1926 1927 for_each_hwfn(cdev, i) { 1928 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1929 struct eth_mstorm_per_queue_stat mstats; 1930 struct eth_ustorm_per_queue_stat ustats; 1931 struct eth_pstorm_per_queue_stat pstats; 1932 struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) 1933 : NULL; 1934 u32 addr = 0, len = 0; 1935 1936 if (IS_PF(cdev) && !p_ptt) { 1937 DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 1938 continue; 1939 } 1940 1941 memset(&mstats, 0, sizeof(mstats)); 1942 __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0); 1943 qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len); 1944 1945 memset(&ustats, 0, sizeof(ustats)); 1946 __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0); 1947 qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len); 1948 1949 memset(&pstats, 0, sizeof(pstats)); 1950 __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0); 1951 qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len); 1952 1953 if (IS_PF(cdev)) 1954 qed_ptt_release(p_hwfn, p_ptt); 1955 } 1956 1957 /* PORT statistics are not necessarily reset, so we need to 1958 * read and create a baseline for future statistics. 1959 * Link change stat is maintained by MFW, return its value as is. 1960 */ 1961 if (!cdev->reset_stats) { 1962 DP_INFO(cdev, "Reset stats not allocated\n"); 1963 } else { 1964 _qed_get_vport_stats(cdev, cdev->reset_stats); 1965 cdev->reset_stats->common.link_change_count = 0; 1966 } 1967 } 1968 1969 static enum gft_profile_type 1970 qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode) 1971 { 1972 if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE) 1973 return GFT_PROFILE_TYPE_4_TUPLE; 1974 if (mode == QED_FILTER_CONFIG_MODE_IP_DEST) 1975 return GFT_PROFILE_TYPE_IP_DST_ADDR; 1976 if (mode == QED_FILTER_CONFIG_MODE_IP_SRC) 1977 return GFT_PROFILE_TYPE_IP_SRC_ADDR; 1978 return GFT_PROFILE_TYPE_L4_DST_PORT; 1979 } 1980 1981 void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, 1982 struct qed_ptt *p_ptt, 1983 struct qed_arfs_config_params *p_cfg_params) 1984 { 1985 if (test_bit(QED_MF_DISABLE_ARFS, &p_hwfn->cdev->mf_bits)) 1986 return; 1987 1988 if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) { 1989 qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id, 1990 p_cfg_params->tcp, 1991 p_cfg_params->udp, 1992 p_cfg_params->ipv4, 1993 p_cfg_params->ipv6, 1994 qed_arfs_mode_to_hsi(p_cfg_params->mode)); 1995 DP_VERBOSE(p_hwfn, 1996 QED_MSG_SP, 1997 "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n", 1998 p_cfg_params->tcp ? "Enable" : "Disable", 1999 p_cfg_params->udp ? "Enable" : "Disable", 2000 p_cfg_params->ipv4 ? "Enable" : "Disable", 2001 p_cfg_params->ipv6 ? "Enable" : "Disable", 2002 (u32)p_cfg_params->mode); 2003 } else { 2004 DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n"); 2005 qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 2006 } 2007 } 2008 2009 int 2010 qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, 2011 struct qed_spq_comp_cb *p_cb, 2012 struct qed_ntuple_filter_params *p_params) 2013 { 2014 struct rx_update_gft_filter_data *p_ramrod = NULL; 2015 struct qed_spq_entry *p_ent = NULL; 2016 struct qed_sp_init_data init_data; 2017 u16 abs_rx_q_id = 0; 2018 u8 abs_vport_id = 0; 2019 int rc = -EINVAL; 2020 2021 /* Get SPQ entry */ 2022 memset(&init_data, 0, sizeof(init_data)); 2023 init_data.cid = qed_spq_get_cid(p_hwfn); 2024 2025 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 2026 2027 if (p_cb) { 2028 init_data.comp_mode = QED_SPQ_MODE_CB; 2029 init_data.p_comp_data = p_cb; 2030 } else { 2031 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 2032 } 2033 2034 rc = qed_sp_init_request(p_hwfn, &p_ent, 2035 ETH_RAMROD_GFT_UPDATE_FILTER, 2036 PROTOCOLID_ETH, &init_data); 2037 if (rc) 2038 return rc; 2039 2040 p_ramrod = &p_ent->ramrod.rx_update_gft; 2041 2042 DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr); 2043 p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length); 2044 2045 if (p_params->b_is_drop) { 2046 p_ramrod->vport_id = cpu_to_le16(ETH_GFT_TRASHCAN_VPORT); 2047 } else { 2048 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 2049 if (rc) 2050 goto err; 2051 2052 if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { 2053 rc = qed_fw_l2_queue(p_hwfn, p_params->qid, 2054 &abs_rx_q_id); 2055 if (rc) 2056 goto err; 2057 2058 p_ramrod->rx_qid_valid = 1; 2059 p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); 2060 } 2061 2062 p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id); 2063 } 2064 2065 p_ramrod->flow_id_valid = 0; 2066 p_ramrod->flow_id = 0; 2067 p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER 2068 : GFT_DELETE_FILTER; 2069 2070 DP_VERBOSE(p_hwfn, QED_MSG_SP, 2071 "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n", 2072 abs_vport_id, abs_rx_q_id, 2073 p_params->b_is_add ? "Adding" : "Removing", 2074 (u64)p_params->addr, p_params->length); 2075 2076 return qed_spq_post(p_hwfn, p_ent, NULL); 2077 2078 err: 2079 qed_sp_destroy_request(p_hwfn, p_ent); 2080 return rc; 2081 } 2082 2083 int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, 2084 struct qed_ptt *p_ptt, 2085 struct qed_queue_cid *p_cid, u16 *p_rx_coal) 2086 { 2087 u32 coalesce, address, is_valid; 2088 struct cau_sb_entry sb_entry; 2089 u8 timer_res; 2090 int rc; 2091 2092 rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2093 p_cid->sb_igu_id * sizeof(u64), 2094 (u64)(uintptr_t)&sb_entry, 2, NULL); 2095 if (rc) { 2096 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2097 return rc; 2098 } 2099 2100 timer_res = GET_FIELD(le32_to_cpu(sb_entry.params), 2101 CAU_SB_ENTRY_TIMER_RES0); 2102 2103 address = BAR0_MAP_REG_USDM_RAM + 2104 USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 2105 coalesce = qed_rd(p_hwfn, p_ptt, address); 2106 2107 is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); 2108 if (!is_valid) 2109 return -EINVAL; 2110 2111 coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); 2112 *p_rx_coal = (u16)(coalesce << timer_res); 2113 2114 return 0; 2115 } 2116 2117 int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, 2118 struct qed_ptt *p_ptt, 2119 struct qed_queue_cid *p_cid, u16 *p_tx_coal) 2120 { 2121 u32 coalesce, address, is_valid; 2122 struct cau_sb_entry sb_entry; 2123 u8 timer_res; 2124 int rc; 2125 2126 rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2127 p_cid->sb_igu_id * sizeof(u64), 2128 (u64)(uintptr_t)&sb_entry, 2, NULL); 2129 if (rc) { 2130 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2131 return rc; 2132 } 2133 2134 timer_res = GET_FIELD(le32_to_cpu(sb_entry.params), 2135 CAU_SB_ENTRY_TIMER_RES1); 2136 2137 address = BAR0_MAP_REG_XSDM_RAM + 2138 XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 2139 coalesce = qed_rd(p_hwfn, p_ptt, address); 2140 2141 is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); 2142 if (!is_valid) 2143 return -EINVAL; 2144 2145 coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); 2146 *p_tx_coal = (u16)(coalesce << timer_res); 2147 2148 return 0; 2149 } 2150 2151 int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle) 2152 { 2153 struct qed_queue_cid *p_cid = handle; 2154 struct qed_ptt *p_ptt; 2155 int rc = 0; 2156 2157 if (IS_VF(p_hwfn->cdev)) { 2158 rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid); 2159 if (rc) 2160 DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); 2161 2162 return rc; 2163 } 2164 2165 p_ptt = qed_ptt_acquire(p_hwfn); 2166 if (!p_ptt) 2167 return -EAGAIN; 2168 2169 if (p_cid->b_is_rx) { 2170 rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); 2171 if (rc) 2172 goto out; 2173 } else { 2174 rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); 2175 if (rc) 2176 goto out; 2177 } 2178 2179 out: 2180 qed_ptt_release(p_hwfn, p_ptt); 2181 2182 return rc; 2183 } 2184 2185 static int qed_fill_eth_dev_info(struct qed_dev *cdev, 2186 struct qed_dev_eth_info *info) 2187 { 2188 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2189 int i; 2190 2191 memset(info, 0, sizeof(*info)); 2192 2193 if (IS_PF(cdev)) { 2194 int max_vf_vlan_filters = 0; 2195 int max_vf_mac_filters = 0; 2196 2197 info->num_tc = p_hwfn->hw_info.num_hw_tc; 2198 2199 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 2200 u16 num_queues = 0; 2201 2202 /* Since the feature controls only queue-zones, 2203 * make sure we have the contexts [rx, xdp, tcs] to 2204 * match. 2205 */ 2206 for_each_hwfn(cdev, i) { 2207 struct qed_hwfn *hwfn = &cdev->hwfns[i]; 2208 u16 l2_queues = (u16)FEAT_NUM(hwfn, 2209 QED_PF_L2_QUE); 2210 u16 cids; 2211 2212 cids = hwfn->pf_params.eth_pf_params.num_cons; 2213 cids /= (2 + info->num_tc); 2214 num_queues += min_t(u16, l2_queues, cids); 2215 } 2216 2217 /* queues might theoretically be >256, but interrupts' 2218 * upper-limit guarantes that it would fit in a u8. 2219 */ 2220 if (cdev->int_params.fp_msix_cnt) { 2221 u8 irqs = cdev->int_params.fp_msix_cnt; 2222 2223 info->num_queues = (u8)min_t(u16, 2224 num_queues, irqs); 2225 } 2226 } else { 2227 info->num_queues = cdev->num_hwfns; 2228 } 2229 2230 if (IS_QED_SRIOV(cdev)) { 2231 max_vf_vlan_filters = cdev->p_iov_info->total_vfs * 2232 QED_ETH_VF_NUM_VLAN_FILTERS; 2233 max_vf_mac_filters = cdev->p_iov_info->total_vfs * 2234 QED_ETH_VF_NUM_MAC_FILTERS; 2235 } 2236 info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev), 2237 QED_VLAN) - 2238 max_vf_vlan_filters; 2239 info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev), 2240 QED_MAC) - 2241 max_vf_mac_filters; 2242 2243 ether_addr_copy(info->port_mac, 2244 cdev->hwfns[0].hw_info.hw_mac_addr); 2245 2246 info->xdp_supported = true; 2247 } else { 2248 u16 total_cids = 0; 2249 2250 info->num_tc = 1; 2251 2252 /* Determine queues & XDP support */ 2253 for_each_hwfn(cdev, i) { 2254 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 2255 u8 queues, cids; 2256 2257 qed_vf_get_num_cids(p_hwfn, &cids); 2258 qed_vf_get_num_rxqs(p_hwfn, &queues); 2259 info->num_queues += queues; 2260 total_cids += cids; 2261 } 2262 2263 /* Enable VF XDP in case PF guarntees sufficient connections */ 2264 if (total_cids >= info->num_queues * 3) 2265 info->xdp_supported = true; 2266 2267 qed_vf_get_num_vlan_filters(&cdev->hwfns[0], 2268 (u8 *)&info->num_vlan_filters); 2269 qed_vf_get_num_mac_filters(&cdev->hwfns[0], 2270 (u8 *)&info->num_mac_filters); 2271 qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac); 2272 2273 info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi; 2274 } 2275 2276 qed_fill_dev_info(cdev, &info->common); 2277 2278 if (IS_VF(cdev)) 2279 eth_zero_addr(info->common.hw_mac); 2280 2281 return 0; 2282 } 2283 2284 static void qed_register_eth_ops(struct qed_dev *cdev, 2285 struct qed_eth_cb_ops *ops, void *cookie) 2286 { 2287 cdev->protocol_ops.eth = ops; 2288 cdev->ops_cookie = cookie; 2289 2290 /* For VF, we start bulletin reading */ 2291 if (IS_VF(cdev)) 2292 qed_vf_start_iov_wq(cdev); 2293 } 2294 2295 static bool qed_check_mac(struct qed_dev *cdev, u8 *mac) 2296 { 2297 if (IS_PF(cdev)) 2298 return true; 2299 2300 return qed_vf_check_mac(&cdev->hwfns[0], mac); 2301 } 2302 2303 static int qed_start_vport(struct qed_dev *cdev, 2304 struct qed_start_vport_params *params) 2305 { 2306 int rc, i; 2307 2308 for_each_hwfn(cdev, i) { 2309 struct qed_sp_vport_start_params start = { 0 }; 2310 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 2311 2312 start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO : 2313 QED_TPA_MODE_NONE; 2314 start.remove_inner_vlan = params->remove_inner_vlan; 2315 start.only_untagged = true; /* untagged only */ 2316 start.drop_ttl0 = params->drop_ttl0; 2317 start.opaque_fid = p_hwfn->hw_info.opaque_fid; 2318 start.concrete_fid = p_hwfn->hw_info.concrete_fid; 2319 start.handle_ptp_pkts = params->handle_ptp_pkts; 2320 start.vport_id = params->vport_id; 2321 start.max_buffers_per_cqe = 16; 2322 start.mtu = params->mtu; 2323 2324 rc = qed_sp_vport_start(p_hwfn, &start); 2325 if (rc) { 2326 DP_ERR(cdev, "Failed to start VPORT\n"); 2327 return rc; 2328 } 2329 2330 rc = qed_hw_start_fastpath(p_hwfn); 2331 if (rc) { 2332 DP_ERR(cdev, "Failed to start VPORT fastpath\n"); 2333 return rc; 2334 } 2335 2336 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), 2337 "Started V-PORT %d with MTU %d\n", 2338 start.vport_id, start.mtu); 2339 } 2340 2341 if (params->clear_stats) 2342 qed_reset_vport_stats(cdev); 2343 2344 return 0; 2345 } 2346 2347 static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id) 2348 { 2349 int rc, i; 2350 2351 for_each_hwfn(cdev, i) { 2352 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 2353 2354 rc = qed_sp_vport_stop(p_hwfn, 2355 p_hwfn->hw_info.opaque_fid, vport_id); 2356 2357 if (rc) { 2358 DP_ERR(cdev, "Failed to stop VPORT\n"); 2359 return rc; 2360 } 2361 } 2362 return 0; 2363 } 2364 2365 static int qed_update_vport_rss(struct qed_dev *cdev, 2366 struct qed_update_vport_rss_params *input, 2367 struct qed_rss_params *rss) 2368 { 2369 int i, fn; 2370 2371 /* Update configuration with what's correct regardless of CMT */ 2372 rss->update_rss_config = 1; 2373 rss->rss_enable = 1; 2374 rss->update_rss_capabilities = 1; 2375 rss->update_rss_ind_table = 1; 2376 rss->update_rss_key = 1; 2377 rss->rss_caps = input->rss_caps; 2378 memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32)); 2379 2380 /* In regular scenario, we'd simply need to take input handlers. 2381 * But in CMT, we'd have to split the handlers according to the 2382 * engine they were configured on. We'd then have to understand 2383 * whether RSS is really required, since 2-queues on CMT doesn't 2384 * require RSS. 2385 */ 2386 if (cdev->num_hwfns == 1) { 2387 memcpy(rss->rss_ind_table, 2388 input->rss_ind_table, 2389 QED_RSS_IND_TABLE_SIZE * sizeof(void *)); 2390 rss->rss_table_size_log = 7; 2391 return 0; 2392 } 2393 2394 /* Start by copying the non-spcific information to the 2nd copy */ 2395 memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params)); 2396 2397 /* CMT should be round-robin */ 2398 for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { 2399 struct qed_queue_cid *cid = input->rss_ind_table[i]; 2400 struct qed_rss_params *t_rss; 2401 2402 if (cid->p_owner == QED_LEADING_HWFN(cdev)) 2403 t_rss = &rss[0]; 2404 else 2405 t_rss = &rss[1]; 2406 2407 t_rss->rss_ind_table[i / cdev->num_hwfns] = cid; 2408 } 2409 2410 /* Make sure RSS is actually required */ 2411 for_each_hwfn(cdev, fn) { 2412 for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) { 2413 if (rss[fn].rss_ind_table[i] != 2414 rss[fn].rss_ind_table[0]) 2415 break; 2416 } 2417 if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) { 2418 DP_VERBOSE(cdev, NETIF_MSG_IFUP, 2419 "CMT - 1 queue per-hwfn; Disabling RSS\n"); 2420 return -EINVAL; 2421 } 2422 rss[fn].rss_table_size_log = 6; 2423 } 2424 2425 return 0; 2426 } 2427 2428 static int qed_update_vport(struct qed_dev *cdev, 2429 struct qed_update_vport_params *params) 2430 { 2431 struct qed_sp_vport_update_params sp_params; 2432 struct qed_rss_params *rss; 2433 int rc = 0, i; 2434 2435 if (!cdev) 2436 return -ENODEV; 2437 2438 rss = vzalloc(array_size(sizeof(*rss), cdev->num_hwfns)); 2439 if (!rss) 2440 return -ENOMEM; 2441 2442 memset(&sp_params, 0, sizeof(sp_params)); 2443 2444 /* Translate protocol params into sp params */ 2445 sp_params.vport_id = params->vport_id; 2446 sp_params.update_vport_active_rx_flg = params->update_vport_active_flg; 2447 sp_params.update_vport_active_tx_flg = params->update_vport_active_flg; 2448 sp_params.vport_active_rx_flg = params->vport_active_flg; 2449 sp_params.vport_active_tx_flg = params->vport_active_flg; 2450 sp_params.update_tx_switching_flg = params->update_tx_switching_flg; 2451 sp_params.tx_switching_flg = params->tx_switching_flg; 2452 sp_params.accept_any_vlan = params->accept_any_vlan; 2453 sp_params.update_accept_any_vlan_flg = 2454 params->update_accept_any_vlan_flg; 2455 2456 /* Prepare the RSS configuration */ 2457 if (params->update_rss_flg) 2458 if (qed_update_vport_rss(cdev, ¶ms->rss_params, rss)) 2459 params->update_rss_flg = 0; 2460 2461 for_each_hwfn(cdev, i) { 2462 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 2463 2464 if (params->update_rss_flg) 2465 sp_params.rss_params = &rss[i]; 2466 2467 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 2468 rc = qed_sp_vport_update(p_hwfn, &sp_params, 2469 QED_SPQ_MODE_EBLOCK, 2470 NULL); 2471 if (rc) { 2472 DP_ERR(cdev, "Failed to update VPORT\n"); 2473 goto out; 2474 } 2475 2476 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), 2477 "Updated V-PORT %d: active_flag %d [update %d]\n", 2478 params->vport_id, params->vport_active_flg, 2479 params->update_vport_active_flg); 2480 } 2481 2482 out: 2483 vfree(rss); 2484 return rc; 2485 } 2486 2487 static int qed_start_rxq(struct qed_dev *cdev, 2488 u8 rss_num, 2489 struct qed_queue_start_common_params *p_params, 2490 u16 bd_max_bytes, 2491 dma_addr_t bd_chain_phys_addr, 2492 dma_addr_t cqe_pbl_addr, 2493 u16 cqe_pbl_size, 2494 struct qed_rxq_start_ret_params *ret_params) 2495 { 2496 struct qed_hwfn *p_hwfn; 2497 int rc, hwfn_index; 2498 2499 hwfn_index = rss_num % cdev->num_hwfns; 2500 p_hwfn = &cdev->hwfns[hwfn_index]; 2501 2502 p_params->queue_id = p_params->queue_id / cdev->num_hwfns; 2503 p_params->stats_id = p_params->vport_id; 2504 2505 rc = qed_eth_rx_queue_start(p_hwfn, 2506 p_hwfn->hw_info.opaque_fid, 2507 p_params, 2508 bd_max_bytes, 2509 bd_chain_phys_addr, 2510 cqe_pbl_addr, cqe_pbl_size, ret_params); 2511 if (rc) { 2512 DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id); 2513 return rc; 2514 } 2515 2516 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), 2517 "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n", 2518 p_params->queue_id, rss_num, p_params->vport_id, 2519 p_params->p_sb->igu_sb_id); 2520 2521 return 0; 2522 } 2523 2524 static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle) 2525 { 2526 int rc, hwfn_index; 2527 struct qed_hwfn *p_hwfn; 2528 2529 hwfn_index = rss_id % cdev->num_hwfns; 2530 p_hwfn = &cdev->hwfns[hwfn_index]; 2531 2532 rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false); 2533 if (rc) { 2534 DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id); 2535 return rc; 2536 } 2537 2538 return 0; 2539 } 2540 2541 static int qed_start_txq(struct qed_dev *cdev, 2542 u8 rss_num, 2543 struct qed_queue_start_common_params *p_params, 2544 dma_addr_t pbl_addr, 2545 u16 pbl_size, 2546 struct qed_txq_start_ret_params *ret_params) 2547 { 2548 struct qed_hwfn *p_hwfn; 2549 int rc, hwfn_index; 2550 2551 hwfn_index = rss_num % cdev->num_hwfns; 2552 p_hwfn = &cdev->hwfns[hwfn_index]; 2553 p_params->queue_id = p_params->queue_id / cdev->num_hwfns; 2554 p_params->stats_id = p_params->vport_id; 2555 2556 rc = qed_eth_tx_queue_start(p_hwfn, 2557 p_hwfn->hw_info.opaque_fid, 2558 p_params, p_params->tc, 2559 pbl_addr, pbl_size, ret_params); 2560 2561 if (rc) { 2562 DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id); 2563 return rc; 2564 } 2565 2566 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), 2567 "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n", 2568 p_params->queue_id, rss_num, p_params->vport_id, 2569 p_params->p_sb->igu_sb_id); 2570 2571 return 0; 2572 } 2573 2574 #define QED_HW_STOP_RETRY_LIMIT (10) 2575 static int qed_fastpath_stop(struct qed_dev *cdev) 2576 { 2577 int rc; 2578 2579 rc = qed_hw_stop_fastpath(cdev); 2580 if (rc) { 2581 DP_ERR(cdev, "Failed to stop Fastpath\n"); 2582 return rc; 2583 } 2584 2585 return 0; 2586 } 2587 2588 static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle) 2589 { 2590 struct qed_hwfn *p_hwfn; 2591 int rc, hwfn_index; 2592 2593 hwfn_index = rss_id % cdev->num_hwfns; 2594 p_hwfn = &cdev->hwfns[hwfn_index]; 2595 2596 rc = qed_eth_tx_queue_stop(p_hwfn, handle); 2597 if (rc) { 2598 DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id); 2599 return rc; 2600 } 2601 2602 return 0; 2603 } 2604 2605 static int qed_tunn_configure(struct qed_dev *cdev, 2606 struct qed_tunn_params *tunn_params) 2607 { 2608 struct qed_tunnel_info tunn_info; 2609 int i, rc; 2610 2611 memset(&tunn_info, 0, sizeof(tunn_info)); 2612 if (tunn_params->update_vxlan_port) { 2613 tunn_info.vxlan_port.b_update_port = true; 2614 tunn_info.vxlan_port.port = tunn_params->vxlan_port; 2615 } 2616 2617 if (tunn_params->update_geneve_port) { 2618 tunn_info.geneve_port.b_update_port = true; 2619 tunn_info.geneve_port.port = tunn_params->geneve_port; 2620 } 2621 2622 for_each_hwfn(cdev, i) { 2623 struct qed_hwfn *hwfn = &cdev->hwfns[i]; 2624 struct qed_ptt *p_ptt; 2625 struct qed_tunnel_info *tun; 2626 2627 tun = &hwfn->cdev->tunnel; 2628 if (IS_PF(cdev)) { 2629 p_ptt = qed_ptt_acquire(hwfn); 2630 if (!p_ptt) 2631 return -EAGAIN; 2632 } else { 2633 p_ptt = NULL; 2634 } 2635 2636 rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info, 2637 QED_SPQ_MODE_EBLOCK, NULL); 2638 if (rc) { 2639 if (IS_PF(cdev)) 2640 qed_ptt_release(hwfn, p_ptt); 2641 return rc; 2642 } 2643 2644 if (IS_PF_SRIOV(hwfn)) { 2645 u16 vxlan_port, geneve_port; 2646 int j; 2647 2648 vxlan_port = tun->vxlan_port.port; 2649 geneve_port = tun->geneve_port.port; 2650 2651 qed_for_each_vf(hwfn, j) { 2652 qed_iov_bulletin_set_udp_ports(hwfn, j, 2653 vxlan_port, 2654 geneve_port); 2655 } 2656 2657 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 2658 } 2659 if (IS_PF(cdev)) 2660 qed_ptt_release(hwfn, p_ptt); 2661 } 2662 2663 return 0; 2664 } 2665 2666 static int qed_configure_filter_rx_mode(struct qed_dev *cdev, 2667 enum qed_filter_rx_mode_type type) 2668 { 2669 struct qed_filter_accept_flags accept_flags; 2670 2671 memset(&accept_flags, 0, sizeof(accept_flags)); 2672 2673 accept_flags.update_rx_mode_config = 1; 2674 accept_flags.update_tx_mode_config = 1; 2675 accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED | 2676 QED_ACCEPT_MCAST_MATCHED | 2677 QED_ACCEPT_BCAST; 2678 accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED | 2679 QED_ACCEPT_MCAST_MATCHED | 2680 QED_ACCEPT_BCAST; 2681 2682 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { 2683 accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | 2684 QED_ACCEPT_MCAST_UNMATCHED; 2685 accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | 2686 QED_ACCEPT_MCAST_UNMATCHED; 2687 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { 2688 accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; 2689 accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; 2690 } 2691 2692 return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false, 2693 QED_SPQ_MODE_CB, NULL); 2694 } 2695 2696 static int qed_configure_filter_ucast(struct qed_dev *cdev, 2697 struct qed_filter_ucast_params *params) 2698 { 2699 struct qed_filter_ucast ucast; 2700 2701 if (!params->vlan_valid && !params->mac_valid) { 2702 DP_NOTICE(cdev, 2703 "Tried configuring a unicast filter, but both MAC and VLAN are not set\n"); 2704 return -EINVAL; 2705 } 2706 2707 memset(&ucast, 0, sizeof(ucast)); 2708 switch (params->type) { 2709 case QED_FILTER_XCAST_TYPE_ADD: 2710 ucast.opcode = QED_FILTER_ADD; 2711 break; 2712 case QED_FILTER_XCAST_TYPE_DEL: 2713 ucast.opcode = QED_FILTER_REMOVE; 2714 break; 2715 case QED_FILTER_XCAST_TYPE_REPLACE: 2716 ucast.opcode = QED_FILTER_REPLACE; 2717 break; 2718 default: 2719 DP_NOTICE(cdev, "Unknown unicast filter type %d\n", 2720 params->type); 2721 } 2722 2723 if (params->vlan_valid && params->mac_valid) { 2724 ucast.type = QED_FILTER_MAC_VLAN; 2725 ether_addr_copy(ucast.mac, params->mac); 2726 ucast.vlan = params->vlan; 2727 } else if (params->mac_valid) { 2728 ucast.type = QED_FILTER_MAC; 2729 ether_addr_copy(ucast.mac, params->mac); 2730 } else { 2731 ucast.type = QED_FILTER_VLAN; 2732 ucast.vlan = params->vlan; 2733 } 2734 2735 ucast.is_rx_filter = true; 2736 ucast.is_tx_filter = true; 2737 2738 return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL); 2739 } 2740 2741 static int qed_configure_filter_mcast(struct qed_dev *cdev, 2742 struct qed_filter_mcast_params *params) 2743 { 2744 struct qed_filter_mcast mcast; 2745 int i; 2746 2747 memset(&mcast, 0, sizeof(mcast)); 2748 switch (params->type) { 2749 case QED_FILTER_XCAST_TYPE_ADD: 2750 mcast.opcode = QED_FILTER_ADD; 2751 break; 2752 case QED_FILTER_XCAST_TYPE_DEL: 2753 mcast.opcode = QED_FILTER_REMOVE; 2754 break; 2755 default: 2756 DP_NOTICE(cdev, "Unknown multicast filter type %d\n", 2757 params->type); 2758 } 2759 2760 mcast.num_mc_addrs = params->num; 2761 for (i = 0; i < mcast.num_mc_addrs; i++) 2762 ether_addr_copy(mcast.mac[i], params->mac[i]); 2763 2764 return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL); 2765 } 2766 2767 static int qed_configure_arfs_searcher(struct qed_dev *cdev, 2768 enum qed_filter_config_mode mode) 2769 { 2770 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2771 struct qed_arfs_config_params arfs_config_params; 2772 2773 memset(&arfs_config_params, 0, sizeof(arfs_config_params)); 2774 arfs_config_params.tcp = true; 2775 arfs_config_params.udp = true; 2776 arfs_config_params.ipv4 = true; 2777 arfs_config_params.ipv6 = true; 2778 arfs_config_params.mode = mode; 2779 qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt, 2780 &arfs_config_params); 2781 return 0; 2782 } 2783 2784 static void 2785 qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn, 2786 void *cookie, 2787 union event_ring_data *data, u8 fw_return_code) 2788 { 2789 struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common; 2790 void *dev = p_hwfn->cdev->ops_cookie; 2791 2792 op->arfs_filter_op(dev, cookie, fw_return_code); 2793 } 2794 2795 static int 2796 qed_ntuple_arfs_filter_config(struct qed_dev *cdev, 2797 void *cookie, 2798 struct qed_ntuple_filter_params *params) 2799 { 2800 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2801 struct qed_spq_comp_cb cb; 2802 int rc = -EINVAL; 2803 2804 cb.function = qed_arfs_sp_response_handler; 2805 cb.cookie = cookie; 2806 2807 if (params->b_is_vf) { 2808 if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false, 2809 false)) { 2810 DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n", 2811 params->vf_id); 2812 return rc; 2813 } 2814 2815 params->vport_id = params->vf_id + 1; 2816 params->qid = QED_RFS_NTUPLE_QID_RSS; 2817 } 2818 2819 rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params); 2820 if (rc) 2821 DP_NOTICE(p_hwfn, 2822 "Failed to issue a-RFS filter configuration\n"); 2823 else 2824 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, 2825 "Successfully issued a-RFS filter configuration\n"); 2826 2827 return rc; 2828 } 2829 2830 static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle) 2831 { 2832 struct qed_queue_cid *p_cid = handle; 2833 struct qed_hwfn *p_hwfn; 2834 int rc; 2835 2836 p_hwfn = p_cid->p_owner; 2837 rc = qed_get_queue_coalesce(p_hwfn, coal, handle); 2838 if (rc) 2839 DP_VERBOSE(cdev, QED_MSG_DEBUG, 2840 "Unable to read queue coalescing\n"); 2841 2842 return rc; 2843 } 2844 2845 static int qed_fp_cqe_completion(struct qed_dev *dev, 2846 u8 rss_id, struct eth_slow_path_rx_cqe *cqe) 2847 { 2848 return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns], 2849 cqe); 2850 } 2851 2852 static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac) 2853 { 2854 int i, ret; 2855 2856 if (IS_PF(cdev)) 2857 return 0; 2858 2859 for_each_hwfn(cdev, i) { 2860 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 2861 2862 ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac); 2863 if (ret) 2864 return ret; 2865 } 2866 2867 return 0; 2868 } 2869 2870 static const struct qed_eth_ops qed_eth_ops_pass = { 2871 .common = &qed_common_ops_pass, 2872 #ifdef CONFIG_QED_SRIOV 2873 .iov = &qed_iov_ops_pass, 2874 #endif 2875 #ifdef CONFIG_DCB 2876 .dcb = &qed_dcbnl_ops_pass, 2877 #endif 2878 .ptp = &qed_ptp_ops_pass, 2879 .fill_dev_info = &qed_fill_eth_dev_info, 2880 .register_ops = &qed_register_eth_ops, 2881 .check_mac = &qed_check_mac, 2882 .vport_start = &qed_start_vport, 2883 .vport_stop = &qed_stop_vport, 2884 .vport_update = &qed_update_vport, 2885 .q_rx_start = &qed_start_rxq, 2886 .q_rx_stop = &qed_stop_rxq, 2887 .q_tx_start = &qed_start_txq, 2888 .q_tx_stop = &qed_stop_txq, 2889 .filter_config_rx_mode = &qed_configure_filter_rx_mode, 2890 .filter_config_ucast = &qed_configure_filter_ucast, 2891 .filter_config_mcast = &qed_configure_filter_mcast, 2892 .fastpath_stop = &qed_fastpath_stop, 2893 .eth_cqe_completion = &qed_fp_cqe_completion, 2894 .get_vport_stats = &qed_get_vport_stats, 2895 .tunn_config = &qed_tunn_configure, 2896 .ntuple_filter_config = &qed_ntuple_arfs_filter_config, 2897 .configure_arfs_searcher = &qed_configure_arfs_searcher, 2898 .get_coalesce = &qed_get_coalesce, 2899 .req_bulletin_update_mac = &qed_req_bulletin_update_mac, 2900 }; 2901 2902 const struct qed_eth_ops *qed_get_eth_ops(void) 2903 { 2904 return &qed_eth_ops_pass; 2905 } 2906 EXPORT_SYMBOL(qed_get_eth_ops); 2907 2908 void qed_put_eth_ops(void) 2909 { 2910 /* TODO - reference count for module? */ 2911 } 2912 EXPORT_SYMBOL(qed_put_eth_ops); 2913