/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

struct qed_l2_info {
	u32 queues;
	unsigned long **pp_qid_usage;

	/* The lock is meant to synchronize access to the qid usage */
	struct mutex lock;
};

int qed_l2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_l2_info *p_l2_info;
	unsigned long **pp_qids;
	u32 i;

	if (!QED_IS_L2_PERSONALITY(p_hwfn))
		return 0;

	p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
	if (!p_l2_info)
		return -ENOMEM;
	p_hwfn->p_l2_info = p_l2_info;

	if (IS_PF(p_hwfn->cdev)) {
		p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
	} else {
		u8 rx = 0, tx = 0;

		qed_vf_get_num_rxqs(p_hwfn, &rx);
		qed_vf_get_num_txqs(p_hwfn, &tx);

		p_l2_info->queues = max_t(u8, rx, tx);
	}

	pp_qids = kzalloc(sizeof(unsigned long *) * p_l2_info->queues,
			  GFP_KERNEL);
	if (!pp_qids)
		return -ENOMEM;
	p_l2_info->pp_qid_usage = pp_qids;

	for (i = 0; i < p_l2_info->queues; i++) {
		pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
		if (!pp_qids[i])
			return -ENOMEM;
	}

	return 0;
}

void qed_l2_setup(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		return;

	mutex_init(&p_hwfn->p_l2_info->lock);
}

void qed_l2_free(struct qed_hwfn *p_hwfn)
{
	u32 i;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		return;

	if (!p_hwfn->p_l2_info)
		return;

	if (!p_hwfn->p_l2_info->pp_qid_usage)
		goto out_l2_info;

	/* Free until hit first uninitialized entry */
	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
		if (!p_hwfn->p_l2_info->pp_qid_usage[i])
			break;
		kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
	}

	kfree(p_hwfn->p_l2_info->pp_qid_usage);

out_l2_info:
	kfree(p_hwfn->p_l2_info);
	p_hwfn->p_l2_info = NULL;
}

static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
					struct qed_queue_cid *p_cid)
{
	struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
	u16 queue_id = p_cid->rel.queue_id;
	bool b_rc = true;
	u8 first;

	mutex_lock(&p_l2_info->lock);

	if (queue_id >= p_l2_info->queues) {
		DP_NOTICE(p_hwfn,
			  "Requested to increase usage for qzone %04x out of %08x\n",
			  queue_id, p_l2_info->queues);
		b_rc = false;
		goto out;
	}

	first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
					MAX_QUEUES_PER_QZONE);
	if (first >= MAX_QUEUES_PER_QZONE) {
		b_rc = false;
		goto out;
	}

	__set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
	p_cid->qid_usage_idx = first;

out:
	mutex_unlock(&p_l2_info->lock);
	return b_rc;
}

static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
					struct qed_queue_cid *p_cid)
{
	mutex_lock(&p_hwfn->p_l2_info->lock);

	clear_bit(p_cid->qid_usage_idx,
		  p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

	mutex_unlock(&p_hwfn->p_l2_info->lock);
}

void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
			       struct qed_queue_cid *p_cid)
{
	bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);

	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
		_qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);

	/* For PF's VFs we maintain the index inside queue-zone in IOV */
	if (p_cid->vfid == QED_QUEUE_CID_SELF)
		qed_eth_queue_qid_usage_del(p_hwfn, p_cid);

	vfree(p_cid);
}

/* This internal function is only meant to be directly called by PFs
 * initializing CIDs for their VFs.
 */
static struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		      u16 opaque_fid,
		      u32 cid,
		      struct qed_queue_start_common_params *p_params,
		      bool b_is_rx,
		      struct qed_queue_cid_vf_params *p_vf_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = vmalloc(sizeof(*p_cid));
	if (!p_cid)
		return NULL;
	memset(p_cid, 0, sizeof(*p_cid));

	p_cid->opaque_fid = opaque_fid;
	p_cid->cid = cid;
	p_cid->p_owner = p_hwfn;

	/* Fill in parameters */
	p_cid->rel.vport_id = p_params->vport_id;
	p_cid->rel.queue_id = p_params->queue_id;
	p_cid->rel.stats_id = p_params->stats_id;
	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
	p_cid->b_is_rx = b_is_rx;
	p_cid->sb_idx = p_params->sb_idx;

	/* Fill-in bits related to VFs' queues if information was provided */
	if (p_vf_params) {
		p_cid->vfid = p_vf_params->vfid;
		p_cid->vf_qid = p_vf_params->vf_qid;
		p_cid->vf_legacy = p_vf_params->vf_legacy;
	} else {
		p_cid->vfid = QED_QUEUE_CID_SELF;
	}

	/* Don't try calculating the absolute indices for VFs */
	if (IS_VF(p_hwfn->cdev)) {
		p_cid->abs = p_cid->rel;
		goto out;
	}

	/* Calculate the engine-absolute indices of the resources.
	 * This would guarantee they're valid later on.
	 * In some cases [SBs] we already have the right values.
	 */
	rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
	if (rc)
		goto fail;

	rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
	if (rc)
		goto fail;

	/* In case of a PF configuring its VF's queues, the stats-id is already
	 * absolute [since there's a single index that's suitable per-VF].
	 */
	if (p_cid->vfid == QED_QUEUE_CID_SELF) {
		rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
				  &p_cid->abs.stats_id);
		if (rc)
			goto fail;
	} else {
		p_cid->abs.stats_id = p_cid->rel.stats_id;
	}

out:
	/* VF-images have provided the qid_usage_idx on their own.
	 * Otherwise, we need to allocate a unique one.
	 */
	if (!p_vf_params) {
		if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
			goto fail;
	} else {
		p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
	}

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
		   p_cid->opaque_fid,
		   p_cid->cid,
		   p_cid->rel.vport_id,
		   p_cid->abs.vport_id,
		   p_cid->rel.queue_id,
		   p_cid->qid_usage_idx,
		   p_cid->abs.queue_id,
		   p_cid->rel.stats_id,
		   p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);

	return p_cid;

fail:
	vfree(p_cid);
	return NULL;
}

struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		     u16 opaque_fid,
		     struct qed_queue_start_common_params *p_params,
		     bool b_is_rx,
		     struct qed_queue_cid_vf_params *p_vf_params)
{
	struct qed_queue_cid *p_cid;
	u8 vfid = QED_CXT_PF_CID;
	bool b_legacy_vf = false;
	u32 cid = 0;

	/* In case of legacy VFs, the CID can be derived from the additional
	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
	 * use the vf_qid for this purpose as well.
	 */
	if (p_vf_params) {
		vfid = p_vf_params->vfid;

		if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
			b_legacy_vf = true;
			cid = p_vf_params->vf_qid;
		}
	}

	/* Get a unique firmware CID for this queue, in case it's a PF.
	 * VFs don't need a CID as the queue configuration will be done
	 * by PF.
	 */
	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
		if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
					 &cid, vfid)) {
			DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
			return NULL;
		}
	}

	p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
				      p_params, b_is_rx, p_vf_params);
	if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
		_qed_cxt_release_cid(p_hwfn, cid, vfid);

	return p_cid;
}

static struct qed_queue_cid *
qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			bool b_is_rx,
			struct qed_queue_start_common_params *p_params)
{
	return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
				    NULL);
}

int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;
	u16 rx_mode = 0;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = cpu_to_le16(p_params->mtu);
	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));

	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case QED_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;

	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
						  p_params->concrete_fid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					     p_params->mtu,
					     p_params->remove_inner_vlan,
					     p_params->tpa_mode,
					     p_params->max_buffers_per_cqe,
					     p_params->only_untagged);
	}

	return qed_sp_eth_vport_start(p_hwfn, p_params);
}

static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_rss)
{
	struct eth_vport_rss_config *p_config;
	u16 capabilities = 0;
	int i, table_size;
	int rc = 0;

	if (!p_rss) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}
	p_config = &p_ramrod->rss_config;

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
	if (rc)
		return rc;

	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
	p_config->update_rss_key = p_rss->update_rss_key;

	p_config->rss_mode = p_rss->rss_enable ?
			     ETH_VPORT_RSS_MODE_REGULAR :
			     ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
	p_config->tbl_size = p_rss->rss_table_size_log;

	p_config->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   p_config->rss_mode,
		   p_config->update_rss_capabilities,
		   p_config->capabilities,
		   p_config->update_rss_ind_table, p_config->update_rss_key);

	table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
			   1 << p_config->tbl_size);
	for (i = 0; i < table_size; i++) {
		struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];

		if (!p_queue)
			return -EINVAL;

		p_config->indirection_table[i] =
		    cpu_to_le16(p_queue->abs.queue_id);
	}

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "Configured RSS indirection table [%d entries]:\n",
		   table_size);
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFUP,
			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
			   le16_to_cpu(p_config->indirection_table[i]),
			   le16_to_cpu(p_config->indirection_table[i + 1]),
			   le16_to_cpu(p_config->indirection_table[i + 2]),
			   le16_to_cpu(p_config->indirection_table[i + 3]),
			   le16_to_cpu(p_config->indirection_table[i + 4]),
			   le16_to_cpu(p_config->indirection_table[i + 5]),
			   le16_to_cpu(p_config->indirection_table[i + 6]),
			   le16_to_cpu(p_config->indirection_table[i + 7]),
			   le16_to_cpu(p_config->indirection_table[i + 8]),
			   le16_to_cpu(p_config->indirection_table[i + 9]),
			   le16_to_cpu(p_config->indirection_table[i + 10]),
			   le16_to_cpu(p_config->indirection_table[i + 11]),
			   le16_to_cpu(p_config->indirection_table[i + 12]),
			   le16_to_cpu(p_config->indirection_table[i + 13]),
			   le16_to_cpu(p_config->indirection_table[i + 14]),
			   le16_to_cpu(p_config->indirection_table[i + 15]));
	}

	for (i = 0; i < 10; i++)
		p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);

	return rc;
}

static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;

	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}

static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct qed_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}

static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)
		return;

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = (u32 *)p_params->bins;

		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
	}
}

int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0, val;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc) {
		/* Return spq entry which is taken in qed_sp_init_request() */
		qed_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_vport_stop(p_hwfn);

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
		       struct qed_filter_accept_flags *p_accept_flags)
{
	struct qed_sp_vport_update_params s_params;

	memset(&s_params, 0, sizeof(s_params));
	memcpy(&s_params.accept_flags, p_accept_flags,
	       sizeof(struct qed_filter_accept_flags));

	return qed_vf_pf_vport_update(p_hwfn, &s_params);
}

static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 u8 vport,
				 struct qed_filter_accept_flags accept_flags,
				 u8 update_accept_any_vlan,
				 u8 accept_any_vlan,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc)
				return rc;
			continue;
		}

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
		if (rc) {
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, QED_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return 0;
}

int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			     struct qed_queue_cid *p_cid,
			     u16 bd_max_bytes,
			     dma_addr_t bd_chain_phys_addr,
			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   p_cid->opaque_fid, p_cid->cid,
		   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_cid->vfid != QED_QUEUE_CID_SELF) {
		bool b_legacy_vf = !!(p_cid->vf_legacy &
				      QED_QCID_LEGACY_VF_RX_PROD);

		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Queue%s is meant for VF rxq[%02x]\n",
			   b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size, void __iomem **pp_prod)
{
	u32 init_prod_val = 0;

	*pp_prod = p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
					bd_max_bytes,
					bd_chain_phys_addr,
					cqe_pbl_addr, cqe_pbl_size);
}

static int
qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u16 bd_max_bytes,
		       dma_addr_t bd_chain_phys_addr,
		       dma_addr_t cqe_pbl_addr,
		       u16 cqe_pbl_size,
		       struct qed_rxq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	/* Allocate a CID for the queue */
	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
	if (!p_cid)
		return -ENOMEM;

	if (IS_PF(p_hwfn->cdev)) {
		rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
					       bd_max_bytes,
					       bd_chain_phys_addr,
					       cqe_pbl_addr, cqe_pbl_size,
					       &p_ret_params->p_prod);
	} else {
		rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr,
					 cqe_pbl_size, &p_ret_params->p_prod);
	}

	/* Provide the caller with a reference to use as the queue handle */
	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
				void **pp_rxq_handles,
				u8 num_rxqs,
				u8 complete_cqe_flg,
				u8 complete_event_flg,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_queue_cid *p_cid;
	int rc = -EINVAL;
	u8 i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];

		/* Get SPQ entry */
		init_data.cid = p_cid->cid;
		init_data.opaque_fid = p_cid->opaque_fid;

		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 ETH_RAMROD_RX_QUEUE_UPDATE,
					 PROTOCOLID_ETH, &init_data);
		if (rc)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;
		p_ramrod->vport_id = p_cid->abs.vport_id;

		p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = qed_spq_post(p_hwfn, p_ent, NULL);
		if (rc)
			return rc;
	}

	return rc;
}

static int
qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 bool b_eq_completion_only, bool b_cqe_completion)
{
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
				      !b_eq_completion_only) ||
				     b_cqe_completion;
	p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
				       b_eq_completion_only;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
			  void *p_rxq,
			  bool eq_completion_only, bool cqe_completion)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
	int rc = -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
					      eq_completion_only,
					      cqe_completion);
	else
		rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
{
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = p_cid->abs.vport_id;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;

	p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);

	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u8 tc,
			  dma_addr_t pbl_addr,
			  u16 pbl_size, void __iomem **pp_doorbell)
{
	int rc;

	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
				      pbl_addr, pbl_size,
				      qed_get_cm_pq_idx_mcos(p_hwfn, tc));
	if (rc)
		return rc;

	/* Provide the caller with the necessary return values */
	*pp_doorbell = p_hwfn->doorbells +
		       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);

	return 0;
}

static int
qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u8 tc,
		       dma_addr_t pbl_addr,
		       u16 pbl_size,
		       struct qed_txq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
	if (!p_cid)
		return -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
					       pbl_addr, pbl_size,
					       &p_ret_params->p_doorbell);
	else
		rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
					 pbl_addr, pbl_size,
					 &p_ret_params->p_doorbell);

	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

static int
qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
	int rc;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
	else
		rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}

static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct qed_sp_init_data init_data;
	enum eth_filter_action action;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			  &vport_to_add_to);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
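
	/* Descriptive note (not in the original source): REPLACE and MOVE are
	 * built from two filter commands - MOVE removes the filter from one
	 * vport and adds it on another, while REPLACE first removes all
	 * existing filters before adding the new one - so cmd_cnt is 2 for
	 * them and 1 for every other opcode, as set below.
	 */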
	switch (p_filter_cmd->opcode) {
	case QED_FILTER_REPLACE:
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		memcpy(p_second_filter, p_first_filter,
		       sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return -EINVAL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :
					   vport_to_add_to;
	}

	return 0;
}

int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct qed_filter_ucast *p_filter_cmd,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct eth_filter_cmd_header *p_header;
	int rc;

	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				     &p_ramrod, &p_ent,
				     comp_mode, p_comp_data);
	if (rc) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0],
		   p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2],
		   p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4],
		   p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return 0;
}

/*******************************************************************************
 * Description:
 *	Calculates CRC-32 on a buffer
 *	Note: crc32_length MUST be aligned to 8
 * Return:
 *	The CRC-32 result (the seed is returned for invalid input)
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
			   u32 crc32_length, u32 crc32_seed, u8 complement)
{
	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
	u8 msb = 0, current_byte = 0;

	if ((!crc32_packet) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0))
		return crc32_result;
	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /*crc32_result[0] = 1;*/
			}
		}
	}
	return crc32_result;
}

static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
{
	u32 packet_buf[2] = { 0 };

	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}

u8 qed_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				mac, ETH_ALEN);

	return crc & 0xff;
}

static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_mcast *p_filter_cmd,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc, i;

	if (p_filter_cmd->opcode == QED_FILTER_ADD)
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
				  &abs_vport_id);
	else
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
				  &abs_vport_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));
	memset(bins, 0, sizeof(unsigned long) *
	       ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* The ADD op is an explicit set operation; it replaces any
	 * previously configured multicast filters for the vport.
	 */
	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, bins);
		}

		/* Convert to correct endianness */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			struct vport_update_ramrod_mcast *p_ramrod_bins;
			u32 *p_bins = (u32 *)bins;

			p_ramrod_bins = &p_ramrod->approx_mcast;
			p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_filter_mcast_cmd(struct qed_dev *cdev,
				struct qed_filter_mcast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
		return -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
	}
	return rc;
}

static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
		if (rc)
			break;
	}

	return rc;
}

/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
		    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
	}
}

static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
				       statistics_bin);

	memset(&pstats, 0, sizeof(pstats));
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->common.tx_ucast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->common.tx_mcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->common.tx_bcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->common.tx_ucast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->common.tx_mcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->common.tx_bcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->common.tx_err_drop_pkts +=
	    HILO_64_REGPAIR(pstats.error_drop_pkts);
}

static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->cdev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	memset(&tstats, 0, sizeof(tstats));
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->common.mftag_filter_discards +=
	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->common.mac_filter_discards +=
	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}

static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
		    USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
	}
}

static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
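	/* Descriptive note (not in the original source): the USTORM per-queue
	 * block holds the Rx unicast/multicast/broadcast packet and byte
	 * counters for this statistics bin; they are copied out below and
	 * accumulated into the caller's buffer.
	 */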
	u32 ustats_addr = 0, ustats_len = 0;

	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
				       statistics_bin);

	memset(&ustats, 0, sizeof(ustats));
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->common.rx_ucast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->common.rx_mcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->common.rx_bcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_MSDM_RAM +
		    MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
	}
}

static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
				       statistics_bin);

	memset(&mstats, 0, sizeof(mstats));
	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

	p_stats->common.no_buff_discards +=
	    HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->common.packet_too_big_discard +=
	    HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->common.tpa_coalesced_pkts +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->common.tpa_coalesced_events +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->common.tpa_aborts_num +=
	    HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->common.tpa_coalesced_bytes +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}

static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_eth_stats *p_stats)
{
	struct qed_eth_stats_common *p_common = &p_stats->common;
	struct port_stats port_stats;
	int j;

	memset(&port_stats, 0, sizeof(port_stats));

	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			p_hwfn->mcp_info->port_addr +
			offsetof(struct public_port, stats),
			sizeof(port_stats));

	p_common->rx_64_byte_packets += port_stats.eth.r64;
	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_common->rx_crc_errors += port_stats.eth.rfcs;
	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_common->rx_pause_frames += port_stats.eth.rxpf;
	p_common->rx_pfc_frames += port_stats.eth.rxpp;
	p_common->rx_pfc_frames += port_stats.eth.rxpp;
	p_common->rx_align_errors += port_stats.eth.raln;
	p_common->rx_carrier_errors += port_stats.eth.rfcr;
	p_common->rx_oversize_packets += port_stats.eth.rovr;
	p_common->rx_jabbers += port_stats.eth.rjbr;
	p_common->rx_undersize_packets += port_stats.eth.rund;
	p_common->rx_fragments += port_stats.eth.rfrg;
	p_common->tx_64_byte_packets += port_stats.eth.t64;
	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_common->tx_pause_frames += port_stats.eth.txpf;
	p_common->tx_pfc_frames += port_stats.eth.txpp;
	p_common->rx_mac_bytes += port_stats.eth.rbyte;
	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_common->tx_mac_bytes += port_stats.eth.tbyte;
	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
	for (j = 0; j < 8; j++) {
		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
		p_common->brb_discards += port_stats.brb.brb_discard[j];
	}

	if (QED_IS_BB(p_hwfn->cdev)) {
		struct qed_eth_stats_bb *p_bb = &p_stats->bb;

		p_bb->rx_1519_to_1522_byte_packets +=
		    port_stats.eth.u0.bb0.r1522;
		p_bb->rx_1519_to_2047_byte_packets +=
		    port_stats.eth.u0.bb0.r2047;
		p_bb->rx_2048_to_4095_byte_packets +=
		    port_stats.eth.u0.bb0.r4095;
		p_bb->rx_4096_to_9216_byte_packets +=
		    port_stats.eth.u0.bb0.r9216;
		p_bb->rx_9217_to_16383_byte_packets +=
		    port_stats.eth.u0.bb0.r16383;
		p_bb->tx_1519_to_2047_byte_packets +=
		    port_stats.eth.u1.bb1.t2047;
		p_bb->tx_2048_to_4095_byte_packets +=
		    port_stats.eth.u1.bb1.t4095;
		p_bb->tx_4096_to_9216_byte_packets +=
		    port_stats.eth.u1.bb1.t9216;
		p_bb->tx_9217_to_16383_byte_packets +=
		    port_stats.eth.u1.bb1.t16383;
		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
	} else {
		struct qed_eth_stats_ah *p_ah = &p_stats->ah;

		p_ah->rx_1519_to_max_byte_packets +=
		    port_stats.eth.u0.ah0.r1519_to_max;
		p_ah->tx_1519_to_max_byte_packets =
		    port_stats.eth.u1.ah1.t1519_to_max;
	}
}

static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_eth_stats *stats,
				  u16 statistics_bin, bool b_get_port_stats)
{
	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

	if (b_get_port_stats && p_hwfn->mcp_info)
		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}

static void _qed_get_vport_stats(struct qed_dev *cdev,
				 struct qed_eth_stats *stats)
{
	u8 fw_vport = 0;
	int i;

	memset(stats, 0, sizeof(*stats));

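	/* Accumulate the statistics of all hwfns into the same struct;
	 * on CMT devices both engines contribute to the reported totals.
	 */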
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;

		if (IS_PF(cdev)) {
			/* The main vport index is relative first */
			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
				DP_ERR(p_hwfn, "No vport available!\n");
				goto out;
			}
		}

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
				      IS_PF(cdev) ? true : false);

out:
		if (IS_PF(cdev) && p_ptt)
			qed_ptt_release(p_hwfn, p_ptt);
	}
}

void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
	u32 i;

	if (!cdev) {
		memset(stats, 0, sizeof(*stats));
		return;
	}

	_qed_get_vport_stats(cdev, stats);

	if (!cdev->reset_stats)
		return;

	/* Reduce the statistics baseline */
	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
}

/* Zeroes the V-PORT specific portion of stats (Port stats remain untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;
		u32 addr = 0, len = 0;

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		memset(&mstats, 0, sizeof(mstats));
		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		memset(&ustats, 0, sizeof(ustats));
		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		memset(&pstats, 0, sizeof(pstats));
		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		if (IS_PF(cdev))
			qed_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 */
	if (!cdev->reset_stats)
		DP_INFO(cdev, "Reset stats not allocated\n");
	else
		_qed_get_vport_stats(cdev, cdev->reset_stats);
}

static void
qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			struct qed_arfs_config_params *p_cfg_params)
{
	if (p_cfg_params->arfs_enable) {
		qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
					p_cfg_params->tcp, p_cfg_params->udp,
					p_cfg_params->ipv4,
					p_cfg_params->ipv6);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s\n",
			   p_cfg_params->tcp ? "Enable" : "Disable",
			   p_cfg_params->udp ? "Enable" : "Disable",
			   p_cfg_params->ipv4 ? "Enable" : "Disable",
			   p_cfg_params->ipv6 ? "Enable" : "Disable");
	} else {
		qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
		   p_cfg_params->arfs_enable ? "Enable" : "Disable");
}

static int
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
				struct qed_spq_comp_cb *p_cb,
				dma_addr_t p_addr, u16 length, u16 qid,
				u8 vport_id, bool b_is_add)
{
	struct rx_update_gft_filter_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 abs_rx_q_id = 0;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc)
		return rc;

	rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;

	if (p_cb) {
		init_data.comp_mode = QED_SPQ_MODE_CB;
		init_data.p_comp_data = p_cb;
	} else {
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	}

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_GFT_UPDATE_FILTER,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_update_gft;
	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
	p_ramrod->pkt_hdr_length = cpu_to_le16(length);
	p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
	p_ramrod->vport_id = abs_vport_id;
	p_ramrod->filter_type = RFS_FILTER_TYPE;
	p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
		   abs_vport_id, abs_rx_q_id,
		   b_is_add ? "Adding" : "Removing", (u64)p_addr, length);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_queue_cid *p_cid, u16 *p_rx_coal)
{
	u32 coalesce, address, is_valid;
	struct cau_sb_entry sb_entry;
	u8 timer_res;
	int rc;

	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       p_cid->sb_igu_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, 0);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);

	address = BAR0_MAP_REG_USDM_RAM +
		  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
	coalesce = qed_rd(p_hwfn, p_ptt, address);

	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
	if (!is_valid)
		return -EINVAL;

	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
	*p_rx_coal = (u16)(coalesce << timer_res);

	return 0;
}

int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_queue_cid *p_cid, u16 *p_tx_coal)
{
	u32 coalesce, address, is_valid;
	struct cau_sb_entry sb_entry;
	u8 timer_res;
	int rc;

	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       p_cid->sb_igu_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, 0);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);

	address = BAR0_MAP_REG_XSDM_RAM +
		  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
	coalesce = qed_rd(p_hwfn, p_ptt, address);

	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
	if (!is_valid)
		return -EINVAL;

	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
	*p_tx_coal = (u16)(coalesce << timer_res);

	return 0;
}

int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle)
{
	struct qed_queue_cid *p_cid = handle;
	struct qed_ptt *p_ptt;
	int rc = 0;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
		if (rc)
			DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");

		return rc;
	}

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	if (p_cid->b_is_rx) {
		rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
		if (rc)
			goto out;
	} else {
		rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
		if (rc)
			goto out;
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

static int qed_fill_eth_dev_info(struct qed_dev *cdev,
				 struct qed_dev_eth_info *info)
{
	int i;

	memset(info, 0, sizeof(*info));

	info->num_tc = 1;

	if (IS_PF(cdev)) {
		int max_vf_vlan_filters = 0;
		int max_vf_mac_filters = 0;

		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
			u16 num_queues = 0;

			/* Since the feature controls only queue-zones,
			 * make sure we have the contexts [rx, tx, xdp] to
			 * match.
			 */
			for_each_hwfn(cdev, i) {
				struct qed_hwfn *hwfn = &cdev->hwfns[i];
				u16 l2_queues = (u16)FEAT_NUM(hwfn,
							      QED_PF_L2_QUE);
				u16 cids;

				cids = hwfn->pf_params.eth_pf_params.num_cons;
				num_queues += min_t(u16, l2_queues, cids / 3);
			}

			/* queues might theoretically be >256, but interrupts'
			 * upper-limit guarantees that it would fit in a u8.
			 */
			if (cdev->int_params.fp_msix_cnt) {
				u8 irqs = cdev->int_params.fp_msix_cnt;

				info->num_queues = (u8)min_t(u16,
							     num_queues, irqs);
			}
		} else {
			info->num_queues = cdev->num_hwfns;
		}

		if (IS_QED_SRIOV(cdev)) {
			max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
					      QED_ETH_VF_NUM_VLAN_FILTERS;
			max_vf_mac_filters = cdev->p_iov_info->total_vfs *
					     QED_ETH_VF_NUM_MAC_FILTERS;
		}
		info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						  QED_VLAN) -
					 max_vf_vlan_filters;
		info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						 QED_MAC) -
					max_vf_mac_filters;

		ether_addr_copy(info->port_mac,
				cdev->hwfns[0].hw_info.hw_mac_addr);

		info->xdp_supported = true;
	} else {
		u16 total_cids = 0;

		/* Determine queues & XDP support */
		for_each_hwfn(cdev, i) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
			u8 queues, cids;

			qed_vf_get_num_cids(p_hwfn, &cids);
			qed_vf_get_num_rxqs(p_hwfn, &queues);
			info->num_queues += queues;
			total_cids += cids;
		}

		/* Enable VF XDP in case PF guarantees sufficient connections */
		if (total_cids >= info->num_queues * 3)
			info->xdp_supported = true;

		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
					    (u8 *)&info->num_vlan_filters);
		qed_vf_get_num_mac_filters(&cdev->hwfns[0],
					   (u8 *)&info->num_mac_filters);
		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);

		info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
	}

	qed_fill_dev_info(cdev, &info->common);

	if (IS_VF(cdev))
		eth_zero_addr(info->common.hw_mac);

	return 0;
}

static void qed_register_eth_ops(struct qed_dev *cdev,
				 struct qed_eth_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.eth = ops;
	cdev->ops_cookie = cookie;

	/* For VF, we start bulletin reading */
	if (IS_VF(cdev))
		qed_vf_start_iov_wq(cdev);
}

static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
{
	if (IS_PF(cdev))
		return true;

	return qed_vf_check_mac(&cdev->hwfns[0], mac);
}

static int qed_start_vport(struct qed_dev *cdev,
			   struct qed_start_vport_params *params)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_sp_vport_start_params start = { 0 };
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

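		/* Translate the protocol-driver parameters into a
		 * per-hwfn vport-start request.
		 */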
		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
						      QED_TPA_MODE_NONE;
		start.remove_inner_vlan = params->remove_inner_vlan;
		start.only_untagged = true;	/* untagged only */
		start.drop_ttl0 = params->drop_ttl0;
		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
		start.handle_ptp_pkts = params->handle_ptp_pkts;
		start.vport_id = params->vport_id;
		start.max_buffers_per_cqe = 16;
		start.mtu = params->mtu;

		rc = qed_sp_vport_start(p_hwfn, &start);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT\n");
			return rc;
		}

		rc = qed_hw_start_fastpath(p_hwfn);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT fastpath\n");
			return rc;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Started V-PORT %d with MTU %d\n",
			   start.vport_id, start.mtu);
	}

	if (params->clear_stats)
		qed_reset_vport_stats(cdev);

	return 0;
}

static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_sp_vport_stop(p_hwfn,
				       p_hwfn->hw_info.opaque_fid, vport_id);
		if (rc) {
			DP_ERR(cdev, "Failed to stop VPORT\n");
			return rc;
		}
	}

	return 0;
}

static int qed_update_vport_rss(struct qed_dev *cdev,
				struct qed_update_vport_rss_params *input,
				struct qed_rss_params *rss)
{
	int i, fn;

	/* Update configuration with what's correct regardless of CMT */
	rss->update_rss_config = 1;
	rss->rss_enable = 1;
	rss->update_rss_capabilities = 1;
	rss->update_rss_ind_table = 1;
	rss->update_rss_key = 1;
	rss->rss_caps = input->rss_caps;
	memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));

	/* In a regular scenario, we'd simply need to take the input handlers.
	 * But in CMT, we'd have to split the handlers according to the
	 * engine they were configured on. We'd then have to understand
	 * whether RSS is really required, since 2-queues on CMT doesn't
	 * require RSS.
	 */
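	/* E.g., with two hwfns and the default 128-entry indirection table,
	 * the entries are split between the engines so that each hwfn ends
	 * up with its own 64-entry table (rss_table_size_log = 6) holding
	 * only the queue-cids it owns.
	 */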
	if (cdev->num_hwfns == 1) {
		memcpy(rss->rss_ind_table,
		       input->rss_ind_table,
		       QED_RSS_IND_TABLE_SIZE * sizeof(void *));
		rss->rss_table_size_log = 7;
		return 0;
	}

	/* Start by copying the non-specific information to the 2nd copy */
	memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));

	/* CMT should be round-robin */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		struct qed_queue_cid *cid = input->rss_ind_table[i];
		struct qed_rss_params *t_rss;

		if (cid->p_owner == QED_LEADING_HWFN(cdev))
			t_rss = &rss[0];
		else
			t_rss = &rss[1];

		t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
	}

	/* Make sure RSS is actually required */
	for_each_hwfn(cdev, fn) {
		for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
			if (rss[fn].rss_ind_table[i] !=
			    rss[fn].rss_ind_table[0])
				break;
		}

		if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
			DP_VERBOSE(cdev, NETIF_MSG_IFUP,
				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
			return -EINVAL;
		}
		rss[fn].rss_table_size_log = 6;
	}

	return 0;
}

static int qed_update_vport(struct qed_dev *cdev,
			    struct qed_update_vport_params *params)
{
	struct qed_sp_vport_update_params sp_params;
	struct qed_rss_params *rss;
	int rc = 0, i;

	if (!cdev)
		return -ENODEV;

	rss = vzalloc(sizeof(*rss) * cdev->num_hwfns);
	if (!rss)
		return -ENOMEM;

	memset(&sp_params, 0, sizeof(sp_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;
	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
	sp_params.tx_switching_flg = params->tx_switching_flg;
	sp_params.accept_any_vlan = params->accept_any_vlan;
	sp_params.update_accept_any_vlan_flg =
		params->update_accept_any_vlan_flg;

	/* Prepare the RSS configuration */
	if (params->update_rss_flg)
		if (qed_update_vport_rss(cdev, &params->rss_params, rss))
			params->update_rss_flg = 0;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (params->update_rss_flg)
			sp_params.rss_params = &rss[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = qed_sp_vport_update(p_hwfn, &sp_params,
					 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(cdev, "Failed to update VPORT\n");
			goto out;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

out:
	vfree(rss);
	return rc;
}

static int qed_start_rxq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct qed_rxq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

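	/* In CMT the queues are interleaved between the hwfns, so translate
	 * the global queue index into the engine-relative one.
	 */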
	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_rx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params,
				    bd_max_bytes,
				    bd_chain_phys_addr,
				    cqe_pbl_addr, cqe_pbl_size, ret_params);
	if (rc) {
		DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->p_sb->igu_sb_id);

	return 0;
}

static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
	if (rc) {
		DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
		return rc;
	}

	return 0;
}

static int qed_start_txq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 struct qed_txq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_tx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params, 0,
				    pbl_addr, pbl_size, ret_params);
	if (rc) {
		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->p_sb->igu_sb_id);

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
	int rc;

	rc = qed_hw_stop_fastpath(cdev);
	if (rc) {
		DP_ERR(cdev, "Failed to stop Fastpath\n");
		return rc;
	}

	return 0;
}

static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_eth_tx_queue_stop(p_hwfn, handle);
	if (rc) {
		DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
		return rc;
	}

	return 0;
}

static int qed_tunn_configure(struct qed_dev *cdev,
			      struct qed_tunn_params *tunn_params)
{
	struct qed_tunnel_info tunn_info;
	int i, rc;

	memset(&tunn_info, 0, sizeof(tunn_info));
	if (tunn_params->update_vxlan_port) {
		tunn_info.vxlan_port.b_update_port = true;
		tunn_info.vxlan_port.port = tunn_params->vxlan_port;
	}

	if (tunn_params->update_geneve_port) {
		tunn_info.geneve_port.b_update_port = true;
		tunn_info.geneve_port.port = tunn_params->geneve_port;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;
		struct qed_tunnel_info *tun;

		tun = &hwfn->cdev->tunnel;
		if (IS_PF(cdev)) {
			p_ptt = qed_ptt_acquire(hwfn);
			if (!p_ptt)
				return -EAGAIN;
		} else {
			p_ptt = NULL;
		}

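		/* Configure the updated tunnel ports/classification on this
		 * hwfn via the PF-update ramrod.
		 */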
		rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
					       QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			if (IS_PF(cdev))
				qed_ptt_release(hwfn, p_ptt);
			return rc;
		}

		if (IS_PF_SRIOV(hwfn)) {
			u16 vxlan_port, geneve_port;
			int j;

			vxlan_port = tun->vxlan_port.port;
			geneve_port = tun->geneve_port.port;

			qed_for_each_vf(hwfn, j) {
				qed_iov_bulletin_set_udp_ports(hwfn, j,
							       vxlan_port,
							       geneve_port);
			}

			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		}

		if (IS_PF(cdev))
			qed_ptt_release(hwfn, p_ptt);
	}

	return 0;
}

static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
					enum qed_filter_rx_mode_type type)
{
	struct qed_filter_accept_flags accept_flags;

	memset(&accept_flags, 0, sizeof(accept_flags));

	accept_flags.update_rx_mode_config = 1;
	accept_flags.update_tx_mode_config = 1;
	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;
	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
	}

	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
				     QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_ucast(struct qed_dev *cdev,
				      struct qed_filter_ucast_params *params)
{
	struct qed_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = QED_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = QED_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
		return -EINVAL;
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = QED_FILTER_MAC_VLAN;
		ether_addr_copy(ucast.mac, params->mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = QED_FILTER_MAC;
		ether_addr_copy(ucast.mac, params->mac);
	} else {
		ucast.type = QED_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_mcast(struct qed_dev *cdev,
				      struct qed_filter_mcast_params *params)
{
	struct qed_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = QED_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
		return -EINVAL;
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy(mcast.mac[i], params->mac[i]);

	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter(struct qed_dev *cdev,
				struct qed_filter_params *params)
{
	enum qed_filter_rx_mode_type accept_flags;

	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		accept_flags = params->filter.accept_flags;
		return qed_configure_filter_rx_mode(cdev, accept_flags);
	default:
		DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
		return -EINVAL;
	}
}

static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_arfs_config_params arfs_config_params;

	memset(&arfs_config_params, 0, sizeof(arfs_config_params));
	arfs_config_params.tcp = true;
	arfs_config_params.udp = true;
	arfs_config_params.ipv4 = true;
	arfs_config_params.ipv6 = true;
	arfs_config_params.arfs_enable = en_searcher;

	qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
				&arfs_config_params);
	return 0;
}

static void
qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
			     void *cookie, union event_ring_data *data,
			     u8 fw_return_code)
{
	struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
	void *dev = p_hwfn->cdev->ops_cookie;

	op->arfs_filter_op(dev, cookie, fw_return_code);
}

static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
					 dma_addr_t mapping, u16 length,
					 u16 vport_id, u16 rx_queue_id,
					 bool add_filter)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_spq_comp_cb cb;
	int rc = -EINVAL;

	cb.function = qed_arfs_sp_response_handler;
	cb.cookie = cookie;

	rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
					     &cb, mapping, length, rx_queue_id,
					     vport_id, add_filter);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to issue a-RFS filter configuration\n");
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
			   "Successfully issued a-RFS filter configuration\n");

	return rc;
}

static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
{
	struct qed_queue_cid *p_cid = handle;
	struct qed_hwfn *p_hwfn;
	int rc;

	p_hwfn = p_cid->p_owner;
	rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
	if (rc)
		DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");

	return rc;
}

static int qed_fp_cqe_completion(struct qed_dev *dev,
				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
				      cqe);
}

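/* L2 ops exposed to the Ethernet protocol driver (qede) via
 * qed_get_eth_ops().
 */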
#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif

#ifdef CONFIG_DCB
extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
#endif

extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;

static const struct qed_eth_ops qed_eth_ops_pass = {
	.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
	.iov = &qed_iov_ops_pass,
#endif
#ifdef CONFIG_DCB
	.dcb = &qed_dcbnl_ops_pass,
#endif
	.ptp = &qed_ptp_ops_pass,
	.fill_dev_info = &qed_fill_eth_dev_info,
	.register_ops = &qed_register_eth_ops,
	.check_mac = &qed_check_mac,
	.vport_start = &qed_start_vport,
	.vport_stop = &qed_stop_vport,
	.vport_update = &qed_update_vport,
	.q_rx_start = &qed_start_rxq,
	.q_rx_stop = &qed_stop_rxq,
	.q_tx_start = &qed_start_txq,
	.q_tx_stop = &qed_stop_txq,
	.filter_config = &qed_configure_filter,
	.fastpath_stop = &qed_fastpath_stop,
	.eth_cqe_completion = &qed_fp_cqe_completion,
	.get_vport_stats = &qed_get_vport_stats,
	.tunn_config = &qed_tunn_configure,
	.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
	.configure_arfs_searcher = &qed_configure_arfs_searcher,
	.get_coalesce = &qed_get_coalesce,
};

const struct qed_eth_ops *qed_get_eth_ops(void)
{
	return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

void qed_put_eth_ops(void)
{
	/* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);
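
/* Usage sketch (illustrative only, not part of this driver): a protocol
 * driver such as qede is expected to bind to these ops roughly as follows;
 * the callback struct and cookie below are hypothetical placeholders.
 *
 *	const struct qed_eth_ops *ops = qed_get_eth_ops();
 *	struct qed_dev_eth_info info;
 *
 *	ops->fill_dev_info(cdev, &info);
 *	ops->register_ops(cdev, &my_cb_ops, my_cookie);
 *	ops->vport_start(cdev, &vport_params);
 */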