/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

struct qed_l2_info {
	u32 queues;
	unsigned long **pp_qid_usage;

	/* The lock is meant to synchronize access to the qid usage */
	struct mutex lock;
};

int qed_l2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_l2_info *p_l2_info;
	unsigned long **pp_qids;
	u32 i;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		return 0;

	p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
	if (!p_l2_info)
		return -ENOMEM;
	p_hwfn->p_l2_info = p_l2_info;

	if (IS_PF(p_hwfn->cdev)) {
		p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
	} else {
		u8 rx = 0, tx = 0;

		qed_vf_get_num_rxqs(p_hwfn, &rx);
		qed_vf_get_num_txqs(p_hwfn, &tx);

		p_l2_info->queues = max_t(u8, rx, tx);
	}

	pp_qids = kcalloc(p_l2_info->queues, sizeof(unsigned long *),
			  GFP_KERNEL);
	if (!pp_qids)
		return -ENOMEM;
	p_l2_info->pp_qid_usage = pp_qids;

	for (i = 0; i < p_l2_info->queues; i++) {
		pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
		if (!pp_qids[i])
			return -ENOMEM;
	}

	return 0;
}
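/* Illustrative only (not part of the original driver): the
 * qed_l2_alloc()/qed_l2_setup()/qed_l2_free() trio (the latter two just
 * below) is expected to be driven by the hwfn init flow roughly as in this
 * sketch. Note that qed_l2_free() tolerates the partially-initialized state
 * left behind when qed_l2_alloc() fails mid-loop, so a caller can unwind
 * with it unconditionally.
 */
static int __maybe_unused example_l2_init(struct qed_hwfn *p_hwfn)
{
	int rc;

	rc = qed_l2_alloc(p_hwfn);
	if (rc) {
		qed_l2_free(p_hwfn);	/* safe on partial allocation */
		return rc;
	}

	qed_l2_setup(p_hwfn);
	return 0;
}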
void qed_l2_setup(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		return;

	mutex_init(&p_hwfn->p_l2_info->lock);
}

void qed_l2_free(struct qed_hwfn *p_hwfn)
{
	u32 i;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		return;

	if (!p_hwfn->p_l2_info)
		return;

	if (!p_hwfn->p_l2_info->pp_qid_usage)
		goto out_l2_info;

	/* Free until hitting the first uninitialized entry */
	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
		if (!p_hwfn->p_l2_info->pp_qid_usage[i])
			break;
		kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
	}

	kfree(p_hwfn->p_l2_info->pp_qid_usage);

out_l2_info:
	kfree(p_hwfn->p_l2_info);
	p_hwfn->p_l2_info = NULL;
}

static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
					struct qed_queue_cid *p_cid)
{
	struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
	u16 queue_id = p_cid->rel.queue_id;
	bool b_rc = true;
	u8 first;

	mutex_lock(&p_l2_info->lock);

	/* queue_id is a 0-based index into the pp_qid_usage array, so
	 * queue_id == queues is already out of range.
	 */
	if (queue_id >= p_l2_info->queues) {
		DP_NOTICE(p_hwfn,
			  "Requested to increase usage for qzone %04x out of %08x\n",
			  queue_id, p_l2_info->queues);
		b_rc = false;
		goto out;
	}

	first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
					MAX_QUEUES_PER_QZONE);
	if (first >= MAX_QUEUES_PER_QZONE) {
		b_rc = false;
		goto out;
	}

	__set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
	p_cid->qid_usage_idx = first;

out:
	mutex_unlock(&p_l2_info->lock);
	return b_rc;
}

static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
					struct qed_queue_cid *p_cid)
{
	mutex_lock(&p_hwfn->p_l2_info->lock);

	clear_bit(p_cid->qid_usage_idx,
		  p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

	mutex_unlock(&p_hwfn->p_l2_info->lock);
}

void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
			       struct qed_queue_cid *p_cid)
{
	/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
	if ((p_cid->vfid == QED_QUEUE_CID_SELF) && IS_PF(p_hwfn->cdev))
		qed_cxt_release_cid(p_hwfn, p_cid->cid);

	/* For PF's VFs we maintain the index inside queue-zone in IOV */
	if (p_cid->vfid == QED_QUEUE_CID_SELF)
		qed_eth_queue_qid_usage_del(p_hwfn, p_cid);

	vfree(p_cid);
}
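/* Illustration (assumption about usage patterns, not from the hardware
 * spec): each queue-zone owns a MAX_QUEUES_PER_QZONE-bit bitmap, so the
 * add/del pair above hands out indices lowest-free-bit first and recycles
 * them on deletion, e.g.:
 *
 *   add -> 0, add -> 1, add -> 2, del(1), add -> 1
 */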
/* This internal helper is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
static struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		      u16 opaque_fid,
		      u32 cid,
		      struct qed_queue_start_common_params *p_params,
		      bool b_is_rx,
		      struct qed_queue_cid_vf_params *p_vf_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = vzalloc(sizeof(*p_cid));
	if (!p_cid)
		return NULL;

	p_cid->opaque_fid = opaque_fid;
	p_cid->cid = cid;
	p_cid->p_owner = p_hwfn;

	/* Fill in parameters */
	p_cid->rel.vport_id = p_params->vport_id;
	p_cid->rel.queue_id = p_params->queue_id;
	p_cid->rel.stats_id = p_params->stats_id;
	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
	p_cid->b_is_rx = b_is_rx;
	p_cid->sb_idx = p_params->sb_idx;

	/* Fill in bits related to VFs' queues if information was provided */
	if (p_vf_params) {
		p_cid->vfid = p_vf_params->vfid;
		p_cid->vf_qid = p_vf_params->vf_qid;
		p_cid->vf_legacy = p_vf_params->vf_legacy;
	} else {
		p_cid->vfid = QED_QUEUE_CID_SELF;
	}

	/* Don't try calculating the absolute indices for VFs */
	if (IS_VF(p_hwfn->cdev)) {
		p_cid->abs = p_cid->rel;
		goto out;
	}

	/* Calculate the engine-absolute indices of the resources.
	 * This would guarantee they're valid later on.
	 * In some cases [SBs] we already have the right values.
	 */
	rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
	if (rc)
		goto fail;

	rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
	if (rc)
		goto fail;

	/* In case of a PF configuring its VF's queues, the stats-id is already
	 * absolute [since there's a single index that's suitable per-VF].
	 */
	if (p_cid->vfid == QED_QUEUE_CID_SELF) {
		rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
				  &p_cid->abs.stats_id);
		if (rc)
			goto fail;
	} else {
		p_cid->abs.stats_id = p_cid->rel.stats_id;
	}

out:
	/* VF-images have provided the qid_usage_idx on their own.
	 * Otherwise, we need to allocate a unique one.
	 */
	if (!p_vf_params) {
		if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
			goto fail;
	} else {
		p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
	}

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
		   p_cid->opaque_fid,
		   p_cid->cid,
		   p_cid->rel.vport_id,
		   p_cid->abs.vport_id,
		   p_cid->rel.queue_id,
		   p_cid->qid_usage_idx,
		   p_cid->abs.queue_id,
		   p_cid->rel.stats_id,
		   p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);

	return p_cid;

fail:
	vfree(p_cid);
	return NULL;
}
struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		     u16 opaque_fid,
		     struct qed_queue_start_common_params *p_params,
		     bool b_is_rx,
		     struct qed_queue_cid_vf_params *p_vf_params)
{
	struct qed_queue_cid *p_cid;
	bool b_legacy_vf = false;
	u32 cid = 0;

	/* Currently, the PF doesn't need to allocate CIDs for any VF */
	if (p_vf_params)
		b_legacy_vf = true;

	/* Get a unique firmware CID for this queue, in case it's a PF.
	 * VFs don't need a CID as the queue configuration will be done
	 * by the PF.
	 */
	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
		if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
			DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
			return NULL;
		}
	}

	p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
				      p_params, b_is_rx, p_vf_params);
	if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
		qed_cxt_release_cid(p_hwfn, cid);

	return p_cid;
}

static struct qed_queue_cid *
qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			bool b_is_rx,
			struct qed_queue_start_common_params *p_params)
{
	return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
				    NULL);
}

int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;
	u16 rx_mode = 0;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = cpu_to_le16(p_params->mtu);
	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));

	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case QED_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;

	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
						  p_params->concrete_fid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					     p_params->mtu,
					     p_params->remove_inner_vlan,
					     p_params->tpa_mode,
					     p_params->max_buffers_per_cqe,
					     p_params->only_untagged);
	}

	return qed_sp_eth_vport_start(p_hwfn, p_params);
}
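/* Minimal sketch (illustrative; the field values here are assumptions): a PF
 * caller bringing up vport 0 with a standard Ethernet MTU and GRO-style TPA
 * would fill qed_sp_vport_start_params along these lines before calling
 * qed_sp_vport_start() above.
 */
static int __maybe_unused example_vport_start(struct qed_hwfn *p_hwfn)
{
	struct qed_sp_vport_start_params params;

	memset(&params, 0, sizeof(params));
	params.opaque_fid = p_hwfn->hw_info.opaque_fid;
	params.concrete_fid = p_hwfn->hw_info.concrete_fid;
	params.vport_id = 0;			/* relative vport index */
	params.mtu = 1500;			/* assumed MTU */
	params.tpa_mode = QED_TPA_MODE_GRO;
	params.max_buffers_per_cqe = 5;		/* assumed value */
	params.only_untagged = true;

	return qed_sp_vport_start(p_hwfn, &params);
}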
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_rss)
{
	struct eth_vport_rss_config *p_config;
	u16 capabilities = 0;
	int i, table_size;
	int rc = 0;

	if (!p_rss) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}
	p_config = &p_ramrod->rss_config;

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
	if (rc)
		return rc;

	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
	p_config->update_rss_key = p_rss->update_rss_key;

	p_config->rss_mode = p_rss->rss_enable ?
			     ETH_VPORT_RSS_MODE_REGULAR :
			     ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
	p_config->tbl_size = p_rss->rss_table_size_log;

	p_config->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   p_config->rss_mode,
		   p_config->update_rss_capabilities,
		   p_config->capabilities,
		   p_config->update_rss_ind_table, p_config->update_rss_key);

	table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
			   1 << p_config->tbl_size);
	for (i = 0; i < table_size; i++) {
		struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];

		if (!p_queue)
			return -EINVAL;

		p_config->indirection_table[i] =
		    cpu_to_le16(p_queue->abs.queue_id);
	}

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "Configured RSS indirection table [%d entries]:\n",
		   table_size);
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFUP,
			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
			   le16_to_cpu(p_config->indirection_table[i]),
			   le16_to_cpu(p_config->indirection_table[i + 1]),
			   le16_to_cpu(p_config->indirection_table[i + 2]),
			   le16_to_cpu(p_config->indirection_table[i + 3]),
			   le16_to_cpu(p_config->indirection_table[i + 4]),
			   le16_to_cpu(p_config->indirection_table[i + 5]),
			   le16_to_cpu(p_config->indirection_table[i + 6]),
			   le16_to_cpu(p_config->indirection_table[i + 7]),
			   le16_to_cpu(p_config->indirection_table[i + 8]),
			   le16_to_cpu(p_config->indirection_table[i + 9]),
			   le16_to_cpu(p_config->indirection_table[i + 10]),
			   le16_to_cpu(p_config->indirection_table[i + 11]),
			   le16_to_cpu(p_config->indirection_table[i + 12]),
			   le16_to_cpu(p_config->indirection_table[i + 13]),
			   le16_to_cpu(p_config->indirection_table[i + 14]),
			   le16_to_cpu(p_config->indirection_table[i + 15]));
	}

	for (i = 0; i < 10; i++)
		p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);

	return rc;
}
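/* Worked example (illustrative): with rss_table_size_log = 7 the effective
 * table size above is min_t(int, QED_RSS_IND_TABLE_SIZE, 1 << 7) = 128
 * entries, and a caller typically spreads its Rx queues across the
 * indirection table round-robin before invoking the update, e.g.:
 *
 *   for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
 *       rss->rss_ind_table[i] = rxq_cids[i % num_rxqs];
 */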
static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;
	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}
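/* Example mapping (restates the Rx logic above): rx_accept_filter =
 * QED_ACCEPT_UCAST_MATCHED | QED_ACCEPT_BCAST yields UCAST_DROP_ALL = 0,
 * UCAST_ACCEPT_UNMATCHED = 0, MCAST_DROP_ALL = 1, MCAST_ACCEPT_ALL = 0 and
 * BCAST_ACCEPT_ALL = 1 in rx_mode.state.
 */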
static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct qed_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}

static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)
		return;

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = (u32 *)p_params->bins;

		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
	}
}
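/* Note (assumption about the firmware interface): the approximate-multicast
 * vector is a set of bins packed into ETH_MULTICAST_MAC_BINS_IN_REGS 32-bit
 * registers; the loop above only converts each register to little-endian,
 * while bin selection itself is done by qed_mcast_bin_from_mac() further
 * below in this file.
 */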
int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0, val;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc) {
		/* Return the SPQ entry taken in qed_sp_init_request() */
		qed_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_vport_stop(p_hwfn);

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
		       struct qed_filter_accept_flags *p_accept_flags)
{
	struct qed_sp_vport_update_params s_params;

	memset(&s_params, 0, sizeof(s_params));
	memcpy(&s_params.accept_flags, p_accept_flags,
	       sizeof(struct qed_filter_accept_flags));

	return qed_vf_pf_vport_update(p_hwfn, &s_params);
}

static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 u8 vport,
				 struct qed_filter_accept_flags accept_flags,
				 u8 update_accept_any_vlan,
				 u8 accept_any_vlan,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc)
				return rc;
			continue;
		}

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
		if (rc) {
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, QED_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return 0;
}
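/* Illustrative use (the values are assumptions): putting vport 0 of every
 * hwfn into Rx promiscuous mode through the helper above would look roughly
 * like:
 *
 *   struct qed_filter_accept_flags flags = {0};
 *
 *   flags.update_rx_mode_config = 1;
 *   flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
 *			      QED_ACCEPT_UCAST_UNMATCHED |
 *			      QED_ACCEPT_MCAST_MATCHED |
 *			      QED_ACCEPT_BCAST;
 *   qed_filter_accept_cmd(cdev, 0, flags, 0, 0, QED_SPQ_MODE_EBLOCK, NULL);
 */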
int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			     struct qed_queue_cid *p_cid,
			     u16 bd_max_bytes,
			     dma_addr_t bd_chain_phys_addr,
			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   p_cid->opaque_fid, p_cid->cid,
		   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_cid->vfid != QED_QUEUE_CID_SELF) {
		bool b_legacy_vf = !!(p_cid->vf_legacy &
				      QED_QCID_LEGACY_VF_RX_PROD);

		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Queue%s is meant for VF rxq[%02x]\n",
			   b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size, void __iomem **pp_prod)
{
	u32 init_prod_val = 0;

	*pp_prod = p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
					bd_max_bytes,
					bd_chain_phys_addr,
					cqe_pbl_addr, cqe_pbl_size);
}

static int
qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u16 bd_max_bytes,
		       dma_addr_t bd_chain_phys_addr,
		       dma_addr_t cqe_pbl_addr,
		       u16 cqe_pbl_size,
		       struct qed_rxq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	/* Allocate a CID for the queue */
	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
	if (!p_cid)
		return -ENOMEM;

	if (IS_PF(p_hwfn->cdev)) {
		rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
					       bd_max_bytes,
					       bd_chain_phys_addr,
					       cqe_pbl_addr, cqe_pbl_size,
					       &p_ret_params->p_prod);
	} else {
		rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr,
					 cqe_pbl_size, &p_ret_params->p_prod);
	}

	/* Provide the caller with a reference to the queue handle */
	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}
int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
				void **pp_rxq_handles,
				u8 num_rxqs,
				u8 complete_cqe_flg,
				u8 complete_event_flg,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_queue_cid *p_cid;
	int rc = -EINVAL;
	u8 i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];

		/* Get SPQ entry */
		init_data.cid = p_cid->cid;
		init_data.opaque_fid = p_cid->opaque_fid;

		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 ETH_RAMROD_RX_QUEUE_UPDATE,
					 PROTOCOLID_ETH, &init_data);
		if (rc)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;
		p_ramrod->vport_id = p_cid->abs.vport_id;

		p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = qed_spq_post(p_hwfn, p_ent, NULL);
		if (rc)
			return rc;
	}

	return rc;
}

static int
qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 bool b_eq_completion_only, bool b_cqe_completion)
{
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
				      !b_eq_completion_only) ||
				     b_cqe_completion;
	p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
				       b_eq_completion_only;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
			  void *p_rxq,
			  bool eq_completion_only, bool cqe_completion)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
	int rc = -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
					      eq_completion_only,
					      cqe_completion);
	else
		rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}
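/* Completion-mode summary for the Rx stop flow above (restates the code): a
 * PF stopping its own queue completes via CQE unless eq_completion_only was
 * requested; a queue stopped on a VF's behalf always completes through the
 * event queue so the PF can relay the answer, with cqe_completion optionally
 * forcing a CQE completion as well.
 */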
int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
{
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = p_cid->abs.vport_id;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;

	p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);

	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u8 tc,
			  dma_addr_t pbl_addr,
			  u16 pbl_size, void __iomem **pp_doorbell)
{
	int rc;

	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
				      pbl_addr, pbl_size,
				      qed_get_cm_pq_idx_mcos(p_hwfn, tc));
	if (rc)
		return rc;

	/* Provide the caller with the necessary return values */
	*pp_doorbell = p_hwfn->doorbells +
		       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);

	return 0;
}

static int
qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u8 tc,
		       dma_addr_t pbl_addr,
		       u16 pbl_size,
		       struct qed_txq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
	if (!p_cid)
		return -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
					       pbl_addr, pbl_size,
					       &p_ret_params->p_doorbell);
	else
		rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
					 pbl_addr, pbl_size,
					 &p_ret_params->p_doorbell);

	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

static int
qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
	int rc;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
	else
		rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}

static void qed_set_fw_mac_addr(__le16 *fw_msb,
				__le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}
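/* Byte-order example for qed_set_fw_mac_addr() above: mac aa:bb:cc:dd:ee:ff
 * is stored as fw_msb = {bb, aa}, fw_mid = {dd, cc}, fw_lsb = {ff, ee}, i.e.
 * each 16-bit word carries two MAC bytes swapped into the layout the
 * firmware expects.
 */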
static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct qed_sp_init_data init_data;
	enum eth_filter_action action;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			  &vport_to_add_to);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

	switch (p_filter_cmd->opcode) {
	case QED_FILTER_REPLACE:
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		memcpy(p_second_filter, p_first_filter,
		       sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return -EINVAL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :
					   vport_to_add_to;
	}

	return 0;
}
int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct qed_filter_ucast *p_filter_cmd,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct eth_filter_cmd_header *p_header;
	int rc;

	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				     &p_ramrod, &p_ent,
				     comp_mode, p_comp_data);
	if (rc) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "Unicast filter command failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %02x:%02x:%02x:%02x:%02x:%02x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0], p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2], p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4], p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return 0;
}
1409 "VLAN" : "MAC & VLAN"), 1410 p_ramrod->filter_cmd_hdr.cmd_cnt, 1411 p_filter_cmd->is_rx_filter, 1412 p_filter_cmd->is_tx_filter); 1413 DP_VERBOSE(p_hwfn, QED_MSG_SP, 1414 "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n", 1415 p_filter_cmd->vport_to_add_to, 1416 p_filter_cmd->vport_to_remove_from, 1417 p_filter_cmd->mac[0], 1418 p_filter_cmd->mac[1], 1419 p_filter_cmd->mac[2], 1420 p_filter_cmd->mac[3], 1421 p_filter_cmd->mac[4], 1422 p_filter_cmd->mac[5], 1423 p_filter_cmd->vlan); 1424 1425 return 0; 1426 } 1427 1428 /******************************************************************************* 1429 * Description: 1430 * Calculates crc 32 on a buffer 1431 * Note: crc32_length MUST be aligned to 8 1432 * Return: 1433 ******************************************************************************/ 1434 static u32 qed_calc_crc32c(u8 *crc32_packet, 1435 u32 crc32_length, u32 crc32_seed, u8 complement) 1436 { 1437 u32 byte = 0, bit = 0, crc32_result = crc32_seed; 1438 u8 msb = 0, current_byte = 0; 1439 1440 if ((!crc32_packet) || 1441 (crc32_length == 0) || 1442 ((crc32_length % 8) != 0)) 1443 return crc32_result; 1444 for (byte = 0; byte < crc32_length; byte++) { 1445 current_byte = crc32_packet[byte]; 1446 for (bit = 0; bit < 8; bit++) { 1447 msb = (u8)(crc32_result >> 31); 1448 crc32_result = crc32_result << 1; 1449 if (msb != (0x1 & (current_byte >> bit))) { 1450 crc32_result = crc32_result ^ CRC32_POLY; 1451 crc32_result |= 1; /*crc32_result[0] = 1;*/ 1452 } 1453 } 1454 } 1455 return crc32_result; 1456 } 1457 1458 static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len) 1459 { 1460 u32 packet_buf[2] = { 0 }; 1461 1462 memcpy((u8 *)(&packet_buf[0]), &mac[0], 6); 1463 return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0); 1464 } 1465 1466 u8 qed_mcast_bin_from_mac(u8 *mac) 1467 { 1468 u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, 1469 mac, ETH_ALEN); 1470 1471 return crc & 0xff; 1472 } 1473 1474 static int 1475 qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, 1476 u16 opaque_fid, 1477 struct qed_filter_mcast *p_filter_cmd, 1478 enum spq_mode comp_mode, 1479 struct qed_spq_comp_cb *p_comp_data) 1480 { 1481 unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; 1482 struct vport_update_ramrod_data *p_ramrod = NULL; 1483 struct qed_spq_entry *p_ent = NULL; 1484 struct qed_sp_init_data init_data; 1485 u8 abs_vport_id = 0; 1486 int rc, i; 1487 1488 if (p_filter_cmd->opcode == QED_FILTER_ADD) 1489 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, 1490 &abs_vport_id); 1491 else 1492 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, 1493 &abs_vport_id); 1494 if (rc) 1495 return rc; 1496 1497 /* Get SPQ entry */ 1498 memset(&init_data, 0, sizeof(init_data)); 1499 init_data.cid = qed_spq_get_cid(p_hwfn); 1500 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1501 init_data.comp_mode = comp_mode; 1502 init_data.p_comp_data = p_comp_data; 1503 1504 rc = qed_sp_init_request(p_hwfn, &p_ent, 1505 ETH_RAMROD_VPORT_UPDATE, 1506 PROTOCOLID_ETH, &init_data); 1507 if (rc) { 1508 DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); 1509 return rc; 1510 } 1511 1512 p_ramrod = &p_ent->ramrod.vport_update; 1513 p_ramrod->common.update_approx_mcast_flg = 1; 1514 1515 /* explicitly clear out the entire vector */ 1516 memset(&p_ramrod->approx_mcast.bins, 0, 1517 sizeof(p_ramrod->approx_mcast.bins)); 1518 memset(bins, 0, sizeof(unsigned long) * 1519 ETH_MULTICAST_MAC_BINS_IN_REGS); 1520 /* filter ADD op is explicit set op and it removes 1521 * 
static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_mcast *p_filter_cmd,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc, i;

	if (p_filter_cmd->opcode == QED_FILTER_ADD)
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
				  &abs_vport_id);
	else
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
				  &abs_vport_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));
	memset(bins, 0, sizeof(unsigned long) *
	       ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* filter ADD op is an explicit set op and it removes
	 * any existing filters for the vport
	 */
	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, bins);
		}

		/* Convert to correct endianness */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			struct vport_update_ramrod_mcast *p_ramrod_bins;
			u32 *p_bins = (u32 *)bins;

			p_ramrod_bins = &p_ramrod->approx_mcast;
			p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_filter_mcast_cmd(struct qed_dev *cdev,
				struct qed_filter_mcast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     p_filter_cmd->opcode != QED_FILTER_REMOVE) ||
	    p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS)
		return -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
	}
	return rc;
}

static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
		if (rc)
			break;
	}

	return rc;
}
/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
		    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
	}
}

static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
				       statistics_bin);

	memset(&pstats, 0, sizeof(pstats));
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->common.tx_ucast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->common.tx_mcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->common.tx_bcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->common.tx_ucast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->common.tx_mcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->common.tx_bcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->common.tx_err_drop_pkts +=
	    HILO_64_REGPAIR(pstats.error_drop_pkts);
}

static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->cdev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	memset(&tstats, 0, sizeof(tstats));
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->common.mftag_filter_discards +=
	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->common.mac_filter_discards +=
	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}

static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
		    USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
	}
}

static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
				       statistics_bin);

	memset(&ustats, 0, sizeof(ustats));
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->common.rx_ucast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->common.rx_mcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->common.rx_bcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_MSDM_RAM +
		    MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
	}
}
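/* Note (assumption about the helper macro): the storm statistics are 64-bit
 * counters exposed as {hi, lo} register pairs, and HILO_64_REGPAIR()
 * recombines them, conceptually ((u64)hi << 32) | lo; the counters are
 * accumulated with '+=' so multi-hwfn devices sum their per-function values.
 */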
static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
				       statistics_bin);

	memset(&mstats, 0, sizeof(mstats));
	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

	p_stats->common.no_buff_discards +=
	    HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->common.packet_too_big_discard +=
	    HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->common.tpa_coalesced_pkts +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->common.tpa_coalesced_events +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->common.tpa_aborts_num +=
	    HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->common.tpa_coalesced_bytes +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}

static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_eth_stats *p_stats)
{
	struct qed_eth_stats_common *p_common = &p_stats->common;
	struct port_stats port_stats;
	int j;

	memset(&port_stats, 0, sizeof(port_stats));

	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			p_hwfn->mcp_info->port_addr +
			offsetof(struct public_port, stats),
			sizeof(port_stats));

	p_common->rx_64_byte_packets += port_stats.eth.r64;
	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_common->rx_crc_errors += port_stats.eth.rfcs;
	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_common->rx_pause_frames += port_stats.eth.rxpf;
	p_common->rx_pfc_frames += port_stats.eth.rxpp;
	p_common->rx_align_errors += port_stats.eth.raln;
	p_common->rx_carrier_errors += port_stats.eth.rfcr;
	p_common->rx_oversize_packets += port_stats.eth.rovr;
	p_common->rx_jabbers += port_stats.eth.rjbr;
	p_common->rx_undersize_packets += port_stats.eth.rund;
	p_common->rx_fragments += port_stats.eth.rfrg;
	p_common->tx_64_byte_packets += port_stats.eth.t64;
	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_common->tx_pause_frames += port_stats.eth.txpf;
	p_common->tx_pfc_frames += port_stats.eth.txpp;
	p_common->rx_mac_bytes += port_stats.eth.rbyte;
	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_common->tx_mac_bytes += port_stats.eth.tbyte;
	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
	for (j = 0; j < 8; j++) {
		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
		p_common->brb_discards += port_stats.brb.brb_discard[j];
	}

	if (QED_IS_BB(p_hwfn->cdev)) {
		struct qed_eth_stats_bb *p_bb = &p_stats->bb;

		p_bb->rx_1519_to_1522_byte_packets +=
		    port_stats.eth.u0.bb0.r1522;
		p_bb->rx_1519_to_2047_byte_packets +=
		    port_stats.eth.u0.bb0.r2047;
		p_bb->rx_2048_to_4095_byte_packets +=
		    port_stats.eth.u0.bb0.r4095;
		p_bb->rx_4096_to_9216_byte_packets +=
		    port_stats.eth.u0.bb0.r9216;
		p_bb->rx_9217_to_16383_byte_packets +=
		    port_stats.eth.u0.bb0.r16383;
		p_bb->tx_1519_to_2047_byte_packets +=
		    port_stats.eth.u1.bb1.t2047;
		p_bb->tx_2048_to_4095_byte_packets +=
		    port_stats.eth.u1.bb1.t4095;
		p_bb->tx_4096_to_9216_byte_packets +=
		    port_stats.eth.u1.bb1.t9216;
		p_bb->tx_9217_to_16383_byte_packets +=
		    port_stats.eth.u1.bb1.t16383;
		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
	} else {
		struct qed_eth_stats_ah *p_ah = &p_stats->ah;

		p_ah->rx_1519_to_max_byte_packets +=
		    port_stats.eth.u0.ah0.r1519_to_max;
		p_ah->tx_1519_to_max_byte_packets +=
		    port_stats.eth.u1.ah1.t1519_to_max;
	}
}
static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_eth_stats *p_stats)
{
	struct qed_eth_stats_common *p_common = &p_stats->common;
	struct port_stats port_stats;
	int j;

	memset(&port_stats, 0, sizeof(port_stats));

	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			p_hwfn->mcp_info->port_addr +
			offsetof(struct public_port, stats),
			sizeof(port_stats));

	p_common->rx_64_byte_packets += port_stats.eth.r64;
	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_common->rx_crc_errors += port_stats.eth.rfcs;
	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_common->rx_pause_frames += port_stats.eth.rxpf;
	p_common->rx_pfc_frames += port_stats.eth.rxpp;
	p_common->rx_align_errors += port_stats.eth.raln;
	p_common->rx_carrier_errors += port_stats.eth.rfcr;
	p_common->rx_oversize_packets += port_stats.eth.rovr;
	p_common->rx_jabbers += port_stats.eth.rjbr;
	p_common->rx_undersize_packets += port_stats.eth.rund;
	p_common->rx_fragments += port_stats.eth.rfrg;
	p_common->tx_64_byte_packets += port_stats.eth.t64;
	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_common->tx_pause_frames += port_stats.eth.txpf;
	p_common->tx_pfc_frames += port_stats.eth.txpp;
	p_common->rx_mac_bytes += port_stats.eth.rbyte;
	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_common->tx_mac_bytes += port_stats.eth.tbyte;
	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;

	/* Sum the 8 per-index BRB discard/truncate counters */
	for (j = 0; j < 8; j++) {
		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
		p_common->brb_discards += port_stats.brb.brb_discard[j];
	}

	if (QED_IS_BB(p_hwfn->cdev)) {
		struct qed_eth_stats_bb *p_bb = &p_stats->bb;

		p_bb->rx_1519_to_1522_byte_packets +=
		    port_stats.eth.u0.bb0.r1522;
		p_bb->rx_1519_to_2047_byte_packets +=
		    port_stats.eth.u0.bb0.r2047;
		p_bb->rx_2048_to_4095_byte_packets +=
		    port_stats.eth.u0.bb0.r4095;
		p_bb->rx_4096_to_9216_byte_packets +=
		    port_stats.eth.u0.bb0.r9216;
		p_bb->rx_9217_to_16383_byte_packets +=
		    port_stats.eth.u0.bb0.r16383;
		p_bb->tx_1519_to_2047_byte_packets +=
		    port_stats.eth.u1.bb1.t2047;
		p_bb->tx_2048_to_4095_byte_packets +=
		    port_stats.eth.u1.bb1.t4095;
		p_bb->tx_4096_to_9216_byte_packets +=
		    port_stats.eth.u1.bb1.t9216;
		p_bb->tx_9217_to_16383_byte_packets +=
		    port_stats.eth.u1.bb1.t16383;
		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
	} else {
		struct qed_eth_stats_ah *p_ah = &p_stats->ah;

		p_ah->rx_1519_to_max_byte_packets +=
		    port_stats.eth.u0.ah0.r1519_to_max;
		/* Accumulate (+=) like every other counter here; plain
		 * assignment would overwrite the other hwfn's contribution.
		 */
		p_ah->tx_1519_to_max_byte_packets +=
		    port_stats.eth.u1.ah1.t1519_to_max;
	}
}

static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_eth_stats *stats,
				  u16 statistics_bin, bool b_get_port_stats)
{
	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

	if (b_get_port_stats && p_hwfn->mcp_info)
		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}
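/* Aggregate the statistics of every hw-function into @stats. A PF must
 * hold a PTT window for the duration of the readout; a VF passes NULL.
 */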
static void _qed_get_vport_stats(struct qed_dev *cdev,
				 struct qed_eth_stats *stats)
{
	u8 fw_vport = 0;
	int i;

	memset(stats, 0, sizeof(*stats));

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;

		if (IS_PF(cdev)) {
			/* Resolve the absolute index of the main
			 * (first relative) vport.
			 */
			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
				DP_ERR(p_hwfn, "No vport available!\n");
				goto out;
			}
		}

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
				      IS_PF(cdev) ? true : false);

out:
		if (IS_PF(cdev) && p_ptt)
			qed_ptt_release(p_hwfn, p_ptt);
	}
}

void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
	u32 i;

	if (!cdev) {
		memset(stats, 0, sizeof(*stats));
		return;
	}

	_qed_get_vport_stats(cdev, stats);

	if (!cdev->reset_stats)
		return;

	/* Reduce the statistics by the baseline captured at the last reset */
	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
}

/* zeroes V-PORT specific portion of stats (Port stats remain untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;
		u32 addr = 0, len = 0;

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		memset(&mstats, 0, sizeof(mstats));
		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		memset(&ustats, 0, sizeof(ustats));
		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		memset(&pstats, 0, sizeof(pstats));
		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		if (IS_PF(cdev))
			qed_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 */
	if (!cdev->reset_stats)
		DP_INFO(cdev, "Reset stats not allocated\n");
	else
		_qed_get_vport_stats(cdev, cdev->reset_stats);
}
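/* Enable/disable the RFS searcher for this PF. When enabling, the set of
 * protocols to be matched (TCP/UDP over IPv4/IPv6) is taken from
 * @p_cfg_params.
 */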
"Enable" : "Disable"); 1994 } 1995 1996 static int 1997 qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 1998 struct qed_spq_comp_cb *p_cb, 1999 dma_addr_t p_addr, u16 length, u16 qid, 2000 u8 vport_id, bool b_is_add) 2001 { 2002 struct rx_update_gft_filter_data *p_ramrod = NULL; 2003 struct qed_spq_entry *p_ent = NULL; 2004 struct qed_sp_init_data init_data; 2005 u16 abs_rx_q_id = 0; 2006 u8 abs_vport_id = 0; 2007 int rc = -EINVAL; 2008 2009 rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); 2010 if (rc) 2011 return rc; 2012 2013 rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id); 2014 if (rc) 2015 return rc; 2016 2017 /* Get SPQ entry */ 2018 memset(&init_data, 0, sizeof(init_data)); 2019 init_data.cid = qed_spq_get_cid(p_hwfn); 2020 2021 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 2022 2023 if (p_cb) { 2024 init_data.comp_mode = QED_SPQ_MODE_CB; 2025 init_data.p_comp_data = p_cb; 2026 } else { 2027 init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 2028 } 2029 2030 rc = qed_sp_init_request(p_hwfn, &p_ent, 2031 ETH_RAMROD_GFT_UPDATE_FILTER, 2032 PROTOCOLID_ETH, &init_data); 2033 if (rc) 2034 return rc; 2035 2036 p_ramrod = &p_ent->ramrod.rx_update_gft; 2037 DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr); 2038 p_ramrod->pkt_hdr_length = cpu_to_le16(length); 2039 p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id); 2040 p_ramrod->vport_id = abs_vport_id; 2041 p_ramrod->filter_type = RFS_FILTER_TYPE; 2042 p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER; 2043 2044 DP_VERBOSE(p_hwfn, QED_MSG_SP, 2045 "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n", 2046 abs_vport_id, abs_rx_q_id, 2047 b_is_add ? "Adding" : "Removing", (u64)p_addr, length); 2048 2049 return qed_spq_post(p_hwfn, p_ent, NULL); 2050 } 2051 2052 static int qed_fill_eth_dev_info(struct qed_dev *cdev, 2053 struct qed_dev_eth_info *info) 2054 { 2055 int i; 2056 2057 memset(info, 0, sizeof(*info)); 2058 2059 info->num_tc = 1; 2060 2061 if (IS_PF(cdev)) { 2062 int max_vf_vlan_filters = 0; 2063 int max_vf_mac_filters = 0; 2064 2065 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 2066 u16 num_queues = 0; 2067 2068 /* Since the feature controls only queue-zones, 2069 * make sure we have the contexts [rx, tx, xdp] to 2070 * match. 2071 */ 2072 for_each_hwfn(cdev, i) { 2073 struct qed_hwfn *hwfn = &cdev->hwfns[i]; 2074 u16 l2_queues = (u16)FEAT_NUM(hwfn, 2075 QED_PF_L2_QUE); 2076 u16 cids; 2077 2078 cids = hwfn->pf_params.eth_pf_params.num_cons; 2079 num_queues += min_t(u16, l2_queues, cids / 3); 2080 } 2081 2082 /* queues might theoretically be >256, but interrupts' 2083 * upper-limit guarantes that it would fit in a u8. 
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
				 struct qed_dev_eth_info *info)
{
	int i;

	memset(info, 0, sizeof(*info));

	info->num_tc = 1;

	if (IS_PF(cdev)) {
		int max_vf_vlan_filters = 0;
		int max_vf_mac_filters = 0;

		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
			u16 num_queues = 0;

			/* Since the feature controls only queue-zones,
			 * make sure we have the contexts [rx, tx, xdp] to
			 * match.
			 */
			for_each_hwfn(cdev, i) {
				struct qed_hwfn *hwfn = &cdev->hwfns[i];
				u16 l2_queues = (u16)FEAT_NUM(hwfn,
							      QED_PF_L2_QUE);
				u16 cids;

				cids = hwfn->pf_params.eth_pf_params.num_cons;
				num_queues += min_t(u16, l2_queues, cids / 3);
			}

			/* queues might theoretically be >256, but interrupts'
			 * upper-limit guarantees that it would fit in a u8.
			 */
			if (cdev->int_params.fp_msix_cnt) {
				u8 irqs = cdev->int_params.fp_msix_cnt;

				info->num_queues = (u8)min_t(u16,
							     num_queues, irqs);
			}
		} else {
			info->num_queues = cdev->num_hwfns;
		}

		if (IS_QED_SRIOV(cdev)) {
			max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
					      QED_ETH_VF_NUM_VLAN_FILTERS;
			max_vf_mac_filters = cdev->p_iov_info->total_vfs *
					     QED_ETH_VF_NUM_MAC_FILTERS;
		}
		info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						  QED_VLAN) -
					 max_vf_vlan_filters;
		info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						 QED_MAC) -
					max_vf_mac_filters;

		ether_addr_copy(info->port_mac,
				cdev->hwfns[0].hw_info.hw_mac_addr);
	} else {
		qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
		if (cdev->num_hwfns > 1) {
			u8 queues = 0;

			qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
			info->num_queues += queues;
		}

		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
					    (u8 *)&info->num_vlan_filters);
		qed_vf_get_num_mac_filters(&cdev->hwfns[0],
					   (u8 *)&info->num_mac_filters);
		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);

		info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
	}

	qed_fill_dev_info(cdev, &info->common);

	if (IS_VF(cdev))
		eth_zero_addr(info->common.hw_mac);

	return 0;
}

static void qed_register_eth_ops(struct qed_dev *cdev,
				 struct qed_eth_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.eth = ops;
	cdev->ops_cookie = cookie;

	/* For VF, we start bulletin reading */
	if (IS_VF(cdev))
		qed_vf_start_iov_wq(cdev);
}

static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
{
	if (IS_PF(cdev))
		return true;

	return qed_vf_check_mac(&cdev->hwfns[0], mac);
}
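/* Start the vport on every hw-function; in CMT the logical vport spans
 * both engines, so the ramrod is issued once per hwfn. GRO support is
 * mapped onto the HW TPA engine below.
 */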
static int qed_start_vport(struct qed_dev *cdev,
			   struct qed_start_vport_params *params)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_sp_vport_start_params start = { 0 };
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
						      QED_TPA_MODE_NONE;
		start.remove_inner_vlan = params->remove_inner_vlan;
		start.only_untagged = true;	/* untagged only */
		start.drop_ttl0 = params->drop_ttl0;
		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
		start.handle_ptp_pkts = params->handle_ptp_pkts;
		start.vport_id = params->vport_id;
		start.max_buffers_per_cqe = 16;
		start.mtu = params->mtu;

		rc = qed_sp_vport_start(p_hwfn, &start);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT\n");
			return rc;
		}

		rc = qed_hw_start_fastpath(p_hwfn);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT fastpath\n");
			return rc;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Started V-PORT %d with MTU %d\n",
			   start.vport_id, start.mtu);
	}

	if (params->clear_stats)
		qed_reset_vport_stats(cdev);

	return 0;
}

static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_sp_vport_stop(p_hwfn,
				       p_hwfn->hw_info.opaque_fid, vport_id);
		if (rc) {
			DP_ERR(cdev, "Failed to stop VPORT\n");
			return rc;
		}
	}

	return 0;
}
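/* RSS needs special care in CMT mode: the single 128-entry indirection
 * table provided by the protocol driver is split per engine, with entry i
 * landing in slot i / num_hwfns of the owning hwfn's table. As a worked
 * example (assuming queues were opened round-robin across engines, as the
 * queue-start helpers further below do): with two engines, even entries
 * fill engine 0's table and odd entries fill engine 1's, each 64 entries
 * deep, which is why rss_table_size_log drops from 7 (128) to 6 (64).
 */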
static int qed_update_vport_rss(struct qed_dev *cdev,
				struct qed_update_vport_rss_params *input,
				struct qed_rss_params *rss)
{
	int i, fn;

	/* Update configuration with what's correct regardless of CMT */
	rss->update_rss_config = 1;
	rss->rss_enable = 1;
	rss->update_rss_capabilities = 1;
	rss->update_rss_ind_table = 1;
	rss->update_rss_key = 1;
	rss->rss_caps = input->rss_caps;
	memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));

	/* In a regular scenario, we'd simply need to take input handlers.
	 * But in CMT, we'd have to split the handlers according to the
	 * engine they were configured on. We'd then have to understand
	 * whether RSS is really required, since 2-queues on CMT doesn't
	 * require RSS.
	 */
	if (cdev->num_hwfns == 1) {
		memcpy(rss->rss_ind_table,
		       input->rss_ind_table,
		       QED_RSS_IND_TABLE_SIZE * sizeof(void *));
		rss->rss_table_size_log = 7;
		return 0;
	}

	/* Start by copying the non-specific information to the 2nd copy */
	memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));

	/* CMT should be round-robin */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		struct qed_queue_cid *cid = input->rss_ind_table[i];
		struct qed_rss_params *t_rss;

		if (cid->p_owner == QED_LEADING_HWFN(cdev))
			t_rss = &rss[0];
		else
			t_rss = &rss[1];

		t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
	}

	/* Make sure RSS is actually required */
	for_each_hwfn(cdev, fn) {
		for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
			if (rss[fn].rss_ind_table[i] !=
			    rss[fn].rss_ind_table[0])
				break;
		}
		if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
			DP_VERBOSE(cdev, NETIF_MSG_IFUP,
				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
			return -EINVAL;
		}
		rss[fn].rss_table_size_log = 6;
	}

	return 0;
}

static int qed_update_vport(struct qed_dev *cdev,
			    struct qed_update_vport_params *params)
{
	struct qed_sp_vport_update_params sp_params;
	struct qed_rss_params *rss;
	int rc = 0, i;

	if (!cdev)
		return -ENODEV;

	rss = vzalloc(sizeof(*rss) * cdev->num_hwfns);
	if (!rss)
		return -ENOMEM;

	memset(&sp_params, 0, sizeof(sp_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;
	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
	sp_params.tx_switching_flg = params->tx_switching_flg;
	sp_params.accept_any_vlan = params->accept_any_vlan;
	sp_params.update_accept_any_vlan_flg =
	    params->update_accept_any_vlan_flg;

	/* Prepare the RSS configuration; fall back to no-RSS on failure */
	if (params->update_rss_flg)
		if (qed_update_vport_rss(cdev, &params->rss_params, rss))
			params->update_rss_flg = 0;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (params->update_rss_flg)
			sp_params.rss_params = &rss[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = qed_sp_vport_update(p_hwfn, &sp_params,
					 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(cdev, "Failed to update VPORT\n");
			goto out;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

out:
	vfree(rss);
	return rc;
}
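/* The queue start/stop helpers translate the protocol driver's global
 * view into an engine-local one: rss_num modulo the hwfn count selects
 * the engine, and the queue-id is divided by it to get the local index.
 */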
static int qed_start_rxq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct qed_rxq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_rx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params,
				    bd_max_bytes,
				    bd_chain_phys_addr,
				    cqe_pbl_addr, cqe_pbl_size, ret_params);
	if (rc) {
		DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->p_sb->igu_sb_id);

	return 0;
}

static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
	if (rc) {
		DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
		return rc;
	}

	return 0;
}

static int qed_start_txq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 struct qed_txq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_tx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params, 0,
				    pbl_addr, pbl_size, ret_params);
	if (rc) {
		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->p_sb->igu_sb_id);

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
	int rc;

	rc = qed_hw_stop_fastpath(cdev);
	if (rc) {
		DP_ERR(cdev, "Failed to stop Fastpath\n");
		return rc;
	}

	return 0;
}

static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_eth_tx_queue_stop(p_hwfn, handle);
	if (rc) {
		DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
		return rc;
	}

	return 0;
}
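/* Apply UDP tunnel (VXLAN/GENEVE) port updates on every hwfn and, for an
 * SR-IOV PF, mirror the new ports into each VF's bulletin so the VFs can
 * learn them asynchronously.
 */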
static int qed_tunn_configure(struct qed_dev *cdev,
			      struct qed_tunn_params *tunn_params)
{
	struct qed_tunnel_info tunn_info;
	int i, rc;

	memset(&tunn_info, 0, sizeof(tunn_info));
	if (tunn_params->update_vxlan_port) {
		tunn_info.vxlan_port.b_update_port = true;
		tunn_info.vxlan_port.port = tunn_params->vxlan_port;
	}

	if (tunn_params->update_geneve_port) {
		tunn_info.geneve_port.b_update_port = true;
		tunn_info.geneve_port.port = tunn_params->geneve_port;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;
		struct qed_tunnel_info *tun;

		tun = &hwfn->cdev->tunnel;
		if (IS_PF(cdev)) {
			p_ptt = qed_ptt_acquire(hwfn);
			if (!p_ptt)
				return -EAGAIN;
		} else {
			p_ptt = NULL;
		}

		rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
					       QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			if (IS_PF(cdev))
				qed_ptt_release(hwfn, p_ptt);
			return rc;
		}

		if (IS_PF_SRIOV(hwfn)) {
			u16 vxlan_port, geneve_port;
			int j;

			vxlan_port = tun->vxlan_port.port;
			geneve_port = tun->geneve_port.port;

			qed_for_each_vf(hwfn, j) {
				qed_iov_bulletin_set_udp_ports(hwfn, j,
							       vxlan_port,
							       geneve_port);
			}

			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		}
		if (IS_PF(cdev))
			qed_ptt_release(hwfn, p_ptt);
	}

	return 0;
}

static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
					enum qed_filter_rx_mode_type type)
{
	struct qed_filter_accept_flags accept_flags;

	memset(&accept_flags, 0, sizeof(accept_flags));

	accept_flags.update_rx_mode_config = 1;
	accept_flags.update_tx_mode_config = 1;
	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;
	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
	}

	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
				     QED_SPQ_MODE_CB, NULL);
}
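/* Translate a protocol-driver unicast filter request into a
 * qed_filter_ucast command. A filter must carry at least a MAC or a VLAN;
 * when both are valid a combined MAC+VLAN filter is configured.
 */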
static int qed_configure_filter_ucast(struct qed_dev *cdev,
				      struct qed_filter_ucast_params *params)
{
	struct qed_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = QED_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = QED_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
		/* Don't fall through and issue a bogus command */
		return -EINVAL;
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = QED_FILTER_MAC_VLAN;
		ether_addr_copy(ucast.mac, params->mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = QED_FILTER_MAC;
		ether_addr_copy(ucast.mac, params->mac);
	} else {
		ucast.type = QED_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_mcast(struct qed_dev *cdev,
				      struct qed_filter_mcast_params *params)
{
	struct qed_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = QED_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
		/* Don't fall through and issue a bogus command */
		return -EINVAL;
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy(mcast.mac[i], params->mac[i]);

	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter(struct qed_dev *cdev,
				struct qed_filter_params *params)
{
	enum qed_filter_rx_mode_type accept_flags;

	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		accept_flags = params->filter.accept_flags;
		return qed_configure_filter_rx_mode(cdev, accept_flags);
	default:
		DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
		return -EINVAL;
	}
}

static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_arfs_config_params arfs_config_params;

	memset(&arfs_config_params, 0, sizeof(arfs_config_params));
	arfs_config_params.tcp = true;
	arfs_config_params.udp = true;
	arfs_config_params.ipv4 = true;
	arfs_config_params.ipv6 = true;
	arfs_config_params.arfs_enable = en_searcher;

	qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
				&arfs_config_params);
	return 0;
}

static void
qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
			     void *cookie, union event_ring_data *data,
			     u8 fw_return_code)
{
	struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
	void *dev = p_hwfn->cdev->ops_cookie;

	op->arfs_filter_op(dev, cookie, fw_return_code);
}

static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
					 dma_addr_t mapping, u16 length,
					 u16 vport_id, u16 rx_queue_id,
					 bool add_filter)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_spq_comp_cb cb;
	int rc = -EINVAL;

	cb.function = qed_arfs_sp_response_handler;
	cb.cookie = cookie;

	rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
					     &cb, mapping, length, rx_queue_id,
					     vport_id, add_filter);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to issue a-RFS filter configuration\n");
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
			   "Successfully issued a-RFS filter configuration\n");

	return rc;
}

static int qed_fp_cqe_completion(struct qed_dev *dev,
				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
				      cqe);
}

#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif

#ifdef CONFIG_DCB
extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
#endif

extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;

static const struct qed_eth_ops qed_eth_ops_pass = {
	.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
	.iov = &qed_iov_ops_pass,
#endif
#ifdef CONFIG_DCB
	.dcb = &qed_dcbnl_ops_pass,
#endif
	.ptp = &qed_ptp_ops_pass,
	.fill_dev_info = &qed_fill_eth_dev_info,
	.register_ops = &qed_register_eth_ops,
	.check_mac = &qed_check_mac,
	.vport_start = &qed_start_vport,
	.vport_stop = &qed_stop_vport,
	.vport_update = &qed_update_vport,
	.q_rx_start = &qed_start_rxq,
	.q_rx_stop = &qed_stop_rxq,
	.q_tx_start = &qed_start_txq,
	.q_tx_stop = &qed_stop_txq,
	.filter_config = &qed_configure_filter,
	.fastpath_stop = &qed_fastpath_stop,
	.eth_cqe_completion = &qed_fp_cqe_completion,
	.get_vport_stats = &qed_get_vport_stats,
	.tunn_config = &qed_tunn_configure,
	.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
	.configure_arfs_searcher = &qed_configure_arfs_searcher,
};
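/* A minimal sketch of how a protocol driver consumes these ops (the
 * 'edev' and 'qede_ll_ops' names are illustrative; qede is the in-tree
 * consumer):
 *
 *	const struct qed_eth_ops *ops = qed_get_eth_ops();
 *
 *	ops->fill_dev_info(cdev, &edev->dev_info);
 *	ops->register_ops(cdev, &qede_ll_ops, edev);
 *	ops->vport_start(cdev, &vport_params);
 */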
const struct qed_eth_ops *qed_get_eth_ops(void)
{
	return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

void qed_put_eth_ops(void)
{
	/* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);