/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

enum qed_rss_caps {
	QED_RSS_IPV4		= 0x1,
	QED_RSS_IPV6		= 0x2,
	QED_RSS_IPV4_TCP	= 0x4,
	QED_RSS_IPV6_TCP	= 0x8,
	QED_RSS_IPV4_UDP	= 0x10,
	QED_RSS_IPV6_UDP	= 0x20,
};

/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
#define QED_RSS_IND_TABLE_SIZE 128
#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */

struct qed_rss_params {
	u8	update_rss_config;
	u8	rss_enable;
	u8	rss_eng_id;
	u8	update_rss_capabilities;
	u8	update_rss_ind_table;
	u8	update_rss_key;
	u8	rss_caps;
	u8	rss_table_size_log;
	u16	rss_ind_table[QED_RSS_IND_TABLE_SIZE];
	u32	rss_key[QED_RSS_KEY_SIZE];
};

enum qed_filter_opcode {
	QED_FILTER_ADD,
	QED_FILTER_REMOVE,
	QED_FILTER_MOVE,
	QED_FILTER_REPLACE,	/* Delete all MACs and add new one instead */
	QED_FILTER_FLUSH,	/* Removes all filters */
};

enum qed_filter_ucast_type {
	QED_FILTER_MAC,
	QED_FILTER_VLAN,
	QED_FILTER_MAC_VLAN,
	QED_FILTER_INNER_MAC,
	QED_FILTER_INNER_VLAN,
	QED_FILTER_INNER_PAIR,
	QED_FILTER_INNER_MAC_VNI_PAIR,
	QED_FILTER_MAC_VNI_PAIR,
	QED_FILTER_VNI,
};

struct qed_filter_ucast {
	enum qed_filter_opcode		opcode;
	enum qed_filter_ucast_type	type;
	u8				is_rx_filter;
	u8				is_tx_filter;
	u8				vport_to_add_to;
	u8				vport_to_remove_from;
	unsigned char			mac[ETH_ALEN];
	u8				assert_on_error;
	u16				vlan;
	u32				vni;
};

struct qed_filter_mcast {
	/* MOVE is not supported for multicast */
	enum qed_filter_opcode	opcode;
	u8			vport_to_add_to;
	u8			vport_to_remove_from;
	u8			num_mc_addrs;
#define QED_MAX_MC_ADDRS	64
	unsigned char		mac[QED_MAX_MC_ADDRS][ETH_ALEN];
};

struct qed_filter_accept_flags {
	u8	update_rx_mode_config;
	u8	update_tx_mode_config;
	u8	rx_accept_filter;
	u8	tx_accept_filter;
#define QED_ACCEPT_NONE			0x01
#define QED_ACCEPT_UCAST_MATCHED	0x02
#define QED_ACCEPT_UCAST_UNMATCHED	0x04
#define QED_ACCEPT_MCAST_MATCHED	0x08
#define QED_ACCEPT_MCAST_UNMATCHED	0x10
#define QED_ACCEPT_BCAST		0x20
};

struct qed_sp_vport_update_params {
	u16				opaque_fid;
	u8				vport_id;
	u8				update_vport_active_rx_flg;
	u8				vport_active_rx_flg;
	u8				update_vport_active_tx_flg;
	u8				vport_active_tx_flg;
	u8				update_approx_mcast_flg;
	unsigned long			bins[8];
	struct qed_rss_params		*rss_params;
	struct qed_filter_accept_flags	accept_flags;
};

#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

static int
qed_sp_vport_start(struct qed_hwfn *p_hwfn,
		   u32 concrete_fid,
		   u16 opaque_fid,
		   u8 vport_id,
		   u16 mtu,
		   u8 drop_ttl0_flg,
		   u8 inner_vlan_removal_en_flg)
{
	struct qed_sp_init_request_params params;
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	int rc = -EINVAL;
	u16 rx_mode = 0;
	u8 abs_vport_id = 0;

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	memset(&params, 0, sizeof(params));
	params.ramrod_data_size = sizeof(*p_ramrod);
	params.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 qed_spq_get_cid(p_hwfn),
				 opaque_fid,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH,
				 &params);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = cpu_to_le16(mtu);
	p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg;
	p_ramrod->drop_ttl0_en = drop_ttl0_flg;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev, concrete_fid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_params)
{
	struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
	u16 abs_l2_queue = 0, capabilities = 0;
	int rc = 0, i;

	if (!p_params) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
		     ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
	if (rc)
		return rc;

	p_ramrod->common.update_rss_flg = p_params->update_rss_config;
	rss->update_rss_capabilities = p_params->update_rss_capabilities;
	rss->update_rss_ind_table = p_params->update_rss_ind_table;
	rss->update_rss_key = p_params->update_rss_key;

	rss->rss_mode = p_params->rss_enable ?
			ETH_VPORT_RSS_MODE_REGULAR :
			ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
	rss->tbl_size = p_params->rss_table_size_log;

	rss->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   rss->rss_mode, rss->update_rss_capabilities,
		   capabilities, rss->update_rss_ind_table,
		   rss->update_rss_key);

	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		rc = qed_fw_l2_queue(p_hwfn,
				     (u8)p_params->rss_ind_table[i],
				     &abs_l2_queue);
		if (rc)
			return rc;

		rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
			   i, rss->indirection_table[i]);
	}

	for (i = 0; i < QED_RSS_KEY_SIZE; i++)
		rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);

	return rc;
}

static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;

	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}

static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (p_params->update_approx_mcast_flg) {
		p_ramrod->common.update_approx_mcast_flg = 1;
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			u32 *p_bins = (u32 *)p_params->bins;
			__le32 val = cpu_to_le32(p_bins[i]);

			p_ramrod->approx_mcast.bins[i] = val;
		}
	}
}

static int
qed_sp_vport_update(struct qed_hwfn *p_hwfn,
		    struct qed_sp_vport_update_params *p_params,
		    enum spq_mode comp_mode,
		    struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_request_params sp_params;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.ramrod_data_size = sizeof(*p_ramrod);
	sp_params.comp_mode = comp_mode;
	sp_params.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 qed_spq_get_cid(p_hwfn),
				 p_params->opaque_fid,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH,
				 &sp_params);
	if (rc)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;

	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc) {
		/* Return spq entry which is taken in qed_sp_init_request()*/
		qed_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
			     u16 opaque_fid,
			     u8 vport_id)
{
	struct qed_sp_init_request_params sp_params;
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	int rc;

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.ramrod_data_size = sizeof(*p_ramrod);
	sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 qed_spq_get_cid(p_hwfn),
				 opaque_fid,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH,
				 &sp_params);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 u8 vport,
				 struct qed_filter_accept_flags accept_flags,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
		if (rc != 0) {
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
	}

	return 0;
}

static int qed_sp_release_queue_cid(struct qed_hwfn *p_hwfn,
				    struct qed_hw_cid_data *p_cid_data)
{
	if (!p_cid_data->b_cid_allocated)
		return 0;

	qed_cxt_release_cid(p_hwfn, p_cid_data->cid);

	p_cid_data->b_cid_allocated = false;

	return 0;
}

static int
qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    u32 cid,
			    struct qed_queue_start_common_params *params,
			    u8 stats_id,
			    u16 bd_max_bytes,
			    dma_addr_t bd_chain_phys_addr,
			    dma_addr_t cqe_pbl_addr,
			    u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_sp_init_request_params sp_params;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_hw_cid_data *p_rx_cid;
	u16 abs_rx_q_id = 0;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;

	/* Store information for the stop */
	p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
	p_rx_cid->cid = cid;
	p_rx_cid->opaque_fid = opaque_fid;
	p_rx_cid->vport_id = params->vport_id;

	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
	if (rc != 0)
		return rc;

	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
	if (rc != 0)
		return rc;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   opaque_fid, cid, params->queue_id, params->vport_id,
		   params->sb);

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
	sp_params.ramrod_data_size = sizeof(*p_ramrod);

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 cid, opaque_fid,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH,
				 &sp_params);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(params->sb);
	p_ramrod->sb_index = params->sb_idx;
	p_ramrod->vport_id = abs_vport_id;
	p_ramrod->stats_counter_id = stats_id;
	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	p_ramrod->bd_base.hi = DMA_HI_LE(bd_chain_phys_addr);
	p_ramrod->bd_base.lo = DMA_LO_LE(bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages =
		cpu_to_le16(cqe_pbl_size);
	p_ramrod->cqe_pbl_addr.hi = DMA_HI_LE(cqe_pbl_addr);
	p_ramrod->cqe_pbl_addr.lo = DMA_LO_LE(cqe_pbl_addr);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	return rc;
}

static int
qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct qed_queue_start_common_params *params,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size,
			  void __iomem **pp_prod)
{
	struct qed_hw_cid_data *p_rx_cid;
	u64 init_prod_val = 0;
	u16 abs_l2_queue = 0;
	u8 abs_stats_id = 0;
	int rc;

	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
	if (rc != 0)
		return rc;

	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
	if (rc != 0)
		return rc;

	*pp_prod = (u8 __iomem *)p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_PRODS_OFFSET(abs_l2_queue);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
			  (u32 *)(&init_prod_val));

	/* Allocate a CID for the queue */
	p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
				 &p_rx_cid->cid);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
		return rc;
	}
	p_rx_cid->b_cid_allocated = true;

	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
					 opaque_fid,
					 p_rx_cid->cid,
					 params,
					 abs_stats_id,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr,
					 cqe_pbl_size);

	if (rc != 0)
		qed_sp_release_queue_cid(p_hwfn, p_rx_cid);

	return rc;
}

static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    u16 rx_queue_id,
				    bool eq_completion_only,
				    bool cqe_completion)
{
	struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_sp_init_request_params sp_params;
	struct qed_spq_entry *p_ent = NULL;
	u16 abs_rx_q_id = 0;
	int rc = -EINVAL;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.ramrod_data_size = sizeof(*p_ramrod);
	sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 p_rx_cid->cid,
				 p_rx_cid->opaque_fid,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH,
				 &sp_params);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;

	qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
	qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg =
		(!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
		 !eq_completion_only) || cqe_completion;
	p_ramrod->complete_event_flg =
		!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
		eq_completion_only;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
}

static int
qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    u32 cid,
			    struct qed_queue_start_common_params *p_params,
			    u8 stats_id,
			    dma_addr_t pbl_addr,
			    u16 pbl_size,
			    union qed_qm_pq_params *p_pq_params)
{
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_sp_init_request_params sp_params;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_hw_cid_data *p_tx_cid;
	u8 abs_vport_id;
	int rc = -EINVAL;
	u16 pq_id;

	/* Store information for the stop */
	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
	p_tx_cid->cid = cid;
	p_tx_cid->opaque_fid = opaque_fid;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.ramrod_data_size = sizeof(*p_ramrod);
	sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, cid,
				 opaque_fid,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH,
				 &sp_params);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->sb_id = cpu_to_le16(p_params->sb);
	p_ramrod->sb_index = p_params->sb_idx;
	p_ramrod->stats_counter_id = stats_id;

	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
	p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr);
	p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr);

	pq_id = qed_get_qm_pq(p_hwfn,
			      PROTOCOLID_ETH,
			      p_pq_params);
	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct qed_queue_start_common_params *p_params,
			  dma_addr_t pbl_addr,
			  u16 pbl_size,
			  void __iomem **pp_doorbell)
{
	struct qed_hw_cid_data *p_tx_cid;
	union qed_qm_pq_params pq_params;
	u8 abs_stats_id = 0;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
	if (rc)
		return rc;

	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
	memset(p_tx_cid, 0, sizeof(*p_tx_cid));
	memset(&pq_params, 0, sizeof(pq_params));

	/* Allocate a CID for the queue */
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
				 &p_tx_cid->cid);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
		return rc;
	}
	p_tx_cid->b_cid_allocated = true;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   opaque_fid, p_tx_cid->cid,
		   p_params->queue_id, p_params->vport_id, p_params->sb);

	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
					 opaque_fid,
					 p_tx_cid->cid,
					 p_params,
					 abs_stats_id,
					 pbl_addr,
					 pbl_size,
					 &pq_params);

	*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
		       qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);

	if (rc)
		qed_sp_release_queue_cid(p_hwfn, p_tx_cid);

	return rc;
}

static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    u16 tx_queue_id)
{
	struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
	struct qed_sp_init_request_params sp_params;
	struct qed_spq_entry *p_ent = NULL;
	int rc = -EINVAL;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.ramrod_data_size = sizeof(struct tx_queue_stop_ramrod_data);
	sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 p_tx_cid->cid,
				 p_tx_cid->opaque_fid,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH,
				 &sp_params);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
}

static enum eth_filter_action
qed_filter_action(enum qed_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}

/* Copy a MAC address into the three 16-bit firmware fields, swapping each
 * byte pair into the byte order the firmware expects.
 */
static void qed_set_fw_mac_addr(__le16 *fw_msb,
				__le16 *fw_mid,
				__le16 *fw_lsb,
				u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}

static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct qed_sp_init_request_params sp_params;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	enum eth_filter_action action;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			  &vport_to_add_to);
	if (rc)
		return rc;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.ramrod_data_size = sizeof(**pp_ramrod);
	sp_params.comp_mode = comp_mode;
	sp_params.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 qed_spq_get_cid(p_hwfn),
				 opaque_fid,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH,
				 &sp_params);
	if (rc)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

	switch (p_filter_cmd->opcode) {
	case QED_FILTER_REPLACE:
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		memcpy(p_second_filter, p_first_filter,
		       sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return -EINVAL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :
					   vport_to_add_to;
	}

	return 0;
}

static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
				   u16 opaque_fid,
				   struct qed_filter_ucast *p_filter_cmd,
				   enum spq_mode comp_mode,
				   struct qed_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct eth_filter_cmd_header *p_header;
	int rc;

	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				     &p_ramrod, &p_ent,
				     comp_mode, p_comp_data);
	if (rc != 0) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc != 0) {
		DP_ERR(p_hwfn,
		       "Unicast filter ADD command failed %d\n",
		       rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0],
		   p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2],
		   p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4],
		   p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return 0;
}

/*******************************************************************************
 * Description:
 *	Calculates crc 32 on a buffer
 *	Note: crc32_length MUST be a multiple of 8
 * Return: the resulting crc, or the seed if the input is invalid
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
			   u32 crc32_length,
			   u32 crc32_seed,
			   u8 complement)
{
	u32 byte = 0;
	u32 bit = 0;
	u8 msb = 0;
	u8 current_byte = 0;
	u32 crc32_result = crc32_seed;

	if ((!crc32_packet) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0))
		return crc32_result;
	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /*crc32_result[0] = 1;*/
			}
		}
	}
	return crc32_result;
}

static inline u32 qed_crc32c_le(u32 seed,
				u8 *mac,
				u32 len)
{
	u32 packet_buf[2] = { 0 };

	/* Copy the 6-byte MAC into a zero-padded 8-byte buffer, since
	 * qed_calc_crc32c() requires a length that is a multiple of 8.
	 */
	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}

static u8 qed_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				mac, ETH_ALEN);

	return crc & 0xff;
}

static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_mcast *p_filter_cmd,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_sp_init_request_params sp_params;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0;
	int rc, i;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
				  &abs_vport_id);
		if (rc)
			return rc;
	} else {
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
				  &abs_vport_id);
		if (rc)
			return rc;
	}

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.ramrod_data_size = sizeof(*p_ramrod);
	sp_params.comp_mode = comp_mode;
	sp_params.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 qed_spq_get_cid(p_hwfn),
				 p_hwfn->hw_info.opaque_fid,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH,
				 &sp_params);

	if (rc) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));
	memset(bins, 0, sizeof(unsigned long) *
	       ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* filter ADD op is explicit set op and it removes
	 * any existing filters for the vport
	 */
	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, bins);
		}

		/* Convert to correct endianity */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			u32 *p_bins = (u32 *)bins;
			struct vport_update_ramrod_mcast *approx_mcast;

			approx_mcast = &p_ramrod->approx_mcast;
			approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_filter_mcast_cmd(struct qed_dev *cdev,
		     struct qed_filter_mcast *p_filter_cmd,
		     enum spq_mode comp_mode,
		     struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     p_filter_cmd->opcode != QED_FILTER_REMOVE) ||
	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
		return -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (rc != 0)
			break;

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode,
					     p_comp_data);
	}
	return rc;
}

static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (rc != 0)
			break;

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode,
					     p_comp_data);
	}

	return rc;
}

static int qed_fill_eth_dev_info(struct qed_dev *cdev,
				 struct qed_dev_eth_info *info)
{
	int i;

	memset(info, 0, sizeof(*info));

	info->num_tc = 1;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i)
			info->num_queues += FEAT_NUM(&cdev->hwfns[i],
						     QED_PF_L2_QUE);
		if (cdev->int_params.fp_msix_cnt)
			info->num_queues = min_t(u8, info->num_queues,
						 cdev->int_params.fp_msix_cnt);
	} else {
		info->num_queues = cdev->num_hwfns;
	}

	info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
	ether_addr_copy(info->port_mac,
			cdev->hwfns[0].hw_info.hw_mac_addr);

	qed_fill_dev_info(cdev, &info->common);

	return 0;
}

static void qed_register_eth_ops(struct qed_dev *cdev,
				 struct qed_eth_cb_ops *ops,
				 void *cookie)
{
	cdev->protocol_ops.eth = ops;
	cdev->ops_cookie = cookie;
}

static int qed_start_vport(struct qed_dev *cdev,
			   u8 vport_id,
			   u16 mtu,
			   u8 drop_ttl0_flg,
			   u8 inner_vlan_removal_en_flg)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_sp_vport_start(p_hwfn,
					p_hwfn->hw_info.concrete_fid,
					p_hwfn->hw_info.opaque_fid,
					vport_id,
					mtu,
					drop_ttl0_flg,
					inner_vlan_removal_en_flg);

		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT\n");
			return rc;
		}

		qed_hw_start_fastpath(p_hwfn);

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Started V-PORT %d with MTU %d\n",
			   vport_id, mtu);
	}

	qed_reset_vport_stats(cdev);

	return 0;
}

static int qed_stop_vport(struct qed_dev *cdev,
			  u8 vport_id)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_sp_vport_stop(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       vport_id);

		if (rc) {
			DP_ERR(cdev, "Failed to stop VPORT\n");
			return rc;
		}
	}
	return 0;
}

static int qed_update_vport(struct qed_dev *cdev,
			    struct qed_update_vport_params *params)
{
	struct qed_sp_vport_update_params sp_params;
	struct qed_rss_params sp_rss_params;
	int rc, i;

	if (!cdev)
		return -ENODEV;

	memset(&sp_params, 0, sizeof(sp_params));
	memset(&sp_rss_params, 0, sizeof(sp_rss_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg =
		params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg =
		params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;

	/* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
	 * We need to re-fix the rss values per engine for CMT.
	 */
	if (cdev->num_hwfns > 1 && params->update_rss_flg) {
		struct qed_update_vport_rss_params *rss =
			&params->rss_params;
		int k, max = 0;

		/* Find largest entry, since it's possible RSS needs to
		 * be disabled [in case only 1 queue per-hwfn]
		 */
		for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
			max = (max > rss->rss_ind_table[k]) ?
			      max : rss->rss_ind_table[k];

		/* Either fix RSS values or disable RSS */
		if (cdev->num_hwfns < max + 1) {
			int divisor = (max + cdev->num_hwfns - 1) /
				      cdev->num_hwfns;

			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
				   "CMT - fixing RSS values (modulo %02x)\n",
				   divisor);

			for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
				rss->rss_ind_table[k] =
					rss->rss_ind_table[k] % divisor;
		} else {
			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
			params->update_rss_flg = 0;
		}
	}

	/* Now, update the RSS configuration for actual configuration */
	if (params->update_rss_flg) {
		sp_rss_params.update_rss_config = 1;
		sp_rss_params.rss_enable = 1;
		sp_rss_params.update_rss_capabilities = 1;
		sp_rss_params.update_rss_ind_table = 1;
		sp_rss_params.update_rss_key = 1;
		sp_rss_params.rss_caps = QED_RSS_IPV4 |
					 QED_RSS_IPV6 |
					 QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
		sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
		memcpy(sp_rss_params.rss_ind_table,
		       params->rss_params.rss_ind_table,
		       QED_RSS_IND_TABLE_SIZE * sizeof(u16));
		memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
		       QED_RSS_KEY_SIZE * sizeof(u32));
	}
	sp_params.rss_params = &sp_rss_params;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = qed_sp_vport_update(p_hwfn, &sp_params,
					 QED_SPQ_MODE_EBLOCK,
					 NULL);
		if (rc) {
			DP_ERR(cdev, "Failed to update VPORT\n");
			return rc;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

	return 0;
}

static int qed_start_rxq(struct qed_dev *cdev,
			 struct qed_queue_start_common_params *params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 void __iomem **pp_prod)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	/* Fix queue ID in 100g mode */
	params->queue_id /= cdev->num_hwfns;

	rc = qed_sp_eth_rx_queue_start(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       params,
				       bd_max_bytes,
				       bd_chain_phys_addr,
				       cqe_pbl_addr,
				       cqe_pbl_size,
				       pp_prod);

	if (rc) {
		DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
		   params->queue_id, params->rss_id, params->vport_id,
		   params->sb);

	return 0;
}

static int qed_stop_rxq(struct qed_dev *cdev,
			struct qed_stop_rxq_params *params)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_sp_eth_rx_queue_stop(p_hwfn,
				      params->rx_queue_id / cdev->num_hwfns,
				      params->eq_completion_only,
				      false);
	if (rc) {
		DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
		return rc;
	}

	return 0;
}

static int qed_start_txq(struct qed_dev *cdev,
			 struct qed_queue_start_common_params *p_params,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 void __iomem **pp_doorbell)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = p_params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	/* Fix queue ID in 100g mode */
	p_params->queue_id /= cdev->num_hwfns;

	rc = qed_sp_eth_tx_queue_start(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       p_params,
				       pbl_addr,
				       pbl_size,
				       pp_doorbell);

	if (rc) {
		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
		   p_params->queue_id, p_params->rss_id, p_params->vport_id,
		   p_params->sb);

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
	qed_hw_stop_fastpath(cdev);

	return 0;
}

static int qed_stop_txq(struct qed_dev *cdev,
			struct qed_stop_txq_params *params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = params->rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_sp_eth_tx_queue_stop(p_hwfn,
				      params->tx_queue_id / cdev->num_hwfns);
	if (rc) {
		DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
		return rc;
	}

	return 0;
}

static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
					enum qed_filter_rx_mode_type type)
{
	struct qed_filter_accept_flags accept_flags;

	memset(&accept_flags, 0, sizeof(accept_flags));

	accept_flags.update_rx_mode_config = 1;
	accept_flags.update_tx_mode_config = 1;
	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;
	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
	else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;

	return qed_filter_accept_cmd(cdev, 0, accept_flags,
				     QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_ucast(struct qed_dev *cdev,
				      struct qed_filter_ucast_params *params)
{
	struct qed_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = QED_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = QED_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = QED_FILTER_MAC_VLAN;
		ether_addr_copy(ucast.mac, params->mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = QED_FILTER_MAC;
		ether_addr_copy(ucast.mac, params->mac);
	} else {
		ucast.type = QED_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_mcast(struct qed_dev *cdev,
				      struct qed_filter_mcast_params *params)
{
	struct qed_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = QED_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy(mcast.mac[i], params->mac[i]);

	return qed_filter_mcast_cmd(cdev, &mcast,
				    QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter(struct qed_dev *cdev,
				struct qed_filter_params *params)
{
	enum qed_filter_rx_mode_type accept_flags;

	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		accept_flags = params->filter.accept_flags;
		return qed_configure_filter_rx_mode(cdev, accept_flags);
	default:
		DP_NOTICE(cdev, "Unknown filter type %d\n",
			  (int)params->type);
		return -EINVAL;
	}
}

static int qed_fp_cqe_completion(struct qed_dev *dev,
				 u8 rss_id,
				 struct eth_slow_path_rx_cqe *cqe)
{
	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
				      cqe);
}

static const struct qed_eth_ops qed_eth_ops_pass = {
	.common = &qed_common_ops_pass,
	.fill_dev_info = &qed_fill_eth_dev_info,
	.register_ops = &qed_register_eth_ops,
	.vport_start = &qed_start_vport,
	.vport_stop = &qed_stop_vport,
	.vport_update = &qed_update_vport,
	.q_rx_start = &qed_start_rxq,
	.q_rx_stop = &qed_stop_rxq,
	.q_tx_start = &qed_start_txq,
	.q_tx_stop = &qed_stop_txq,
	.filter_config = &qed_configure_filter,
	.fastpath_stop = &qed_fastpath_stop,
	.eth_cqe_completion = &qed_fp_cqe_completion,
	.get_vport_stats = &qed_get_vport_stats,
};

const struct qed_eth_ops *qed_get_eth_ops(u32 version)
{
	if (version != QED_ETH_INTERFACE_VERSION) {
		pr_notice("Cannot supply ethtool operations [%08x != %08x]\n",
			  version, QED_ETH_INTERFACE_VERSION);
		return NULL;
	}

	return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

void qed_put_eth_ops(void)
{
	/* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);