/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
#include "bna.h"
#include "bfi.h"

/* IB */
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);
}

/* RXF */

#define bna_rxf_vlan_cfg_soft_reset(rxf)				\
do {									\
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;		\
	(rxf)->vlan_strip_pending = true;				\
} while (0)

#define bna_rxf_rss_cfg_soft_reset(rxf)					\
do {									\
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)			\
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |		\
				BNA_RSS_F_CFG_PENDING |			\
				BNA_RSS_F_STATUS_PENDING);		\
} while (0)

static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
				   enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
				     enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
				      enum bna_cleanup_type cleanup);

bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
		   enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
		   enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
		   enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
		   enum bna_rxf_event);

static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
	call_rxf_stop_cbfn(rxf);
}

static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_STOP:
		call_rxf_stop_cbfn(rxf);
		break;

	case RXF_E_FAIL:
		/* No-op */
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
{
	if (!bna_rxf_cfg_apply(rxf)) {
		/* No more pending config updates */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}

static void
bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
		break;

	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_start_cbfn(rxf);
		call_rxf_cam_fltr_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		/* No-op
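		 * A new config request only updates the pending state here;
		 * it is picked up by bna_rxf_cfg_apply() when the outstanding
		 * firmware response (RXF_E_FW_RESP) arrives below.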
		 */
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_cfg_apply(rxf)) {
			/* No more pending config updates */
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
	call_rxf_start_cbfn(rxf);
	call_rxf_cam_fltr_cbfn(rxf);
}

static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_FW_RESP:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
		  enum bfi_enet_h2i_msgs req_type)
{
	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
	ether_addr_copy(req->mac_addr, mac->addr);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_ucast_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
			  0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
	ether_addr_copy(req->mac_addr, mac->addr);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_mcast_add_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
{
	struct bfi_enet_mcast_del_req *req =
		&rxf->bfi_enet_cmd.mcast_del_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
			  0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
	req->handle = htons(handle);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_mcast_del_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum
bna_status status) 250 { 251 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; 252 253 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, 254 BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid); 255 req->mh.num_entries = htons( 256 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); 257 req->enable = status; 258 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, 259 sizeof(struct bfi_enet_enable_req), &req->mh); 260 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); 261 } 262 263 static void 264 bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx) 265 { 266 struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req; 267 int i; 268 int j; 269 270 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, 271 BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid); 272 req->mh.num_entries = htons( 273 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req))); 274 req->block_idx = block_idx; 275 for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) { 276 j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i; 277 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) 278 req->bit_mask[i] = 279 htonl(rxf->vlan_filter_table[j]); 280 else 281 req->bit_mask[i] = 0xFFFFFFFF; 282 } 283 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, 284 sizeof(struct bfi_enet_rx_vlan_req), &req->mh); 285 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); 286 } 287 288 static void 289 bna_bfi_vlan_strip_enable(struct bna_rxf *rxf) 290 { 291 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; 292 293 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, 294 BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid); 295 req->mh.num_entries = htons( 296 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); 297 req->enable = rxf->vlan_strip_status; 298 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, 299 sizeof(struct bfi_enet_enable_req), &req->mh); 300 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); 301 } 302 303 static void 304 bna_bfi_rit_cfg(struct bna_rxf *rxf) 305 { 306 struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req; 307 308 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, 309 BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid); 310 req->mh.num_entries = htons( 311 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req))); 312 req->size = htons(rxf->rit_size); 313 memcpy(&req->table[0], rxf->rit, rxf->rit_size); 314 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, 315 sizeof(struct bfi_enet_rit_req), &req->mh); 316 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); 317 } 318 319 static void 320 bna_bfi_rss_cfg(struct bna_rxf *rxf) 321 { 322 struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req; 323 int i; 324 325 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, 326 BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid); 327 req->mh.num_entries = htons( 328 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req))); 329 req->cfg.type = rxf->rss_cfg.hash_type; 330 req->cfg.mask = rxf->rss_cfg.hash_mask; 331 for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++) 332 req->cfg.key[i] = 333 htonl(rxf->rss_cfg.toeplitz_hash_key[i]); 334 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, 335 sizeof(struct bfi_enet_rss_cfg_req), &req->mh); 336 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); 337 } 338 339 static void 340 bna_bfi_rss_enable(struct bna_rxf *rxf) 341 { 342 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; 343 344 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, 345 BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid); 346 req->mh.num_entries = htons( 347 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); 348 req->enable = 
rxf->rss_status; 349 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, 350 sizeof(struct bfi_enet_enable_req), &req->mh); 351 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); 352 } 353 354 /* This function gets the multicast MAC that has already been added to CAM */ 355 static struct bna_mac * 356 bna_rxf_mcmac_get(struct bna_rxf *rxf, const u8 *mac_addr) 357 { 358 struct bna_mac *mac; 359 360 list_for_each_entry(mac, &rxf->mcast_active_q, qe) 361 if (ether_addr_equal(mac->addr, mac_addr)) 362 return mac; 363 364 list_for_each_entry(mac, &rxf->mcast_pending_del_q, qe) 365 if (ether_addr_equal(mac->addr, mac_addr)) 366 return mac; 367 368 return NULL; 369 } 370 371 static struct bna_mcam_handle * 372 bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle) 373 { 374 struct bna_mcam_handle *mchandle; 375 376 list_for_each_entry(mchandle, &rxf->mcast_handle_q, qe) 377 if (mchandle->handle == handle) 378 return mchandle; 379 380 return NULL; 381 } 382 383 static void 384 bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle) 385 { 386 struct bna_mac *mcmac; 387 struct bna_mcam_handle *mchandle; 388 389 mcmac = bna_rxf_mcmac_get(rxf, mac_addr); 390 mchandle = bna_rxf_mchandle_get(rxf, handle); 391 if (mchandle == NULL) { 392 mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod); 393 mchandle->handle = handle; 394 mchandle->refcnt = 0; 395 list_add_tail(&mchandle->qe, &rxf->mcast_handle_q); 396 } 397 mchandle->refcnt++; 398 mcmac->handle = mchandle; 399 } 400 401 static int 402 bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac, 403 enum bna_cleanup_type cleanup) 404 { 405 struct bna_mcam_handle *mchandle; 406 int ret = 0; 407 408 mchandle = mac->handle; 409 if (mchandle == NULL) 410 return ret; 411 412 mchandle->refcnt--; 413 if (mchandle->refcnt == 0) { 414 if (cleanup == BNA_HARD_CLEANUP) { 415 bna_bfi_mcast_del_req(rxf, mchandle->handle); 416 ret = 1; 417 } 418 list_del(&mchandle->qe); 419 bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle); 420 } 421 mac->handle = NULL; 422 423 return ret; 424 } 425 426 static int 427 bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf) 428 { 429 struct bna_mac *mac = NULL; 430 int ret; 431 432 /* First delete multicast entries to maintain the count */ 433 while (!list_empty(&rxf->mcast_pending_del_q)) { 434 mac = list_first_entry(&rxf->mcast_pending_del_q, 435 struct bna_mac, qe); 436 ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP); 437 list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna)); 438 if (ret) 439 return ret; 440 } 441 442 /* Add multicast entries */ 443 if (!list_empty(&rxf->mcast_pending_add_q)) { 444 mac = list_first_entry(&rxf->mcast_pending_add_q, 445 struct bna_mac, qe); 446 list_move_tail(&mac->qe, &rxf->mcast_active_q); 447 bna_bfi_mcast_add_req(rxf, mac); 448 return 1; 449 } 450 451 return 0; 452 } 453 454 static int 455 bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf) 456 { 457 u8 vlan_pending_bitmask; 458 int block_idx = 0; 459 460 if (rxf->vlan_pending_bitmask) { 461 vlan_pending_bitmask = rxf->vlan_pending_bitmask; 462 while (!(vlan_pending_bitmask & 0x1)) { 463 block_idx++; 464 vlan_pending_bitmask >>= 1; 465 } 466 rxf->vlan_pending_bitmask &= ~BIT(block_idx); 467 bna_bfi_rx_vlan_filter_set(rxf, block_idx); 468 return 1; 469 } 470 471 return 0; 472 } 473 474 static int 475 bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) 476 { 477 struct bna_mac *mac; 478 int ret; 479 480 /* Throw away delete pending mcast entries */ 481 while (!list_empty(&rxf->mcast_pending_del_q)) { 482 
mac = list_first_entry(&rxf->mcast_pending_del_q, 483 struct bna_mac, qe); 484 ret = bna_rxf_mcast_del(rxf, mac, cleanup); 485 list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna)); 486 if (ret) 487 return ret; 488 } 489 490 /* Move active mcast entries to pending_add_q */ 491 while (!list_empty(&rxf->mcast_active_q)) { 492 mac = list_first_entry(&rxf->mcast_active_q, 493 struct bna_mac, qe); 494 list_move_tail(&mac->qe, &rxf->mcast_pending_add_q); 495 if (bna_rxf_mcast_del(rxf, mac, cleanup)) 496 return 1; 497 } 498 499 return 0; 500 } 501 502 static int 503 bna_rxf_rss_cfg_apply(struct bna_rxf *rxf) 504 { 505 if (rxf->rss_pending) { 506 if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) { 507 rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING; 508 bna_bfi_rit_cfg(rxf); 509 return 1; 510 } 511 512 if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) { 513 rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING; 514 bna_bfi_rss_cfg(rxf); 515 return 1; 516 } 517 518 if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) { 519 rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING; 520 bna_bfi_rss_enable(rxf); 521 return 1; 522 } 523 } 524 525 return 0; 526 } 527 528 static int 529 bna_rxf_cfg_apply(struct bna_rxf *rxf) 530 { 531 if (bna_rxf_ucast_cfg_apply(rxf)) 532 return 1; 533 534 if (bna_rxf_mcast_cfg_apply(rxf)) 535 return 1; 536 537 if (bna_rxf_promisc_cfg_apply(rxf)) 538 return 1; 539 540 if (bna_rxf_allmulti_cfg_apply(rxf)) 541 return 1; 542 543 if (bna_rxf_vlan_cfg_apply(rxf)) 544 return 1; 545 546 if (bna_rxf_vlan_strip_cfg_apply(rxf)) 547 return 1; 548 549 if (bna_rxf_rss_cfg_apply(rxf)) 550 return 1; 551 552 return 0; 553 } 554 555 static void 556 bna_rxf_cfg_reset(struct bna_rxf *rxf) 557 { 558 bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP); 559 bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP); 560 bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP); 561 bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP); 562 bna_rxf_vlan_cfg_soft_reset(rxf); 563 bna_rxf_rss_cfg_soft_reset(rxf); 564 } 565 566 static void 567 bna_rit_init(struct bna_rxf *rxf, int rit_size) 568 { 569 struct bna_rx *rx = rxf->rx; 570 struct bna_rxp *rxp; 571 int offset = 0; 572 573 rxf->rit_size = rit_size; 574 list_for_each_entry(rxp, &rx->rxp_q, qe) { 575 rxf->rit[offset] = rxp->cq.ccb->id; 576 offset++; 577 } 578 } 579 580 void 581 bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr) 582 { 583 bfa_fsm_send_event(rxf, RXF_E_FW_RESP); 584 } 585 586 void 587 bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf, 588 struct bfi_msgq_mhdr *msghdr) 589 { 590 struct bfi_enet_rsp *rsp = 591 container_of(msghdr, struct bfi_enet_rsp, mh); 592 593 if (rsp->error) { 594 /* Clear ucast from cache */ 595 rxf->ucast_active_set = 0; 596 } 597 598 bfa_fsm_send_event(rxf, RXF_E_FW_RESP); 599 } 600 601 void 602 bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, 603 struct bfi_msgq_mhdr *msghdr) 604 { 605 struct bfi_enet_mcast_add_req *req = 606 &rxf->bfi_enet_cmd.mcast_add_req; 607 struct bfi_enet_mcast_add_rsp *rsp = 608 container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh); 609 610 bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr, 611 ntohs(rsp->handle)); 612 bfa_fsm_send_event(rxf, RXF_E_FW_RESP); 613 } 614 615 static void 616 bna_rxf_init(struct bna_rxf *rxf, 617 struct bna_rx *rx, 618 struct bna_rx_config *q_config, 619 struct bna_res_info *res_info) 620 { 621 rxf->rx = rx; 622 623 INIT_LIST_HEAD(&rxf->ucast_pending_add_q); 624 INIT_LIST_HEAD(&rxf->ucast_pending_del_q); 625 rxf->ucast_pending_set = 0; 626 rxf->ucast_active_set = 0; 627 
INIT_LIST_HEAD(&rxf->ucast_active_q); 628 rxf->ucast_pending_mac = NULL; 629 630 INIT_LIST_HEAD(&rxf->mcast_pending_add_q); 631 INIT_LIST_HEAD(&rxf->mcast_pending_del_q); 632 INIT_LIST_HEAD(&rxf->mcast_active_q); 633 INIT_LIST_HEAD(&rxf->mcast_handle_q); 634 635 rxf->rit = (u8 *) 636 res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva; 637 bna_rit_init(rxf, q_config->num_paths); 638 639 rxf->rss_status = q_config->rss_status; 640 if (rxf->rss_status == BNA_STATUS_T_ENABLED) { 641 rxf->rss_cfg = q_config->rss_config; 642 rxf->rss_pending |= BNA_RSS_F_CFG_PENDING; 643 rxf->rss_pending |= BNA_RSS_F_RIT_PENDING; 644 rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING; 645 } 646 647 rxf->vlan_filter_status = BNA_STATUS_T_DISABLED; 648 memset(rxf->vlan_filter_table, 0, 649 (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32))); 650 rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */ 651 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; 652 653 rxf->vlan_strip_status = q_config->vlan_strip_status; 654 655 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); 656 } 657 658 static void 659 bna_rxf_uninit(struct bna_rxf *rxf) 660 { 661 struct bna_mac *mac; 662 663 rxf->ucast_pending_set = 0; 664 rxf->ucast_active_set = 0; 665 666 while (!list_empty(&rxf->ucast_pending_add_q)) { 667 mac = list_first_entry(&rxf->ucast_pending_add_q, 668 struct bna_mac, qe); 669 list_move_tail(&mac->qe, bna_ucam_mod_free_q(rxf->rx->bna)); 670 } 671 672 if (rxf->ucast_pending_mac) { 673 list_add_tail(&rxf->ucast_pending_mac->qe, 674 bna_ucam_mod_free_q(rxf->rx->bna)); 675 rxf->ucast_pending_mac = NULL; 676 } 677 678 while (!list_empty(&rxf->mcast_pending_add_q)) { 679 mac = list_first_entry(&rxf->mcast_pending_add_q, 680 struct bna_mac, qe); 681 list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna)); 682 } 683 684 rxf->rxmode_pending = 0; 685 rxf->rxmode_pending_bitmask = 0; 686 if (rxf->rx->bna->promisc_rid == rxf->rx->rid) 687 rxf->rx->bna->promisc_rid = BFI_INVALID_RID; 688 if (rxf->rx->bna->default_mode_rid == rxf->rx->rid) 689 rxf->rx->bna->default_mode_rid = BFI_INVALID_RID; 690 691 rxf->rss_pending = 0; 692 rxf->vlan_strip_pending = false; 693 694 rxf->rx = NULL; 695 } 696 697 static void 698 bna_rx_cb_rxf_started(struct bna_rx *rx) 699 { 700 bfa_fsm_send_event(rx, RX_E_RXF_STARTED); 701 } 702 703 static void 704 bna_rxf_start(struct bna_rxf *rxf) 705 { 706 rxf->start_cbfn = bna_rx_cb_rxf_started; 707 rxf->start_cbarg = rxf->rx; 708 bfa_fsm_send_event(rxf, RXF_E_START); 709 } 710 711 static void 712 bna_rx_cb_rxf_stopped(struct bna_rx *rx) 713 { 714 bfa_fsm_send_event(rx, RX_E_RXF_STOPPED); 715 } 716 717 static void 718 bna_rxf_stop(struct bna_rxf *rxf) 719 { 720 rxf->stop_cbfn = bna_rx_cb_rxf_stopped; 721 rxf->stop_cbarg = rxf->rx; 722 bfa_fsm_send_event(rxf, RXF_E_STOP); 723 } 724 725 static void 726 bna_rxf_fail(struct bna_rxf *rxf) 727 { 728 bfa_fsm_send_event(rxf, RXF_E_FAIL); 729 } 730 731 enum bna_cb_status 732 bna_rx_ucast_set(struct bna_rx *rx, const u8 *ucmac) 733 { 734 struct bna_rxf *rxf = &rx->rxf; 735 736 if (rxf->ucast_pending_mac == NULL) { 737 rxf->ucast_pending_mac = 738 bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna)); 739 if (rxf->ucast_pending_mac == NULL) 740 return BNA_CB_UCAST_CAM_FULL; 741 } 742 743 ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac); 744 rxf->ucast_pending_set = 1; 745 rxf->cam_fltr_cbfn = NULL; 746 rxf->cam_fltr_cbarg = rx->bna->bnad; 747 748 bfa_fsm_send_event(rxf, RXF_E_CONFIG); 749 750 return BNA_CB_SUCCESS; 751 } 752 753 enum bna_cb_status 754 
bna_rx_mcast_add(struct bna_rx *rx, const u8 *addr, 755 void (*cbfn)(struct bnad *, struct bna_rx *)) 756 { 757 struct bna_rxf *rxf = &rx->rxf; 758 struct bna_mac *mac; 759 760 /* Check if already added or pending addition */ 761 if (bna_mac_find(&rxf->mcast_active_q, addr) || 762 bna_mac_find(&rxf->mcast_pending_add_q, addr)) { 763 if (cbfn) 764 cbfn(rx->bna->bnad, rx); 765 return BNA_CB_SUCCESS; 766 } 767 768 mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna)); 769 if (mac == NULL) 770 return BNA_CB_MCAST_LIST_FULL; 771 ether_addr_copy(mac->addr, addr); 772 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q); 773 774 rxf->cam_fltr_cbfn = cbfn; 775 rxf->cam_fltr_cbarg = rx->bna->bnad; 776 777 bfa_fsm_send_event(rxf, RXF_E_CONFIG); 778 779 return BNA_CB_SUCCESS; 780 } 781 782 enum bna_cb_status 783 bna_rx_ucast_listset(struct bna_rx *rx, int count, const u8 *uclist) 784 { 785 struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod; 786 struct bna_rxf *rxf = &rx->rxf; 787 struct list_head list_head; 788 const u8 *mcaddr; 789 struct bna_mac *mac, *del_mac; 790 int i; 791 792 /* Purge the pending_add_q */ 793 while (!list_empty(&rxf->ucast_pending_add_q)) { 794 mac = list_first_entry(&rxf->ucast_pending_add_q, 795 struct bna_mac, qe); 796 list_move_tail(&mac->qe, &ucam_mod->free_q); 797 } 798 799 /* Schedule active_q entries for deletion */ 800 while (!list_empty(&rxf->ucast_active_q)) { 801 mac = list_first_entry(&rxf->ucast_active_q, 802 struct bna_mac, qe); 803 del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q); 804 ether_addr_copy(del_mac->addr, mac->addr); 805 del_mac->handle = mac->handle; 806 list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q); 807 list_move_tail(&mac->qe, &ucam_mod->free_q); 808 } 809 810 /* Allocate nodes */ 811 INIT_LIST_HEAD(&list_head); 812 for (i = 0, mcaddr = uclist; i < count; i++) { 813 mac = bna_cam_mod_mac_get(&ucam_mod->free_q); 814 if (mac == NULL) 815 goto err_return; 816 ether_addr_copy(mac->addr, mcaddr); 817 list_add_tail(&mac->qe, &list_head); 818 mcaddr += ETH_ALEN; 819 } 820 821 /* Add the new entries */ 822 while (!list_empty(&list_head)) { 823 mac = list_first_entry(&list_head, struct bna_mac, qe); 824 list_move_tail(&mac->qe, &rxf->ucast_pending_add_q); 825 } 826 827 bfa_fsm_send_event(rxf, RXF_E_CONFIG); 828 829 return BNA_CB_SUCCESS; 830 831 err_return: 832 while (!list_empty(&list_head)) { 833 mac = list_first_entry(&list_head, struct bna_mac, qe); 834 list_move_tail(&mac->qe, &ucam_mod->free_q); 835 } 836 837 return BNA_CB_UCAST_CAM_FULL; 838 } 839 840 enum bna_cb_status 841 bna_rx_mcast_listset(struct bna_rx *rx, int count, const u8 *mclist) 842 { 843 struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod; 844 struct bna_rxf *rxf = &rx->rxf; 845 struct list_head list_head; 846 const u8 *mcaddr; 847 struct bna_mac *mac, *del_mac; 848 int i; 849 850 /* Purge the pending_add_q */ 851 while (!list_empty(&rxf->mcast_pending_add_q)) { 852 mac = list_first_entry(&rxf->mcast_pending_add_q, 853 struct bna_mac, qe); 854 list_move_tail(&mac->qe, &mcam_mod->free_q); 855 } 856 857 /* Schedule active_q entries for deletion */ 858 while (!list_empty(&rxf->mcast_active_q)) { 859 mac = list_first_entry(&rxf->mcast_active_q, 860 struct bna_mac, qe); 861 del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q); 862 ether_addr_copy(del_mac->addr, mac->addr); 863 del_mac->handle = mac->handle; 864 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q); 865 mac->handle = NULL; 866 list_move_tail(&mac->qe, &mcam_mod->free_q); 867 } 868 869 /* Allocate nodes */ 870 
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		ether_addr_copy(mac->addr, mcaddr);
		list_add_tail(&mac->qe, &list_head);

		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		mac = list_first_entry(&list_head, struct bna_mac, qe);
		list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		mac = list_first_entry(&list_head, struct bna_mac, qe);
		list_move_tail(&mac->qe, &mcam_mod->free_q);
	}

	return BNA_CB_MCAST_LIST_FULL;
}

void
bna_rx_mcast_delall(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;
	struct bna_mac *mac, *del_mac;
	int need_hw_config = 0;

	/* Purge all entries from pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		mac = list_first_entry(&rxf->mcast_pending_add_q,
				       struct bna_mac, qe);
		list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
	}

	/* Schedule all entries in active_q for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		mac = list_first_entry(&rxf->mcast_active_q,
				       struct bna_mac, qe);
		list_del(&mac->qe);
		del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		list_add_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
		need_hw_config = 1;
	}

	if (need_hw_config)
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}

void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = BIT(vlan_id & BFI_VLAN_WORD_MASK);
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= BIT(group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = BIT(vlan_id & BFI_VLAN_WORD_MASK);
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= BIT(group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		mac = list_first_entry(&rxf->ucast_pending_del_q,
				       struct bna_mac, qe);
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
		list_move_tail(&mac->qe, bna_ucam_mod_del_q(rxf->rx->bna));
		return 1;
	}

	/* Set default unicast MAC */
	if (rxf->ucast_pending_set) {
		rxf->ucast_pending_set = 0;
		ether_addr_copy(rxf->ucast_active_mac.addr,
				rxf->ucast_pending_mac->addr);
		rxf->ucast_active_set = 1;
		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
				  BFI_ENET_H2I_MAC_UCAST_SET_REQ);
		return 1;
	}
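	/* Like the branches above, the add path below posts a single firmware
	 * command and returns 1; the RXF FSM stays in cfg_wait and re-runs
	 * this function when the RXF_E_FW_RESP for that command arrives.
	 */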
	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		mac = list_first_entry(&rxf->ucast_pending_add_q,
				       struct bna_mac, qe);
		list_move_tail(&mac->qe, &rxf->ucast_active_q);
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
		return 1;
	}

	return 0;
}

static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct bna_mac *mac;

	/* Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		mac = list_first_entry(&rxf->ucast_pending_del_q,
				       struct bna_mac, qe);
		if (cleanup == BNA_SOFT_CLEANUP)
			list_move_tail(&mac->qe,
				       bna_ucam_mod_del_q(rxf->rx->bna));
		else {
			bna_bfi_ucast_req(rxf, mac,
					  BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			list_move_tail(&mac->qe,
				       bna_ucam_mod_del_q(rxf->rx->bna));
			return 1;
		}
	}

	/* Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		mac = list_first_entry(&rxf->ucast_active_q,
				       struct bna_mac, qe);
		list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_ucast_req(rxf, mac,
					  BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			return 1;
		}
	}

	if (rxf->ucast_active_set) {
		rxf->ucast_pending_set = 1;
		rxf->ucast_active_set = 0;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
					  BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
			      rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_promisc_disable(rxf->rxmode_pending,
				      rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}

static int
bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct bna *bna = rxf->rx->bna;

	/* Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	/* Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
{
	/* Enable/disable allmulti mode */
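	/* rxmode_pending/rxmode_pending_bitmask record the requested mode
	 * change; applying it clears the pending bit, updates rxmode_active
	 * and posts one mcast-filter request to the firmware (allmulti on
	 * maps to multicast filtering off, and vice versa).
	 */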
1108 if (is_allmulti_enable(rxf->rxmode_pending, 1109 rxf->rxmode_pending_bitmask)) { 1110 /* move allmulti configuration from pending -> active */ 1111 allmulti_inactive(rxf->rxmode_pending, 1112 rxf->rxmode_pending_bitmask); 1113 rxf->rxmode_active |= BNA_RXMODE_ALLMULTI; 1114 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED); 1115 return 1; 1116 } else if (is_allmulti_disable(rxf->rxmode_pending, 1117 rxf->rxmode_pending_bitmask)) { 1118 /* move allmulti configuration from pending -> active */ 1119 allmulti_inactive(rxf->rxmode_pending, 1120 rxf->rxmode_pending_bitmask); 1121 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; 1122 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED); 1123 return 1; 1124 } 1125 1126 return 0; 1127 } 1128 1129 static int 1130 bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) 1131 { 1132 /* Clear pending allmulti mode disable */ 1133 if (is_allmulti_disable(rxf->rxmode_pending, 1134 rxf->rxmode_pending_bitmask)) { 1135 allmulti_inactive(rxf->rxmode_pending, 1136 rxf->rxmode_pending_bitmask); 1137 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; 1138 if (cleanup == BNA_HARD_CLEANUP) { 1139 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED); 1140 return 1; 1141 } 1142 } 1143 1144 /* Move allmulti mode config from active -> pending */ 1145 if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) { 1146 allmulti_enable(rxf->rxmode_pending, 1147 rxf->rxmode_pending_bitmask); 1148 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; 1149 if (cleanup == BNA_HARD_CLEANUP) { 1150 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED); 1151 return 1; 1152 } 1153 } 1154 1155 return 0; 1156 } 1157 1158 static int 1159 bna_rxf_promisc_enable(struct bna_rxf *rxf) 1160 { 1161 struct bna *bna = rxf->rx->bna; 1162 int ret = 0; 1163 1164 if (is_promisc_enable(rxf->rxmode_pending, 1165 rxf->rxmode_pending_bitmask) || 1166 (rxf->rxmode_active & BNA_RXMODE_PROMISC)) { 1167 /* Do nothing if pending enable or already enabled */ 1168 } else if (is_promisc_disable(rxf->rxmode_pending, 1169 rxf->rxmode_pending_bitmask)) { 1170 /* Turn off pending disable command */ 1171 promisc_inactive(rxf->rxmode_pending, 1172 rxf->rxmode_pending_bitmask); 1173 } else { 1174 /* Schedule enable */ 1175 promisc_enable(rxf->rxmode_pending, 1176 rxf->rxmode_pending_bitmask); 1177 bna->promisc_rid = rxf->rx->rid; 1178 ret = 1; 1179 } 1180 1181 return ret; 1182 } 1183 1184 static int 1185 bna_rxf_promisc_disable(struct bna_rxf *rxf) 1186 { 1187 struct bna *bna = rxf->rx->bna; 1188 int ret = 0; 1189 1190 if (is_promisc_disable(rxf->rxmode_pending, 1191 rxf->rxmode_pending_bitmask) || 1192 (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) { 1193 /* Do nothing if pending disable or already disabled */ 1194 } else if (is_promisc_enable(rxf->rxmode_pending, 1195 rxf->rxmode_pending_bitmask)) { 1196 /* Turn off pending enable command */ 1197 promisc_inactive(rxf->rxmode_pending, 1198 rxf->rxmode_pending_bitmask); 1199 bna->promisc_rid = BFI_INVALID_RID; 1200 } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) { 1201 /* Schedule disable */ 1202 promisc_disable(rxf->rxmode_pending, 1203 rxf->rxmode_pending_bitmask); 1204 ret = 1; 1205 } 1206 1207 return ret; 1208 } 1209 1210 static int 1211 bna_rxf_allmulti_enable(struct bna_rxf *rxf) 1212 { 1213 int ret = 0; 1214 1215 if (is_allmulti_enable(rxf->rxmode_pending, 1216 rxf->rxmode_pending_bitmask) || 1217 (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) { 1218 /* Do nothing if pending enable or already enabled */ 1219 } else if (is_allmulti_disable(rxf->rxmode_pending, 1220 
rxf->rxmode_pending_bitmask)) { 1221 /* Turn off pending disable command */ 1222 allmulti_inactive(rxf->rxmode_pending, 1223 rxf->rxmode_pending_bitmask); 1224 } else { 1225 /* Schedule enable */ 1226 allmulti_enable(rxf->rxmode_pending, 1227 rxf->rxmode_pending_bitmask); 1228 ret = 1; 1229 } 1230 1231 return ret; 1232 } 1233 1234 static int 1235 bna_rxf_allmulti_disable(struct bna_rxf *rxf) 1236 { 1237 int ret = 0; 1238 1239 if (is_allmulti_disable(rxf->rxmode_pending, 1240 rxf->rxmode_pending_bitmask) || 1241 (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) { 1242 /* Do nothing if pending disable or already disabled */ 1243 } else if (is_allmulti_enable(rxf->rxmode_pending, 1244 rxf->rxmode_pending_bitmask)) { 1245 /* Turn off pending enable command */ 1246 allmulti_inactive(rxf->rxmode_pending, 1247 rxf->rxmode_pending_bitmask); 1248 } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) { 1249 /* Schedule disable */ 1250 allmulti_disable(rxf->rxmode_pending, 1251 rxf->rxmode_pending_bitmask); 1252 ret = 1; 1253 } 1254 1255 return ret; 1256 } 1257 1258 static int 1259 bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf) 1260 { 1261 if (rxf->vlan_strip_pending) { 1262 rxf->vlan_strip_pending = false; 1263 bna_bfi_vlan_strip_enable(rxf); 1264 return 1; 1265 } 1266 1267 return 0; 1268 } 1269 1270 /* RX */ 1271 1272 #define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \ 1273 (qcfg)->num_paths : ((qcfg)->num_paths * 2)) 1274 1275 #define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\ 1276 (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)) 1277 1278 #define call_rx_stop_cbfn(rx) \ 1279 do { \ 1280 if ((rx)->stop_cbfn) { \ 1281 void (*cbfn)(void *, struct bna_rx *); \ 1282 void *cbarg; \ 1283 cbfn = (rx)->stop_cbfn; \ 1284 cbarg = (rx)->stop_cbarg; \ 1285 (rx)->stop_cbfn = NULL; \ 1286 (rx)->stop_cbarg = NULL; \ 1287 cbfn(cbarg, rx); \ 1288 } \ 1289 } while (0) 1290 1291 #define call_rx_stall_cbfn(rx) \ 1292 do { \ 1293 if ((rx)->rx_stall_cbfn) \ 1294 (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \ 1295 } while (0) 1296 1297 #define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \ 1298 do { \ 1299 struct bna_dma_addr cur_q_addr = \ 1300 *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \ 1301 (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \ 1302 (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \ 1303 (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \ 1304 (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \ 1305 (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \ 1306 (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\ 1307 } while (0) 1308 1309 static void bna_bfi_rx_enet_start(struct bna_rx *rx); 1310 static void bna_rx_enet_stop(struct bna_rx *rx); 1311 static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx); 1312 1313 bfa_fsm_state_decl(bna_rx, stopped, 1314 struct bna_rx, enum bna_rx_event); 1315 bfa_fsm_state_decl(bna_rx, start_wait, 1316 struct bna_rx, enum bna_rx_event); 1317 bfa_fsm_state_decl(bna_rx, start_stop_wait, 1318 struct bna_rx, enum bna_rx_event); 1319 bfa_fsm_state_decl(bna_rx, rxf_start_wait, 1320 struct bna_rx, enum bna_rx_event); 1321 bfa_fsm_state_decl(bna_rx, started, 1322 struct bna_rx, enum bna_rx_event); 1323 bfa_fsm_state_decl(bna_rx, rxf_stop_wait, 1324 struct bna_rx, enum bna_rx_event); 1325 bfa_fsm_state_decl(bna_rx, stop_wait, 1326 struct bna_rx, enum bna_rx_event); 1327 bfa_fsm_state_decl(bna_rx, cleanup_wait, 1328 struct bna_rx, enum bna_rx_event); 1329 bfa_fsm_state_decl(bna_rx, failed, 1330 struct bna_rx, enum 
bna_rx_event); 1331 bfa_fsm_state_decl(bna_rx, quiesce_wait, 1332 struct bna_rx, enum bna_rx_event); 1333 1334 static void bna_rx_sm_stopped_entry(struct bna_rx *rx) 1335 { 1336 call_rx_stop_cbfn(rx); 1337 } 1338 1339 static void bna_rx_sm_stopped(struct bna_rx *rx, 1340 enum bna_rx_event event) 1341 { 1342 switch (event) { 1343 case RX_E_START: 1344 bfa_fsm_set_state(rx, bna_rx_sm_start_wait); 1345 break; 1346 1347 case RX_E_STOP: 1348 call_rx_stop_cbfn(rx); 1349 break; 1350 1351 case RX_E_FAIL: 1352 /* no-op */ 1353 break; 1354 1355 default: 1356 bfa_sm_fault(event); 1357 break; 1358 } 1359 } 1360 1361 static void bna_rx_sm_start_wait_entry(struct bna_rx *rx) 1362 { 1363 bna_bfi_rx_enet_start(rx); 1364 } 1365 1366 static void 1367 bna_rx_sm_stop_wait_entry(struct bna_rx *rx) 1368 { 1369 } 1370 1371 static void 1372 bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event) 1373 { 1374 switch (event) { 1375 case RX_E_FAIL: 1376 case RX_E_STOPPED: 1377 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); 1378 rx->rx_cleanup_cbfn(rx->bna->bnad, rx); 1379 break; 1380 1381 case RX_E_STARTED: 1382 bna_rx_enet_stop(rx); 1383 break; 1384 1385 default: 1386 bfa_sm_fault(event); 1387 break; 1388 } 1389 } 1390 1391 static void bna_rx_sm_start_wait(struct bna_rx *rx, 1392 enum bna_rx_event event) 1393 { 1394 switch (event) { 1395 case RX_E_STOP: 1396 bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait); 1397 break; 1398 1399 case RX_E_FAIL: 1400 bfa_fsm_set_state(rx, bna_rx_sm_stopped); 1401 break; 1402 1403 case RX_E_STARTED: 1404 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait); 1405 break; 1406 1407 default: 1408 bfa_sm_fault(event); 1409 break; 1410 } 1411 } 1412 1413 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx) 1414 { 1415 rx->rx_post_cbfn(rx->bna->bnad, rx); 1416 bna_rxf_start(&rx->rxf); 1417 } 1418 1419 static void 1420 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx) 1421 { 1422 } 1423 1424 static void 1425 bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event) 1426 { 1427 switch (event) { 1428 case RX_E_FAIL: 1429 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); 1430 bna_rxf_fail(&rx->rxf); 1431 call_rx_stall_cbfn(rx); 1432 rx->rx_cleanup_cbfn(rx->bna->bnad, rx); 1433 break; 1434 1435 case RX_E_RXF_STARTED: 1436 bna_rxf_stop(&rx->rxf); 1437 break; 1438 1439 case RX_E_RXF_STOPPED: 1440 bfa_fsm_set_state(rx, bna_rx_sm_stop_wait); 1441 call_rx_stall_cbfn(rx); 1442 bna_rx_enet_stop(rx); 1443 break; 1444 1445 default: 1446 bfa_sm_fault(event); 1447 break; 1448 } 1449 1450 } 1451 1452 static void 1453 bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx) 1454 { 1455 } 1456 1457 static void 1458 bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event) 1459 { 1460 switch (event) { 1461 case RX_E_FAIL: 1462 case RX_E_STOPPED: 1463 bfa_fsm_set_state(rx, bna_rx_sm_stopped); 1464 break; 1465 1466 case RX_E_STARTED: 1467 bna_rx_enet_stop(rx); 1468 break; 1469 1470 default: 1471 bfa_sm_fault(event); 1472 } 1473 } 1474 1475 static void 1476 bna_rx_sm_started_entry(struct bna_rx *rx) 1477 { 1478 struct bna_rxp *rxp; 1479 int is_regular = (rx->type == BNA_RX_T_REGULAR); 1480 1481 /* Start IB */ 1482 list_for_each_entry(rxp, &rx->rxp_q, qe) 1483 bna_ib_start(rx->bna, &rxp->cq.ib, is_regular); 1484 1485 bna_ethport_cb_rx_started(&rx->bna->ethport); 1486 } 1487 1488 static void 1489 bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event) 1490 { 1491 switch (event) { 1492 case RX_E_STOP: 1493 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); 1494 
bna_ethport_cb_rx_stopped(&rx->bna->ethport); 1495 bna_rxf_stop(&rx->rxf); 1496 break; 1497 1498 case RX_E_FAIL: 1499 bfa_fsm_set_state(rx, bna_rx_sm_failed); 1500 bna_ethport_cb_rx_stopped(&rx->bna->ethport); 1501 bna_rxf_fail(&rx->rxf); 1502 call_rx_stall_cbfn(rx); 1503 rx->rx_cleanup_cbfn(rx->bna->bnad, rx); 1504 break; 1505 1506 default: 1507 bfa_sm_fault(event); 1508 break; 1509 } 1510 } 1511 1512 static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx, 1513 enum bna_rx_event event) 1514 { 1515 switch (event) { 1516 case RX_E_STOP: 1517 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); 1518 break; 1519 1520 case RX_E_FAIL: 1521 bfa_fsm_set_state(rx, bna_rx_sm_failed); 1522 bna_rxf_fail(&rx->rxf); 1523 call_rx_stall_cbfn(rx); 1524 rx->rx_cleanup_cbfn(rx->bna->bnad, rx); 1525 break; 1526 1527 case RX_E_RXF_STARTED: 1528 bfa_fsm_set_state(rx, bna_rx_sm_started); 1529 break; 1530 1531 default: 1532 bfa_sm_fault(event); 1533 break; 1534 } 1535 } 1536 1537 static void 1538 bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx) 1539 { 1540 } 1541 1542 static void 1543 bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event) 1544 { 1545 switch (event) { 1546 case RX_E_FAIL: 1547 case RX_E_RXF_STOPPED: 1548 /* No-op */ 1549 break; 1550 1551 case RX_E_CLEANUP_DONE: 1552 bfa_fsm_set_state(rx, bna_rx_sm_stopped); 1553 break; 1554 1555 default: 1556 bfa_sm_fault(event); 1557 break; 1558 } 1559 } 1560 1561 static void 1562 bna_rx_sm_failed_entry(struct bna_rx *rx) 1563 { 1564 } 1565 1566 static void 1567 bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event) 1568 { 1569 switch (event) { 1570 case RX_E_START: 1571 bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait); 1572 break; 1573 1574 case RX_E_STOP: 1575 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); 1576 break; 1577 1578 case RX_E_FAIL: 1579 case RX_E_RXF_STARTED: 1580 case RX_E_RXF_STOPPED: 1581 /* No-op */ 1582 break; 1583 1584 case RX_E_CLEANUP_DONE: 1585 bfa_fsm_set_state(rx, bna_rx_sm_stopped); 1586 break; 1587 1588 default: 1589 bfa_sm_fault(event); 1590 break; 1591 } } 1592 1593 static void 1594 bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx) 1595 { 1596 } 1597 1598 static void 1599 bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event) 1600 { 1601 switch (event) { 1602 case RX_E_STOP: 1603 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); 1604 break; 1605 1606 case RX_E_FAIL: 1607 bfa_fsm_set_state(rx, bna_rx_sm_failed); 1608 break; 1609 1610 case RX_E_CLEANUP_DONE: 1611 bfa_fsm_set_state(rx, bna_rx_sm_start_wait); 1612 break; 1613 1614 default: 1615 bfa_sm_fault(event); 1616 break; 1617 } 1618 } 1619 1620 static void 1621 bna_bfi_rx_enet_start(struct bna_rx *rx) 1622 { 1623 struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req; 1624 struct bna_rxp *rxp = NULL; 1625 struct bna_rxq *q0 = NULL, *q1 = NULL; 1626 int i; 1627 1628 bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, 1629 BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid); 1630 cfg_req->mh.num_entries = htons( 1631 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req))); 1632 1633 cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet); 1634 cfg_req->num_queue_sets = rx->num_paths; 1635 for (i = 0; i < rx->num_paths; i++) { 1636 rxp = rxp ? 
list_next_entry(rxp, qe) 1637 : list_first_entry(&rx->rxp_q, struct bna_rxp, qe); 1638 GET_RXQS(rxp, q0, q1); 1639 switch (rxp->type) { 1640 case BNA_RXP_SLR: 1641 case BNA_RXP_HDS: 1642 /* Small RxQ */ 1643 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q, 1644 &q1->qpt); 1645 cfg_req->q_cfg[i].qs.rx_buffer_size = 1646 htons((u16)q1->buffer_size); 1647 /* Fall through */ 1648 1649 case BNA_RXP_SINGLE: 1650 /* Large/Single RxQ */ 1651 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q, 1652 &q0->qpt); 1653 if (q0->multi_buffer) 1654 /* multi-buffer is enabled by allocating 1655 * a new rx with new set of resources. 1656 * q0->buffer_size should be initialized to 1657 * fragment size. 1658 */ 1659 cfg_req->rx_cfg.multi_buffer = 1660 BNA_STATUS_T_ENABLED; 1661 else 1662 q0->buffer_size = 1663 bna_enet_mtu_get(&rx->bna->enet); 1664 cfg_req->q_cfg[i].ql.rx_buffer_size = 1665 htons((u16)q0->buffer_size); 1666 break; 1667 1668 default: 1669 BUG_ON(1); 1670 } 1671 1672 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q, 1673 &rxp->cq.qpt); 1674 1675 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = 1676 rxp->cq.ib.ib_seg_host_addr.lsb; 1677 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = 1678 rxp->cq.ib.ib_seg_host_addr.msb; 1679 cfg_req->q_cfg[i].ib.intr.msix_index = 1680 htons((u16)rxp->cq.ib.intr_vector); 1681 } 1682 1683 cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED; 1684 cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; 1685 cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; 1686 cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED; 1687 cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX) 1688 ? BNA_STATUS_T_ENABLED : 1689 BNA_STATUS_T_DISABLED; 1690 cfg_req->ib_cfg.coalescing_timeout = 1691 htonl((u32)rxp->cq.ib.coalescing_timeo); 1692 cfg_req->ib_cfg.inter_pkt_timeout = 1693 htonl((u32)rxp->cq.ib.interpkt_timeo); 1694 cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count; 1695 1696 switch (rxp->type) { 1697 case BNA_RXP_SLR: 1698 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL; 1699 break; 1700 1701 case BNA_RXP_HDS: 1702 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS; 1703 cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type; 1704 cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset; 1705 cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset; 1706 break; 1707 1708 case BNA_RXP_SINGLE: 1709 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE; 1710 break; 1711 1712 default: 1713 BUG_ON(1); 1714 } 1715 cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status; 1716 1717 bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, 1718 sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh); 1719 bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd); 1720 } 1721 1722 static void 1723 bna_bfi_rx_enet_stop(struct bna_rx *rx) 1724 { 1725 struct bfi_enet_req *req = &rx->bfi_enet_cmd.req; 1726 1727 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, 1728 BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid); 1729 req->mh.num_entries = htons( 1730 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req))); 1731 bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), 1732 &req->mh); 1733 bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd); 1734 } 1735 1736 static void 1737 bna_rx_enet_stop(struct bna_rx *rx) 1738 { 1739 struct bna_rxp *rxp; 1740 1741 /* Stop IB */ 1742 list_for_each_entry(rxp, &rx->rxp_q, qe) 1743 bna_ib_stop(rx->bna, &rxp->cq.ib); 1744 1745 bna_bfi_rx_enet_stop(rx); 1746 } 1747 1748 static int 1749 bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg) 1750 { 1751 
if ((rx_mod->rx_free_count == 0) || 1752 (rx_mod->rxp_free_count == 0) || 1753 (rx_mod->rxq_free_count == 0)) 1754 return 0; 1755 1756 if (rx_cfg->rxp_type == BNA_RXP_SINGLE) { 1757 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || 1758 (rx_mod->rxq_free_count < rx_cfg->num_paths)) 1759 return 0; 1760 } else { 1761 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || 1762 (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths))) 1763 return 0; 1764 } 1765 1766 return 1; 1767 } 1768 1769 static struct bna_rxq * 1770 bna_rxq_get(struct bna_rx_mod *rx_mod) 1771 { 1772 struct bna_rxq *rxq = NULL; 1773 1774 rxq = list_first_entry(&rx_mod->rxq_free_q, struct bna_rxq, qe); 1775 list_del(&rxq->qe); 1776 rx_mod->rxq_free_count--; 1777 1778 return rxq; 1779 } 1780 1781 static void 1782 bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq) 1783 { 1784 list_add_tail(&rxq->qe, &rx_mod->rxq_free_q); 1785 rx_mod->rxq_free_count++; 1786 } 1787 1788 static struct bna_rxp * 1789 bna_rxp_get(struct bna_rx_mod *rx_mod) 1790 { 1791 struct bna_rxp *rxp = NULL; 1792 1793 rxp = list_first_entry(&rx_mod->rxp_free_q, struct bna_rxp, qe); 1794 list_del(&rxp->qe); 1795 rx_mod->rxp_free_count--; 1796 1797 return rxp; 1798 } 1799 1800 static void 1801 bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp) 1802 { 1803 list_add_tail(&rxp->qe, &rx_mod->rxp_free_q); 1804 rx_mod->rxp_free_count++; 1805 } 1806 1807 static struct bna_rx * 1808 bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type) 1809 { 1810 struct bna_rx *rx = NULL; 1811 1812 BUG_ON(list_empty(&rx_mod->rx_free_q)); 1813 if (type == BNA_RX_T_REGULAR) 1814 rx = list_first_entry(&rx_mod->rx_free_q, struct bna_rx, qe); 1815 else 1816 rx = list_last_entry(&rx_mod->rx_free_q, struct bna_rx, qe); 1817 1818 rx_mod->rx_free_count--; 1819 list_move_tail(&rx->qe, &rx_mod->rx_active_q); 1820 rx->type = type; 1821 1822 return rx; 1823 } 1824 1825 static void 1826 bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx) 1827 { 1828 struct list_head *qe; 1829 1830 list_for_each_prev(qe, &rx_mod->rx_free_q) 1831 if (((struct bna_rx *)qe)->rid < rx->rid) 1832 break; 1833 1834 list_add(&rx->qe, qe); 1835 rx_mod->rx_free_count++; 1836 } 1837 1838 static void 1839 bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0, 1840 struct bna_rxq *q1) 1841 { 1842 switch (rxp->type) { 1843 case BNA_RXP_SINGLE: 1844 rxp->rxq.single.only = q0; 1845 rxp->rxq.single.reserved = NULL; 1846 break; 1847 case BNA_RXP_SLR: 1848 rxp->rxq.slr.large = q0; 1849 rxp->rxq.slr.small = q1; 1850 break; 1851 case BNA_RXP_HDS: 1852 rxp->rxq.hds.data = q0; 1853 rxp->rxq.hds.hdr = q1; 1854 break; 1855 default: 1856 break; 1857 } 1858 } 1859 1860 static void 1861 bna_rxq_qpt_setup(struct bna_rxq *rxq, 1862 struct bna_rxp *rxp, 1863 u32 page_count, 1864 u32 page_size, 1865 struct bna_mem_descr *qpt_mem, 1866 struct bna_mem_descr *swqpt_mem, 1867 struct bna_mem_descr *page_mem) 1868 { 1869 u8 *kva; 1870 u64 dma; 1871 struct bna_dma_addr bna_dma; 1872 int i; 1873 1874 rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; 1875 rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; 1876 rxq->qpt.kv_qpt_ptr = qpt_mem->kva; 1877 rxq->qpt.page_count = page_count; 1878 rxq->qpt.page_size = page_size; 1879 1880 rxq->rcb->sw_qpt = (void **) swqpt_mem->kva; 1881 rxq->rcb->sw_q = page_mem->kva; 1882 1883 kva = page_mem->kva; 1884 BNA_GET_DMA_ADDR(&page_mem->dma, dma); 1885 1886 for (i = 0; i < rxq->qpt.page_count; i++) { 1887 rxq->rcb->sw_qpt[i] = kva; 1888 kva += PAGE_SIZE; 1889 1890 BNA_SET_DMA_ADDR(dma, &bna_dma); 1891 
((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb = 1892 bna_dma.lsb; 1893 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb = 1894 bna_dma.msb; 1895 dma += PAGE_SIZE; 1896 } 1897 } 1898 1899 static void 1900 bna_rxp_cqpt_setup(struct bna_rxp *rxp, 1901 u32 page_count, 1902 u32 page_size, 1903 struct bna_mem_descr *qpt_mem, 1904 struct bna_mem_descr *swqpt_mem, 1905 struct bna_mem_descr *page_mem) 1906 { 1907 u8 *kva; 1908 u64 dma; 1909 struct bna_dma_addr bna_dma; 1910 int i; 1911 1912 rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; 1913 rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; 1914 rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva; 1915 rxp->cq.qpt.page_count = page_count; 1916 rxp->cq.qpt.page_size = page_size; 1917 1918 rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva; 1919 rxp->cq.ccb->sw_q = page_mem->kva; 1920 1921 kva = page_mem->kva; 1922 BNA_GET_DMA_ADDR(&page_mem->dma, dma); 1923 1924 for (i = 0; i < rxp->cq.qpt.page_count; i++) { 1925 rxp->cq.ccb->sw_qpt[i] = kva; 1926 kva += PAGE_SIZE; 1927 1928 BNA_SET_DMA_ADDR(dma, &bna_dma); 1929 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb = 1930 bna_dma.lsb; 1931 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb = 1932 bna_dma.msb; 1933 dma += PAGE_SIZE; 1934 } 1935 } 1936 1937 static void 1938 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx) 1939 { 1940 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; 1941 1942 bfa_wc_down(&rx_mod->rx_stop_wc); 1943 } 1944 1945 static void 1946 bna_rx_mod_cb_rx_stopped_all(void *arg) 1947 { 1948 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; 1949 1950 if (rx_mod->stop_cbfn) 1951 rx_mod->stop_cbfn(&rx_mod->bna->enet); 1952 rx_mod->stop_cbfn = NULL; 1953 } 1954 1955 static void 1956 bna_rx_start(struct bna_rx *rx) 1957 { 1958 rx->rx_flags |= BNA_RX_F_ENET_STARTED; 1959 if (rx->rx_flags & BNA_RX_F_ENABLED) 1960 bfa_fsm_send_event(rx, RX_E_START); 1961 } 1962 1963 static void 1964 bna_rx_stop(struct bna_rx *rx) 1965 { 1966 rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; 1967 if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped) 1968 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx); 1969 else { 1970 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped; 1971 rx->stop_cbarg = &rx->bna->rx_mod; 1972 bfa_fsm_send_event(rx, RX_E_STOP); 1973 } 1974 } 1975 1976 static void 1977 bna_rx_fail(struct bna_rx *rx) 1978 { 1979 /* Indicate Enet is not enabled, and failed */ 1980 rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; 1981 bfa_fsm_send_event(rx, RX_E_FAIL); 1982 } 1983 1984 void 1985 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type) 1986 { 1987 struct bna_rx *rx; 1988 1989 rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED; 1990 if (type == BNA_RX_T_LOOPBACK) 1991 rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK; 1992 1993 list_for_each_entry(rx, &rx_mod->rx_active_q, qe) 1994 if (rx->type == type) 1995 bna_rx_start(rx); 1996 } 1997 1998 void 1999 bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type) 2000 { 2001 struct bna_rx *rx; 2002 2003 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; 2004 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; 2005 2006 rx_mod->stop_cbfn = bna_enet_cb_rx_stopped; 2007 2008 bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod); 2009 2010 list_for_each_entry(rx, &rx_mod->rx_active_q, qe) 2011 if (rx->type == type) { 2012 bfa_wc_up(&rx_mod->rx_stop_wc); 2013 bna_rx_stop(rx); 2014 } 2015 2016 bfa_wc_wait(&rx_mod->rx_stop_wc); 2017 } 2018 2019 void 2020 bna_rx_mod_fail(struct bna_rx_mod *rx_mod) 2021 { 2022 struct bna_rx *rx; 2023 2024 rx_mod->flags &= 
~BNA_RX_MOD_F_ENET_STARTED; 2025 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; 2026 2027 list_for_each_entry(rx, &rx_mod->rx_active_q, qe) 2028 bna_rx_fail(rx); 2029 } 2030 2031 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna, 2032 struct bna_res_info *res_info) 2033 { 2034 int index; 2035 struct bna_rx *rx_ptr; 2036 struct bna_rxp *rxp_ptr; 2037 struct bna_rxq *rxq_ptr; 2038 2039 rx_mod->bna = bna; 2040 rx_mod->flags = 0; 2041 2042 rx_mod->rx = (struct bna_rx *) 2043 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva; 2044 rx_mod->rxp = (struct bna_rxp *) 2045 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva; 2046 rx_mod->rxq = (struct bna_rxq *) 2047 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva; 2048 2049 /* Initialize the queues */ 2050 INIT_LIST_HEAD(&rx_mod->rx_free_q); 2051 rx_mod->rx_free_count = 0; 2052 INIT_LIST_HEAD(&rx_mod->rxq_free_q); 2053 rx_mod->rxq_free_count = 0; 2054 INIT_LIST_HEAD(&rx_mod->rxp_free_q); 2055 rx_mod->rxp_free_count = 0; 2056 INIT_LIST_HEAD(&rx_mod->rx_active_q); 2057 2058 /* Build RX queues */ 2059 for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { 2060 rx_ptr = &rx_mod->rx[index]; 2061 2062 INIT_LIST_HEAD(&rx_ptr->rxp_q); 2063 rx_ptr->bna = NULL; 2064 rx_ptr->rid = index; 2065 rx_ptr->stop_cbfn = NULL; 2066 rx_ptr->stop_cbarg = NULL; 2067 2068 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q); 2069 rx_mod->rx_free_count++; 2070 } 2071 2072 /* build RX-path queue */ 2073 for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { 2074 rxp_ptr = &rx_mod->rxp[index]; 2075 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q); 2076 rx_mod->rxp_free_count++; 2077 } 2078 2079 /* build RXQ queue */ 2080 for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) { 2081 rxq_ptr = &rx_mod->rxq[index]; 2082 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q); 2083 rx_mod->rxq_free_count++; 2084 } 2085 } 2086 2087 void 2088 bna_rx_mod_uninit(struct bna_rx_mod *rx_mod) 2089 { 2090 rx_mod->bna = NULL; 2091 } 2092 2093 void 2094 bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr) 2095 { 2096 struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp; 2097 struct bna_rxp *rxp = NULL; 2098 struct bna_rxq *q0 = NULL, *q1 = NULL; 2099 int i; 2100 2101 bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp, 2102 sizeof(struct bfi_enet_rx_cfg_rsp)); 2103 2104 rx->hw_id = cfg_rsp->hw_id; 2105 2106 for (i = 0, rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe); 2107 i < rx->num_paths; i++, rxp = list_next_entry(rxp, qe)) { 2108 GET_RXQS(rxp, q0, q1); 2109 2110 /* Setup doorbells */ 2111 rxp->cq.ccb->i_dbell->doorbell_addr = 2112 rx->bna->pcidev.pci_bar_kva 2113 + ntohl(cfg_rsp->q_handles[i].i_dbell); 2114 rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid; 2115 q0->rcb->q_dbell = 2116 rx->bna->pcidev.pci_bar_kva 2117 + ntohl(cfg_rsp->q_handles[i].ql_dbell); 2118 q0->hw_id = cfg_rsp->q_handles[i].hw_lqid; 2119 if (q1) { 2120 q1->rcb->q_dbell = 2121 rx->bna->pcidev.pci_bar_kva 2122 + ntohl(cfg_rsp->q_handles[i].qs_dbell); 2123 q1->hw_id = cfg_rsp->q_handles[i].hw_sqid; 2124 } 2125 2126 /* Initialize producer/consumer indexes */ 2127 (*rxp->cq.ccb->hw_producer_index) = 0; 2128 rxp->cq.ccb->producer_index = 0; 2129 q0->rcb->producer_index = q0->rcb->consumer_index = 0; 2130 if (q1) 2131 q1->rcb->producer_index = q1->rcb->consumer_index = 0; 2132 } 2133 2134 bfa_fsm_send_event(rx, RX_E_STARTED); 2135 } 2136 2137 void 2138 bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr) 2139 
{ 2140 bfa_fsm_send_event(rx, RX_E_STOPPED); 2141 } 2142 2143 void 2144 bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info) 2145 { 2146 u32 cq_size, hq_size, dq_size; 2147 u32 cpage_count, hpage_count, dpage_count; 2148 struct bna_mem_info *mem_info; 2149 u32 cq_depth; 2150 u32 hq_depth; 2151 u32 dq_depth; 2152 2153 dq_depth = q_cfg->q0_depth; 2154 hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth); 2155 cq_depth = roundup_pow_of_two(dq_depth + hq_depth); 2156 2157 cq_size = cq_depth * BFI_CQ_WI_SIZE; 2158 cq_size = ALIGN(cq_size, PAGE_SIZE); 2159 cpage_count = SIZE_TO_PAGES(cq_size); 2160 2161 dq_depth = roundup_pow_of_two(dq_depth); 2162 dq_size = dq_depth * BFI_RXQ_WI_SIZE; 2163 dq_size = ALIGN(dq_size, PAGE_SIZE); 2164 dpage_count = SIZE_TO_PAGES(dq_size); 2165 2166 if (BNA_RXP_SINGLE != q_cfg->rxp_type) { 2167 hq_depth = roundup_pow_of_two(hq_depth); 2168 hq_size = hq_depth * BFI_RXQ_WI_SIZE; 2169 hq_size = ALIGN(hq_size, PAGE_SIZE); 2170 hpage_count = SIZE_TO_PAGES(hq_size); 2171 } else 2172 hpage_count = 0; 2173 2174 res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM; 2175 mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info; 2176 mem_info->mem_type = BNA_MEM_T_KVA; 2177 mem_info->len = sizeof(struct bna_ccb); 2178 mem_info->num = q_cfg->num_paths; 2179 2180 res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM; 2181 mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info; 2182 mem_info->mem_type = BNA_MEM_T_KVA; 2183 mem_info->len = sizeof(struct bna_rcb); 2184 mem_info->num = BNA_GET_RXQS(q_cfg); 2185 2186 res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM; 2187 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info; 2188 mem_info->mem_type = BNA_MEM_T_DMA; 2189 mem_info->len = cpage_count * sizeof(struct bna_dma_addr); 2190 mem_info->num = q_cfg->num_paths; 2191 2192 res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM; 2193 mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info; 2194 mem_info->mem_type = BNA_MEM_T_KVA; 2195 mem_info->len = cpage_count * sizeof(void *); 2196 mem_info->num = q_cfg->num_paths; 2197 2198 res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM; 2199 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info; 2200 mem_info->mem_type = BNA_MEM_T_DMA; 2201 mem_info->len = PAGE_SIZE * cpage_count; 2202 mem_info->num = q_cfg->num_paths; 2203 2204 res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM; 2205 mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info; 2206 mem_info->mem_type = BNA_MEM_T_DMA; 2207 mem_info->len = dpage_count * sizeof(struct bna_dma_addr); 2208 mem_info->num = q_cfg->num_paths; 2209 2210 res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM; 2211 mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info; 2212 mem_info->mem_type = BNA_MEM_T_KVA; 2213 mem_info->len = dpage_count * sizeof(void *); 2214 mem_info->num = q_cfg->num_paths; 2215 2216 res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM; 2217 mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info; 2218 mem_info->mem_type = BNA_MEM_T_DMA; 2219 mem_info->len = PAGE_SIZE * dpage_count; 2220 mem_info->num = q_cfg->num_paths; 2221 2222 res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM; 2223 mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info; 2224 mem_info->mem_type = BNA_MEM_T_DMA; 2225 mem_info->len = hpage_count * sizeof(struct bna_dma_addr); 2226 mem_info->num = (hpage_count ? 
q_cfg->num_paths : 0); 2227 2228 res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM; 2229 mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info; 2230 mem_info->mem_type = BNA_MEM_T_KVA; 2231 mem_info->len = hpage_count * sizeof(void *); 2232 mem_info->num = (hpage_count ? q_cfg->num_paths : 0); 2233 2234 res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM; 2235 mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info; 2236 mem_info->mem_type = BNA_MEM_T_DMA; 2237 mem_info->len = PAGE_SIZE * hpage_count; 2238 mem_info->num = (hpage_count ? q_cfg->num_paths : 0); 2239 2240 res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; 2241 mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info; 2242 mem_info->mem_type = BNA_MEM_T_DMA; 2243 mem_info->len = BFI_IBIDX_SIZE; 2244 mem_info->num = q_cfg->num_paths; 2245 2246 res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM; 2247 mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info; 2248 mem_info->mem_type = BNA_MEM_T_KVA; 2249 mem_info->len = BFI_ENET_RSS_RIT_MAX; 2250 mem_info->num = 1; 2251 2252 res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR; 2253 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX; 2254 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths; 2255 } 2256 2257 struct bna_rx * 2258 bna_rx_create(struct bna *bna, struct bnad *bnad, 2259 struct bna_rx_config *rx_cfg, 2260 const struct bna_rx_event_cbfn *rx_cbfn, 2261 struct bna_res_info *res_info, 2262 void *priv) 2263 { 2264 struct bna_rx_mod *rx_mod = &bna->rx_mod; 2265 struct bna_rx *rx; 2266 struct bna_rxp *rxp; 2267 struct bna_rxq *q0; 2268 struct bna_rxq *q1; 2269 struct bna_intr_info *intr_info; 2270 struct bna_mem_descr *hqunmap_mem; 2271 struct bna_mem_descr *dqunmap_mem; 2272 struct bna_mem_descr *ccb_mem; 2273 struct bna_mem_descr *rcb_mem; 2274 struct bna_mem_descr *cqpt_mem; 2275 struct bna_mem_descr *cswqpt_mem; 2276 struct bna_mem_descr *cpage_mem; 2277 struct bna_mem_descr *hqpt_mem; 2278 struct bna_mem_descr *dqpt_mem; 2279 struct bna_mem_descr *hsqpt_mem; 2280 struct bna_mem_descr *dsqpt_mem; 2281 struct bna_mem_descr *hpage_mem; 2282 struct bna_mem_descr *dpage_mem; 2283 u32 dpage_count, hpage_count; 2284 u32 hq_idx, dq_idx, rcb_idx; 2285 u32 cq_depth, i; 2286 u32 page_count; 2287 2288 if (!bna_rx_res_check(rx_mod, rx_cfg)) 2289 return NULL; 2290 2291 intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; 2292 ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0]; 2293 rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0]; 2294 dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0]; 2295 hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0]; 2296 cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0]; 2297 cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0]; 2298 cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0]; 2299 hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0]; 2300 dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0]; 2301 hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0]; 2302 dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0]; 2303 hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0]; 2304 dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0]; 2305 2306 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len / 2307 PAGE_SIZE; 2308 2309 dpage_count = 
res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len / 2310 PAGE_SIZE; 2311 2312 hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len / 2313 PAGE_SIZE; 2314 2315 rx = bna_rx_get(rx_mod, rx_cfg->rx_type); 2316 rx->bna = bna; 2317 rx->rx_flags = 0; 2318 INIT_LIST_HEAD(&rx->rxp_q); 2319 rx->stop_cbfn = NULL; 2320 rx->stop_cbarg = NULL; 2321 rx->priv = priv; 2322 2323 rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn; 2324 rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn; 2325 rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn; 2326 rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn; 2327 rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn; 2328 /* Following callbacks are mandatory */ 2329 rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn; 2330 rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn; 2331 2332 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) { 2333 switch (rx->type) { 2334 case BNA_RX_T_REGULAR: 2335 if (!(rx->bna->rx_mod.flags & 2336 BNA_RX_MOD_F_ENET_LOOPBACK)) 2337 rx->rx_flags |= BNA_RX_F_ENET_STARTED; 2338 break; 2339 case BNA_RX_T_LOOPBACK: 2340 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK) 2341 rx->rx_flags |= BNA_RX_F_ENET_STARTED; 2342 break; 2343 } 2344 } 2345 2346 rx->num_paths = rx_cfg->num_paths; 2347 for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0; 2348 i < rx->num_paths; i++) { 2349 rxp = bna_rxp_get(rx_mod); 2350 list_add_tail(&rxp->qe, &rx->rxp_q); 2351 rxp->type = rx_cfg->rxp_type; 2352 rxp->rx = rx; 2353 rxp->cq.rx = rx; 2354 2355 q0 = bna_rxq_get(rx_mod); 2356 if (BNA_RXP_SINGLE == rx_cfg->rxp_type) 2357 q1 = NULL; 2358 else 2359 q1 = bna_rxq_get(rx_mod); 2360 2361 if (1 == intr_info->num) 2362 rxp->vector = intr_info->idl[0].vector; 2363 else 2364 rxp->vector = intr_info->idl[i].vector; 2365 2366 /* Setup IB */ 2367 2368 rxp->cq.ib.ib_seg_host_addr.lsb = 2369 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb; 2370 rxp->cq.ib.ib_seg_host_addr.msb = 2371 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb; 2372 rxp->cq.ib.ib_seg_host_addr_kva = 2373 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva; 2374 rxp->cq.ib.intr_type = intr_info->intr_type; 2375 if (intr_info->intr_type == BNA_INTR_T_MSIX) 2376 rxp->cq.ib.intr_vector = rxp->vector; 2377 else 2378 rxp->cq.ib.intr_vector = BIT(rxp->vector); 2379 rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo; 2380 rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT; 2381 rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO; 2382 2383 bna_rxp_add_rxqs(rxp, q0, q1); 2384 2385 /* Setup large Q */ 2386 2387 q0->rx = rx; 2388 q0->rxp = rxp; 2389 2390 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; 2391 q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva; 2392 rcb_idx++; dq_idx++; 2393 q0->rcb->q_depth = rx_cfg->q0_depth; 2394 q0->q_depth = rx_cfg->q0_depth; 2395 q0->multi_buffer = rx_cfg->q0_multi_buf; 2396 q0->buffer_size = rx_cfg->q0_buf_size; 2397 q0->num_vecs = rx_cfg->q0_num_vecs; 2398 q0->rcb->rxq = q0; 2399 q0->rcb->bnad = bna->bnad; 2400 q0->rcb->id = 0; 2401 q0->rx_packets = q0->rx_bytes = 0; 2402 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; 2403 2404 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE, 2405 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]); 2406 2407 if (rx->rcb_setup_cbfn) 2408 rx->rcb_setup_cbfn(bnad, q0->rcb); 2409 2410 /* Setup small Q */ 2411 2412 if (q1) { 2413 q1->rx = rx; 2414 q1->rxp = rxp; 2415 2416 q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; 2417 q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva; 2418 rcb_idx++; hq_idx++; 2419 q1->rcb->q_depth = 
rx_cfg->q1_depth; 2420 q1->q_depth = rx_cfg->q1_depth; 2421 q1->multi_buffer = BNA_STATUS_T_DISABLED; 2422 q1->num_vecs = 1; 2423 q1->rcb->rxq = q1; 2424 q1->rcb->bnad = bna->bnad; 2425 q1->rcb->id = 1; 2426 q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ? 2427 rx_cfg->hds_config.forced_offset 2428 : rx_cfg->q1_buf_size; 2429 q1->rx_packets = q1->rx_bytes = 0; 2430 q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; 2431 2432 bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE, 2433 &hqpt_mem[i], &hsqpt_mem[i], 2434 &hpage_mem[i]); 2435 2436 if (rx->rcb_setup_cbfn) 2437 rx->rcb_setup_cbfn(bnad, q1->rcb); 2438 } 2439 2440 /* Setup CQ */ 2441 2442 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva; 2443 cq_depth = rx_cfg->q0_depth + 2444 ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ? 2445 0 : rx_cfg->q1_depth); 2446 /* if multi-buffer is enabled sum of q0_depth 2447 * and q1_depth need not be a power of 2 2448 */ 2449 cq_depth = roundup_pow_of_two(cq_depth); 2450 rxp->cq.ccb->q_depth = cq_depth; 2451 rxp->cq.ccb->cq = &rxp->cq; 2452 rxp->cq.ccb->rcb[0] = q0->rcb; 2453 q0->rcb->ccb = rxp->cq.ccb; 2454 if (q1) { 2455 rxp->cq.ccb->rcb[1] = q1->rcb; 2456 q1->rcb->ccb = rxp->cq.ccb; 2457 } 2458 rxp->cq.ccb->hw_producer_index = 2459 (u32 *)rxp->cq.ib.ib_seg_host_addr_kva; 2460 rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell; 2461 rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type; 2462 rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector; 2463 rxp->cq.ccb->rx_coalescing_timeo = 2464 rxp->cq.ib.coalescing_timeo; 2465 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0; 2466 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0; 2467 rxp->cq.ccb->bnad = bna->bnad; 2468 rxp->cq.ccb->id = i; 2469 2470 bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE, 2471 &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]); 2472 2473 if (rx->ccb_setup_cbfn) 2474 rx->ccb_setup_cbfn(bnad, rxp->cq.ccb); 2475 } 2476 2477 rx->hds_cfg = rx_cfg->hds_config; 2478 2479 bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info); 2480 2481 bfa_fsm_set_state(rx, bna_rx_sm_stopped); 2482 2483 rx_mod->rid_mask |= BIT(rx->rid); 2484 2485 return rx; 2486 } 2487 2488 void 2489 bna_rx_destroy(struct bna_rx *rx) 2490 { 2491 struct bna_rx_mod *rx_mod = &rx->bna->rx_mod; 2492 struct bna_rxq *q0 = NULL; 2493 struct bna_rxq *q1 = NULL; 2494 struct bna_rxp *rxp; 2495 struct list_head *qe; 2496 2497 bna_rxf_uninit(&rx->rxf); 2498 2499 while (!list_empty(&rx->rxp_q)) { 2500 rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe); 2501 list_del(&rxp->qe); 2502 GET_RXQS(rxp, q0, q1); 2503 if (rx->rcb_destroy_cbfn) 2504 rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb); 2505 q0->rcb = NULL; 2506 q0->rxp = NULL; 2507 q0->rx = NULL; 2508 bna_rxq_put(rx_mod, q0); 2509 2510 if (q1) { 2511 if (rx->rcb_destroy_cbfn) 2512 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb); 2513 q1->rcb = NULL; 2514 q1->rxp = NULL; 2515 q1->rx = NULL; 2516 bna_rxq_put(rx_mod, q1); 2517 } 2518 rxp->rxq.slr.large = NULL; 2519 rxp->rxq.slr.small = NULL; 2520 2521 if (rx->ccb_destroy_cbfn) 2522 rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb); 2523 rxp->cq.ccb = NULL; 2524 rxp->rx = NULL; 2525 bna_rxp_put(rx_mod, rxp); 2526 } 2527 2528 list_for_each(qe, &rx_mod->rx_active_q) 2529 if (qe == &rx->qe) { 2530 list_del(&rx->qe); 2531 break; 2532 } 2533 2534 rx_mod->rid_mask &= ~BIT(rx->rid); 2535 2536 rx->bna = NULL; 2537 rx->priv = NULL; 2538 bna_rx_put(rx_mod, rx); 2539 } 2540 2541 void 2542 bna_rx_enable(struct bna_rx *rx) 2543 { 2544 if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped) 2545 return; 2546 2547 rx->rx_flags |= BNA_RX_F_ENABLED; 2548 if 
(rx->rx_flags & BNA_RX_F_ENET_STARTED) 2549 bfa_fsm_send_event(rx, RX_E_START); 2550 } 2551 2552 void 2553 bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type, 2554 void (*cbfn)(void *, struct bna_rx *)) 2555 { 2556 if (type == BNA_SOFT_CLEANUP) { 2557 /* h/w should not be accessed. Treat it as if we are already stopped */ 2558 (*cbfn)(rx->bna->bnad, rx); 2559 } else { 2560 rx->stop_cbfn = cbfn; 2561 rx->stop_cbarg = rx->bna->bnad; 2562 2563 rx->rx_flags &= ~BNA_RX_F_ENABLED; 2564 2565 bfa_fsm_send_event(rx, RX_E_STOP); 2566 } 2567 } 2568 2569 void 2570 bna_rx_cleanup_complete(struct bna_rx *rx) 2571 { 2572 bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE); 2573 } 2574 2575 void 2576 bna_rx_vlan_strip_enable(struct bna_rx *rx) 2577 { 2578 struct bna_rxf *rxf = &rx->rxf; 2579 2580 if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) { 2581 rxf->vlan_strip_status = BNA_STATUS_T_ENABLED; 2582 rxf->vlan_strip_pending = true; 2583 bfa_fsm_send_event(rxf, RXF_E_CONFIG); 2584 } 2585 } 2586 2587 void 2588 bna_rx_vlan_strip_disable(struct bna_rx *rx) 2589 { 2590 struct bna_rxf *rxf = &rx->rxf; 2591 2592 if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) { 2593 rxf->vlan_strip_status = BNA_STATUS_T_DISABLED; 2594 rxf->vlan_strip_pending = true; 2595 bfa_fsm_send_event(rxf, RXF_E_CONFIG); 2596 } 2597 } 2598 2599 enum bna_cb_status 2600 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, 2601 enum bna_rxmode bitmask) 2602 { 2603 struct bna_rxf *rxf = &rx->rxf; 2604 int need_hw_config = 0; 2605 2606 /* Error checks */ 2607 2608 if (is_promisc_enable(new_mode, bitmask)) { 2609 /* If promisc mode is already enabled elsewhere in the system */ 2610 if ((rx->bna->promisc_rid != BFI_INVALID_RID) && 2611 (rx->bna->promisc_rid != rxf->rx->rid)) 2612 goto err_return; 2613 2614 /* If default mode is already enabled in the system */ 2615 if (rx->bna->default_mode_rid != BFI_INVALID_RID) 2616 goto err_return; 2617 2618 /* Trying to enable promiscuous and default mode together */ 2619 if (is_default_enable(new_mode, bitmask)) 2620 goto err_return; 2621 } 2622 2623 if (is_default_enable(new_mode, bitmask)) { 2624 /* If default mode is already enabled elsewhere in the system */ 2625 if ((rx->bna->default_mode_rid != BFI_INVALID_RID) && 2626 (rx->bna->default_mode_rid != rxf->rx->rid)) { 2627 goto err_return; 2628 } 2629 2630 /* If promiscuous mode is already enabled in the system */ 2631 if (rx->bna->promisc_rid != BFI_INVALID_RID) 2632 goto err_return; 2633 } 2634 2635 /* Process the commands */ 2636 2637 if (is_promisc_enable(new_mode, bitmask)) { 2638 if (bna_rxf_promisc_enable(rxf)) 2639 need_hw_config = 1; 2640 } else if (is_promisc_disable(new_mode, bitmask)) { 2641 if (bna_rxf_promisc_disable(rxf)) 2642 need_hw_config = 1; 2643 } 2644 2645 if (is_allmulti_enable(new_mode, bitmask)) { 2646 if (bna_rxf_allmulti_enable(rxf)) 2647 need_hw_config = 1; 2648 } else if (is_allmulti_disable(new_mode, bitmask)) { 2649 if (bna_rxf_allmulti_disable(rxf)) 2650 need_hw_config = 1; 2651 } 2652 2653 /* Trigger h/w if needed */ 2654 2655 if (need_hw_config) { 2656 rxf->cam_fltr_cbfn = NULL; 2657 rxf->cam_fltr_cbarg = rx->bna->bnad; 2658 bfa_fsm_send_event(rxf, RXF_E_CONFIG); 2659 } 2660 2661 return BNA_CB_SUCCESS; 2662 2663 err_return: 2664 return BNA_CB_FAIL; 2665 } 2666 2667 void 2668 bna_rx_vlanfilter_enable(struct bna_rx *rx) 2669 { 2670 struct bna_rxf *rxf = &rx->rxf; 2671 2672 if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) { 2673 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED; 2674 rxf->vlan_pending_bitmask =
(u8)BFI_VLAN_BMASK_ALL; 2675 bfa_fsm_send_event(rxf, RXF_E_CONFIG); 2676 } 2677 } 2678 2679 void 2680 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo) 2681 { 2682 struct bna_rxp *rxp; 2683 2684 list_for_each_entry(rxp, &rx->rxp_q, qe) { 2685 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo; 2686 bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo); 2687 } 2688 } 2689 2690 void 2691 bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]) 2692 { 2693 int i, j; 2694 2695 for (i = 0; i < BNA_LOAD_T_MAX; i++) 2696 for (j = 0; j < BNA_BIAS_T_MAX; j++) 2697 bna->rx_mod.dim_vector[i][j] = vector[i][j]; 2698 } 2699 2700 void 2701 bna_rx_dim_update(struct bna_ccb *ccb) 2702 { 2703 struct bna *bna = ccb->cq->rx->bna; 2704 u32 load, bias; 2705 u32 pkt_rt, small_rt, large_rt; 2706 u8 coalescing_timeo; 2707 2708 if ((ccb->pkt_rate.small_pkt_cnt == 0) && 2709 (ccb->pkt_rate.large_pkt_cnt == 0)) 2710 return; 2711 2712 /* Arrive at preconfigured coalescing timeo value based on pkt rate */ 2713 2714 small_rt = ccb->pkt_rate.small_pkt_cnt; 2715 large_rt = ccb->pkt_rate.large_pkt_cnt; 2716 2717 pkt_rt = small_rt + large_rt; 2718 2719 if (pkt_rt < BNA_PKT_RATE_10K) 2720 load = BNA_LOAD_T_LOW_4; 2721 else if (pkt_rt < BNA_PKT_RATE_20K) 2722 load = BNA_LOAD_T_LOW_3; 2723 else if (pkt_rt < BNA_PKT_RATE_30K) 2724 load = BNA_LOAD_T_LOW_2; 2725 else if (pkt_rt < BNA_PKT_RATE_40K) 2726 load = BNA_LOAD_T_LOW_1; 2727 else if (pkt_rt < BNA_PKT_RATE_50K) 2728 load = BNA_LOAD_T_HIGH_1; 2729 else if (pkt_rt < BNA_PKT_RATE_60K) 2730 load = BNA_LOAD_T_HIGH_2; 2731 else if (pkt_rt < BNA_PKT_RATE_80K) 2732 load = BNA_LOAD_T_HIGH_3; 2733 else 2734 load = BNA_LOAD_T_HIGH_4; 2735 2736 if (small_rt > (large_rt << 1)) 2737 bias = 0; 2738 else 2739 bias = 1; 2740 2741 ccb->pkt_rate.small_pkt_cnt = 0; 2742 ccb->pkt_rate.large_pkt_cnt = 0; 2743 2744 coalescing_timeo = bna->rx_mod.dim_vector[load][bias]; 2745 ccb->rx_coalescing_timeo = coalescing_timeo; 2746 2747 /* Set it to IB */ 2748 bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo); 2749 } 2750 2751 const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = { 2752 {12, 12}, 2753 {6, 10}, 2754 {5, 10}, 2755 {4, 8}, 2756 {3, 6}, 2757 {3, 6}, 2758 {2, 4}, 2759 {1, 2}, 2760 }; 2761 2762 /* TX */ 2763 2764 #define call_tx_stop_cbfn(tx) \ 2765 do { \ 2766 if ((tx)->stop_cbfn) { \ 2767 void (*cbfn)(void *, struct bna_tx *); \ 2768 void *cbarg; \ 2769 cbfn = (tx)->stop_cbfn; \ 2770 cbarg = (tx)->stop_cbarg; \ 2771 (tx)->stop_cbfn = NULL; \ 2772 (tx)->stop_cbarg = NULL; \ 2773 cbfn(cbarg, (tx)); \ 2774 } \ 2775 } while (0) 2776 2777 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx); 2778 static void bna_bfi_tx_enet_start(struct bna_tx *tx); 2779 static void bna_tx_enet_stop(struct bna_tx *tx); 2780 2781 enum bna_tx_event { 2782 TX_E_START = 1, 2783 TX_E_STOP = 2, 2784 TX_E_FAIL = 3, 2785 TX_E_STARTED = 4, 2786 TX_E_STOPPED = 5, 2787 TX_E_CLEANUP_DONE = 7, 2788 TX_E_BW_UPDATE = 8, 2789 }; 2790 2791 bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event); 2792 bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event); 2793 bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event); 2794 bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event); 2795 bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx, 2796 enum bna_tx_event); 2797 bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx, 2798 enum bna_tx_event); 2799 bfa_fsm_state_decl(bna_tx, 
prio_cleanup_wait, struct bna_tx, 2800 enum bna_tx_event); 2801 bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event); 2802 bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx, 2803 enum bna_tx_event); 2804 2805 static void 2806 bna_tx_sm_stopped_entry(struct bna_tx *tx) 2807 { 2808 call_tx_stop_cbfn(tx); 2809 } 2810 2811 static void 2812 bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event) 2813 { 2814 switch (event) { 2815 case TX_E_START: 2816 bfa_fsm_set_state(tx, bna_tx_sm_start_wait); 2817 break; 2818 2819 case TX_E_STOP: 2820 call_tx_stop_cbfn(tx); 2821 break; 2822 2823 case TX_E_FAIL: 2824 /* No-op */ 2825 break; 2826 2827 case TX_E_BW_UPDATE: 2828 /* No-op */ 2829 break; 2830 2831 default: 2832 bfa_sm_fault(event); 2833 } 2834 } 2835 2836 static void 2837 bna_tx_sm_start_wait_entry(struct bna_tx *tx) 2838 { 2839 bna_bfi_tx_enet_start(tx); 2840 } 2841 2842 static void 2843 bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event) 2844 { 2845 switch (event) { 2846 case TX_E_STOP: 2847 tx->flags &= ~BNA_TX_F_BW_UPDATED; 2848 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); 2849 break; 2850 2851 case TX_E_FAIL: 2852 tx->flags &= ~BNA_TX_F_BW_UPDATED; 2853 bfa_fsm_set_state(tx, bna_tx_sm_stopped); 2854 break; 2855 2856 case TX_E_STARTED: 2857 if (tx->flags & BNA_TX_F_BW_UPDATED) { 2858 tx->flags &= ~BNA_TX_F_BW_UPDATED; 2859 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); 2860 } else 2861 bfa_fsm_set_state(tx, bna_tx_sm_started); 2862 break; 2863 2864 case TX_E_BW_UPDATE: 2865 tx->flags |= BNA_TX_F_BW_UPDATED; 2866 break; 2867 2868 default: 2869 bfa_sm_fault(event); 2870 } 2871 } 2872 2873 static void 2874 bna_tx_sm_started_entry(struct bna_tx *tx) 2875 { 2876 struct bna_txq *txq; 2877 int is_regular = (tx->type == BNA_TX_T_REGULAR); 2878 2879 list_for_each_entry(txq, &tx->txq_q, qe) { 2880 txq->tcb->priority = txq->priority; 2881 /* Start IB */ 2882 bna_ib_start(tx->bna, &txq->ib, is_regular); 2883 } 2884 tx->tx_resume_cbfn(tx->bna->bnad, tx); 2885 } 2886 2887 static void 2888 bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event) 2889 { 2890 switch (event) { 2891 case TX_E_STOP: 2892 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); 2893 tx->tx_stall_cbfn(tx->bna->bnad, tx); 2894 bna_tx_enet_stop(tx); 2895 break; 2896 2897 case TX_E_FAIL: 2898 bfa_fsm_set_state(tx, bna_tx_sm_failed); 2899 tx->tx_stall_cbfn(tx->bna->bnad, tx); 2900 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); 2901 break; 2902 2903 case TX_E_BW_UPDATE: 2904 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); 2905 break; 2906 2907 default: 2908 bfa_sm_fault(event); 2909 } 2910 } 2911 2912 static void 2913 bna_tx_sm_stop_wait_entry(struct bna_tx *tx) 2914 { 2915 } 2916 2917 static void 2918 bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event) 2919 { 2920 switch (event) { 2921 case TX_E_FAIL: 2922 case TX_E_STOPPED: 2923 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); 2924 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); 2925 break; 2926 2927 case TX_E_STARTED: 2928 /** 2929 * We are here due to start_wait -> stop_wait transition on 2930 * TX_E_STOP event 2931 */ 2932 bna_tx_enet_stop(tx); 2933 break; 2934 2935 case TX_E_BW_UPDATE: 2936 /* No-op */ 2937 break; 2938 2939 default: 2940 bfa_sm_fault(event); 2941 } 2942 } 2943 2944 static void 2945 bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx) 2946 { 2947 } 2948 2949 static void 2950 bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) 2951 { 2952 switch (event) { 2953 case TX_E_FAIL: 2954 case TX_E_BW_UPDATE: 2955 /* No-op */ 2956 
break; 2957 2958 case TX_E_CLEANUP_DONE: 2959 bfa_fsm_set_state(tx, bna_tx_sm_stopped); 2960 break; 2961 2962 default: 2963 bfa_sm_fault(event); 2964 } 2965 } 2966 2967 static void 2968 bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx) 2969 { 2970 tx->tx_stall_cbfn(tx->bna->bnad, tx); 2971 bna_tx_enet_stop(tx); 2972 } 2973 2974 static void 2975 bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event) 2976 { 2977 switch (event) { 2978 case TX_E_STOP: 2979 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); 2980 break; 2981 2982 case TX_E_FAIL: 2983 bfa_fsm_set_state(tx, bna_tx_sm_failed); 2984 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); 2985 break; 2986 2987 case TX_E_STOPPED: 2988 bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait); 2989 break; 2990 2991 case TX_E_BW_UPDATE: 2992 /* No-op */ 2993 break; 2994 2995 default: 2996 bfa_sm_fault(event); 2997 } 2998 } 2999 3000 static void 3001 bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx) 3002 { 3003 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); 3004 } 3005 3006 static void 3007 bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) 3008 { 3009 switch (event) { 3010 case TX_E_STOP: 3011 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); 3012 break; 3013 3014 case TX_E_FAIL: 3015 bfa_fsm_set_state(tx, bna_tx_sm_failed); 3016 break; 3017 3018 case TX_E_BW_UPDATE: 3019 /* No-op */ 3020 break; 3021 3022 case TX_E_CLEANUP_DONE: 3023 bfa_fsm_set_state(tx, bna_tx_sm_start_wait); 3024 break; 3025 3026 default: 3027 bfa_sm_fault(event); 3028 } 3029 } 3030 3031 static void 3032 bna_tx_sm_failed_entry(struct bna_tx *tx) 3033 { 3034 } 3035 3036 static void 3037 bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event) 3038 { 3039 switch (event) { 3040 case TX_E_START: 3041 bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait); 3042 break; 3043 3044 case TX_E_STOP: 3045 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); 3046 break; 3047 3048 case TX_E_FAIL: 3049 /* No-op */ 3050 break; 3051 3052 case TX_E_CLEANUP_DONE: 3053 bfa_fsm_set_state(tx, bna_tx_sm_stopped); 3054 break; 3055 3056 default: 3057 bfa_sm_fault(event); 3058 } 3059 } 3060 3061 static void 3062 bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx) 3063 { 3064 } 3065 3066 static void 3067 bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event) 3068 { 3069 switch (event) { 3070 case TX_E_STOP: 3071 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); 3072 break; 3073 3074 case TX_E_FAIL: 3075 bfa_fsm_set_state(tx, bna_tx_sm_failed); 3076 break; 3077 3078 case TX_E_CLEANUP_DONE: 3079 bfa_fsm_set_state(tx, bna_tx_sm_start_wait); 3080 break; 3081 3082 case TX_E_BW_UPDATE: 3083 /* No-op */ 3084 break; 3085 3086 default: 3087 bfa_sm_fault(event); 3088 } 3089 } 3090 3091 static void 3092 bna_bfi_tx_enet_start(struct bna_tx *tx) 3093 { 3094 struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req; 3095 struct bna_txq *txq = NULL; 3096 int i; 3097 3098 bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, 3099 BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid); 3100 cfg_req->mh.num_entries = htons( 3101 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req))); 3102 3103 cfg_req->num_queues = tx->num_txq; 3104 for (i = 0; i < tx->num_txq; i++) { 3105 txq = txq ? 
list_next_entry(txq, qe) 3106 : list_first_entry(&tx->txq_q, struct bna_txq, qe); 3107 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt); 3108 cfg_req->q_cfg[i].q.priority = txq->priority; 3109 3110 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = 3111 txq->ib.ib_seg_host_addr.lsb; 3112 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = 3113 txq->ib.ib_seg_host_addr.msb; 3114 cfg_req->q_cfg[i].ib.intr.msix_index = 3115 htons((u16)txq->ib.intr_vector); 3116 } 3117 3118 cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED; 3119 cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; 3120 cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; 3121 cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED; 3122 cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX) 3123 ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; 3124 cfg_req->ib_cfg.coalescing_timeout = 3125 htonl((u32)txq->ib.coalescing_timeo); 3126 cfg_req->ib_cfg.inter_pkt_timeout = 3127 htonl((u32)txq->ib.interpkt_timeo); 3128 cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count; 3129 3130 cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI; 3131 cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id); 3132 cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED; 3133 cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED; 3134 3135 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, 3136 sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh); 3137 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); 3138 } 3139 3140 static void 3141 bna_bfi_tx_enet_stop(struct bna_tx *tx) 3142 { 3143 struct bfi_enet_req *req = &tx->bfi_enet_cmd.req; 3144 3145 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, 3146 BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid); 3147 req->mh.num_entries = htons( 3148 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req))); 3149 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), 3150 &req->mh); 3151 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); 3152 } 3153 3154 static void 3155 bna_tx_enet_stop(struct bna_tx *tx) 3156 { 3157 struct bna_txq *txq; 3158 3159 /* Stop IB */ 3160 list_for_each_entry(txq, &tx->txq_q, qe) 3161 bna_ib_stop(tx->bna, &txq->ib); 3162 3163 bna_bfi_tx_enet_stop(tx); 3164 } 3165 3166 static void 3167 bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size, 3168 struct bna_mem_descr *qpt_mem, 3169 struct bna_mem_descr *swqpt_mem, 3170 struct bna_mem_descr *page_mem) 3171 { 3172 u8 *kva; 3173 u64 dma; 3174 struct bna_dma_addr bna_dma; 3175 int i; 3176 3177 txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; 3178 txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; 3179 txq->qpt.kv_qpt_ptr = qpt_mem->kva; 3180 txq->qpt.page_count = page_count; 3181 txq->qpt.page_size = page_size; 3182 3183 txq->tcb->sw_qpt = (void **) swqpt_mem->kva; 3184 txq->tcb->sw_q = page_mem->kva; 3185 3186 kva = page_mem->kva; 3187 BNA_GET_DMA_ADDR(&page_mem->dma, dma); 3188 3189 for (i = 0; i < page_count; i++) { 3190 txq->tcb->sw_qpt[i] = kva; 3191 kva += PAGE_SIZE; 3192 3193 BNA_SET_DMA_ADDR(dma, &bna_dma); 3194 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb = 3195 bna_dma.lsb; 3196 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb = 3197 bna_dma.msb; 3198 dma += PAGE_SIZE; 3199 } 3200 } 3201 3202 static struct bna_tx * 3203 bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type) 3204 { 3205 struct bna_tx *tx = NULL; 3206 3207 if (list_empty(&tx_mod->tx_free_q)) 3208 return NULL; 3209 if (type == BNA_TX_T_REGULAR) 3210 tx = list_first_entry(&tx_mod->tx_free_q, struct bna_tx, qe); 3211 else 3212 tx = 
list_last_entry(&tx_mod->tx_free_q, struct bna_tx, qe); 3213 list_del(&tx->qe); 3214 tx->type = type; 3215 3216 return tx; 3217 } 3218 3219 static void 3220 bna_tx_free(struct bna_tx *tx) 3221 { 3222 struct bna_tx_mod *tx_mod = &tx->bna->tx_mod; 3223 struct bna_txq *txq; 3224 struct list_head *qe; 3225 3226 while (!list_empty(&tx->txq_q)) { 3227 txq = list_first_entry(&tx->txq_q, struct bna_txq, qe); 3228 txq->tcb = NULL; 3229 txq->tx = NULL; 3230 list_move_tail(&txq->qe, &tx_mod->txq_free_q); 3231 } 3232 3233 list_for_each(qe, &tx_mod->tx_active_q) { 3234 if (qe == &tx->qe) { 3235 list_del(&tx->qe); 3236 break; 3237 } 3238 } 3239 3240 tx->bna = NULL; 3241 tx->priv = NULL; 3242 3243 list_for_each_prev(qe, &tx_mod->tx_free_q) 3244 if (((struct bna_tx *)qe)->rid < tx->rid) 3245 break; 3246 3247 list_add(&tx->qe, qe); 3248 } 3249 3250 static void 3251 bna_tx_start(struct bna_tx *tx) 3252 { 3253 tx->flags |= BNA_TX_F_ENET_STARTED; 3254 if (tx->flags & BNA_TX_F_ENABLED) 3255 bfa_fsm_send_event(tx, TX_E_START); 3256 } 3257 3258 static void 3259 bna_tx_stop(struct bna_tx *tx) 3260 { 3261 tx->stop_cbfn = bna_tx_mod_cb_tx_stopped; 3262 tx->stop_cbarg = &tx->bna->tx_mod; 3263 3264 tx->flags &= ~BNA_TX_F_ENET_STARTED; 3265 bfa_fsm_send_event(tx, TX_E_STOP); 3266 } 3267 3268 static void 3269 bna_tx_fail(struct bna_tx *tx) 3270 { 3271 tx->flags &= ~BNA_TX_F_ENET_STARTED; 3272 bfa_fsm_send_event(tx, TX_E_FAIL); 3273 } 3274 3275 void 3276 bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) 3277 { 3278 struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp; 3279 struct bna_txq *txq = NULL; 3280 int i; 3281 3282 bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp, 3283 sizeof(struct bfi_enet_tx_cfg_rsp)); 3284 3285 tx->hw_id = cfg_rsp->hw_id; 3286 3287 for (i = 0, txq = list_first_entry(&tx->txq_q, struct bna_txq, qe); 3288 i < tx->num_txq; i++, txq = list_next_entry(txq, qe)) { 3289 /* Setup doorbells */ 3290 txq->tcb->i_dbell->doorbell_addr = 3291 tx->bna->pcidev.pci_bar_kva 3292 + ntohl(cfg_rsp->q_handles[i].i_dbell); 3293 txq->tcb->q_dbell = 3294 tx->bna->pcidev.pci_bar_kva 3295 + ntohl(cfg_rsp->q_handles[i].q_dbell); 3296 txq->hw_id = cfg_rsp->q_handles[i].hw_qid; 3297 3298 /* Initialize producer/consumer indexes */ 3299 (*txq->tcb->hw_consumer_index) = 0; 3300 txq->tcb->producer_index = txq->tcb->consumer_index = 0; 3301 } 3302 3303 bfa_fsm_send_event(tx, TX_E_STARTED); 3304 } 3305 3306 void 3307 bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) 3308 { 3309 bfa_fsm_send_event(tx, TX_E_STOPPED); 3310 } 3311 3312 void 3313 bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod) 3314 { 3315 struct bna_tx *tx; 3316 3317 list_for_each_entry(tx, &tx_mod->tx_active_q, qe) 3318 bfa_fsm_send_event(tx, TX_E_BW_UPDATE); 3319 } 3320 3321 void 3322 bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info) 3323 { 3324 u32 q_size; 3325 u32 page_count; 3326 struct bna_mem_info *mem_info; 3327 3328 res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM; 3329 mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info; 3330 mem_info->mem_type = BNA_MEM_T_KVA; 3331 mem_info->len = sizeof(struct bna_tcb); 3332 mem_info->num = num_txq; 3333 3334 q_size = txq_depth * BFI_TXQ_WI_SIZE; 3335 q_size = ALIGN(q_size, PAGE_SIZE); 3336 page_count = q_size >> PAGE_SHIFT; 3337 3338 res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM; 3339 mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info; 3340 mem_info->mem_type = BNA_MEM_T_DMA; 3341 mem_info->len = page_count 
* sizeof(struct bna_dma_addr); 3342 mem_info->num = num_txq; 3343 3344 res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM; 3345 mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info; 3346 mem_info->mem_type = BNA_MEM_T_KVA; 3347 mem_info->len = page_count * sizeof(void *); 3348 mem_info->num = num_txq; 3349 3350 res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM; 3351 mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info; 3352 mem_info->mem_type = BNA_MEM_T_DMA; 3353 mem_info->len = PAGE_SIZE * page_count; 3354 mem_info->num = num_txq; 3355 3356 res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; 3357 mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info; 3358 mem_info->mem_type = BNA_MEM_T_DMA; 3359 mem_info->len = BFI_IBIDX_SIZE; 3360 mem_info->num = num_txq; 3361 3362 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR; 3363 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type = 3364 BNA_INTR_T_MSIX; 3365 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq; 3366 } 3367 3368 struct bna_tx * 3369 bna_tx_create(struct bna *bna, struct bnad *bnad, 3370 struct bna_tx_config *tx_cfg, 3371 const struct bna_tx_event_cbfn *tx_cbfn, 3372 struct bna_res_info *res_info, void *priv) 3373 { 3374 struct bna_intr_info *intr_info; 3375 struct bna_tx_mod *tx_mod = &bna->tx_mod; 3376 struct bna_tx *tx; 3377 struct bna_txq *txq; 3378 int page_count; 3379 int i; 3380 3381 intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; 3382 page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) / 3383 PAGE_SIZE; 3384 3385 /** 3386 * Get resources 3387 */ 3388 3389 if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq)) 3390 return NULL; 3391 3392 /* Tx */ 3393 3394 tx = bna_tx_get(tx_mod, tx_cfg->tx_type); 3395 if (!tx) 3396 return NULL; 3397 tx->bna = bna; 3398 tx->priv = priv; 3399 3400 /* TxQs */ 3401 3402 INIT_LIST_HEAD(&tx->txq_q); 3403 for (i = 0; i < tx_cfg->num_txq; i++) { 3404 if (list_empty(&tx_mod->txq_free_q)) 3405 goto err_return; 3406 3407 txq = list_first_entry(&tx_mod->txq_free_q, struct bna_txq, qe); 3408 list_move_tail(&txq->qe, &tx->txq_q); 3409 txq->tx = tx; 3410 } 3411 3412 /* 3413 * Initialize 3414 */ 3415 3416 /* Tx */ 3417 3418 tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn; 3419 tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn; 3420 /* Following callbacks are mandatory */ 3421 tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn; 3422 tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn; 3423 tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn; 3424 3425 list_add_tail(&tx->qe, &tx_mod->tx_active_q); 3426 3427 tx->num_txq = tx_cfg->num_txq; 3428 3429 tx->flags = 0; 3430 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) { 3431 switch (tx->type) { 3432 case BNA_TX_T_REGULAR: 3433 if (!(tx->bna->tx_mod.flags & 3434 BNA_TX_MOD_F_ENET_LOOPBACK)) 3435 tx->flags |= BNA_TX_F_ENET_STARTED; 3436 break; 3437 case BNA_TX_T_LOOPBACK: 3438 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK) 3439 tx->flags |= BNA_TX_F_ENET_STARTED; 3440 break; 3441 } 3442 } 3443 3444 /* TxQ */ 3445 3446 i = 0; 3447 list_for_each_entry(txq, &tx->txq_q, qe) { 3448 txq->tcb = (struct bna_tcb *) 3449 res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva; 3450 txq->tx_packets = 0; 3451 txq->tx_bytes = 0; 3452 3453 /* IB */ 3454 txq->ib.ib_seg_host_addr.lsb = 3455 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb; 3456 txq->ib.ib_seg_host_addr.msb = 3457 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb; 3458 
txq->ib.ib_seg_host_addr_kva = 3459 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva; 3460 txq->ib.intr_type = intr_info->intr_type; 3461 txq->ib.intr_vector = (intr_info->num == 1) ? 3462 intr_info->idl[0].vector : 3463 intr_info->idl[i].vector; 3464 if (intr_info->intr_type == BNA_INTR_T_INTX) 3465 txq->ib.intr_vector = BIT(txq->ib.intr_vector); 3466 txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo; 3467 txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO; 3468 txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT; 3469 3470 /* TCB */ 3471 3472 txq->tcb->q_depth = tx_cfg->txq_depth; 3473 txq->tcb->unmap_q = (void *) 3474 res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva; 3475 txq->tcb->hw_consumer_index = 3476 (u32 *)txq->ib.ib_seg_host_addr_kva; 3477 txq->tcb->i_dbell = &txq->ib.door_bell; 3478 txq->tcb->intr_type = txq->ib.intr_type; 3479 txq->tcb->intr_vector = txq->ib.intr_vector; 3480 txq->tcb->txq = txq; 3481 txq->tcb->bnad = bnad; 3482 txq->tcb->id = i; 3483 3484 /* QPT, SWQPT, Pages */ 3485 bna_txq_qpt_setup(txq, page_count, PAGE_SIZE, 3486 &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i], 3487 &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i], 3488 &res_info[BNA_TX_RES_MEM_T_PAGE]. 3489 res_u.mem_info.mdl[i]); 3490 3491 /* Callback to bnad for setting up TCB */ 3492 if (tx->tcb_setup_cbfn) 3493 (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb); 3494 3495 if (tx_cfg->num_txq == BFI_TX_MAX_PRIO) 3496 txq->priority = txq->tcb->id; 3497 else 3498 txq->priority = tx_mod->default_prio; 3499 3500 i++; 3501 } 3502 3503 tx->txf_vlan_id = 0; 3504 3505 bfa_fsm_set_state(tx, bna_tx_sm_stopped); 3506 3507 tx_mod->rid_mask |= BIT(tx->rid); 3508 3509 return tx; 3510 3511 err_return: 3512 bna_tx_free(tx); 3513 return NULL; 3514 } 3515 3516 void 3517 bna_tx_destroy(struct bna_tx *tx) 3518 { 3519 struct bna_txq *txq; 3520 3521 list_for_each_entry(txq, &tx->txq_q, qe) 3522 if (tx->tcb_destroy_cbfn) 3523 (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb); 3524 3525 tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid); 3526 bna_tx_free(tx); 3527 } 3528 3529 void 3530 bna_tx_enable(struct bna_tx *tx) 3531 { 3532 if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped) 3533 return; 3534 3535 tx->flags |= BNA_TX_F_ENABLED; 3536 3537 if (tx->flags & BNA_TX_F_ENET_STARTED) 3538 bfa_fsm_send_event(tx, TX_E_START); 3539 } 3540 3541 void 3542 bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, 3543 void (*cbfn)(void *, struct bna_tx *)) 3544 { 3545 if (type == BNA_SOFT_CLEANUP) { 3546 (*cbfn)(tx->bna->bnad, tx); 3547 return; 3548 } 3549 3550 tx->stop_cbfn = cbfn; 3551 tx->stop_cbarg = tx->bna->bnad; 3552 3553 tx->flags &= ~BNA_TX_F_ENABLED; 3554 3555 bfa_fsm_send_event(tx, TX_E_STOP); 3556 } 3557 3558 void 3559 bna_tx_cleanup_complete(struct bna_tx *tx) 3560 { 3561 bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE); 3562 } 3563 3564 static void 3565 bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx) 3566 { 3567 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg; 3568 3569 bfa_wc_down(&tx_mod->tx_stop_wc); 3570 } 3571 3572 static void 3573 bna_tx_mod_cb_tx_stopped_all(void *arg) 3574 { 3575 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg; 3576 3577 if (tx_mod->stop_cbfn) 3578 tx_mod->stop_cbfn(&tx_mod->bna->enet); 3579 tx_mod->stop_cbfn = NULL; 3580 } 3581 3582 void 3583 bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna, 3584 struct bna_res_info *res_info) 3585 { 3586 int i; 3587 3588 tx_mod->bna = bna; 3589 tx_mod->flags = 0; 3590 3591 tx_mod->tx = (struct bna_tx *) 3592 
res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva; 3593 tx_mod->txq = (struct bna_txq *) 3594 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva; 3595 3596 INIT_LIST_HEAD(&tx_mod->tx_free_q); 3597 INIT_LIST_HEAD(&tx_mod->tx_active_q); 3598 3599 INIT_LIST_HEAD(&tx_mod->txq_free_q); 3600 3601 for (i = 0; i < bna->ioceth.attr.num_txq; i++) { 3602 tx_mod->tx[i].rid = i; 3603 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q); 3604 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q); 3605 } 3606 3607 tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL; 3608 tx_mod->default_prio = 0; 3609 tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED; 3610 tx_mod->iscsi_prio = -1; 3611 } 3612 3613 void 3614 bna_tx_mod_uninit(struct bna_tx_mod *tx_mod) 3615 { 3616 tx_mod->bna = NULL; 3617 } 3618 3619 void 3620 bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type) 3621 { 3622 struct bna_tx *tx; 3623 3624 tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED; 3625 if (type == BNA_TX_T_LOOPBACK) 3626 tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK; 3627 3628 list_for_each_entry(tx, &tx_mod->tx_active_q, qe) 3629 if (tx->type == type) 3630 bna_tx_start(tx); 3631 } 3632 3633 void 3634 bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type) 3635 { 3636 struct bna_tx *tx; 3637 3638 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; 3639 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; 3640 3641 tx_mod->stop_cbfn = bna_enet_cb_tx_stopped; 3642 3643 bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod); 3644 3645 list_for_each_entry(tx, &tx_mod->tx_active_q, qe) 3646 if (tx->type == type) { 3647 bfa_wc_up(&tx_mod->tx_stop_wc); 3648 bna_tx_stop(tx); 3649 } 3650 3651 bfa_wc_wait(&tx_mod->tx_stop_wc); 3652 } 3653 3654 void 3655 bna_tx_mod_fail(struct bna_tx_mod *tx_mod) 3656 { 3657 struct bna_tx *tx; 3658 3659 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; 3660 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; 3661 3662 list_for_each_entry(tx, &tx_mod->tx_active_q, qe) 3663 bna_tx_fail(tx); 3664 } 3665 3666 void 3667 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo) 3668 { 3669 struct bna_txq *txq; 3670 3671 list_for_each_entry(txq, &tx->txq_q, qe) 3672 bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo); 3673 } 3674
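/*
 * Usage sketch (illustrative only): the bnad layer is expected to drive a
 * Tx object through the APIs above roughly as follows. The names tx_cfg,
 * tx_cbfn, res_info, my_priv and my_tx_stopped_cb are placeholders,
 * BNA_HARD_CLEANUP is assumed to be the non-soft member of
 * enum bna_cleanup_type, error handling is omitted, and the bna calls are
 * assumed to run under the bnad's lock; treat this as a sketch under those
 * assumptions, not a definitive call sequence.
 *
 *	bna_tx_res_req(tx_cfg.num_txq, tx_cfg.txq_depth, res_info);
 *	// ...allocate the memory/MSI-X resources described in res_info...
 *
 *	tx = bna_tx_create(bna, bnad, &tx_cfg, &tx_cbfn, res_info, my_priv);
 *	bna_tx_enable(tx);	// kicks TX_E_START if ENET is already up
 *
 *	// teardown:
 *	bna_tx_disable(tx, BNA_HARD_CLEANUP, my_tx_stopped_cb);
 *	// ...after my_tx_stopped_cb fires and the queues are cleaned up...
 *	bna_tx_cleanup_complete(tx);
 *	bna_tx_destroy(tx);
 */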
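/*
 * A second illustrative sketch: adaptive Rx interrupt coalescing. Per CQ,
 * bna_rx_dim_update() buckets the packets counted since the last call into
 * one of BNA_LOAD_T_MAX load levels plus a small-vs-large packet bias,
 * looks the new timeout up in rx_mod.dim_vector (typically seeded from
 * bna_napi_dim_vector via bna_rx_dim_reconfig()) and programs it into the
 * CQ's IB. The polling context and my_timeo below are assumptions, not
 * driver code.
 *
 *	// in the per-CQ poll path, after updating
 *	// ccb->pkt_rate.small_pkt_cnt / large_pkt_cnt:
 *	bna_rx_dim_update(ccb);
 *
 *	// or force a fixed timeout on every CQ of an Rx object:
 *	bna_rx_coalescing_timeo_set(rx, my_timeo);
 */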