/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
#include "bna.h"
#include "bfi.h"

/* IB */
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);
}

/* RXF */

#define bna_rxf_vlan_cfg_soft_reset(rxf)				\
do {									\
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;		\
	(rxf)->vlan_strip_pending = true;				\
} while (0)

#define bna_rxf_rss_cfg_soft_reset(rxf)					\
do {									\
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)			\
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |		\
				BNA_RSS_F_CFG_PENDING |			\
				BNA_RSS_F_STATUS_PENDING);		\
} while (0)

static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
				enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
				enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
				enum bna_cleanup_type cleanup);

bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
			enum bna_rxf_event);

static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
	call_rxf_stop_cbfn(rxf);
}

static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		if (rxf->flags & BNA_RXF_F_PAUSED) {
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
			call_rxf_start_cbfn(rxf);
		} else
			bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_STOP:
		call_rxf_stop_cbfn(rxf);
		break;

	case RXF_E_FAIL:
		/* No-op */
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_pause_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		call_rxf_resume_cbfn(rxf);
		break;

	default:
		bfa_sm_fault(event);
	}
}
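
/*
 * Editor's note (not in the original source): in the paused state the Rx
 * filters have been quiesced; an RXF_E_CONFIG event only acknowledges the
 * caller through the cam_fltr callback, and the actual hardware
 * programming is deferred until RXF_E_RESUME takes the state machine back
 * through cfg_wait.
 */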

static void
bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
{
	call_rxf_pause_cbfn(rxf);
}

static void
bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
{
	if (!bna_rxf_cfg_apply(rxf)) {
		/* No more pending config updates */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}

static void
bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
		break;

	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_start_cbfn(rxf);
		call_rxf_cam_fltr_cbfn(rxf);
		call_rxf_resume_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		/* No-op */
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_start_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_cfg_apply(rxf)) {
			/* No more pending config updates */
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
	call_rxf_start_cbfn(rxf);
	call_rxf_cam_fltr_cbfn(rxf);
	call_rxf_resume_cbfn(rxf);
}

static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		if (!bna_rxf_fltr_clear(rxf))
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		else
			bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_pause_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_fltr_clear(rxf)) {
			/* No more pending CAM entries to clear */
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_FW_RESP:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
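
/*
 * Editor's note (not in the original source): the helpers below build and
 * post host-to-firmware (H2I) enet requests over the message queue.  Each
 * one fills the common message header, records how many mailbox entries
 * the request occupies, and posts the command; completion is reported
 * back through the matching bna_bfi_rxf_*_rsp() handlers, which feed
 * RXF_E_FW_RESP to the state machine above.
 */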

static void
bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bfi_enet_h2i_msgs req_type)
{
	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
	ether_addr_copy(req->mac_addr, mac->addr);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_ucast_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
	ether_addr_copy(req->mac_addr, mac->addr);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_add_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
{
	struct bfi_enet_mcast_del_req *req =
		&rxf->bfi_enet_cmd.mcast_del_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
	req->handle = htons(handle);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_del_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
{
	struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
	int i;
	int j;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
	req->block_idx = block_idx;
	for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
		j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
		if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
			req->bit_mask[i] =
				htonl(rxf->vlan_filter_table[j]);
		else
			req->bit_mask[i] = 0xFFFFFFFF;
	}
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
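
/*
 * Editor's note (not in the original source): VLAN filtering state is kept
 * as a bitmap with one bit per VLAN ID and synchronized to firmware in
 * blocks of BFI_ENET_VLAN_BLOCK_SIZE bits; vlan_pending_bitmask tracks
 * which blocks still need to be sent.  When filtering is disabled an
 * all-ones mask is pushed so every VLAN ID is accepted.
 */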

static void
bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->vlan_strip_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rit_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
	req->size = htons(rxf->rit_size);
	memcpy(&req->table[0], rxf->rit, rxf->rit_size);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rit_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rss_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
	int i;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
	req->cfg.type = rxf->rss_cfg.hash_type;
	req->cfg.mask = rxf->rss_cfg.hash_mask;
	for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
		req->cfg.key[i] =
			htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rss_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->rss_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

/* This function gets the multicast MAC that has already been added to CAM */
static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
	struct bna_mac *mac;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (ether_addr_equal(mac->addr, mac_addr))
			return mac;
	}

	list_for_each(qe, &rxf->mcast_pending_del_q) {
		mac = (struct bna_mac *)qe;
		if (ether_addr_equal(mac->addr, mac_addr))
			return mac;
	}

	return NULL;
}

static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
	struct bna_mcam_handle *mchandle;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_handle_q) {
		mchandle = (struct bna_mcam_handle *)qe;
		if (mchandle->handle == handle)
			return mchandle;
	}

	return NULL;
}
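
/*
 * Editor's note (not in the original source): multicast CAM handles are
 * reference counted.  Several bna_mac entries can end up sharing one
 * firmware handle, so the handle is released (and a delete request
 * posted) only when the last reference goes away in bna_rxf_mcast_del().
 */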

static void
bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
{
	struct bna_mac *mcmac;
	struct bna_mcam_handle *mchandle;

	mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
	mchandle = bna_rxf_mchandle_get(rxf, handle);
	if (mchandle == NULL) {
		mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
		mchandle->handle = handle;
		mchandle->refcnt = 0;
		list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
	}
	mchandle->refcnt++;
	mcmac->handle = mchandle;
}

static int
bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bna_cleanup_type cleanup)
{
	struct bna_mcam_handle *mchandle;
	int ret = 0;

	mchandle = mac->handle;
	if (mchandle == NULL)
		return ret;

	mchandle->refcnt--;
	if (mchandle->refcnt == 0) {
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_del_req(rxf, mchandle->handle);
			ret = 1;
		}
		list_del(&mchandle->qe);
		bfa_q_qe_init(&mchandle->qe);
		bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
	}
	mac->handle = NULL;

	return ret;
}

static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;
	int ret;

	/* First delete multicast entries to maintain the count */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
		if (ret)
			return ret;
	}

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
		bna_bfi_mcast_add_req(rxf, mac);
		return 1;
	}

	return 0;
}

static int
bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
{
	u8 vlan_pending_bitmask;
	int block_idx = 0;

	if (rxf->vlan_pending_bitmask) {
		vlan_pending_bitmask = rxf->vlan_pending_bitmask;
		while (!(vlan_pending_bitmask & 0x1)) {
			block_idx++;
			vlan_pending_bitmask >>= 1;
		}
		rxf->vlan_pending_bitmask &= ~(1 << block_idx);
		bna_bfi_rx_vlan_filter_set(rxf, block_idx);
		return 1;
	}

	return 0;
}

static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;
	int ret;

	/* Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, cleanup);
		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
		if (ret)
			return ret;
	}

	/* Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
		mac = (struct bna_mac *)qe;
		if (bna_rxf_mcast_del(rxf, mac, cleanup))
			return 1;
	}

	return 0;
}
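
/*
 * Editor's note (not in the original source): each *_cfg_apply() helper
 * posts at most one firmware command and returns 1 if it did so.
 * bna_rxf_cfg_apply() walks the categories in a fixed order and the
 * cfg_wait state re-enters it on every RXF_E_FW_RESP until nothing is
 * left pending.
 */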

static int
bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->rss_pending) {
		if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
			bna_bfi_rit_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
			bna_bfi_rss_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
			bna_bfi_rss_enable(rxf);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_cfg_apply(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_mcast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_promisc_cfg_apply(rxf))
		return 1;

	if (bna_rxf_allmulti_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_strip_cfg_apply(rxf))
		return 1;

	if (bna_rxf_rss_cfg_apply(rxf))
		return 1;

	return 0;
}

/* Only software reset */
static int
bna_rxf_fltr_clear(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	return 0;
}

static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
	bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_vlan_cfg_soft_reset(rxf);
	bna_rxf_rss_cfg_soft_reset(rxf);
}

static void
bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
	struct bna_rx *rx = rxf->rx;
	struct bna_rxp *rxp;
	struct list_head *qe;
	int offset = 0;

	rxf->rit_size = rit_size;
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxf->rit[offset] = rxp->cq.ccb->id;
		offset++;
	}
}

void
bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	if (rsp->error) {
		/* Clear ucast from cache */
		rxf->ucast_active_set = 0;
	}

	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;
	struct bfi_enet_mcast_add_rsp *rsp =
		container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);

	bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
		ntohs(rsp->handle));
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

static void
bna_rxf_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config,
		struct bna_res_info *res_info)
{
	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_pending_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);
	INIT_LIST_HEAD(&rxf->mcast_handle_q);

	if (q_config->paused)
		rxf->flags |= BNA_RXF_F_PAUSED;

	rxf->rit = (u8 *)
		res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
	bna_rit_init(rxf, q_config->num_paths);

	rxf->rss_status = q_config->rss_status;
	if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->rss_cfg = q_config->rss_config;
		rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
		rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
		rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
			(sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
	rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
	rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;

	rxf->vlan_strip_status = q_config->vlan_strip_status;

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}

static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
	struct bna_mac *mac;

	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
	}

	if (rxf->ucast_pending_mac) {
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
			rxf->ucast_pending_mac);
		rxf->ucast_pending_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
	}

	rxf->rxmode_pending = 0;
	rxf->rxmode_pending_bitmask = 0;
	if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
		rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
	if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
		rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;

	rxf->rss_pending = 0;
	rxf->vlan_strip_pending = false;

	rxf->flags = 0;

	rxf->rx = NULL;
}

static void
bna_rx_cb_rxf_started(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
}

static void
bna_rxf_start(struct bna_rxf *rxf)
{
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_START);
}

static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
}

static void
bna_rxf_stop(struct bna_rxf *rxf)
{
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_STOP);
}

static void
bna_rxf_fail(struct bna_rxf *rxf)
{
	bfa_fsm_send_event(rxf, RXF_E_FAIL);
}
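
/*
 * Editor's note (not in the original source): the bna_rx_* entry points
 * below do not touch hardware directly.  They generally stage the request
 * on the rxf pending lists (or pending flags), record the caller's
 * completion callback, and kick the state machine with RXF_E_CONFIG; the
 * cfg_wait state then drains the pending work one firmware command at a
 * time.
 */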

enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->ucast_pending_mac == NULL) {
		rxf->ucast_pending_mac =
			bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
		if (rxf->ucast_pending_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
	}

	ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
	rxf->ucast_pending_set = 1;
	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		 void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct bna_mac *mac;

	/* Check if already added or pending addition */
	if (bna_mac_find(&rxf->mcast_active_q, addr) ||
	    bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
		if (cbfn)
			cbfn(rx->bna->bnad, rx);
		return BNA_CB_SUCCESS;
	}

	mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
	if (mac == NULL)
		return BNA_CB_MCAST_LIST_FULL;
	bfa_q_qe_init(&mac->qe);
	ether_addr_copy(mac->addr, addr);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
		     void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac, *del_mac;
	int i;

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = uclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		ether_addr_copy(mac->addr, mcaddr);
		list_add_tail(&mac->qe, &list_head);
		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
	}

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	return BNA_CB_UCAST_CAM_FULL;
}
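
/*
 * Editor's note (not in the original source): bna_rx_mcast_listset() and
 * bna_rx_mcast_delall() below replace or purge the whole multicast list.
 * Pending additions are simply returned to the free pool, while entries
 * already programmed in hardware are copied onto the pending-delete queue
 * so the firmware CAM entry is torn down on the next config pass.
 */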

enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
		     void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac, *del_mac;
	int i;

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);

		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		ether_addr_copy(mac->addr, mcaddr);
		list_add_tail(&mac->qe, &list_head);

		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	return BNA_CB_MCAST_LIST_FULL;
}

void
bna_rx_mcast_delall(struct bna_rx *rx,
		    void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac, *del_mac;
	int need_hw_config = 0;

	/* Purge all entries from pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
	}

	/* Schedule all entries in active_q for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));

		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
		need_hw_config = 1;
	}

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
		return;
	}

	if (cbfn)
		(*cbfn)(rx->bna->bnad, rx);
}

void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
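
/*
 * Editor's note (not in the original source): unicast filter programming
 * order in bna_rxf_ucast_cfg_apply() is deletes first, then the default
 * (primary) MAC set, then additional unicast entries.  Like the other
 * apply helpers, at most one request is posted per call.
 */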

static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
		bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
		return 1;
	}

	/* Set default unicast MAC */
	if (rxf->ucast_pending_set) {
		rxf->ucast_pending_set = 0;
		ether_addr_copy(rxf->ucast_active_mac.addr,
			rxf->ucast_pending_mac->addr);
		rxf->ucast_active_set = 1;
		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
			BFI_ENET_H2I_MAC_UCAST_SET_REQ);
		return 1;
	}

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->ucast_active_q);
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
		return 1;
	}

	return 0;
}

static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		if (cleanup == BNA_SOFT_CLEANUP)
			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
				mac);
		else {
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
				mac);
			return 1;
		}
	}

	/* Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);
		if (cleanup == BNA_HARD_CLEANUP) {
			mac = (struct bna_mac *)qe;
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			return 1;
		}
	}

	if (rxf->ucast_active_set) {
		rxf->ucast_pending_set = 1;
		rxf->ucast_active_set = 0;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
				BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}
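
/*
 * Editor's note (not in the original source): the *_cfg_reset() helpers
 * undo the active rx-mode state.  With BNA_SOFT_CLEANUP the state is only
 * rolled back in the driver (the device is assumed dead or about to be
 * re-initialized); with BNA_HARD_CLEANUP a disable request is also posted
 * to firmware.
 */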

static int
bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct bna *bna = rxf->rx->bna;

	/* Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	/* Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
{
	/* Enable/disable allmulti mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	} else if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	}

	return 0;
}

static int
bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	/* Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	/* Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_promisc_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->promisc_rid = rxf->rx->rid;
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_promisc_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->promisc_rid = BFI_INVALID_RID;
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Schedule disable */
		promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_allmulti_enable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_allmulti_disable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* Schedule disable */
		allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->vlan_strip_pending) {
		rxf->vlan_strip_pending = false;
		bna_bfi_vlan_strip_enable(rxf);
		return 1;
	}

	return 0;
}

/* RX */
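
/*
 * Editor's note (not in the original source): a single-queue path
 * (BNA_RXP_SINGLE) uses one RxQ, while SLR and HDS paths use two
 * (large + small, or data + header); BNA_GET_RXQS() accounts for this
 * when sizing the RxQ requirement.
 */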

#define BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &	\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))

#define call_rx_stop_cbfn(rx)						\
do {									\
	if ((rx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_rx *);			\
		void *cbarg;						\
		cbfn = (rx)->stop_cbfn;					\
		cbarg = (rx)->stop_cbarg;				\
		(rx)->stop_cbfn = NULL;					\
		(rx)->stop_cbarg = NULL;				\
		cbfn(cbarg, rx);					\
	}								\
} while (0)

#define call_rx_stall_cbfn(rx)						\
do {									\
	if ((rx)->rx_stall_cbfn)					\
		(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));		\
} while (0)

#define bfi_enet_datapath_q_init(bfi_q, bna_qpt)			\
do {									\
	struct bna_dma_addr cur_q_addr =				\
		*((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));	\
	(bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;	\
	(bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;	\
	(bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;		\
	(bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;		\
	(bfi_q)->pages = htons((u16)(bna_qpt)->page_count);		\
	(bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);		\
} while (0)

static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);

bfa_fsm_state_decl(bna_rx, stopped,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, cleanup_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, failed,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, quiesce_wait,
	struct bna_rx, enum bna_rx_event);

static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
	call_rx_stop_cbfn(rx);
}

static void bna_rx_sm_stopped(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	case RX_E_STOP:
		call_rx_stop_cbfn(rx);
		break;

	case RX_E_FAIL:
		/* no-op */
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
{
	bna_bfi_rx_enet_start(rx);
}

static void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	case RX_E_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
	rx->rx_post_cbfn(rx->bna->bnad, rx);
	bna_rxf_start(&rx->rxf);
}

static void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_RXF_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
		call_rx_stall_cbfn(rx);
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;
	int is_regular = (rx->type == BNA_RX_T_REGULAR);

	/* Start IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
	}

	bna_ethport_cb_rx_started(&rx->bna->ethport);
}

static void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_started);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_failed_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
		break;

	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;

	case RX_E_FAIL:
	case RX_E_RXF_STARTED:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
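
/*
 * Editor's note (not in the original source): bna_bfi_rx_enet_start()
 * builds the RX_CFG_SET request for firmware, with one queue-set
 * descriptor per path (large/single RxQ, optional small RxQ, the CQ and
 * its interrupt block settings), followed by the global rx_cfg fields
 * (frame size, queue type, HDS and VLAN-strip settings).
 */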

static void
bna_bfi_rx_enet_start(struct bna_rx *rx)
{
	struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));

	cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
	cfg_req->num_queue_sets = rx->num_paths;
	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;

		GET_RXQS(rxp, q0, q1);
		switch (rxp->type) {
		case BNA_RXP_SLR:
		case BNA_RXP_HDS:
			/* Small RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
						&q1->qpt);
			cfg_req->q_cfg[i].qs.rx_buffer_size =
				htons((u16)q1->buffer_size);
			/* Fall through */

		case BNA_RXP_SINGLE:
			/* Large/Single RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
						&q0->qpt);
			if (q0->multi_buffer)
				/* multi-buffer is enabled by allocating
				 * a new rx with new set of resources.
				 * q0->buffer_size should be initialized to
				 * fragment size.
				 */
				cfg_req->rx_cfg.multi_buffer =
					BNA_STATUS_T_ENABLED;
			else
				q0->buffer_size =
					bna_enet_mtu_get(&rx->bna->enet);
			cfg_req->q_cfg[i].ql.rx_buffer_size =
				htons((u16)q0->buffer_size);
			break;

		default:
			BUG_ON(1);
		}

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
					&rxp->cq.qpt);

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			rxp->cq.ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			rxp->cq.ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)rxp->cq.ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED :
				BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)rxp->cq.ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)rxp->cq.ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;

	switch (rxp->type) {
	case BNA_RXP_SLR:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
		break;

	case BNA_RXP_HDS:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
		cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
		cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
		cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
		break;

	case BNA_RXP_SINGLE:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
		break;

	default:
		BUG_ON(1);
	}
	cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;

	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}

static void
bna_bfi_rx_enet_stop(struct bna_rx *rx)
{
	struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}

static void
bna_rx_enet_stop(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;

	/* Stop IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_stop(rx->bna, &rxp->cq.ib);
	}

	bna_bfi_rx_enet_stop(rx);
}

static int
bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
{
	if ((rx_mod->rx_free_count == 0) ||
		(rx_mod->rxp_free_count == 0) ||
		(rx_mod->rxq_free_count == 0))
		return 0;

	if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < rx_cfg->num_paths))
			return 0;
	} else {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
			return 0;
	}

	return 1;
}

static struct bna_rxq *
bna_rxq_get(struct bna_rx_mod *rx_mod)
{
	struct bna_rxq *rxq = NULL;
	struct list_head *qe = NULL;

	bfa_q_deq(&rx_mod->rxq_free_q, &qe);
	rx_mod->rxq_free_count--;
	rxq = (struct bna_rxq *)qe;
	bfa_q_qe_init(&rxq->qe);

	return rxq;
}

static void
bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
	bfa_q_qe_init(&rxq->qe);
	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
	rx_mod->rxq_free_count++;
}

static struct bna_rxp *
bna_rxp_get(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe = NULL;
	struct bna_rxp *rxp = NULL;

	bfa_q_deq(&rx_mod->rxp_free_q, &qe);
	rx_mod->rxp_free_count--;
	rxp = (struct bna_rxp *)qe;
	bfa_q_qe_init(&rxp->qe);

	return rxp;
}

static void
bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
	bfa_q_qe_init(&rxp->qe);
	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
	rx_mod->rxp_free_count++;
}

static struct bna_rx *
bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct list_head *qe = NULL;
	struct bna_rx *rx = NULL;

	if (type == BNA_RX_T_REGULAR) {
		bfa_q_deq(&rx_mod->rx_free_q, &qe);
	} else
		bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);

	rx_mod->rx_free_count--;
	rx = (struct bna_rx *)qe;
	bfa_q_qe_init(&rx->qe);
	list_add_tail(&rx->qe, &rx_mod->rx_active_q);
	rx->type = type;

	return rx;
}

static void
bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
	struct list_head *prev_qe = NULL;
	struct list_head *qe;

	bfa_q_qe_init(&rx->qe);

	list_for_each(qe, &rx_mod->rx_free_q) {
		if (((struct bna_rx *)qe)->rid < rx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
	} else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
		/* This is the last entry */
		list_add_tail(&rx->qe, &rx_mod->rx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&rx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &rx->qe;
		bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
	}

	rx_mod->rx_free_count++;
}
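
/*
 * Editor's note (not in the original source): bna_rxp_add_rxqs() binds the
 * RxQs to their path according to the path type.  In the queue page table
 * (QPT) setup that follows, the hardware-visible QPT holds the DMA address
 * of each queue page, while sw_qpt keeps the matching kernel virtual
 * addresses so the driver can walk the same pages.
 */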
rxq->rcb->sw_qpt[i] = kva; 2102 kva += PAGE_SIZE; 2103 2104 BNA_SET_DMA_ADDR(dma, &bna_dma); 2105 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb = 2106 bna_dma.lsb; 2107 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb = 2108 bna_dma.msb; 2109 dma += PAGE_SIZE; 2110 } 2111 } 2112 2113 static void 2114 bna_rxp_cqpt_setup(struct bna_rxp *rxp, 2115 u32 page_count, 2116 u32 page_size, 2117 struct bna_mem_descr *qpt_mem, 2118 struct bna_mem_descr *swqpt_mem, 2119 struct bna_mem_descr *page_mem) 2120 { 2121 u8 *kva; 2122 u64 dma; 2123 struct bna_dma_addr bna_dma; 2124 int i; 2125 2126 rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; 2127 rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; 2128 rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva; 2129 rxp->cq.qpt.page_count = page_count; 2130 rxp->cq.qpt.page_size = page_size; 2131 2132 rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva; 2133 rxp->cq.ccb->sw_q = page_mem->kva; 2134 2135 kva = page_mem->kva; 2136 BNA_GET_DMA_ADDR(&page_mem->dma, dma); 2137 2138 for (i = 0; i < rxp->cq.qpt.page_count; i++) { 2139 rxp->cq.ccb->sw_qpt[i] = kva; 2140 kva += PAGE_SIZE; 2141 2142 BNA_SET_DMA_ADDR(dma, &bna_dma); 2143 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb = 2144 bna_dma.lsb; 2145 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb = 2146 bna_dma.msb; 2147 dma += PAGE_SIZE; 2148 } 2149 } 2150 2151 static void 2152 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx) 2153 { 2154 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; 2155 2156 bfa_wc_down(&rx_mod->rx_stop_wc); 2157 } 2158 2159 static void 2160 bna_rx_mod_cb_rx_stopped_all(void *arg) 2161 { 2162 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; 2163 2164 if (rx_mod->stop_cbfn) 2165 rx_mod->stop_cbfn(&rx_mod->bna->enet); 2166 rx_mod->stop_cbfn = NULL; 2167 } 2168 2169 static void 2170 bna_rx_start(struct bna_rx *rx) 2171 { 2172 rx->rx_flags |= BNA_RX_F_ENET_STARTED; 2173 if (rx->rx_flags & BNA_RX_F_ENABLED) 2174 bfa_fsm_send_event(rx, RX_E_START); 2175 } 2176 2177 static void 2178 bna_rx_stop(struct bna_rx *rx) 2179 { 2180 rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; 2181 if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped) 2182 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx); 2183 else { 2184 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped; 2185 rx->stop_cbarg = &rx->bna->rx_mod; 2186 bfa_fsm_send_event(rx, RX_E_STOP); 2187 } 2188 } 2189 2190 static void 2191 bna_rx_fail(struct bna_rx *rx) 2192 { 2193 /* Indicate Enet is not enabled, and failed */ 2194 rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; 2195 bfa_fsm_send_event(rx, RX_E_FAIL); 2196 } 2197 2198 void 2199 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type) 2200 { 2201 struct bna_rx *rx; 2202 struct list_head *qe; 2203 2204 rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED; 2205 if (type == BNA_RX_T_LOOPBACK) 2206 rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK; 2207 2208 list_for_each(qe, &rx_mod->rx_active_q) { 2209 rx = (struct bna_rx *)qe; 2210 if (rx->type == type) 2211 bna_rx_start(rx); 2212 } 2213 } 2214 2215 void 2216 bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type) 2217 { 2218 struct bna_rx *rx; 2219 struct list_head *qe; 2220 2221 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; 2222 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; 2223 2224 rx_mod->stop_cbfn = bna_enet_cb_rx_stopped; 2225 2226 bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod); 2227 2228 list_for_each(qe, &rx_mod->rx_active_q) { 2229 rx = (struct bna_rx *)qe; 2230 if (rx->type == type) { 2231 bfa_wc_up(&rx_mod->rx_stop_wc); 2232 
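			/* One wait-count credit per Rx being stopped; it is
			 * released via bna_rx_mod_cb_rx_stopped() either right
			 * away (Rx already stopped) or once the Rx FSM finishes
			 * stopping.
			 */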
bna_rx_stop(rx); 2233 } 2234 } 2235 2236 bfa_wc_wait(&rx_mod->rx_stop_wc); 2237 } 2238 2239 void 2240 bna_rx_mod_fail(struct bna_rx_mod *rx_mod) 2241 { 2242 struct bna_rx *rx; 2243 struct list_head *qe; 2244 2245 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; 2246 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; 2247 2248 list_for_each(qe, &rx_mod->rx_active_q) { 2249 rx = (struct bna_rx *)qe; 2250 bna_rx_fail(rx); 2251 } 2252 } 2253 2254 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna, 2255 struct bna_res_info *res_info) 2256 { 2257 int index; 2258 struct bna_rx *rx_ptr; 2259 struct bna_rxp *rxp_ptr; 2260 struct bna_rxq *rxq_ptr; 2261 2262 rx_mod->bna = bna; 2263 rx_mod->flags = 0; 2264 2265 rx_mod->rx = (struct bna_rx *) 2266 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva; 2267 rx_mod->rxp = (struct bna_rxp *) 2268 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva; 2269 rx_mod->rxq = (struct bna_rxq *) 2270 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva; 2271 2272 /* Initialize the queues */ 2273 INIT_LIST_HEAD(&rx_mod->rx_free_q); 2274 rx_mod->rx_free_count = 0; 2275 INIT_LIST_HEAD(&rx_mod->rxq_free_q); 2276 rx_mod->rxq_free_count = 0; 2277 INIT_LIST_HEAD(&rx_mod->rxp_free_q); 2278 rx_mod->rxp_free_count = 0; 2279 INIT_LIST_HEAD(&rx_mod->rx_active_q); 2280 2281 /* Build RX queues */ 2282 for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { 2283 rx_ptr = &rx_mod->rx[index]; 2284 2285 bfa_q_qe_init(&rx_ptr->qe); 2286 INIT_LIST_HEAD(&rx_ptr->rxp_q); 2287 rx_ptr->bna = NULL; 2288 rx_ptr->rid = index; 2289 rx_ptr->stop_cbfn = NULL; 2290 rx_ptr->stop_cbarg = NULL; 2291 2292 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q); 2293 rx_mod->rx_free_count++; 2294 } 2295 2296 /* build RX-path queue */ 2297 for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { 2298 rxp_ptr = &rx_mod->rxp[index]; 2299 bfa_q_qe_init(&rxp_ptr->qe); 2300 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q); 2301 rx_mod->rxp_free_count++; 2302 } 2303 2304 /* build RXQ queue */ 2305 for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) { 2306 rxq_ptr = &rx_mod->rxq[index]; 2307 bfa_q_qe_init(&rxq_ptr->qe); 2308 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q); 2309 rx_mod->rxq_free_count++; 2310 } 2311 } 2312 2313 void 2314 bna_rx_mod_uninit(struct bna_rx_mod *rx_mod) 2315 { 2316 struct list_head *qe; 2317 int i; 2318 2319 i = 0; 2320 list_for_each(qe, &rx_mod->rx_free_q) 2321 i++; 2322 2323 i = 0; 2324 list_for_each(qe, &rx_mod->rxp_free_q) 2325 i++; 2326 2327 i = 0; 2328 list_for_each(qe, &rx_mod->rxq_free_q) 2329 i++; 2330 2331 rx_mod->bna = NULL; 2332 } 2333 2334 void 2335 bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr) 2336 { 2337 struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp; 2338 struct bna_rxp *rxp = NULL; 2339 struct bna_rxq *q0 = NULL, *q1 = NULL; 2340 struct list_head *rxp_qe; 2341 int i; 2342 2343 bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp, 2344 sizeof(struct bfi_enet_rx_cfg_rsp)); 2345 2346 rx->hw_id = cfg_rsp->hw_id; 2347 2348 for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q); 2349 i < rx->num_paths; 2350 i++, rxp_qe = bfa_q_next(rxp_qe)) { 2351 rxp = (struct bna_rxp *)rxp_qe; 2352 GET_RXQS(rxp, q0, q1); 2353 2354 /* Setup doorbells */ 2355 rxp->cq.ccb->i_dbell->doorbell_addr = 2356 rx->bna->pcidev.pci_bar_kva 2357 + ntohl(cfg_rsp->q_handles[i].i_dbell); 2358 rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid; 2359 q0->rcb->q_dbell = 2360 rx->bna->pcidev.pci_bar_kva 2361 + 
ntohl(cfg_rsp->q_handles[i].ql_dbell); 2362 q0->hw_id = cfg_rsp->q_handles[i].hw_lqid; 2363 if (q1) { 2364 q1->rcb->q_dbell = 2365 rx->bna->pcidev.pci_bar_kva 2366 + ntohl(cfg_rsp->q_handles[i].qs_dbell); 2367 q1->hw_id = cfg_rsp->q_handles[i].hw_sqid; 2368 } 2369 2370 /* Initialize producer/consumer indexes */ 2371 (*rxp->cq.ccb->hw_producer_index) = 0; 2372 rxp->cq.ccb->producer_index = 0; 2373 q0->rcb->producer_index = q0->rcb->consumer_index = 0; 2374 if (q1) 2375 q1->rcb->producer_index = q1->rcb->consumer_index = 0; 2376 } 2377 2378 bfa_fsm_send_event(rx, RX_E_STARTED); 2379 } 2380 2381 void 2382 bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr) 2383 { 2384 bfa_fsm_send_event(rx, RX_E_STOPPED); 2385 } 2386 2387 void 2388 bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info) 2389 { 2390 u32 cq_size, hq_size, dq_size; 2391 u32 cpage_count, hpage_count, dpage_count; 2392 struct bna_mem_info *mem_info; 2393 u32 cq_depth; 2394 u32 hq_depth; 2395 u32 dq_depth; 2396 2397 dq_depth = q_cfg->q0_depth; 2398 hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth); 2399 cq_depth = roundup_pow_of_two(dq_depth + hq_depth); 2400 2401 cq_size = cq_depth * BFI_CQ_WI_SIZE; 2402 cq_size = ALIGN(cq_size, PAGE_SIZE); 2403 cpage_count = SIZE_TO_PAGES(cq_size); 2404 2405 dq_depth = roundup_pow_of_two(dq_depth); 2406 dq_size = dq_depth * BFI_RXQ_WI_SIZE; 2407 dq_size = ALIGN(dq_size, PAGE_SIZE); 2408 dpage_count = SIZE_TO_PAGES(dq_size); 2409 2410 if (BNA_RXP_SINGLE != q_cfg->rxp_type) { 2411 hq_depth = roundup_pow_of_two(hq_depth); 2412 hq_size = hq_depth * BFI_RXQ_WI_SIZE; 2413 hq_size = ALIGN(hq_size, PAGE_SIZE); 2414 hpage_count = SIZE_TO_PAGES(hq_size); 2415 } else 2416 hpage_count = 0; 2417 2418 res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM; 2419 mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info; 2420 mem_info->mem_type = BNA_MEM_T_KVA; 2421 mem_info->len = sizeof(struct bna_ccb); 2422 mem_info->num = q_cfg->num_paths; 2423 2424 res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM; 2425 mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info; 2426 mem_info->mem_type = BNA_MEM_T_KVA; 2427 mem_info->len = sizeof(struct bna_rcb); 2428 mem_info->num = BNA_GET_RXQS(q_cfg); 2429 2430 res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM; 2431 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info; 2432 mem_info->mem_type = BNA_MEM_T_DMA; 2433 mem_info->len = cpage_count * sizeof(struct bna_dma_addr); 2434 mem_info->num = q_cfg->num_paths; 2435 2436 res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM; 2437 mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info; 2438 mem_info->mem_type = BNA_MEM_T_KVA; 2439 mem_info->len = cpage_count * sizeof(void *); 2440 mem_info->num = q_cfg->num_paths; 2441 2442 res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM; 2443 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info; 2444 mem_info->mem_type = BNA_MEM_T_DMA; 2445 mem_info->len = PAGE_SIZE * cpage_count; 2446 mem_info->num = q_cfg->num_paths; 2447 2448 res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM; 2449 mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info; 2450 mem_info->mem_type = BNA_MEM_T_DMA; 2451 mem_info->len = dpage_count * sizeof(struct bna_dma_addr); 2452 mem_info->num = q_cfg->num_paths; 2453 2454 res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM; 2455 mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info; 2456 mem_info->mem_type = BNA_MEM_T_KVA; 2457 
mem_info->len = dpage_count * sizeof(void *); 2458 mem_info->num = q_cfg->num_paths; 2459 2460 res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM; 2461 mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info; 2462 mem_info->mem_type = BNA_MEM_T_DMA; 2463 mem_info->len = PAGE_SIZE * dpage_count; 2464 mem_info->num = q_cfg->num_paths; 2465 2466 res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM; 2467 mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info; 2468 mem_info->mem_type = BNA_MEM_T_DMA; 2469 mem_info->len = hpage_count * sizeof(struct bna_dma_addr); 2470 mem_info->num = (hpage_count ? q_cfg->num_paths : 0); 2471 2472 res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM; 2473 mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info; 2474 mem_info->mem_type = BNA_MEM_T_KVA; 2475 mem_info->len = hpage_count * sizeof(void *); 2476 mem_info->num = (hpage_count ? q_cfg->num_paths : 0); 2477 2478 res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM; 2479 mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info; 2480 mem_info->mem_type = BNA_MEM_T_DMA; 2481 mem_info->len = PAGE_SIZE * hpage_count; 2482 mem_info->num = (hpage_count ? q_cfg->num_paths : 0); 2483 2484 res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; 2485 mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info; 2486 mem_info->mem_type = BNA_MEM_T_DMA; 2487 mem_info->len = BFI_IBIDX_SIZE; 2488 mem_info->num = q_cfg->num_paths; 2489 2490 res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM; 2491 mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info; 2492 mem_info->mem_type = BNA_MEM_T_KVA; 2493 mem_info->len = BFI_ENET_RSS_RIT_MAX; 2494 mem_info->num = 1; 2495 2496 res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR; 2497 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX; 2498 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths; 2499 } 2500 2501 struct bna_rx * 2502 bna_rx_create(struct bna *bna, struct bnad *bnad, 2503 struct bna_rx_config *rx_cfg, 2504 const struct bna_rx_event_cbfn *rx_cbfn, 2505 struct bna_res_info *res_info, 2506 void *priv) 2507 { 2508 struct bna_rx_mod *rx_mod = &bna->rx_mod; 2509 struct bna_rx *rx; 2510 struct bna_rxp *rxp; 2511 struct bna_rxq *q0; 2512 struct bna_rxq *q1; 2513 struct bna_intr_info *intr_info; 2514 struct bna_mem_descr *hqunmap_mem; 2515 struct bna_mem_descr *dqunmap_mem; 2516 struct bna_mem_descr *ccb_mem; 2517 struct bna_mem_descr *rcb_mem; 2518 struct bna_mem_descr *cqpt_mem; 2519 struct bna_mem_descr *cswqpt_mem; 2520 struct bna_mem_descr *cpage_mem; 2521 struct bna_mem_descr *hqpt_mem; 2522 struct bna_mem_descr *dqpt_mem; 2523 struct bna_mem_descr *hsqpt_mem; 2524 struct bna_mem_descr *dsqpt_mem; 2525 struct bna_mem_descr *hpage_mem; 2526 struct bna_mem_descr *dpage_mem; 2527 u32 dpage_count, hpage_count; 2528 u32 hq_idx, dq_idx, rcb_idx; 2529 u32 cq_depth, i; 2530 u32 page_count; 2531 2532 if (!bna_rx_res_check(rx_mod, rx_cfg)) 2533 return NULL; 2534 2535 intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; 2536 ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0]; 2537 rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0]; 2538 dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0]; 2539 hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0]; 2540 cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0]; 2541 cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0]; 2542 cpage_mem = 
&res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0]; 2543 hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0]; 2544 dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0]; 2545 hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0]; 2546 dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0]; 2547 hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0]; 2548 dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0]; 2549 2550 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len / 2551 PAGE_SIZE; 2552 2553 dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len / 2554 PAGE_SIZE; 2555 2556 hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len / 2557 PAGE_SIZE; 2558 2559 rx = bna_rx_get(rx_mod, rx_cfg->rx_type); 2560 rx->bna = bna; 2561 rx->rx_flags = 0; 2562 INIT_LIST_HEAD(&rx->rxp_q); 2563 rx->stop_cbfn = NULL; 2564 rx->stop_cbarg = NULL; 2565 rx->priv = priv; 2566 2567 rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn; 2568 rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn; 2569 rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn; 2570 rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn; 2571 rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn; 2572 /* Following callbacks are mandatory */ 2573 rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn; 2574 rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn; 2575 2576 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) { 2577 switch (rx->type) { 2578 case BNA_RX_T_REGULAR: 2579 if (!(rx->bna->rx_mod.flags & 2580 BNA_RX_MOD_F_ENET_LOOPBACK)) 2581 rx->rx_flags |= BNA_RX_F_ENET_STARTED; 2582 break; 2583 case BNA_RX_T_LOOPBACK: 2584 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK) 2585 rx->rx_flags |= BNA_RX_F_ENET_STARTED; 2586 break; 2587 } 2588 } 2589 2590 rx->num_paths = rx_cfg->num_paths; 2591 for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0; 2592 i < rx->num_paths; i++) { 2593 rxp = bna_rxp_get(rx_mod); 2594 list_add_tail(&rxp->qe, &rx->rxp_q); 2595 rxp->type = rx_cfg->rxp_type; 2596 rxp->rx = rx; 2597 rxp->cq.rx = rx; 2598 2599 q0 = bna_rxq_get(rx_mod); 2600 if (BNA_RXP_SINGLE == rx_cfg->rxp_type) 2601 q1 = NULL; 2602 else 2603 q1 = bna_rxq_get(rx_mod); 2604 2605 if (1 == intr_info->num) 2606 rxp->vector = intr_info->idl[0].vector; 2607 else 2608 rxp->vector = intr_info->idl[i].vector; 2609 2610 /* Setup IB */ 2611 2612 rxp->cq.ib.ib_seg_host_addr.lsb = 2613 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb; 2614 rxp->cq.ib.ib_seg_host_addr.msb = 2615 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb; 2616 rxp->cq.ib.ib_seg_host_addr_kva = 2617 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva; 2618 rxp->cq.ib.intr_type = intr_info->intr_type; 2619 if (intr_info->intr_type == BNA_INTR_T_MSIX) 2620 rxp->cq.ib.intr_vector = rxp->vector; 2621 else 2622 rxp->cq.ib.intr_vector = (1 << rxp->vector); 2623 rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo; 2624 rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT; 2625 rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO; 2626 2627 bna_rxp_add_rxqs(rxp, q0, q1); 2628 2629 /* Setup large Q */ 2630 2631 q0->rx = rx; 2632 q0->rxp = rxp; 2633 2634 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; 2635 q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva; 2636 rcb_idx++; dq_idx++; 2637 q0->rcb->q_depth = rx_cfg->q0_depth; 2638 q0->q_depth = rx_cfg->q0_depth; 2639 q0->multi_buffer = rx_cfg->q0_multi_buf; 2640 q0->buffer_size = rx_cfg->q0_buf_size; 2641 q0->num_vecs = 
rx_cfg->q0_num_vecs; 2642 q0->rcb->rxq = q0; 2643 q0->rcb->bnad = bna->bnad; 2644 q0->rcb->id = 0; 2645 q0->rx_packets = q0->rx_bytes = 0; 2646 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; 2647 2648 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE, 2649 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]); 2650 2651 if (rx->rcb_setup_cbfn) 2652 rx->rcb_setup_cbfn(bnad, q0->rcb); 2653 2654 /* Setup small Q */ 2655 2656 if (q1) { 2657 q1->rx = rx; 2658 q1->rxp = rxp; 2659 2660 q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; 2661 q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva; 2662 rcb_idx++; hq_idx++; 2663 q1->rcb->q_depth = rx_cfg->q1_depth; 2664 q1->q_depth = rx_cfg->q1_depth; 2665 q1->multi_buffer = BNA_STATUS_T_DISABLED; 2666 q1->num_vecs = 1; 2667 q1->rcb->rxq = q1; 2668 q1->rcb->bnad = bna->bnad; 2669 q1->rcb->id = 1; 2670 q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ? 2671 rx_cfg->hds_config.forced_offset 2672 : rx_cfg->q1_buf_size; 2673 q1->rx_packets = q1->rx_bytes = 0; 2674 q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; 2675 2676 bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE, 2677 &hqpt_mem[i], &hsqpt_mem[i], 2678 &hpage_mem[i]); 2679 2680 if (rx->rcb_setup_cbfn) 2681 rx->rcb_setup_cbfn(bnad, q1->rcb); 2682 } 2683 2684 /* Setup CQ */ 2685 2686 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva; 2687 cq_depth = rx_cfg->q0_depth + 2688 ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ? 2689 0 : rx_cfg->q1_depth); 2690 /* if multi-buffer is enabled sum of q0_depth 2691 * and q1_depth need not be a power of 2 2692 */ 2693 cq_depth = roundup_pow_of_two(cq_depth); 2694 rxp->cq.ccb->q_depth = cq_depth; 2695 rxp->cq.ccb->cq = &rxp->cq; 2696 rxp->cq.ccb->rcb[0] = q0->rcb; 2697 q0->rcb->ccb = rxp->cq.ccb; 2698 if (q1) { 2699 rxp->cq.ccb->rcb[1] = q1->rcb; 2700 q1->rcb->ccb = rxp->cq.ccb; 2701 } 2702 rxp->cq.ccb->hw_producer_index = 2703 (u32 *)rxp->cq.ib.ib_seg_host_addr_kva; 2704 rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell; 2705 rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type; 2706 rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector; 2707 rxp->cq.ccb->rx_coalescing_timeo = 2708 rxp->cq.ib.coalescing_timeo; 2709 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0; 2710 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0; 2711 rxp->cq.ccb->bnad = bna->bnad; 2712 rxp->cq.ccb->id = i; 2713 2714 bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE, 2715 &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]); 2716 2717 if (rx->ccb_setup_cbfn) 2718 rx->ccb_setup_cbfn(bnad, rxp->cq.ccb); 2719 } 2720 2721 rx->hds_cfg = rx_cfg->hds_config; 2722 2723 bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info); 2724 2725 bfa_fsm_set_state(rx, bna_rx_sm_stopped); 2726 2727 rx_mod->rid_mask |= (1 << rx->rid); 2728 2729 return rx; 2730 } 2731 2732 void 2733 bna_rx_destroy(struct bna_rx *rx) 2734 { 2735 struct bna_rx_mod *rx_mod = &rx->bna->rx_mod; 2736 struct bna_rxq *q0 = NULL; 2737 struct bna_rxq *q1 = NULL; 2738 struct bna_rxp *rxp; 2739 struct list_head *qe; 2740 2741 bna_rxf_uninit(&rx->rxf); 2742 2743 while (!list_empty(&rx->rxp_q)) { 2744 bfa_q_deq(&rx->rxp_q, &rxp); 2745 GET_RXQS(rxp, q0, q1); 2746 if (rx->rcb_destroy_cbfn) 2747 rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb); 2748 q0->rcb = NULL; 2749 q0->rxp = NULL; 2750 q0->rx = NULL; 2751 bna_rxq_put(rx_mod, q0); 2752 2753 if (q1) { 2754 if (rx->rcb_destroy_cbfn) 2755 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb); 2756 q1->rcb = NULL; 2757 q1->rxp = NULL; 2758 q1->rx = NULL; 2759 bna_rxq_put(rx_mod, q1); 2760 } 2761 rxp->rxq.slr.large = NULL; 2762 rxp->rxq.slr.small = NULL; 2763 2764 if 
(rx->ccb_destroy_cbfn) 2765 rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb); 2766 rxp->cq.ccb = NULL; 2767 rxp->rx = NULL; 2768 bna_rxp_put(rx_mod, rxp); 2769 } 2770 2771 list_for_each(qe, &rx_mod->rx_active_q) { 2772 if (qe == &rx->qe) { 2773 list_del(&rx->qe); 2774 bfa_q_qe_init(&rx->qe); 2775 break; 2776 } 2777 } 2778 2779 rx_mod->rid_mask &= ~(1 << rx->rid); 2780 2781 rx->bna = NULL; 2782 rx->priv = NULL; 2783 bna_rx_put(rx_mod, rx); 2784 } 2785 2786 void 2787 bna_rx_enable(struct bna_rx *rx) 2788 { 2789 if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped) 2790 return; 2791 2792 rx->rx_flags |= BNA_RX_F_ENABLED; 2793 if (rx->rx_flags & BNA_RX_F_ENET_STARTED) 2794 bfa_fsm_send_event(rx, RX_E_START); 2795 } 2796 2797 void 2798 bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type, 2799 void (*cbfn)(void *, struct bna_rx *)) 2800 { 2801 if (type == BNA_SOFT_CLEANUP) { 2802 /* h/w should not be accessed. Treat we're stopped */ 2803 (*cbfn)(rx->bna->bnad, rx); 2804 } else { 2805 rx->stop_cbfn = cbfn; 2806 rx->stop_cbarg = rx->bna->bnad; 2807 2808 rx->rx_flags &= ~BNA_RX_F_ENABLED; 2809 2810 bfa_fsm_send_event(rx, RX_E_STOP); 2811 } 2812 } 2813 2814 void 2815 bna_rx_cleanup_complete(struct bna_rx *rx) 2816 { 2817 bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE); 2818 } 2819 2820 void 2821 bna_rx_vlan_strip_enable(struct bna_rx *rx) 2822 { 2823 struct bna_rxf *rxf = &rx->rxf; 2824 2825 if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) { 2826 rxf->vlan_strip_status = BNA_STATUS_T_ENABLED; 2827 rxf->vlan_strip_pending = true; 2828 bfa_fsm_send_event(rxf, RXF_E_CONFIG); 2829 } 2830 } 2831 2832 void 2833 bna_rx_vlan_strip_disable(struct bna_rx *rx) 2834 { 2835 struct bna_rxf *rxf = &rx->rxf; 2836 2837 if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) { 2838 rxf->vlan_strip_status = BNA_STATUS_T_DISABLED; 2839 rxf->vlan_strip_pending = true; 2840 bfa_fsm_send_event(rxf, RXF_E_CONFIG); 2841 } 2842 } 2843 2844 enum bna_cb_status 2845 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, 2846 enum bna_rxmode bitmask, 2847 void (*cbfn)(struct bnad *, struct bna_rx *)) 2848 { 2849 struct bna_rxf *rxf = &rx->rxf; 2850 int need_hw_config = 0; 2851 2852 /* Error checks */ 2853 2854 if (is_promisc_enable(new_mode, bitmask)) { 2855 /* If promisc mode is already enabled elsewhere in the system */ 2856 if ((rx->bna->promisc_rid != BFI_INVALID_RID) && 2857 (rx->bna->promisc_rid != rxf->rx->rid)) 2858 goto err_return; 2859 2860 /* If default mode is already enabled in the system */ 2861 if (rx->bna->default_mode_rid != BFI_INVALID_RID) 2862 goto err_return; 2863 2864 /* Trying to enable promiscuous and default mode together */ 2865 if (is_default_enable(new_mode, bitmask)) 2866 goto err_return; 2867 } 2868 2869 if (is_default_enable(new_mode, bitmask)) { 2870 /* If default mode is already enabled elsewhere in the system */ 2871 if ((rx->bna->default_mode_rid != BFI_INVALID_RID) && 2872 (rx->bna->default_mode_rid != rxf->rx->rid)) { 2873 goto err_return; 2874 } 2875 2876 /* If promiscuous mode is already enabled in the system */ 2877 if (rx->bna->promisc_rid != BFI_INVALID_RID) 2878 goto err_return; 2879 } 2880 2881 /* Process the commands */ 2882 2883 if (is_promisc_enable(new_mode, bitmask)) { 2884 if (bna_rxf_promisc_enable(rxf)) 2885 need_hw_config = 1; 2886 } else if (is_promisc_disable(new_mode, bitmask)) { 2887 if (bna_rxf_promisc_disable(rxf)) 2888 need_hw_config = 1; 2889 } 2890 2891 if (is_allmulti_enable(new_mode, bitmask)) { 2892 if (bna_rxf_allmulti_enable(rxf)) 2893 need_hw_config = 1; 2894 } 
else if (is_allmulti_disable(new_mode, bitmask)) { 2895 if (bna_rxf_allmulti_disable(rxf)) 2896 need_hw_config = 1; 2897 } 2898 2899 /* Trigger h/w if needed */ 2900 2901 if (need_hw_config) { 2902 rxf->cam_fltr_cbfn = cbfn; 2903 rxf->cam_fltr_cbarg = rx->bna->bnad; 2904 bfa_fsm_send_event(rxf, RXF_E_CONFIG); 2905 } else if (cbfn) 2906 (*cbfn)(rx->bna->bnad, rx); 2907 2908 return BNA_CB_SUCCESS; 2909 2910 err_return: 2911 return BNA_CB_FAIL; 2912 } 2913 2914 void 2915 bna_rx_vlanfilter_enable(struct bna_rx *rx) 2916 { 2917 struct bna_rxf *rxf = &rx->rxf; 2918 2919 if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) { 2920 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED; 2921 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; 2922 bfa_fsm_send_event(rxf, RXF_E_CONFIG); 2923 } 2924 } 2925 2926 void 2927 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo) 2928 { 2929 struct bna_rxp *rxp; 2930 struct list_head *qe; 2931 2932 list_for_each(qe, &rx->rxp_q) { 2933 rxp = (struct bna_rxp *)qe; 2934 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo; 2935 bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo); 2936 } 2937 } 2938 2939 void 2940 bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]) 2941 { 2942 int i, j; 2943 2944 for (i = 0; i < BNA_LOAD_T_MAX; i++) 2945 for (j = 0; j < BNA_BIAS_T_MAX; j++) 2946 bna->rx_mod.dim_vector[i][j] = vector[i][j]; 2947 } 2948 2949 void 2950 bna_rx_dim_update(struct bna_ccb *ccb) 2951 { 2952 struct bna *bna = ccb->cq->rx->bna; 2953 u32 load, bias; 2954 u32 pkt_rt, small_rt, large_rt; 2955 u8 coalescing_timeo; 2956 2957 if ((ccb->pkt_rate.small_pkt_cnt == 0) && 2958 (ccb->pkt_rate.large_pkt_cnt == 0)) 2959 return; 2960 2961 /* Arrive at preconfigured coalescing timeo value based on pkt rate */ 2962 2963 small_rt = ccb->pkt_rate.small_pkt_cnt; 2964 large_rt = ccb->pkt_rate.large_pkt_cnt; 2965 2966 pkt_rt = small_rt + large_rt; 2967 2968 if (pkt_rt < BNA_PKT_RATE_10K) 2969 load = BNA_LOAD_T_LOW_4; 2970 else if (pkt_rt < BNA_PKT_RATE_20K) 2971 load = BNA_LOAD_T_LOW_3; 2972 else if (pkt_rt < BNA_PKT_RATE_30K) 2973 load = BNA_LOAD_T_LOW_2; 2974 else if (pkt_rt < BNA_PKT_RATE_40K) 2975 load = BNA_LOAD_T_LOW_1; 2976 else if (pkt_rt < BNA_PKT_RATE_50K) 2977 load = BNA_LOAD_T_HIGH_1; 2978 else if (pkt_rt < BNA_PKT_RATE_60K) 2979 load = BNA_LOAD_T_HIGH_2; 2980 else if (pkt_rt < BNA_PKT_RATE_80K) 2981 load = BNA_LOAD_T_HIGH_3; 2982 else 2983 load = BNA_LOAD_T_HIGH_4; 2984 2985 if (small_rt > (large_rt << 1)) 2986 bias = 0; 2987 else 2988 bias = 1; 2989 2990 ccb->pkt_rate.small_pkt_cnt = 0; 2991 ccb->pkt_rate.large_pkt_cnt = 0; 2992 2993 coalescing_timeo = bna->rx_mod.dim_vector[load][bias]; 2994 ccb->rx_coalescing_timeo = coalescing_timeo; 2995 2996 /* Set it to IB */ 2997 bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo); 2998 } 2999 3000 const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = { 3001 {12, 12}, 3002 {6, 10}, 3003 {5, 10}, 3004 {4, 8}, 3005 {3, 6}, 3006 {3, 6}, 3007 {2, 4}, 3008 {1, 2}, 3009 }; 3010 3011 /* TX */ 3012 3013 #define call_tx_stop_cbfn(tx) \ 3014 do { \ 3015 if ((tx)->stop_cbfn) { \ 3016 void (*cbfn)(void *, struct bna_tx *); \ 3017 void *cbarg; \ 3018 cbfn = (tx)->stop_cbfn; \ 3019 cbarg = (tx)->stop_cbarg; \ 3020 (tx)->stop_cbfn = NULL; \ 3021 (tx)->stop_cbarg = NULL; \ 3022 cbfn(cbarg, (tx)); \ 3023 } \ 3024 } while (0) 3025 3026 #define call_tx_prio_change_cbfn(tx) \ 3027 do { \ 3028 if ((tx)->prio_change_cbfn) { \ 3029 void (*cbfn)(struct bnad *, struct bna_tx *); \ 3030 cbfn = 
(tx)->prio_change_cbfn; \ 3031 (tx)->prio_change_cbfn = NULL; \ 3032 cbfn((tx)->bna->bnad, (tx)); \ 3033 } \ 3034 } while (0) 3035 3036 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx); 3037 static void bna_bfi_tx_enet_start(struct bna_tx *tx); 3038 static void bna_tx_enet_stop(struct bna_tx *tx); 3039 3040 enum bna_tx_event { 3041 TX_E_START = 1, 3042 TX_E_STOP = 2, 3043 TX_E_FAIL = 3, 3044 TX_E_STARTED = 4, 3045 TX_E_STOPPED = 5, 3046 TX_E_PRIO_CHANGE = 6, 3047 TX_E_CLEANUP_DONE = 7, 3048 TX_E_BW_UPDATE = 8, 3049 }; 3050 3051 bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event); 3052 bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event); 3053 bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event); 3054 bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event); 3055 bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx, 3056 enum bna_tx_event); 3057 bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx, 3058 enum bna_tx_event); 3059 bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx, 3060 enum bna_tx_event); 3061 bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event); 3062 bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx, 3063 enum bna_tx_event); 3064 3065 static void 3066 bna_tx_sm_stopped_entry(struct bna_tx *tx) 3067 { 3068 call_tx_stop_cbfn(tx); 3069 } 3070 3071 static void 3072 bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event) 3073 { 3074 switch (event) { 3075 case TX_E_START: 3076 bfa_fsm_set_state(tx, bna_tx_sm_start_wait); 3077 break; 3078 3079 case TX_E_STOP: 3080 call_tx_stop_cbfn(tx); 3081 break; 3082 3083 case TX_E_FAIL: 3084 /* No-op */ 3085 break; 3086 3087 case TX_E_PRIO_CHANGE: 3088 call_tx_prio_change_cbfn(tx); 3089 break; 3090 3091 case TX_E_BW_UPDATE: 3092 /* No-op */ 3093 break; 3094 3095 default: 3096 bfa_sm_fault(event); 3097 } 3098 } 3099 3100 static void 3101 bna_tx_sm_start_wait_entry(struct bna_tx *tx) 3102 { 3103 bna_bfi_tx_enet_start(tx); 3104 } 3105 3106 static void 3107 bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event) 3108 { 3109 switch (event) { 3110 case TX_E_STOP: 3111 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED); 3112 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); 3113 break; 3114 3115 case TX_E_FAIL: 3116 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED); 3117 bfa_fsm_set_state(tx, bna_tx_sm_stopped); 3118 break; 3119 3120 case TX_E_STARTED: 3121 if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) { 3122 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | 3123 BNA_TX_F_BW_UPDATED); 3124 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); 3125 } else 3126 bfa_fsm_set_state(tx, bna_tx_sm_started); 3127 break; 3128 3129 case TX_E_PRIO_CHANGE: 3130 tx->flags |= BNA_TX_F_PRIO_CHANGED; 3131 break; 3132 3133 case TX_E_BW_UPDATE: 3134 tx->flags |= BNA_TX_F_BW_UPDATED; 3135 break; 3136 3137 default: 3138 bfa_sm_fault(event); 3139 } 3140 } 3141 3142 static void 3143 bna_tx_sm_started_entry(struct bna_tx *tx) 3144 { 3145 struct bna_txq *txq; 3146 struct list_head *qe; 3147 int is_regular = (tx->type == BNA_TX_T_REGULAR); 3148 3149 list_for_each(qe, &tx->txq_q) { 3150 txq = (struct bna_txq *)qe; 3151 txq->tcb->priority = txq->priority; 3152 /* Start IB */ 3153 bna_ib_start(tx->bna, &txq->ib, is_regular); 3154 } 3155 tx->tx_resume_cbfn(tx->bna->bnad, tx); 3156 } 3157 3158 static void 3159 bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event) 3160 { 3161 switch (event) { 3162 case TX_E_STOP: 3163 
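		/* Stop path: move to stop_wait, stall the data path in the
		 * driver, then ask firmware to tear the TxQs down (the IBs
		 * are stopped first inside bna_tx_enet_stop()).
		 */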
bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); 3164 tx->tx_stall_cbfn(tx->bna->bnad, tx); 3165 bna_tx_enet_stop(tx); 3166 break; 3167 3168 case TX_E_FAIL: 3169 bfa_fsm_set_state(tx, bna_tx_sm_failed); 3170 tx->tx_stall_cbfn(tx->bna->bnad, tx); 3171 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); 3172 break; 3173 3174 case TX_E_PRIO_CHANGE: 3175 case TX_E_BW_UPDATE: 3176 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); 3177 break; 3178 3179 default: 3180 bfa_sm_fault(event); 3181 } 3182 } 3183 3184 static void 3185 bna_tx_sm_stop_wait_entry(struct bna_tx *tx) 3186 { 3187 } 3188 3189 static void 3190 bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event) 3191 { 3192 switch (event) { 3193 case TX_E_FAIL: 3194 case TX_E_STOPPED: 3195 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); 3196 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); 3197 break; 3198 3199 case TX_E_STARTED: 3200 /** 3201 * We are here due to start_wait -> stop_wait transition on 3202 * TX_E_STOP event 3203 */ 3204 bna_tx_enet_stop(tx); 3205 break; 3206 3207 case TX_E_PRIO_CHANGE: 3208 case TX_E_BW_UPDATE: 3209 /* No-op */ 3210 break; 3211 3212 default: 3213 bfa_sm_fault(event); 3214 } 3215 } 3216 3217 static void 3218 bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx) 3219 { 3220 } 3221 3222 static void 3223 bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) 3224 { 3225 switch (event) { 3226 case TX_E_FAIL: 3227 case TX_E_PRIO_CHANGE: 3228 case TX_E_BW_UPDATE: 3229 /* No-op */ 3230 break; 3231 3232 case TX_E_CLEANUP_DONE: 3233 bfa_fsm_set_state(tx, bna_tx_sm_stopped); 3234 break; 3235 3236 default: 3237 bfa_sm_fault(event); 3238 } 3239 } 3240 3241 static void 3242 bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx) 3243 { 3244 tx->tx_stall_cbfn(tx->bna->bnad, tx); 3245 bna_tx_enet_stop(tx); 3246 } 3247 3248 static void 3249 bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event) 3250 { 3251 switch (event) { 3252 case TX_E_STOP: 3253 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); 3254 break; 3255 3256 case TX_E_FAIL: 3257 bfa_fsm_set_state(tx, bna_tx_sm_failed); 3258 call_tx_prio_change_cbfn(tx); 3259 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); 3260 break; 3261 3262 case TX_E_STOPPED: 3263 bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait); 3264 break; 3265 3266 case TX_E_PRIO_CHANGE: 3267 case TX_E_BW_UPDATE: 3268 /* No-op */ 3269 break; 3270 3271 default: 3272 bfa_sm_fault(event); 3273 } 3274 } 3275 3276 static void 3277 bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx) 3278 { 3279 call_tx_prio_change_cbfn(tx); 3280 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); 3281 } 3282 3283 static void 3284 bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) 3285 { 3286 switch (event) { 3287 case TX_E_STOP: 3288 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); 3289 break; 3290 3291 case TX_E_FAIL: 3292 bfa_fsm_set_state(tx, bna_tx_sm_failed); 3293 break; 3294 3295 case TX_E_PRIO_CHANGE: 3296 case TX_E_BW_UPDATE: 3297 /* No-op */ 3298 break; 3299 3300 case TX_E_CLEANUP_DONE: 3301 bfa_fsm_set_state(tx, bna_tx_sm_start_wait); 3302 break; 3303 3304 default: 3305 bfa_sm_fault(event); 3306 } 3307 } 3308 3309 static void 3310 bna_tx_sm_failed_entry(struct bna_tx *tx) 3311 { 3312 } 3313 3314 static void 3315 bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event) 3316 { 3317 switch (event) { 3318 case TX_E_START: 3319 bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait); 3320 break; 3321 3322 case TX_E_STOP: 3323 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); 3324 break; 3325 3326 case TX_E_FAIL: 3327 /* No-op */ 3328 break; 
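	/* Driver-side cleanup finished while in the failed state: the Tx
	 * can go back to stopped.
	 */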
3329 3330 case TX_E_CLEANUP_DONE: 3331 bfa_fsm_set_state(tx, bna_tx_sm_stopped); 3332 break; 3333 3334 default: 3335 bfa_sm_fault(event); 3336 } 3337 } 3338 3339 static void 3340 bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx) 3341 { 3342 } 3343 3344 static void 3345 bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event) 3346 { 3347 switch (event) { 3348 case TX_E_STOP: 3349 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); 3350 break; 3351 3352 case TX_E_FAIL: 3353 bfa_fsm_set_state(tx, bna_tx_sm_failed); 3354 break; 3355 3356 case TX_E_CLEANUP_DONE: 3357 bfa_fsm_set_state(tx, bna_tx_sm_start_wait); 3358 break; 3359 3360 case TX_E_BW_UPDATE: 3361 /* No-op */ 3362 break; 3363 3364 default: 3365 bfa_sm_fault(event); 3366 } 3367 } 3368 3369 static void 3370 bna_bfi_tx_enet_start(struct bna_tx *tx) 3371 { 3372 struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req; 3373 struct bna_txq *txq = NULL; 3374 struct list_head *qe; 3375 int i; 3376 3377 bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, 3378 BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid); 3379 cfg_req->mh.num_entries = htons( 3380 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req))); 3381 3382 cfg_req->num_queues = tx->num_txq; 3383 for (i = 0, qe = bfa_q_first(&tx->txq_q); 3384 i < tx->num_txq; 3385 i++, qe = bfa_q_next(qe)) { 3386 txq = (struct bna_txq *)qe; 3387 3388 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt); 3389 cfg_req->q_cfg[i].q.priority = txq->priority; 3390 3391 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = 3392 txq->ib.ib_seg_host_addr.lsb; 3393 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = 3394 txq->ib.ib_seg_host_addr.msb; 3395 cfg_req->q_cfg[i].ib.intr.msix_index = 3396 htons((u16)txq->ib.intr_vector); 3397 } 3398 3399 cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED; 3400 cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; 3401 cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; 3402 cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED; 3403 cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX) 3404 ? 
BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; 3405 cfg_req->ib_cfg.coalescing_timeout = 3406 htonl((u32)txq->ib.coalescing_timeo); 3407 cfg_req->ib_cfg.inter_pkt_timeout = 3408 htonl((u32)txq->ib.interpkt_timeo); 3409 cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count; 3410 3411 cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI; 3412 cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id); 3413 cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED; 3414 cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED; 3415 3416 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, 3417 sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh); 3418 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); 3419 } 3420 3421 static void 3422 bna_bfi_tx_enet_stop(struct bna_tx *tx) 3423 { 3424 struct bfi_enet_req *req = &tx->bfi_enet_cmd.req; 3425 3426 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, 3427 BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid); 3428 req->mh.num_entries = htons( 3429 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req))); 3430 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), 3431 &req->mh); 3432 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); 3433 } 3434 3435 static void 3436 bna_tx_enet_stop(struct bna_tx *tx) 3437 { 3438 struct bna_txq *txq; 3439 struct list_head *qe; 3440 3441 /* Stop IB */ 3442 list_for_each(qe, &tx->txq_q) { 3443 txq = (struct bna_txq *)qe; 3444 bna_ib_stop(tx->bna, &txq->ib); 3445 } 3446 3447 bna_bfi_tx_enet_stop(tx); 3448 } 3449 3450 static void 3451 bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size, 3452 struct bna_mem_descr *qpt_mem, 3453 struct bna_mem_descr *swqpt_mem, 3454 struct bna_mem_descr *page_mem) 3455 { 3456 u8 *kva; 3457 u64 dma; 3458 struct bna_dma_addr bna_dma; 3459 int i; 3460 3461 txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; 3462 txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; 3463 txq->qpt.kv_qpt_ptr = qpt_mem->kva; 3464 txq->qpt.page_count = page_count; 3465 txq->qpt.page_size = page_size; 3466 3467 txq->tcb->sw_qpt = (void **) swqpt_mem->kva; 3468 txq->tcb->sw_q = page_mem->kva; 3469 3470 kva = page_mem->kva; 3471 BNA_GET_DMA_ADDR(&page_mem->dma, dma); 3472 3473 for (i = 0; i < page_count; i++) { 3474 txq->tcb->sw_qpt[i] = kva; 3475 kva += PAGE_SIZE; 3476 3477 BNA_SET_DMA_ADDR(dma, &bna_dma); 3478 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb = 3479 bna_dma.lsb; 3480 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb = 3481 bna_dma.msb; 3482 dma += PAGE_SIZE; 3483 } 3484 } 3485 3486 static struct bna_tx * 3487 bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type) 3488 { 3489 struct list_head *qe = NULL; 3490 struct bna_tx *tx = NULL; 3491 3492 if (list_empty(&tx_mod->tx_free_q)) 3493 return NULL; 3494 if (type == BNA_TX_T_REGULAR) { 3495 bfa_q_deq(&tx_mod->tx_free_q, &qe); 3496 } else { 3497 bfa_q_deq_tail(&tx_mod->tx_free_q, &qe); 3498 } 3499 tx = (struct bna_tx *)qe; 3500 bfa_q_qe_init(&tx->qe); 3501 tx->type = type; 3502 3503 return tx; 3504 } 3505 3506 static void 3507 bna_tx_free(struct bna_tx *tx) 3508 { 3509 struct bna_tx_mod *tx_mod = &tx->bna->tx_mod; 3510 struct bna_txq *txq; 3511 struct list_head *prev_qe; 3512 struct list_head *qe; 3513 3514 while (!list_empty(&tx->txq_q)) { 3515 bfa_q_deq(&tx->txq_q, &txq); 3516 bfa_q_qe_init(&txq->qe); 3517 txq->tcb = NULL; 3518 txq->tx = NULL; 3519 list_add_tail(&txq->qe, &tx_mod->txq_free_q); 3520 } 3521 3522 list_for_each(qe, &tx_mod->tx_active_q) { 3523 if (qe == &tx->qe) { 3524 list_del(&tx->qe); 3525 bfa_q_qe_init(&tx->qe); 3526 break; 3527 } 3528 } 3529 
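	/* Detach the Tx from the bna instance and give it back to
	 * tx_free_q; the free list is kept sorted by rid, hence the
	 * ordered insert below.
	 */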
3530 tx->bna = NULL; 3531 tx->priv = NULL; 3532 3533 prev_qe = NULL; 3534 list_for_each(qe, &tx_mod->tx_free_q) { 3535 if (((struct bna_tx *)qe)->rid < tx->rid) 3536 prev_qe = qe; 3537 else { 3538 break; 3539 } 3540 } 3541 3542 if (prev_qe == NULL) { 3543 /* This is the first entry */ 3544 bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe); 3545 } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) { 3546 /* This is the last entry */ 3547 list_add_tail(&tx->qe, &tx_mod->tx_free_q); 3548 } else { 3549 /* Somewhere in the middle */ 3550 bfa_q_next(&tx->qe) = bfa_q_next(prev_qe); 3551 bfa_q_prev(&tx->qe) = prev_qe; 3552 bfa_q_next(prev_qe) = &tx->qe; 3553 bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe; 3554 } 3555 } 3556 3557 static void 3558 bna_tx_start(struct bna_tx *tx) 3559 { 3560 tx->flags |= BNA_TX_F_ENET_STARTED; 3561 if (tx->flags & BNA_TX_F_ENABLED) 3562 bfa_fsm_send_event(tx, TX_E_START); 3563 } 3564 3565 static void 3566 bna_tx_stop(struct bna_tx *tx) 3567 { 3568 tx->stop_cbfn = bna_tx_mod_cb_tx_stopped; 3569 tx->stop_cbarg = &tx->bna->tx_mod; 3570 3571 tx->flags &= ~BNA_TX_F_ENET_STARTED; 3572 bfa_fsm_send_event(tx, TX_E_STOP); 3573 } 3574 3575 static void 3576 bna_tx_fail(struct bna_tx *tx) 3577 { 3578 tx->flags &= ~BNA_TX_F_ENET_STARTED; 3579 bfa_fsm_send_event(tx, TX_E_FAIL); 3580 } 3581 3582 void 3583 bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) 3584 { 3585 struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp; 3586 struct bna_txq *txq = NULL; 3587 struct list_head *qe; 3588 int i; 3589 3590 bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp, 3591 sizeof(struct bfi_enet_tx_cfg_rsp)); 3592 3593 tx->hw_id = cfg_rsp->hw_id; 3594 3595 for (i = 0, qe = bfa_q_first(&tx->txq_q); 3596 i < tx->num_txq; i++, qe = bfa_q_next(qe)) { 3597 txq = (struct bna_txq *)qe; 3598 3599 /* Setup doorbells */ 3600 txq->tcb->i_dbell->doorbell_addr = 3601 tx->bna->pcidev.pci_bar_kva 3602 + ntohl(cfg_rsp->q_handles[i].i_dbell); 3603 txq->tcb->q_dbell = 3604 tx->bna->pcidev.pci_bar_kva 3605 + ntohl(cfg_rsp->q_handles[i].q_dbell); 3606 txq->hw_id = cfg_rsp->q_handles[i].hw_qid; 3607 3608 /* Initialize producer/consumer indexes */ 3609 (*txq->tcb->hw_consumer_index) = 0; 3610 txq->tcb->producer_index = txq->tcb->consumer_index = 0; 3611 } 3612 3613 bfa_fsm_send_event(tx, TX_E_STARTED); 3614 } 3615 3616 void 3617 bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) 3618 { 3619 bfa_fsm_send_event(tx, TX_E_STOPPED); 3620 } 3621 3622 void 3623 bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod) 3624 { 3625 struct bna_tx *tx; 3626 struct list_head *qe; 3627 3628 list_for_each(qe, &tx_mod->tx_active_q) { 3629 tx = (struct bna_tx *)qe; 3630 bfa_fsm_send_event(tx, TX_E_BW_UPDATE); 3631 } 3632 } 3633 3634 void 3635 bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info) 3636 { 3637 u32 q_size; 3638 u32 page_count; 3639 struct bna_mem_info *mem_info; 3640 3641 res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM; 3642 mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info; 3643 mem_info->mem_type = BNA_MEM_T_KVA; 3644 mem_info->len = sizeof(struct bna_tcb); 3645 mem_info->num = num_txq; 3646 3647 q_size = txq_depth * BFI_TXQ_WI_SIZE; 3648 q_size = ALIGN(q_size, PAGE_SIZE); 3649 page_count = q_size >> PAGE_SHIFT; 3650 3651 res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM; 3652 mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info; 3653 mem_info->mem_type = BNA_MEM_T_DMA; 3654 mem_info->len = page_count * sizeof(struct bna_dma_addr); 3655 
mem_info->num = num_txq; 3656 3657 res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM; 3658 mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info; 3659 mem_info->mem_type = BNA_MEM_T_KVA; 3660 mem_info->len = page_count * sizeof(void *); 3661 mem_info->num = num_txq; 3662 3663 res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM; 3664 mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info; 3665 mem_info->mem_type = BNA_MEM_T_DMA; 3666 mem_info->len = PAGE_SIZE * page_count; 3667 mem_info->num = num_txq; 3668 3669 res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; 3670 mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info; 3671 mem_info->mem_type = BNA_MEM_T_DMA; 3672 mem_info->len = BFI_IBIDX_SIZE; 3673 mem_info->num = num_txq; 3674 3675 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR; 3676 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type = 3677 BNA_INTR_T_MSIX; 3678 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq; 3679 } 3680 3681 struct bna_tx * 3682 bna_tx_create(struct bna *bna, struct bnad *bnad, 3683 struct bna_tx_config *tx_cfg, 3684 const struct bna_tx_event_cbfn *tx_cbfn, 3685 struct bna_res_info *res_info, void *priv) 3686 { 3687 struct bna_intr_info *intr_info; 3688 struct bna_tx_mod *tx_mod = &bna->tx_mod; 3689 struct bna_tx *tx; 3690 struct bna_txq *txq; 3691 struct list_head *qe; 3692 int page_count; 3693 int i; 3694 3695 intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; 3696 page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) / 3697 PAGE_SIZE; 3698 3699 /** 3700 * Get resources 3701 */ 3702 3703 if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq)) 3704 return NULL; 3705 3706 /* Tx */ 3707 3708 tx = bna_tx_get(tx_mod, tx_cfg->tx_type); 3709 if (!tx) 3710 return NULL; 3711 tx->bna = bna; 3712 tx->priv = priv; 3713 3714 /* TxQs */ 3715 3716 INIT_LIST_HEAD(&tx->txq_q); 3717 for (i = 0; i < tx_cfg->num_txq; i++) { 3718 if (list_empty(&tx_mod->txq_free_q)) 3719 goto err_return; 3720 3721 bfa_q_deq(&tx_mod->txq_free_q, &txq); 3722 bfa_q_qe_init(&txq->qe); 3723 list_add_tail(&txq->qe, &tx->txq_q); 3724 txq->tx = tx; 3725 } 3726 3727 /* 3728 * Initialize 3729 */ 3730 3731 /* Tx */ 3732 3733 tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn; 3734 tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn; 3735 /* Following callbacks are mandatory */ 3736 tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn; 3737 tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn; 3738 tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn; 3739 3740 list_add_tail(&tx->qe, &tx_mod->tx_active_q); 3741 3742 tx->num_txq = tx_cfg->num_txq; 3743 3744 tx->flags = 0; 3745 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) { 3746 switch (tx->type) { 3747 case BNA_TX_T_REGULAR: 3748 if (!(tx->bna->tx_mod.flags & 3749 BNA_TX_MOD_F_ENET_LOOPBACK)) 3750 tx->flags |= BNA_TX_F_ENET_STARTED; 3751 break; 3752 case BNA_TX_T_LOOPBACK: 3753 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK) 3754 tx->flags |= BNA_TX_F_ENET_STARTED; 3755 break; 3756 } 3757 } 3758 3759 /* TxQ */ 3760 3761 i = 0; 3762 list_for_each(qe, &tx->txq_q) { 3763 txq = (struct bna_txq *)qe; 3764 txq->tcb = (struct bna_tcb *) 3765 res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva; 3766 txq->tx_packets = 0; 3767 txq->tx_bytes = 0; 3768 3769 /* IB */ 3770 txq->ib.ib_seg_host_addr.lsb = 3771 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb; 3772 txq->ib.ib_seg_host_addr.msb = 3773 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb; 
3774 txq->ib.ib_seg_host_addr_kva = 3775 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva; 3776 txq->ib.intr_type = intr_info->intr_type; 3777 txq->ib.intr_vector = (intr_info->num == 1) ? 3778 intr_info->idl[0].vector : 3779 intr_info->idl[i].vector; 3780 if (intr_info->intr_type == BNA_INTR_T_INTX) 3781 txq->ib.intr_vector = (1 << txq->ib.intr_vector); 3782 txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo; 3783 txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO; 3784 txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT; 3785 3786 /* TCB */ 3787 3788 txq->tcb->q_depth = tx_cfg->txq_depth; 3789 txq->tcb->unmap_q = (void *) 3790 res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva; 3791 txq->tcb->hw_consumer_index = 3792 (u32 *)txq->ib.ib_seg_host_addr_kva; 3793 txq->tcb->i_dbell = &txq->ib.door_bell; 3794 txq->tcb->intr_type = txq->ib.intr_type; 3795 txq->tcb->intr_vector = txq->ib.intr_vector; 3796 txq->tcb->txq = txq; 3797 txq->tcb->bnad = bnad; 3798 txq->tcb->id = i; 3799 3800 /* QPT, SWQPT, Pages */ 3801 bna_txq_qpt_setup(txq, page_count, PAGE_SIZE, 3802 &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i], 3803 &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i], 3804 &res_info[BNA_TX_RES_MEM_T_PAGE]. 3805 res_u.mem_info.mdl[i]); 3806 3807 /* Callback to bnad for setting up TCB */ 3808 if (tx->tcb_setup_cbfn) 3809 (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb); 3810 3811 if (tx_cfg->num_txq == BFI_TX_MAX_PRIO) 3812 txq->priority = txq->tcb->id; 3813 else 3814 txq->priority = tx_mod->default_prio; 3815 3816 i++; 3817 } 3818 3819 tx->txf_vlan_id = 0; 3820 3821 bfa_fsm_set_state(tx, bna_tx_sm_stopped); 3822 3823 tx_mod->rid_mask |= (1 << tx->rid); 3824 3825 return tx; 3826 3827 err_return: 3828 bna_tx_free(tx); 3829 return NULL; 3830 } 3831 3832 void 3833 bna_tx_destroy(struct bna_tx *tx) 3834 { 3835 struct bna_txq *txq; 3836 struct list_head *qe; 3837 3838 list_for_each(qe, &tx->txq_q) { 3839 txq = (struct bna_txq *)qe; 3840 if (tx->tcb_destroy_cbfn) 3841 (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb); 3842 } 3843 3844 tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid); 3845 bna_tx_free(tx); 3846 } 3847 3848 void 3849 bna_tx_enable(struct bna_tx *tx) 3850 { 3851 if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped) 3852 return; 3853 3854 tx->flags |= BNA_TX_F_ENABLED; 3855 3856 if (tx->flags & BNA_TX_F_ENET_STARTED) 3857 bfa_fsm_send_event(tx, TX_E_START); 3858 } 3859 3860 void 3861 bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, 3862 void (*cbfn)(void *, struct bna_tx *)) 3863 { 3864 if (type == BNA_SOFT_CLEANUP) { 3865 (*cbfn)(tx->bna->bnad, tx); 3866 return; 3867 } 3868 3869 tx->stop_cbfn = cbfn; 3870 tx->stop_cbarg = tx->bna->bnad; 3871 3872 tx->flags &= ~BNA_TX_F_ENABLED; 3873 3874 bfa_fsm_send_event(tx, TX_E_STOP); 3875 } 3876 3877 void 3878 bna_tx_cleanup_complete(struct bna_tx *tx) 3879 { 3880 bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE); 3881 } 3882 3883 static void 3884 bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx) 3885 { 3886 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg; 3887 3888 bfa_wc_down(&tx_mod->tx_stop_wc); 3889 } 3890 3891 static void 3892 bna_tx_mod_cb_tx_stopped_all(void *arg) 3893 { 3894 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg; 3895 3896 if (tx_mod->stop_cbfn) 3897 tx_mod->stop_cbfn(&tx_mod->bna->enet); 3898 tx_mod->stop_cbfn = NULL; 3899 } 3900 3901 void 3902 bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna, 3903 struct bna_res_info *res_info) 3904 { 3905 int i; 3906 3907 tx_mod->bna = bna; 3908 tx_mod->flags = 0; 3909 
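	/* Carve the Tx and TxQ object arrays out of the kva blocks that
	 * were reserved through res_info for this module.
	 */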
3910 tx_mod->tx = (struct bna_tx *) 3911 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva; 3912 tx_mod->txq = (struct bna_txq *) 3913 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva; 3914 3915 INIT_LIST_HEAD(&tx_mod->tx_free_q); 3916 INIT_LIST_HEAD(&tx_mod->tx_active_q); 3917 3918 INIT_LIST_HEAD(&tx_mod->txq_free_q); 3919 3920 for (i = 0; i < bna->ioceth.attr.num_txq; i++) { 3921 tx_mod->tx[i].rid = i; 3922 bfa_q_qe_init(&tx_mod->tx[i].qe); 3923 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q); 3924 bfa_q_qe_init(&tx_mod->txq[i].qe); 3925 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q); 3926 } 3927 3928 tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL; 3929 tx_mod->default_prio = 0; 3930 tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED; 3931 tx_mod->iscsi_prio = -1; 3932 } 3933 3934 void 3935 bna_tx_mod_uninit(struct bna_tx_mod *tx_mod) 3936 { 3937 struct list_head *qe; 3938 int i; 3939 3940 i = 0; 3941 list_for_each(qe, &tx_mod->tx_free_q) 3942 i++; 3943 3944 i = 0; 3945 list_for_each(qe, &tx_mod->txq_free_q) 3946 i++; 3947 3948 tx_mod->bna = NULL; 3949 } 3950 3951 void 3952 bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type) 3953 { 3954 struct bna_tx *tx; 3955 struct list_head *qe; 3956 3957 tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED; 3958 if (type == BNA_TX_T_LOOPBACK) 3959 tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK; 3960 3961 list_for_each(qe, &tx_mod->tx_active_q) { 3962 tx = (struct bna_tx *)qe; 3963 if (tx->type == type) 3964 bna_tx_start(tx); 3965 } 3966 } 3967 3968 void 3969 bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type) 3970 { 3971 struct bna_tx *tx; 3972 struct list_head *qe; 3973 3974 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; 3975 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; 3976 3977 tx_mod->stop_cbfn = bna_enet_cb_tx_stopped; 3978 3979 bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod); 3980 3981 list_for_each(qe, &tx_mod->tx_active_q) { 3982 tx = (struct bna_tx *)qe; 3983 if (tx->type == type) { 3984 bfa_wc_up(&tx_mod->tx_stop_wc); 3985 bna_tx_stop(tx); 3986 } 3987 } 3988 3989 bfa_wc_wait(&tx_mod->tx_stop_wc); 3990 } 3991 3992 void 3993 bna_tx_mod_fail(struct bna_tx_mod *tx_mod) 3994 { 3995 struct bna_tx *tx; 3996 struct list_head *qe; 3997 3998 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; 3999 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; 4000 4001 list_for_each(qe, &tx_mod->tx_active_q) { 4002 tx = (struct bna_tx *)qe; 4003 bna_tx_fail(tx); 4004 } 4005 } 4006 4007 void 4008 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo) 4009 { 4010 struct bna_txq *txq; 4011 struct list_head *qe; 4012 4013 list_for_each(qe, &tx->txq_q) { 4014 txq = (struct bna_txq *)qe; 4015 bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo); 4016 } 4017 } 4018
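/*
 * Illustrative call-order sketch (not part of the driver): how a caller
 * such as the bnad module might bring a Tx object up and down with the
 * functions defined above.  The identifiers my_bna, my_bnad, my_tx_cfg,
 * my_tx_cbfn, my_priv, my_stop_cbfn, the BNA_TX_RES_T_MAX array size and
 * BNA_HARD_CLEANUP are assumptions used only for illustration; the
 * bna_tx_* calls and their argument order come from this file.
 *
 *	struct bna_res_info tx_res[BNA_TX_RES_T_MAX];
 *	struct bna_tx *tx;
 *
 *	bna_tx_res_req(my_tx_cfg.num_txq, my_tx_cfg.txq_depth, tx_res);
 *		(caller then allocates the memory described in tx_res)
 *	tx = bna_tx_create(my_bna, my_bnad, &my_tx_cfg, &my_tx_cbfn,
 *			   tx_res, my_priv);
 *		(NULL means no free Tx/TxQ objects or a bad interrupt setup)
 *	bna_tx_enable(tx);
 *		(sends TX_E_START once ENET has been started)
 *	...
 *	bna_tx_disable(tx, BNA_HARD_CLEANUP, my_stop_cbfn);
 *	bna_tx_destroy(tx);
 */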