/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2011-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI		16

#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)

/**** Exe Queue interfaces ****/

/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @bp:		driver handle
 * @o:		pointer to the object
 * @exe_len:	length
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @remove:	remove function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
					struct bnx2x_exe_queue_obj *o,
					int exe_len,
					union bnx2x_qable_obj *owner,
					exe_q_validate validate,
					exe_q_remove remove,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	memset(o, 0, sizeof(*o));

	INIT_LIST_HEAD(&o->exe_queue);
	INIT_LIST_HEAD(&o->pending_comp);

	spin_lock_init(&o->lock);

	o->exe_chunk_len = exe_len;
	o->owner	 = owner;

	/* Owner specific callbacks */
	o->validate	 = validate;
	o->remove	 = remove;
	o->optimize	 = optimize;
	o->execute	 = exec;
	o->get		 = get;

	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
	   exe_len);
}

static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}

static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;
	int cnt = 0;

	spin_lock_bh(&o->lock);

	list_for_each_entry(elem, &o->exe_queue, link)
		cnt++;

	spin_unlock_bh(&o->lock);

	return cnt;
}

/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp:		driver handle
 * @o:		queue
 * @elem:	new command to add
 * @restore:	true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
				      struct bnx2x_exe_queue_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      bool restore)
{
	int rc;

	spin_lock_bh(&o->lock);

	if (!restore) {
		/* Try to cancel this element queue */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			BNX2X_ERR("Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock_bh(&o->lock);

	return 0;

free_and_exit:
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock_bh(&o->lock);

	return rc;
}

static inline void __bnx2x_exe_queue_reset_pending(
	struct bnx2x *bp,
	struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;

	while (!list_empty(&o->pending_comp)) {
		elem = list_first_entry(&o->pending_comp,
					struct bnx2x_exeq_elem, link);

		list_del(&elem->link);
		bnx2x_exe_queue_free_elem(bp, elem);
	}
}

static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
						 struct bnx2x_exe_queue_obj *o)
{
	spin_lock_bh(&o->lock);

	__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
}

/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:			driver handle
 * @o:			queue
 * @ramrod_flags:	flags
 *
 * (Atomicity is ensured using the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	spin_lock_bh(&o->lock);

	/*
	 * Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the
	 * FW which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			spin_unlock_bh(&o->lock);
			return 1;
		}
	}

	/*
	 * Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/*
			 * Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_del(&elem->link);
			list_add_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len) {
		spin_unlock_bh(&o->lock);
		return 0;
	}

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/*
		 * In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/*
		 * If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
	return rc;
}
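
/*
 * Illustrative sketch (not part of the driver): the intended exe-queue
 * life cycle is "add, then step". A hypothetical caller with an already
 * initialized queue object 'q' and a freshly allocated element 'elem'
 * would do roughly:
 *
 *	rc = bnx2x_exe_queue_add(bp, q, elem, false);
 *	if (rc)
 *		return rc;  // elem was optimized away or rejected (and freed)
 *
 *	rc = bnx2x_exe_queue_step(bp, q, &ramrod_flags);
 *	// rc < 0: error, the chunk was returned to the queue
 *	// rc == 0: chunk executed, nothing left pending
 *	// rc > 0: a ramrod is in flight; completion drains pending_comp
 */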
static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && list_empty(&o->pending_comp);
}

static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
	struct bnx2x *bp)
{
	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}

/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
	return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

/**
 * bnx2x_state_wait - wait until the given bit (state) is cleared
 *
 * @bp:		device handle
 * @state:	state which is to be cleared
 * @pstate:	state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 1000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
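
/*
 * Illustrative sketch (not part of the driver): the raw object's pending
 * bit ties a ramrod to its completion. A hypothetical flow over a raw
 * object 'r' looks like:
 *
 *	r->set_pending(r);		// mark command in flight
 *	... post the ramrod to the FW ...
 *	rc = r->wait_comp(bp, r);	// polls until the completion
 *					// handler calls r->clear_pending(r)
 */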
static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get(vp, 1);
}

static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->get(mp, 1))
		return false;

	if (!vp->get(vp, 1)) {
		mp->put(mp, 1);
		return false;
	}

	return true;
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put(vp, 1);
}

static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->put(mp, 1))
		return false;

	if (!vp->put(vp, 1)) {
		mp->get(mp, 1);
		return false;
	}

	return true;
}

static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
				int n, u8 *buf)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	u8 *next = buf;
	int counter = 0;

	/* traverse list */
	list_for_each_entry(pos, &o->head, link) {
		if (counter < n) {
			/* place leading zeroes in buffer */
			memset(next, 0, MAC_LEADING_ZERO_CNT);

			/* place mac after leading zeroes */
			memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
			       ETH_ALEN);

			/* calculate address of next element and
			 * advance counter
			 */
			counter++;
			next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));

			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
			   counter, next, pos->u.mac.mac);
		}
	}
	return counter * ETH_ALEN;
}
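
/*
 * Illustrative sketch (not part of the driver): bnx2x_get_n_elements()
 * packs each 6-byte MAC into a u32-aligned 8-byte slot, zero-padded in
 * front. With ETH_ALEN == 6, MAC_LEADING_ZERO_CNT is 2, so for two MACs
 * the output buffer looks like:
 *
 *	buf[0..1]   = 00 00		// leading zeroes
 *	buf[2..7]   = first MAC
 *	buf[8..9]   = 00 00
 *	buf[10..15] = second MAC
 *
 * while the returned length counts only the raw MAC bytes (counter * 6).
 */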
/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o,
			       union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);

	if (!is_valid_ether_addr(data->mac.mac))
		return -EINVAL;

	/* Check if a requested MAC already exists */
	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_add(struct bnx2x *bp,
				struct bnx2x_vlan_mac_obj *o,
				union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
				    struct bnx2x_vlan_mac_obj *o,
				    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
	   data->vlan_mac.mac, data->vlan_mac.vlan);

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return -EEXIST;

	return 0;
}

/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
bnx2x_check_mac_del(struct bnx2x *bp,
		    struct bnx2x_vlan_mac_obj *o,
		    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);

	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
bnx2x_check_vlan_del(struct bnx2x *bp,
		     struct bnx2x_vlan_mac_obj *o,
		     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
bnx2x_check_vlan_mac_del(struct bnx2x *bp,
			 struct bnx2x_vlan_mac_obj *o,
			 union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
	   data->vlan_mac.mac, data->vlan_mac.vlan);

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return pos;

	return NULL;
}
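
/*
 * Illustrative sketch (not part of the driver): the check_add() callbacks
 * return 0 when the entry may be added and -EEXIST when it is already
 * registered, while the check_del() callbacks return the registry element
 * (or NULL). A hypothetical caller would combine them like:
 *
 *	if (o->check_add(bp, o, &data) == 0)
 *		// safe to issue an ADD ramrod
 *	if ((reg = o->check_del(bp, o, &data)) != NULL)
 *		// 'reg' holds the CAM offset to release on DEL
 */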
/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *src_o,
			     struct bnx2x_vlan_mac_obj *dst_o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(bp, src_o, data);

	/* check if configuration can be added */
	rc = dst_o->check_add(bp, dst_o, data);

	/* If this classification can not be added (is already set)
	 * or can't be deleted - return an error.
	 */
	if (rc || !pos)
		return false;

	return true;
}

static bool bnx2x_check_move_always_err(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}

static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	u8 rx_tx_flag = 0;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}

static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
					bool add, unsigned char *dev_addr,
					int index)
{
	u32 wb_data[2];
	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI(bp) || index > BNX2X_LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
	   (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register */
		reg_offset += 8*index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) |  dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
		    NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}

/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue for which we want to configure this rule
 * @add:	if true the command is an ADD command, DEL otherwise
 * @opcode:	CLASSIFY_RULE_OPCODE_XXX
 * @hdr:	pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
	struct eth_classify_cmd_header *hdr)
{
	struct bnx2x_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |=
		bnx2x_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}
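
/*
 * Illustrative sketch (not part of the driver): the 'echo' field that
 * bnx2x_vlan_mac_set_rdata_hdr_e2() builds below packs the SW connection
 * id and the pending-command type into one word, so the completion handler
 * can recover both:
 *
 *	echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
 *	...
 *	cid  = echo & BNX2X_SWCID_MASK;
 *	type = echo >> BNX2X_SWCID_SHIFT;
 */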
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:	connection id
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @hdr:	pointer to header to setup
 * @rule_cnt:	number of rules in this ramrod data
 *
 * currently we always configure one rule and echo field to contain a CID and an
 * opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
	hdr->rule_cnt = (u8)rule_cnt;
}

/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/*
	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of 8 per function MAC
	 * entries in the LLH register. There is also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
	   (add ? "add" : "delete"), mac, raw->cl_id);
"add" : "delete"), mac, raw->cl_id); 765 766 /* Set a MAC itself */ 767 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, 768 &rule_entry->mac.mac_mid, 769 &rule_entry->mac.mac_lsb, mac); 770 771 /* MOVE: Add a rule that will add this MAC to the target Queue */ 772 if (cmd == BNX2X_VLAN_MAC_MOVE) { 773 rule_entry++; 774 rule_cnt++; 775 776 /* Setup ramrod data */ 777 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, 778 elem->cmd_data.vlan_mac.target_obj, 779 true, CLASSIFY_RULE_OPCODE_MAC, 780 &rule_entry->mac.header); 781 782 /* Set a MAC itself */ 783 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, 784 &rule_entry->mac.mac_mid, 785 &rule_entry->mac.mac_lsb, mac); 786 } 787 788 /* Set the ramrod data header */ 789 /* TODO: take this to the higher level in order to prevent multiple 790 writing */ 791 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, 792 rule_cnt); 793 } 794 795 /** 796 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod 797 * 798 * @bp: device handle 799 * @o: queue 800 * @type: 801 * @cam_offset: offset in cam memory 802 * @hdr: pointer to a header to setup 803 * 804 * E1/E1H 805 */ 806 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp, 807 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, 808 struct mac_configuration_hdr *hdr) 809 { 810 struct bnx2x_raw_obj *r = &o->raw; 811 812 hdr->length = 1; 813 hdr->offset = (u8)cam_offset; 814 hdr->client_id = 0xff; 815 hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT)); 816 } 817 818 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp, 819 struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac, 820 u16 vlan_id, struct mac_configuration_entry *cfg_entry) 821 { 822 struct bnx2x_raw_obj *r = &o->raw; 823 u32 cl_bit_vec = (1 << r->cl_id); 824 825 cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec); 826 cfg_entry->pf_id = r->func_id; 827 cfg_entry->vlan_id = cpu_to_le16(vlan_id); 828 829 if (add) { 830 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 831 T_ETH_MAC_COMMAND_SET); 832 SET_FLAG(cfg_entry->flags, 833 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode); 834 835 /* Set a MAC in a ramrod data */ 836 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr, 837 &cfg_entry->middle_mac_addr, 838 &cfg_entry->lsb_mac_addr, mac); 839 } else 840 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 841 T_ETH_MAC_COMMAND_INVALIDATE); 842 } 843 844 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp, 845 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add, 846 u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config) 847 { 848 struct mac_configuration_entry *cfg_entry = &config->config_table[0]; 849 struct bnx2x_raw_obj *raw = &o->raw; 850 851 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset, 852 &config->hdr); 853 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id, 854 cfg_entry); 855 856 DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n", 857 (add ? 
"setting" : "clearing"), 858 mac, raw->cl_id, cam_offset); 859 } 860 861 /** 862 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data 863 * 864 * @bp: device handle 865 * @o: bnx2x_vlan_mac_obj 866 * @elem: bnx2x_exeq_elem 867 * @rule_idx: rule_idx 868 * @cam_offset: cam_offset 869 */ 870 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, 871 struct bnx2x_vlan_mac_obj *o, 872 struct bnx2x_exeq_elem *elem, int rule_idx, 873 int cam_offset) 874 { 875 struct bnx2x_raw_obj *raw = &o->raw; 876 struct mac_configuration_cmd *config = 877 (struct mac_configuration_cmd *)(raw->rdata); 878 /* 879 * 57710 and 57711 do not support MOVE command, 880 * so it's either ADD or DEL 881 */ 882 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 883 true : false; 884 885 /* Reset the ramrod data buffer */ 886 memset(config, 0, sizeof(*config)); 887 888 bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state, 889 cam_offset, add, 890 elem->cmd_data.vlan_mac.u.mac.mac, 0, 891 ETH_VLAN_FILTER_ANY_VLAN, config); 892 } 893 894 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, 895 struct bnx2x_vlan_mac_obj *o, 896 struct bnx2x_exeq_elem *elem, int rule_idx, 897 int cam_offset) 898 { 899 struct bnx2x_raw_obj *raw = &o->raw; 900 struct eth_classify_rules_ramrod_data *data = 901 (struct eth_classify_rules_ramrod_data *)(raw->rdata); 902 int rule_cnt = rule_idx + 1; 903 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; 904 int cmd = elem->cmd_data.vlan_mac.cmd; 905 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; 906 u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; 907 908 /* Reset the ramrod data buffer for the first rule */ 909 if (rule_idx == 0) 910 memset(data, 0, sizeof(*data)); 911 912 /* Set a rule header */ 913 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN, 914 &rule_entry->vlan.header); 915 916 DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"), 917 vlan); 918 919 /* Set a VLAN itself */ 920 rule_entry->vlan.vlan = cpu_to_le16(vlan); 921 922 /* MOVE: Add a rule that will add this MAC to the target Queue */ 923 if (cmd == BNX2X_VLAN_MAC_MOVE) { 924 rule_entry++; 925 rule_cnt++; 926 927 /* Setup ramrod data */ 928 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, 929 elem->cmd_data.vlan_mac.target_obj, 930 true, CLASSIFY_RULE_OPCODE_VLAN, 931 &rule_entry->vlan.header); 932 933 /* Set a VLAN itself */ 934 rule_entry->vlan.vlan = cpu_to_le16(vlan); 935 } 936 937 /* Set the ramrod data header */ 938 /* TODO: take this to the higher level in order to prevent multiple 939 writing */ 940 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, 941 rule_cnt); 942 } 943 944 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, 945 struct bnx2x_vlan_mac_obj *o, 946 struct bnx2x_exeq_elem *elem, 947 int rule_idx, int cam_offset) 948 { 949 struct bnx2x_raw_obj *raw = &o->raw; 950 struct eth_classify_rules_ramrod_data *data = 951 (struct eth_classify_rules_ramrod_data *)(raw->rdata); 952 int rule_cnt = rule_idx + 1; 953 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; 954 int cmd = elem->cmd_data.vlan_mac.cmd; 955 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? 
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = cpu_to_le16(vlan);
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);

	/* MOVE: Add a rule that will add this pair to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_PAIR,
					&rule_entry->pair.header);

		/* Set a VLAN-MAC pair itself */
		rule_entry->pair.vlan = cpu_to_le16(vlan);
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset:	cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
				       struct bnx2x_vlan_mac_obj *o,
				       struct bnx2x_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}

#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)

/**
 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @bp:		device handle
 * @p:		command parameters
 * @ppos:	pointer to the cookie
 *
 * reconfigure next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
 * into account
 *
 * pointer to the cookie - that should be given back in the next call to make
 * function handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If returned *ppos == NULL this means that the last element has been
 * handled.
 *
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_ramrod_params *p,
				  struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}

/*
 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_mac_ramrod_data *data =
		&elem->cmd_data.vlan_mac.u.vlan_mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}
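
/*
 * Illustrative sketch (not part of the driver): bnx2x_vlan_mac_restore()
 * above is a cursor-style iterator driven by the *ppos cookie. A
 * hypothetical caller replaying a whole registry would loop like:
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *		if (rc < 0)
 *			return rc;
 *	} while (pos != NULL);	// NULL cookie: last element was handled
 */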
/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
		return rc;
	}

	/*
	 * Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
		return -EEXIST;
	}

	/*
	 * TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp:		device handle
 * @qo:		queueable object to check
 * @elem:	element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return -EEXIST.
	 */
	pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
		return -EEXIST;
	}

	/*
	 * Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		BNX2X_ERR("There is a pending MOVE command already\n");
		return -EINVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
		return -EEXIST;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		BNX2X_ERR("Failed to return a credit\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp:		device handle
 * @qo:		queueable object to check (source)
 * @elem:	element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/*
	 * Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(bp, src_o, dest_o,
			       &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
		return -EINVAL;
	}

	/*
	 * Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
		return -EINVAL;
	}

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return -EINVAL;

	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}

static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		return -EINVAL;
	}
}

static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
				 union bnx2x_qable_obj *qo,
				 struct bnx2x_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return 0;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
	case BNX2X_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case BNX2X_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return -EINVAL;
	}

	if (rc != true)
		return -EINVAL;

	return 0;
}
/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 *
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(bp, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 1000);
		else
			return 0;
	}

	return -EBUSY;
}

/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp:			device handle
 * @o:			bnx2x_vlan_mac_obj
 * @cqe:		completion element
 * @ramrod_flags:	if RAMROD_CONT is set, schedule next execution chunk
 *
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}

/**
 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem query, *pos;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

	memcpy(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
		break;
	case BNX2X_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {

		/* Return the credit of the optimized command */
		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
				BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
				return -EINVAL;
			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
				BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
				return -EINVAL;
			}
		}

		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
		   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		   "ADD" : "DEL");
1505 "ADD" : "DEL"); 1506 1507 list_del(&pos->link); 1508 bnx2x_exe_queue_free_elem(bp, pos); 1509 return 1; 1510 } 1511 1512 return 0; 1513 } 1514 1515 /** 1516 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element 1517 * 1518 * @bp: device handle 1519 * @o: 1520 * @elem: 1521 * @restore: 1522 * @re: 1523 * 1524 * prepare a registry element according to the current command request. 1525 */ 1526 static inline int bnx2x_vlan_mac_get_registry_elem( 1527 struct bnx2x *bp, 1528 struct bnx2x_vlan_mac_obj *o, 1529 struct bnx2x_exeq_elem *elem, 1530 bool restore, 1531 struct bnx2x_vlan_mac_registry_elem **re) 1532 { 1533 int cmd = elem->cmd_data.vlan_mac.cmd; 1534 struct bnx2x_vlan_mac_registry_elem *reg_elem; 1535 1536 /* Allocate a new registry element if needed. */ 1537 if (!restore && 1538 ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) { 1539 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC); 1540 if (!reg_elem) 1541 return -ENOMEM; 1542 1543 /* Get a new CAM offset */ 1544 if (!o->get_cam_offset(o, ®_elem->cam_offset)) { 1545 /* 1546 * This shell never happen, because we have checked the 1547 * CAM availiability in the 'validate'. 1548 */ 1549 WARN_ON(1); 1550 kfree(reg_elem); 1551 return -EINVAL; 1552 } 1553 1554 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset); 1555 1556 /* Set a VLAN-MAC data */ 1557 memcpy(®_elem->u, &elem->cmd_data.vlan_mac.u, 1558 sizeof(reg_elem->u)); 1559 1560 /* Copy the flags (needed for DEL and RESTORE flows) */ 1561 reg_elem->vlan_mac_flags = 1562 elem->cmd_data.vlan_mac.vlan_mac_flags; 1563 } else /* DEL, RESTORE */ 1564 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); 1565 1566 *re = reg_elem; 1567 return 0; 1568 } 1569 1570 /** 1571 * bnx2x_execute_vlan_mac - execute vlan mac command 1572 * 1573 * @bp: device handle 1574 * @qo: 1575 * @exe_chunk: 1576 * @ramrod_flags: 1577 * 1578 * go and send a ramrod! 1579 */ 1580 static int bnx2x_execute_vlan_mac(struct bnx2x *bp, 1581 union bnx2x_qable_obj *qo, 1582 struct list_head *exe_chunk, 1583 unsigned long *ramrod_flags) 1584 { 1585 struct bnx2x_exeq_elem *elem; 1586 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; 1587 struct bnx2x_raw_obj *r = &o->raw; 1588 int rc, idx = 0; 1589 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags); 1590 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags); 1591 struct bnx2x_vlan_mac_registry_elem *reg_elem; 1592 int cmd; 1593 1594 /* 1595 * If DRIVER_ONLY execution is requested, cleanup a registry 1596 * and exit. Otherwise send a ramrod to FW. 1597 */ 1598 if (!drv_only) { 1599 WARN_ON(r->check_pending(r)); 1600 1601 /* Set pending */ 1602 r->set_pending(r); 1603 1604 /* Fill tha ramrod data */ 1605 list_for_each_entry(elem, exe_chunk, link) { 1606 cmd = elem->cmd_data.vlan_mac.cmd; 1607 /* 1608 * We will add to the target object in MOVE command, so 1609 * change the object for a CAM search. 
			 */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			WARN_ON(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
			     (cmd == BNX2X_VLAN_MAC_MOVE)))
				list_add(&reg_elem->link, &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(bp, o, elem, idx,
					reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/*
		 * No need for an explicit memory barrier here as long as we
		 * would need to ensure the ordering of writing to the SPQ
		 * element and updating of the SPQ producer which involves a
		 * memory read, and we will have to put a full memory barrier
		 * there (inside bnx2x_sp_post()).
		 */

		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
				   U64_HI(r->rdata_mapping),
				   U64_LO(r->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(bp, o,
						&elem->cmd_data.vlan_mac.u);

			WARN_ON(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			list_del(&reg_elem->link);
			kfree(reg_elem);
		}
	}

	if (!drv_only)
		return 1;
	else
		return 0;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == BNX2X_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
		     (cmd == BNX2X_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(bp, cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				list_del(&reg_elem->link);
				kfree(reg_elem);
			}
		}
	}

	return rc;
}

static inline int bnx2x_vlan_mac_push_new_cmd(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);

	/* Allocate the execution queue element */
	elem = bnx2x_exe_queue_alloc_elem(bp);
	if (!elem)
		return -ENOMEM;

	/* Set the command 'length' */
	switch (p->user_req.cmd) {
	case BNX2X_VLAN_MAC_MOVE:
		elem->cmd_len = 2;
		break;
	default:
		elem->cmd_len = 1;
	}

	/* Fill the object specific info */
	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));

	/* Try to add a new command to the pending list */
	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
}
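
/*
 * Illustrative sketch (not part of the driver): a typical synchronous
 * caller of bnx2x_config_vlan_mac() below fills the ramrod parameters and
 * asks to wait for completion. The object reference ('bp->fp[0].mac_obj')
 * is a made-up example:
 *
 *	struct bnx2x_vlan_mac_ramrod_params p = {0};
 *
 *	p.vlan_mac_obj = &bp->fp[0].mac_obj;	// hypothetical object
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, bp->dev->dev_addr, ETH_ALEN);
 *	__set_bit(BNX2X_ETH_MAC, &p.user_req.vlan_mac_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *	rc = bnx2x_config_vlan_mac(bp, &p);	// 0 on success
 */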
/**
 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @bp:		device handle
 * @p:		command parameters
 *
 */
int bnx2x_config_vlan_mac(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	int rc = 0;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
	struct bnx2x_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
		if (rc)
			return rc;
	}

	/*
	 * If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		rc = 1;

	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/*
	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then the user wants to wait until the last command is done.
	 */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/*
		 * Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(bp, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
						  ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return 0;
	}

	return rc;
}

/**
 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @bp:			device handle
 * @o:			vlan_mac object to delete from
 * @vlan_mac_flags:	flags the elements to delete must match
 * @ramrod_flags:	execution flags to be used for this deletion
 *
 * Returns zero if the last operation has completed successfully and there are
 * no more elements left, positive value if the last operation has completed
 * successfully and there are more previously configured elements, negative
 * value if the current operation has failed.
 */
static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
	int rc = 0;
	struct bnx2x_vlan_mac_ramrod_params p;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;

	/* Clear pending commands first */

	spin_lock_bh(&exeq->lock);

	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
		    *vlan_mac_flags) {
			rc = exeq->remove(bp, exeq->owner, exeq_pos);
			if (rc) {
				BNX2X_ERR("Failed to remove command\n");
				spin_unlock_bh(&exeq->lock);
				return rc;
			}
			list_del(&exeq_pos->link);
		}
	}

	spin_unlock_bh(&exeq->lock);

	/* Prepare a command request */
	memset(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	/*
	 * Add all but the last VLAN-MAC to the execution queue without actually
	 * executing anything.
	 */
	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
	__clear_bit(RAMROD_CONT, &p.ramrod_flags);

	list_for_each_entry(pos, &o->head, link) {
		if (pos->vlan_mac_flags == *vlan_mac_flags) {
			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
			rc = bnx2x_config_vlan_mac(bp, &p);
			if (rc < 0) {
				BNX2X_ERR("Failed to add a new DEL command\n");
				return rc;
			}
		}
	}

	p.ramrod_flags = *ramrod_flags;
	__set_bit(RAMROD_CONT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}

static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
	unsigned long *pstate, bnx2x_obj_type type)
{
	raw->func_id = func_id;
	raw->cid = cid;
	raw->cl_id = cl_id;
	raw->rdata = rdata;
	raw->rdata_mapping = rdata_mapping;
	raw->state = state;
	raw->pstate = pstate;
	raw->obj_type = type;
	raw->check_pending = bnx2x_raw_check_pending;
	raw->clear_pending = bnx2x_raw_clear_pending;
	raw->set_pending = bnx2x_raw_set_pending;
	raw->wait_comp = bnx2x_raw_wait;
}

static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
	int state, unsigned long *pstate, bnx2x_obj_type type,
	struct bnx2x_credit_pool_obj *macs_pool,
	struct bnx2x_credit_pool_obj *vlans_pool)
{
	INIT_LIST_HEAD(&o->head);

	o->macs_pool = macs_pool;
	o->vlans_pool = vlans_pool;

	o->delete_all = bnx2x_vlan_mac_del_all;
	o->restore = bnx2x_vlan_mac_restore;
	o->complete = bnx2x_complete_vlan_mac;
	o->wait = bnx2x_wait_vlan_mac;

	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
			   state, pstate, type);
}
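
/*
 * Illustrative sketch (not part of the driver): delete_all() above queues
 * one DEL per matching registry entry and only then kicks execution with
 * RAMROD_CONT. A hypothetical caller flushing all ETH MACs might do:
 *
 *	unsigned long vlan_mac_flags = 0, ramrod_flags = 0;
 *
 *	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 *	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
 */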
void bnx2x_init_mac_obj(struct bnx2x *bp,
			struct bnx2x_vlan_mac_obj *mac_obj,
			u8 cl_id, u32 cid, u8 func_id, void *rdata,
			dma_addr_t rdata_mapping, int state,
			unsigned long *pstate, bnx2x_obj_type type,
			struct bnx2x_credit_pool_obj *macs_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;

	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, NULL);

	/* CAM credit pool handling */
	mac_obj->get_credit = bnx2x_get_credit_mac;
	mac_obj->put_credit = bnx2x_put_credit_mac;
	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;

	if (CHIP_IS_E1x(bp)) {
		mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
		mac_obj->check_del = bnx2x_check_mac_del;
		mac_obj->check_add = bnx2x_check_mac_add;
		mac_obj->check_move = bnx2x_check_move_always_err;
		mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, 1, qable_obj,
				     bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
	} else {
		mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
		mac_obj->check_del = bnx2x_check_mac_del;
		mac_obj->check_add = bnx2x_check_mac_add;
		mac_obj->check_move = bnx2x_check_move;
		mac_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
		mac_obj->get_n_elements = bnx2x_get_n_elements;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
	}
}

void bnx2x_init_vlan_obj(struct bnx2x *bp,
			 struct bnx2x_vlan_mac_obj *vlan_obj,
			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
			 dma_addr_t rdata_mapping, int state,
			 unsigned long *pstate, bnx2x_obj_type type,
			 struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;

	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type, NULL,
				   vlans_pool);

	vlan_obj->get_credit = bnx2x_get_credit_vlan;
	vlan_obj->put_credit = bnx2x_put_credit_vlan;
	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Do not support chips other than E2 and newer\n");
		BUG();
	} else {
		vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
		vlan_obj->check_del = bnx2x_check_vlan_del;
		vlan_obj->check_add = bnx2x_check_vlan_add;
		vlan_obj->check_move = bnx2x_check_move;
		vlan_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan);
	}
}

void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
			     dma_addr_t rdata_mapping, int state,
			     unsigned long *pstate, bnx2x_obj_type type,
			     struct bnx2x_credit_pool_obj *macs_pool,
			     struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj =
		(union bnx2x_qable_obj *)vlan_mac_obj;

	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, vlans_pool);

	/* CAM pool handling */
void bnx2x_init_vlan_obj(struct bnx2x *bp,
			 struct bnx2x_vlan_mac_obj *vlan_obj,
			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
			 dma_addr_t rdata_mapping, int state,
			 unsigned long *pstate, bnx2x_obj_type type,
			 struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;

	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type, NULL,
				   vlans_pool);

	vlan_obj->get_credit = bnx2x_get_credit_vlan;
	vlan_obj->put_credit = bnx2x_put_credit_vlan;
	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Do not support chips other than E2 and newer\n");
		BUG();
	} else {
		vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
		vlan_obj->check_del = bnx2x_check_vlan_del;
		vlan_obj->check_add = bnx2x_check_vlan_add;
		vlan_obj->check_move = bnx2x_check_move;
		vlan_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan);
	}
}

void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
			     dma_addr_t rdata_mapping, int state,
			     unsigned long *pstate, bnx2x_obj_type type,
			     struct bnx2x_credit_pool_obj *macs_pool,
			     struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj =
		(union bnx2x_qable_obj *)vlan_mac_obj;

	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, vlans_pool);

	/* CAM pool handling */
	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
	/*
	 * CAM offset is relevant for 57710 and 57711 chips only, which have a
	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
	 * will be taken from the MACs' pool object only.
	 */
	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;

	if (CHIP_IS_E1(bp)) {
		BNX2X_ERR("Do not support chips older than E1H\n");
		BUG();
	} else if (CHIP_IS_E1H(bp)) {
		vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
		vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
		vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
		vlan_mac_obj->check_move = bnx2x_check_move_always_err;
		vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_mac_obj->exe_queue, 1, qable_obj,
				     bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan_mac);
	} else {
		vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
		vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
		vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
		vlan_mac_obj->check_move = bnx2x_check_move;
		vlan_mac_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_mac_obj->exe_queue,
				     CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan_mac);
	}

}

/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static inline void __storm_memset_mac_filters(struct bnx2x *bp,
			struct tstorm_eth_mac_filter_config *mac_filters,
			u16 pf_id)
{
	size_t size = sizeof(struct tstorm_eth_mac_filter_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);

	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
}
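/*
 * Note (added for clarity): each field of tstorm_eth_mac_filter_config
 * is a per-client bit mask indexed by cl_id. With cl_id == 3, for
 * example, mask == 0x8 in the function below, so enabling drop-all for
 * unicast ORs 0x8 into ucast_drop_all while disabling it ANDs with ~0x8,
 * leaving the other clients' bits untouched.
 */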
static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
				 struct bnx2x_rx_mode_ramrod_params *p)
{
	/* update the bp MAC filter structure */
	u32 mask = (1 << p->cl_id);

	struct tstorm_eth_mac_filter_config *mac_filters =
		(struct tstorm_eth_mac_filter_config *)p->rdata;

	/* initial setting is drop-all */
	u8 drop_all_ucast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	/* In e1x we only take the rx accept flags into account since tx
	 * switching isn't enabled.
	 */
	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
		/* accept matched ucast */
		drop_all_ucast = 0;

	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
		/* accept (all) bcast */
		accp_all_bcast = 1;
	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
		/* accept unmatched unicasts */
		unmatched_unicast = 1;

	mac_filters->ucast_drop_all = drop_all_ucast ?
		mac_filters->ucast_drop_all | mask :
		mac_filters->ucast_drop_all & ~mask;

	mac_filters->mcast_drop_all = drop_all_mcast ?
		mac_filters->mcast_drop_all | mask :
		mac_filters->mcast_drop_all & ~mask;

	mac_filters->ucast_accept_all = accp_all_ucast ?
		mac_filters->ucast_accept_all | mask :
		mac_filters->ucast_accept_all & ~mask;

	mac_filters->mcast_accept_all = accp_all_mcast ?
		mac_filters->mcast_accept_all | mask :
		mac_filters->mcast_accept_all & ~mask;

	mac_filters->bcast_accept_all = accp_all_bcast ?
		mac_filters->bcast_accept_all | mask :
		mac_filters->bcast_accept_all & ~mask;

	mac_filters->unmatched_unicast = unmatched_unicast ?
		mac_filters->unmatched_unicast | mask :
		mac_filters->unmatched_unicast & ~mask;

	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
	   mac_filters->bcast_accept_all);

	/* write the MAC filter structure */
	__storm_memset_mac_filters(bp, mac_filters, p->func_id);

	/* The operation is completed */
	clear_bit(p->state, p->pstate);
	smp_mb__after_clear_bit();

	return 0;
}

/* Setup ramrod data */
static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
				struct eth_classify_header *hdr,
				u8 rule_cnt)
{
	hdr->echo = cid;
	hdr->rule_cnt = rule_cnt;
}

static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
				unsigned long accept_flags,
				struct eth_filter_rules_cmd *cmd,
				bool clear_accept_all)
{
	u16 state;

	/* start with 'drop-all' */
	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

	if (accept_flags) {
		if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;

		if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

		if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
		}

		if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
			state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
		}
		if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
			state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;

		if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
		}
		if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
			state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
	}

	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
	if (clear_accept_all) {
		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
	}

	cmd->state = cpu_to_le16(state);
}
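/*
 * Example (added for clarity): an accept_flags value with only
 * BNX2X_ACCEPT_UNICAST and BNX2X_ACCEPT_BROADCAST set is translated by
 * the helper above into
 * (ETH_FILTER_RULES_CMD_MCAST_DROP_ALL | ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL):
 * matched unicast is accepted, all multicast is dropped and all
 * broadcast is accepted.
 */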
static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
				struct bnx2x_rx_mode_ramrod_params *p)
{
	struct eth_filter_rules_ramrod_data *data = p->rdata;
	int rc;
	u8 rule_idx = 0;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* Setup ramrod data */

	/* Tx (internal switching) */
	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_TX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
			&(data->rules[rule_idx++]), false);
	}

	/* Rx */
	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_RX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
			&(data->rules[rule_idx++]), false);
	}


	/*
	 * If FCoE Queue configuration has been requested configure the Rx and
	 * internal switching modes for this queue in separate rules.
	 *
	 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
	 */
	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
		/* Tx (internal switching) */
		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_TX_CMD;

			bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
						&(data->rules[rule_idx++]),
						true);
		}

		/* Rx */
		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_RX_CMD;

			bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
						&(data->rules[rule_idx++]),
						true);
		}
	}

	/*
	 * Set the ramrod header (most importantly - number of rules to
	 * configure).
	 */
	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);

	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
	   data->header.rule_cnt, p->rx_accept_flags,
	   p->tx_accept_flags);

	/*
	 * No need for an explicit memory barrier here: we need to order the
	 * write to the SPQ element against the update of the SPQ producer,
	 * that update involves a memory read, and a full memory barrier is
	 * already put there (inside bnx2x_sp_post()).
	 */
	/* Send a ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
			   U64_HI(p->rdata_mapping),
			   U64_LO(p->rdata_mapping),
			   ETH_CONNECTION_TYPE);
	if (rc)
		return rc;

	/* Ramrod completion is pending */
	return 1;
}

static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
				      struct bnx2x_rx_mode_ramrod_params *p)
{
	return bnx2x_state_wait(bp, p->state, p->pstate);
}

static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
				    struct bnx2x_rx_mode_ramrod_params *p)
{
	/* Do nothing */
	return 0;
}

int bnx2x_config_rx_mode(struct bnx2x *bp,
			 struct bnx2x_rx_mode_ramrod_params *p)
{
	int rc;

	/* Configure the new classification in the chip */
	rc = p->rx_mode_obj->config_rx_mode(bp, p);
	if (rc < 0)
		return rc;

	/* Wait for a ramrod completion if it was requested */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		rc = p->rx_mode_obj->wait_comp(bp, p);
		if (rc)
			return rc;
	}

	return rc;
}

void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
			    struct bnx2x_rx_mode_obj *o)
{
	if (CHIP_IS_E1x(bp)) {
		o->wait_comp = bnx2x_empty_rx_mode_wait;
		o->config_rx_mode = bnx2x_set_rx_mode_e1x;
	} else {
		o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
		o->config_rx_mode = bnx2x_set_rx_mode_e2;
	}
}

/********************* Multicast verbs: SET, CLEAR ****************************/
static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
{
	return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
}
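/*
 * Note (added for clarity): the bin index is the top byte of the CRC32C
 * of the MAC address, i.e. a value in [0, 255]. These 256 bins are
 * mirrored in the registry.aprox_match.vec bit vector, which the helpers
 * below iterate over in 64-bit chunks.
 */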
struct bnx2x_mcast_mac_elem {
	struct list_head link;
	u8 mac[ETH_ALEN];
	u8 pad[2]; /* For a natural alignment of the following buffer */
};

struct bnx2x_pending_mcast_cmd {
	struct list_head link;
	int type; /* BNX2X_MCAST_CMD_X */
	union {
		struct list_head macs_head;
		u32 macs_num; /* Needed for DEL command */
		int next_bin; /* Needed for RESTORE flow with aprox match */
	} data;

	bool done; /* set to true, when the command has been handled,
		    * practically used in 57712 handling only, where one pending
		    * command may be handled in a few operations. As long as for
		    * other chips every operation handling is completed in a
		    * single ramrod, there is no need to utilize this field.
		    */
};

static int bnx2x_mcast_wait(struct bnx2x *bp,
			    struct bnx2x_mcast_obj *o)
{
	if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
	    o->raw.wait_comp(bp, &o->raw))
		return -EBUSY;

	return 0;
}

static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
				   struct bnx2x_mcast_obj *o,
				   struct bnx2x_mcast_ramrod_params *p,
				   int cmd)
{
	int total_sz;
	struct bnx2x_pending_mcast_cmd *new_cmd;
	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
	struct bnx2x_mcast_list_elem *pos;
	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
			     p->mcast_list_len : 0);

	/* If the command is empty ("handle pending commands only"), break */
	if (!p->mcast_list_len)
		return 0;

	total_sz = sizeof(*new_cmd) +
		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);

	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
	new_cmd = kzalloc(total_sz, GFP_ATOMIC);

	if (!new_cmd)
		return -ENOMEM;

	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
	   cmd, macs_list_len);

	INIT_LIST_HEAD(&new_cmd->data.macs_head);

	new_cmd->type = cmd;
	new_cmd->done = false;

	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		cur_mac = (struct bnx2x_mcast_mac_elem *)
			  ((u8 *)new_cmd + sizeof(*new_cmd));

		/* Push the MACs of the current command into the pending
		 * command MACs list: FIFO
		 */
		list_for_each_entry(pos, &p->mcast_list, link) {
			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
			cur_mac++;
		}

		break;

	case BNX2X_MCAST_CMD_DEL:
		new_cmd->data.macs_num = p->mcast_list_len;
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		new_cmd->data.next_bin = 0;
		break;

	default:
		/* don't leak the just-allocated command on an unknown opcode */
		kfree(new_cmd);
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* Push the new pending command to the tail of the pending list: FIFO */
	list_add_tail(&new_cmd->link, &o->pending_cmds_head);

	o->set_sched(o);

	return 1;
}
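/*
 * Note (added for clarity): a return value of 1 from the function above
 * tells the caller that the command was queued rather than executed;
 * o->set_sched() marks the object so that bnx2x_config_mcast() keeps
 * treating it as having outstanding work until the pending list drains.
 */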
/**
 * bnx2x_mcast_get_next_bin - get the next set bin (index)
 *
 * @o:		multicast object
 * @last:	index to start looking from (including)
 *
 * returns the next found (set) bin or a negative value if none is found.
 */
static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
{
	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;

	for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
		if (o->registry.aprox_match.vec[i])
			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
						       vec, cur_bit)) {
					return cur_bit;
				}
			}
		inner_start = 0;
	}

	/* None found */
	return -1;
}

/**
 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
 *
 * @o:		multicast object
 *
 * returns the index of the found bin or -1 if none is found
 */
static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
{
	int cur_bit = bnx2x_mcast_get_next_bin(o, 0);

	if (cur_bit >= 0)
		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);

	return cur_bit;
}

static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	u8 rx_tx_flag = 0;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;

	return rx_tx_flag;
}
static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
					struct bnx2x_mcast_obj *o, int idx,
					union bnx2x_mcast_config_data *cfg_data,
					int cmd)
{
	struct bnx2x_raw_obj *r = &o->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
	u8 func_id = r->func_id;
	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
	int bin;

	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;

	data->rules[idx].cmd_general_data |= rx_tx_add_flag;

	/* Get a bin and update a bins' vector */
	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
		break;

	case BNX2X_MCAST_CMD_DEL:
		/* If there were no more bins to clear
		 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
		 * clear any (0xff) bin.
		 * See bnx2x_mcast_validate_e2() for an explanation of when
		 * this may happen.
		 */
		bin = bnx2x_mcast_clear_first_bin(o);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		bin = cfg_data->bin;
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return;
	}

	DP(BNX2X_MSG_SP, "%s bin %d\n",
	   ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
	    "Setting" : "Clearing"), bin);

	data->rules[idx].bin_id = (u8)bin;
	data->rules[idx].func_id = func_id;
	data->rules[idx].engine_id = o->engine_id;
}

/**
 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
 *
 * @bp:		device handle
 * @o:		multicast object
 * @start_bin:	index in the registry to start from (including)
 * @rdata_idx:	index in the ramrod data to start from
 *
 * returns last handled bin index or -1 if all bins have been handled
 */
static inline int bnx2x_mcast_handle_restore_cmd_e2(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
	int *rdata_idx)
{
	int cur_bin, cnt = *rdata_idx;
	union bnx2x_mcast_config_data cfg_data = {0};

	/* go through the registry and configure the bins from it */
	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
	     cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {

		cfg_data.bin = (u8)cur_bin;
		o->set_one_rule(bp, o, cnt, &cfg_data,
				BNX2X_MCAST_CMD_RESTORE);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);

		/* Break if we reached the maximum number
		 * of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*rdata_idx = cnt;

	return cur_bin;
}

static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
	int cnt = *line_idx;
	union bnx2x_mcast_config_data cfg_data = {0};

	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
				 link) {

		cfg_data.mac = &pmac_pos->mac[0];
		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   pmac_pos->mac);

		list_del(&pmac_pos->link);

		/* Break if we reached the maximum number
		 * of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*line_idx = cnt;

	/* if no more MACs to configure - we are done */
	if (list_empty(&cmd_pos->data.macs_head))
		cmd_pos->done = true;
}
static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	int cnt = *line_idx;

	while (cmd_pos->data.macs_num) {
		o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);

		cnt++;

		cmd_pos->data.macs_num--;

		DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
		   cmd_pos->data.macs_num, cnt);

		/* Break if we reached the maximum
		 * number of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*line_idx = cnt;

	/* If we cleared all bins - we are done */
	if (!cmd_pos->data.macs_num)
		cmd_pos->done = true;
}

static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
						line_idx);

	if (cmd_pos->data.next_bin < 0)
		/* If o->hdl_restore returned -1 we are done */
		cmd_pos->done = true;
	else
		/* Start from the next bin next time */
		cmd_pos->data.next_bin++;
}

static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
	int cnt = 0;
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
				 link) {
		switch (cmd_pos->type) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_DEL:
			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
							   &cnt);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
			return -EINVAL;
		}

		/* If the command has been completed - remove it from the list
		 * and free the memory
		 */
		if (cmd_pos->done) {
			list_del(&cmd_pos->link);
			kfree(cmd_pos);
		}

		/* Break if we reached the maximum number of rules */
		if (cnt >= o->max_cmd_len)
			break;
	}

	return cnt;
}

static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
	int *line_idx)
{
	struct bnx2x_mcast_list_elem *mlist_pos;
	union bnx2x_mcast_config_data cfg_data = {0};
	int cnt = *line_idx;

	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
		cfg_data.mac = mlist_pos->mac;
		o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   mlist_pos->mac);
	}

	*line_idx = cnt;
}

static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
	int *line_idx)
{
	int cnt = *line_idx, i;

	for (i = 0; i < p->mcast_list_len; i++) {
		o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);

		cnt++;

		DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
		   p->mcast_list_len - i - 1);
	}

	*line_idx = cnt;
}
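/*
 * Note (added for clarity): a DEL request carries no MACs - it is
 * executed as p->mcast_list_len "clear one bin" rules, and the concrete
 * bins are picked off the registry by bnx2x_mcast_clear_first_bin()
 * inside bnx2x_mcast_set_one_rule_e2().
 */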
/**
 * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command
 *
 * @bp:		device handle
 * @p:		multicast ramrod parameters
 * @cmd:	command to handle (BNX2X_MCAST_CMD_X)
 * @start_cnt:	first line in the ramrod data that may be used
 *
 * This function is called iff there is enough place for the current command in
 * the ramrod data.
 * Returns number of lines filled in the ramrod data in total.
 */
static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
			struct bnx2x_mcast_ramrod_params *p, int cmd,
			int start_cnt)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int cnt = start_cnt;

	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);

	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
		break;

	case BNX2X_MCAST_CMD_DEL:
		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		o->hdl_restore(bp, o, 0, &cnt);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* The current command has been handled */
	p->mcast_list_len = 0;

	return cnt;
}

static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   int cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break */

	/* RESTORE command will restore the entire multicast configuration */
	case BNX2X_MCAST_CMD_RESTORE:
		/* Here we set the approximate amount of work to do, which may
		 * in fact turn out to be less, as some MACs in postponed ADD
		 * command(s) scheduled before this command may fall into
		 * the same bin and the actual number of bins set in the
		 * registry would be less than we estimated here. See
		 * bnx2x_mcast_set_one_rule_e2() for further details.
		 */
		p->mcast_list_len = reg_sz;
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* Here we assume that all new MACs will fall into new bins.
		 * However we will correct the real registry size after we
		 * handle all pending commands.
		 */
		o->set_registry_size(o, reg_sz + p->mcast_list_len);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;

	}

	/* Increase the total number of MACs pending to be configured */
	o->total_pending_num += p->mcast_list_len;

	return 0;
}

static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
				  struct bnx2x_mcast_ramrod_params *p,
				  int old_num_bins)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	o->set_registry_size(o, old_num_bins);
	o->total_pending_num -= p->mcast_list_len;
}

/**
 * bnx2x_mcast_set_rdata_hdr_e2 - sets the header values
 *
 * @bp:		device handle
 * @p:		multicast ramrod parameters
 * @len:	number of rules to handle
 */
static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
					struct bnx2x_mcast_ramrod_params *p,
					u8 len)
{
	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);

	data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
			     (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
	data->header.rule_cnt = len;
}
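/*
 * Example (added for clarity): the refresh helper below uses Brian
 * Kernighan's trick - "elem &= elem - 1" clears the lowest set bit of
 * elem, so for elem == 0xb0 the inner loop runs exactly three times,
 * once per set bit, rather than once per bit position.
 */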
/**
 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
 *
 * @bp:		device handle
 * @o:		multicast object
 *
 * Recalculate the actual number of set bins in the registry using Brian
 * Kernighan's algorithm: its execution complexity is proportional to the
 * number of set bins.
 *
 * returns 0 for compliance with bnx2x_mcast_refresh_registry_e1().
 */
static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
						  struct bnx2x_mcast_obj *o)
{
	int i, cnt = 0;
	u64 elem;

	for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
		elem = o->registry.aprox_match.vec[i];
		for (; elem; cnt++)
			elem &= elem - 1;
	}

	o->set_registry_size(o, cnt);

	return 0;
}

static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				int cmd)
{
	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
	int cnt = 0, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there was enough room in the ramrod
	 * data for all pending commands and for the current
	 * command. Otherwise the current command would have been added
	 * to the pending commands and p->mcast_list_len would have been
	 * zeroed.
	 */
	if (p->mcast_list_len > 0)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);

	/* We've pulled out some MACs - update the total number of
	 * outstanding.
	 */
	o->total_pending_num -= cnt;

	/* send a ramrod */
	WARN_ON(o->total_pending_num < 0);
	WARN_ON(cnt > o->max_cmd_len);

	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);

	/* Update a registry size if there are no more pending operations.
	 *
	 * We don't want to change the value of the registry size if there are
	 * pending operations because we want it to always be equal to the
	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
	 * set bins after the last requested operation in order to properly
	 * evaluate the size of the next DEL/RESTORE operation.
	 *
	 * Note that we update the registry itself during command(s) handling
	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
	 * with a limited amount of update commands (per MAC/bin) and we don't
	 * know in this scope what the actual state of bins configuration is
	 * going to be after this ramrod.
	 */
	if (!o->total_pending_num)
		bnx2x_mcast_refresh_registry_e2(bp, o);

	/*
	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/*
		 * No need for an explicit memory barrier here: we need to
		 * order the write to the SPQ element against the update of
		 * the SPQ producer, that update involves a memory read, and
		 * a full memory barrier is already put there (inside
		 * bnx2x_sp_post()).
		 */
		/* Send a ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
				   raw->cid, U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}

static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
				    struct bnx2x_mcast_ramrod_params *p,
				    int cmd)
{
	/* Mark, that there is a work to do */
	if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
		p->mcast_list_len = 1;

	return 0;
}

static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   int old_num_bins)
{
	/* Do nothing */
}

#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
do { \
	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
} while (0)

static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
					   struct bnx2x_mcast_obj *o,
					   struct bnx2x_mcast_ramrod_params *p,
					   u32 *mc_filter)
{
	struct bnx2x_mcast_list_elem *mlist_pos;
	int bit;

	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
		bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
		   mlist_pos->mac, bit);

		/* bookkeeping... */
		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
				  bit);
	}
}

static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
	u32 *mc_filter)
{
	int bit;

	for (bit = bnx2x_mcast_get_next_bin(o, 0);
	     bit >= 0;
	     bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
		DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
	}
}

/* On 57711 we write the multicast MACs' approximate match
 * table directly into the TSTORM's internal RAM. So we don't
 * really need to handle any tricks to make it work.
 */
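/*
 * Example (added for clarity): BNX2X_57711_SET_MC_FILTER() maps a bin
 * index onto the MC_HASH_SIZE array of 32-bit words - bin 200, say,
 * lands in word 200 >> 5 == 6, bit 200 & 0x1f == 8.
 */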
3127 */ 3128 switch (cmd) { 3129 case BNX2X_MCAST_CMD_ADD: 3130 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter); 3131 break; 3132 3133 case BNX2X_MCAST_CMD_DEL: 3134 DP(BNX2X_MSG_SP, 3135 "Invalidating multicast MACs configuration\n"); 3136 3137 /* clear the registry */ 3138 memset(o->registry.aprox_match.vec, 0, 3139 sizeof(o->registry.aprox_match.vec)); 3140 break; 3141 3142 case BNX2X_MCAST_CMD_RESTORE: 3143 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter); 3144 break; 3145 3146 default: 3147 BNX2X_ERR("Unknown command: %d\n", cmd); 3148 return -EINVAL; 3149 } 3150 3151 /* Set the mcast filter in the internal memory */ 3152 for (i = 0; i < MC_HASH_SIZE; i++) 3153 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]); 3154 } else 3155 /* clear the registry */ 3156 memset(o->registry.aprox_match.vec, 0, 3157 sizeof(o->registry.aprox_match.vec)); 3158 3159 /* We are done */ 3160 r->clear_pending(r); 3161 3162 return 0; 3163 } 3164 3165 static int bnx2x_mcast_validate_e1(struct bnx2x *bp, 3166 struct bnx2x_mcast_ramrod_params *p, 3167 int cmd) 3168 { 3169 struct bnx2x_mcast_obj *o = p->mcast_obj; 3170 int reg_sz = o->get_registry_size(o); 3171 3172 switch (cmd) { 3173 /* DEL command deletes all currently configured MACs */ 3174 case BNX2X_MCAST_CMD_DEL: 3175 o->set_registry_size(o, 0); 3176 /* Don't break */ 3177 3178 /* RESTORE command will restore the entire multicast configuration */ 3179 case BNX2X_MCAST_CMD_RESTORE: 3180 p->mcast_list_len = reg_sz; 3181 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n", 3182 cmd, p->mcast_list_len); 3183 break; 3184 3185 case BNX2X_MCAST_CMD_ADD: 3186 case BNX2X_MCAST_CMD_CONT: 3187 /* Multicast MACs on 57710 are configured as unicast MACs and 3188 * there is only a limited number of CAM entries for that 3189 * matter. 3190 */ 3191 if (p->mcast_list_len > o->max_cmd_len) { 3192 BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n", 3193 o->max_cmd_len); 3194 return -EINVAL; 3195 } 3196 /* Every configured MAC should be cleared if DEL command is 3197 * called. Only the last ADD command is relevant as long as 3198 * every ADD commands overrides the previous configuration. 3199 */ 3200 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len); 3201 if (p->mcast_list_len > 0) 3202 o->set_registry_size(o, p->mcast_list_len); 3203 3204 break; 3205 3206 default: 3207 BNX2X_ERR("Unknown command: %d\n", cmd); 3208 return -EINVAL; 3209 3210 } 3211 3212 /* We want to ensure that commands are executed one by one for 57710. 3213 * Therefore each none-empty command will consume o->max_cmd_len. 3214 */ 3215 if (p->mcast_list_len) 3216 o->total_pending_num += o->max_cmd_len; 3217 3218 return 0; 3219 } 3220 3221 static void bnx2x_mcast_revert_e1(struct bnx2x *bp, 3222 struct bnx2x_mcast_ramrod_params *p, 3223 int old_num_macs) 3224 { 3225 struct bnx2x_mcast_obj *o = p->mcast_obj; 3226 3227 o->set_registry_size(o, old_num_macs); 3228 3229 /* If current command hasn't been handled yet and we are 3230 * here means that it's meant to be dropped and we have to 3231 * update the number of outstandling MACs accordingly. 
3232 */ 3233 if (p->mcast_list_len) 3234 o->total_pending_num -= o->max_cmd_len; 3235 } 3236 3237 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp, 3238 struct bnx2x_mcast_obj *o, int idx, 3239 union bnx2x_mcast_config_data *cfg_data, 3240 int cmd) 3241 { 3242 struct bnx2x_raw_obj *r = &o->raw; 3243 struct mac_configuration_cmd *data = 3244 (struct mac_configuration_cmd *)(r->rdata); 3245 3246 /* copy mac */ 3247 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) { 3248 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr, 3249 &data->config_table[idx].middle_mac_addr, 3250 &data->config_table[idx].lsb_mac_addr, 3251 cfg_data->mac); 3252 3253 data->config_table[idx].vlan_id = 0; 3254 data->config_table[idx].pf_id = r->func_id; 3255 data->config_table[idx].clients_bit_vector = 3256 cpu_to_le32(1 << r->cl_id); 3257 3258 SET_FLAG(data->config_table[idx].flags, 3259 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 3260 T_ETH_MAC_COMMAND_SET); 3261 } 3262 } 3263 3264 /** 3265 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd 3266 * 3267 * @bp: device handle 3268 * @p: 3269 * @len: number of rules to handle 3270 */ 3271 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp, 3272 struct bnx2x_mcast_ramrod_params *p, 3273 u8 len) 3274 { 3275 struct bnx2x_raw_obj *r = &p->mcast_obj->raw; 3276 struct mac_configuration_cmd *data = 3277 (struct mac_configuration_cmd *)(r->rdata); 3278 3279 u8 offset = (CHIP_REV_IS_SLOW(bp) ? 3280 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) : 3281 BNX2X_MAX_MULTICAST*(1 + r->func_id)); 3282 3283 data->hdr.offset = offset; 3284 data->hdr.client_id = 0xff; 3285 data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) | 3286 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); 3287 data->hdr.length = len; 3288 } 3289 3290 /** 3291 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710 3292 * 3293 * @bp: device handle 3294 * @o: 3295 * @start_idx: index in the registry to start from 3296 * @rdata_idx: index in the ramrod data to start from 3297 * 3298 * restore command for 57710 is like all other commands - always a stand alone 3299 * command - start_idx and rdata_idx will always be 0. This function will always 3300 * succeed. 3301 * returns -1 to comply with 57712 variant. 3302 */ 3303 static inline int bnx2x_mcast_handle_restore_cmd_e1( 3304 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx, 3305 int *rdata_idx) 3306 { 3307 struct bnx2x_mcast_mac_elem *elem; 3308 int i = 0; 3309 union bnx2x_mcast_config_data cfg_data = {0}; 3310 3311 /* go through the registry and configure the MACs from it. 
/**
 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
 *
 * @bp:		device handle
 * @o:		multicast object
 * @start_idx:	index in the registry to start from
 * @rdata_idx:	index in the ramrod data to start from
 *
 * restore command for 57710 is like all other commands - always a standalone
 * command - start_idx and rdata_idx will always be 0. This function will
 * always succeed.
 * returns -1 to comply with the 57712 variant.
 */
static inline int bnx2x_mcast_handle_restore_cmd_e1(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
	int *rdata_idx)
{
	struct bnx2x_mcast_mac_elem *elem;
	int i = 0;
	union bnx2x_mcast_config_data cfg_data = {0};

	/* go through the registry and configure the MACs from it. */
	list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
		cfg_data.mac = &elem->mac[0];
		o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);

		i++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   cfg_data.mac);
	}

	*rdata_idx = i;

	return -1;
}


static inline int bnx2x_mcast_handle_pending_cmds_e1(
	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos;
	struct bnx2x_mcast_mac_elem *pmac_pos;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	union bnx2x_mcast_config_data cfg_data = {0};
	int cnt = 0;


	/* If nothing to be done - return */
	if (list_empty(&o->pending_cmds_head))
		return 0;

	/* Handle the first command */
	cmd_pos = list_first_entry(&o->pending_cmds_head,
				   struct bnx2x_pending_mcast_cmd, link);

	switch (cmd_pos->type) {
	case BNX2X_MCAST_CMD_ADD:
		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
			cfg_data.mac = &pmac_pos->mac[0];
			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

			cnt++;

			DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
			   pmac_pos->mac);
		}
		break;

	case BNX2X_MCAST_CMD_DEL:
		cnt = cmd_pos->data.macs_num;
		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		o->hdl_restore(bp, o, 0, &cnt);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
		return -EINVAL;
	}

	list_del(&cmd_pos->link);
	kfree(cmd_pos);

	return cnt;
}

/**
 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
 *
 * @fw_hi:	MSB 16 bits of the MAC in FW format
 * @fw_mid:	middle 16 bits of the MAC in FW format
 * @fw_lo:	LSB 16 bits of the MAC in FW format
 * @mac:	buffer to return the MAC address in
 */
static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
					 __le16 *fw_lo, u8 *mac)
{
	mac[1] = ((u8 *)fw_hi)[0];
	mac[0] = ((u8 *)fw_hi)[1];
	mac[3] = ((u8 *)fw_mid)[0];
	mac[2] = ((u8 *)fw_mid)[1];
	mac[5] = ((u8 *)fw_lo)[0];
	mac[4] = ((u8 *)fw_lo)[1];
}
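/*
 * Example (added for clarity): the helper above swaps the two bytes of
 * each 16-bit firmware word, so for the MAC 00:11:22:33:44:55 the
 * firmware words hold 11:00 (hi), 33:22 (mid) and 55:44 (lo).
 */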
3408 */ 3409 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, 3410 struct bnx2x_mcast_obj *o) 3411 { 3412 struct bnx2x_raw_obj *raw = &o->raw; 3413 struct bnx2x_mcast_mac_elem *elem; 3414 struct mac_configuration_cmd *data = 3415 (struct mac_configuration_cmd *)(raw->rdata); 3416 3417 /* If first entry contains a SET bit - the command was ADD, 3418 * otherwise - DEL_ALL 3419 */ 3420 if (GET_FLAG(data->config_table[0].flags, 3421 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) { 3422 int i, len = data->hdr.length; 3423 3424 /* Break if it was a RESTORE command */ 3425 if (!list_empty(&o->registry.exact_match.macs)) 3426 return 0; 3427 3428 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC); 3429 if (!elem) { 3430 BNX2X_ERR("Failed to allocate registry memory\n"); 3431 return -ENOMEM; 3432 } 3433 3434 for (i = 0; i < len; i++, elem++) { 3435 bnx2x_get_fw_mac_addr( 3436 &data->config_table[i].msb_mac_addr, 3437 &data->config_table[i].middle_mac_addr, 3438 &data->config_table[i].lsb_mac_addr, 3439 elem->mac); 3440 DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n", 3441 elem->mac); 3442 list_add_tail(&elem->link, 3443 &o->registry.exact_match.macs); 3444 } 3445 } else { 3446 elem = list_first_entry(&o->registry.exact_match.macs, 3447 struct bnx2x_mcast_mac_elem, link); 3448 DP(BNX2X_MSG_SP, "Deleting a registry\n"); 3449 kfree(elem); 3450 INIT_LIST_HEAD(&o->registry.exact_match.macs); 3451 } 3452 3453 return 0; 3454 } 3455 3456 static int bnx2x_mcast_setup_e1(struct bnx2x *bp, 3457 struct bnx2x_mcast_ramrod_params *p, 3458 int cmd) 3459 { 3460 struct bnx2x_mcast_obj *o = p->mcast_obj; 3461 struct bnx2x_raw_obj *raw = &o->raw; 3462 struct mac_configuration_cmd *data = 3463 (struct mac_configuration_cmd *)(raw->rdata); 3464 int cnt = 0, i, rc; 3465 3466 /* Reset the ramrod data buffer */ 3467 memset(data, 0, sizeof(*data)); 3468 3469 /* First set all entries as invalid */ 3470 for (i = 0; i < o->max_cmd_len ; i++) 3471 SET_FLAG(data->config_table[i].flags, 3472 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 3473 T_ETH_MAC_COMMAND_INVALIDATE); 3474 3475 /* Handle pending commands first */ 3476 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p); 3477 3478 /* If there are no more pending commands - clear SCHEDULED state */ 3479 if (list_empty(&o->pending_cmds_head)) 3480 o->clear_sched(o); 3481 3482 /* The below may be true iff there were no pending commands */ 3483 if (!cnt) 3484 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0); 3485 3486 /* For 57710 every command has o->max_cmd_len length to ensure that 3487 * commands are done one at a time. 3488 */ 3489 o->total_pending_num -= o->max_cmd_len; 3490 3491 /* send a ramrod */ 3492 3493 WARN_ON(cnt > o->max_cmd_len); 3494 3495 /* Set ramrod header (in particular, a number of entries to update) */ 3496 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt); 3497 3498 /* update a registry: we need the registry contents to be always up 3499 * to date in order to be able to execute a RESTORE opcode. Here 3500 * we use the fact that for 57710 we sent one command at a time 3501 * hence we may take the registry update out of the command handling 3502 * and do it in a simpler way here. 3503 */ 3504 rc = bnx2x_mcast_refresh_registry_e1(bp, o); 3505 if (rc) 3506 return rc; 3507 3508 /* 3509 * If CLEAR_ONLY was requested - don't send a ramrod and clear 3510 * RAMROD_PENDING status immediately. 
3511 */ 3512 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 3513 raw->clear_pending(raw); 3514 return 0; 3515 } else { 3516 /* 3517 * No need for an explicit memory barrier here as long we would 3518 * need to ensure the ordering of writing to the SPQ element 3519 * and updating of the SPQ producer which involves a memory 3520 * read and we will have to put a full memory barrier there 3521 * (inside bnx2x_sp_post()). 3522 */ 3523 3524 /* Send a ramrod */ 3525 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid, 3526 U64_HI(raw->rdata_mapping), 3527 U64_LO(raw->rdata_mapping), 3528 ETH_CONNECTION_TYPE); 3529 if (rc) 3530 return rc; 3531 3532 /* Ramrod completion is pending */ 3533 return 1; 3534 } 3535 3536 } 3537 3538 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o) 3539 { 3540 return o->registry.exact_match.num_macs_set; 3541 } 3542 3543 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o) 3544 { 3545 return o->registry.aprox_match.num_bins_set; 3546 } 3547 3548 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o, 3549 int n) 3550 { 3551 o->registry.exact_match.num_macs_set = n; 3552 } 3553 3554 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o, 3555 int n) 3556 { 3557 o->registry.aprox_match.num_bins_set = n; 3558 } 3559 3560 int bnx2x_config_mcast(struct bnx2x *bp, 3561 struct bnx2x_mcast_ramrod_params *p, 3562 int cmd) 3563 { 3564 struct bnx2x_mcast_obj *o = p->mcast_obj; 3565 struct bnx2x_raw_obj *r = &o->raw; 3566 int rc = 0, old_reg_size; 3567 3568 /* This is needed to recover number of currently configured mcast macs 3569 * in case of failure. 3570 */ 3571 old_reg_size = o->get_registry_size(o); 3572 3573 /* Do some calculations and checks */ 3574 rc = o->validate(bp, p, cmd); 3575 if (rc) 3576 return rc; 3577 3578 /* Return if there is no work to do */ 3579 if ((!p->mcast_list_len) && (!o->check_sched(o))) 3580 return 0; 3581 3582 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n", 3583 o->total_pending_num, p->mcast_list_len, o->max_cmd_len); 3584 3585 /* Enqueue the current command to the pending list if we can't complete 3586 * it in the current iteration 3587 */ 3588 if (r->check_pending(r) || 3589 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) { 3590 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd); 3591 if (rc < 0) 3592 goto error_exit1; 3593 3594 /* As long as the current command is in a command list we 3595 * don't need to handle it separately. 
3596 */ 3597 p->mcast_list_len = 0; 3598 } 3599 3600 if (!r->check_pending(r)) { 3601 3602 /* Set 'pending' state */ 3603 r->set_pending(r); 3604 3605 /* Configure the new classification in the chip */ 3606 rc = o->config_mcast(bp, p, cmd); 3607 if (rc < 0) 3608 goto error_exit2; 3609 3610 /* Wait for a ramrod completion if was requested */ 3611 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) 3612 rc = o->wait_comp(bp, o); 3613 } 3614 3615 return rc; 3616 3617 error_exit2: 3618 r->clear_pending(r); 3619 3620 error_exit1: 3621 o->revert(bp, p, old_reg_size); 3622 3623 return rc; 3624 } 3625 3626 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o) 3627 { 3628 smp_mb__before_clear_bit(); 3629 clear_bit(o->sched_state, o->raw.pstate); 3630 smp_mb__after_clear_bit(); 3631 } 3632 3633 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o) 3634 { 3635 smp_mb__before_clear_bit(); 3636 set_bit(o->sched_state, o->raw.pstate); 3637 smp_mb__after_clear_bit(); 3638 } 3639 3640 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o) 3641 { 3642 return !!test_bit(o->sched_state, o->raw.pstate); 3643 } 3644 3645 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o) 3646 { 3647 return o->raw.check_pending(&o->raw) || o->check_sched(o); 3648 } 3649 3650 void bnx2x_init_mcast_obj(struct bnx2x *bp, 3651 struct bnx2x_mcast_obj *mcast_obj, 3652 u8 mcast_cl_id, u32 mcast_cid, u8 func_id, 3653 u8 engine_id, void *rdata, dma_addr_t rdata_mapping, 3654 int state, unsigned long *pstate, bnx2x_obj_type type) 3655 { 3656 memset(mcast_obj, 0, sizeof(*mcast_obj)); 3657 3658 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id, 3659 rdata, rdata_mapping, state, pstate, type); 3660 3661 mcast_obj->engine_id = engine_id; 3662 3663 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head); 3664 3665 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED; 3666 mcast_obj->check_sched = bnx2x_mcast_check_sched; 3667 mcast_obj->set_sched = bnx2x_mcast_set_sched; 3668 mcast_obj->clear_sched = bnx2x_mcast_clear_sched; 3669 3670 if (CHIP_IS_E1(bp)) { 3671 mcast_obj->config_mcast = bnx2x_mcast_setup_e1; 3672 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; 3673 mcast_obj->hdl_restore = 3674 bnx2x_mcast_handle_restore_cmd_e1; 3675 mcast_obj->check_pending = bnx2x_mcast_check_pending; 3676 3677 if (CHIP_REV_IS_SLOW(bp)) 3678 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI; 3679 else 3680 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST; 3681 3682 mcast_obj->wait_comp = bnx2x_mcast_wait; 3683 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1; 3684 mcast_obj->validate = bnx2x_mcast_validate_e1; 3685 mcast_obj->revert = bnx2x_mcast_revert_e1; 3686 mcast_obj->get_registry_size = 3687 bnx2x_mcast_get_registry_size_exact; 3688 mcast_obj->set_registry_size = 3689 bnx2x_mcast_set_registry_size_exact; 3690 3691 /* 57710 is the only chip that uses the exact match for mcast 3692 * at the moment. 3693 */ 3694 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs); 3695 3696 } else if (CHIP_IS_E1H(bp)) { 3697 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h; 3698 mcast_obj->enqueue_cmd = NULL; 3699 mcast_obj->hdl_restore = NULL; 3700 mcast_obj->check_pending = bnx2x_mcast_check_pending; 3701 3702 /* 57711 doesn't send a ramrod, so it has unlimited credit 3703 * for one command. 
void bnx2x_init_mcast_obj(struct bnx2x *bp,
			  struct bnx2x_mcast_obj *mcast_obj,
			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
			  int state, unsigned long *pstate, bnx2x_obj_type type)
{
	memset(mcast_obj, 0, sizeof(*mcast_obj));

	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
			   rdata, rdata_mapping, state, pstate, type);

	mcast_obj->engine_id = engine_id;

	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);

	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
	mcast_obj->check_sched = bnx2x_mcast_check_sched;
	mcast_obj->set_sched = bnx2x_mcast_set_sched;
	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;

	if (CHIP_IS_E1(bp)) {
		mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
		mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore =
			bnx2x_mcast_handle_restore_cmd_e1;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;

		if (CHIP_REV_IS_SLOW(bp))
			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
		else
			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;

		mcast_obj->wait_comp = bnx2x_mcast_wait;
		mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
		mcast_obj->validate = bnx2x_mcast_validate_e1;
		mcast_obj->revert = bnx2x_mcast_revert_e1;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_exact;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_exact;

		/* 57710 is the only chip that uses the exact match for mcast
		 * at the moment.
		 */
		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);

	} else if (CHIP_IS_E1H(bp)) {
		mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
		mcast_obj->enqueue_cmd = NULL;
		mcast_obj->hdl_restore = NULL;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;

		/* 57711 doesn't send a ramrod, so it has unlimited credit
		 * for one command.
		 */
		mcast_obj->max_cmd_len = -1;
		mcast_obj->wait_comp = bnx2x_mcast_wait;
		mcast_obj->set_one_rule = NULL;
		mcast_obj->validate = bnx2x_mcast_validate_e1h;
		mcast_obj->revert = bnx2x_mcast_revert_e1h;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	} else {
		mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
		mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore =
			bnx2x_mcast_handle_restore_cmd_e2;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;
		/* TODO: There should be a proper HSI define for this number!!!
		 */
		mcast_obj->max_cmd_len = 16;
		mcast_obj->wait_comp = bnx2x_mcast_wait;
		mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
		mcast_obj->validate = bnx2x_mcast_validate_e2;
		mcast_obj->revert = bnx2x_mcast_revert_e2;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	}
}

/*************************** Credit handling **********************************/

/**
 * __atomic_add_ifless - add if the result is less than a given value.
 *
 * @v:	pointer of type atomic_t
 * @a:	the amount to add to v...
 * @u:	...if (v + a) is less than u.
 *
 * returns true if (v + a) was less than u, and false otherwise.
 */
static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c + a >= u))
			return false;

		old = atomic_cmpxchg((v), c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}

	return true;
}
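/*
 * Example (added for clarity): with atomic_read(v) == 3,
 * __atomic_add_ifless(v, 2, 6) succeeds and leaves v at 5, while
 * __atomic_add_ifless(v, 3, 6) fails and leaves v untouched, since
 * 3 + 3 is not less than 6. The cmpxchg loop retries if another CPU
 * changed v in between.
 */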
3773 */ 3774 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u) 3775 { 3776 int c, old; 3777 3778 c = atomic_read(v); 3779 for (;;) { 3780 if (unlikely(c - a < u)) 3781 return false; 3782 3783 old = atomic_cmpxchg((v), c, c - a); 3784 if (likely(old == c)) 3785 break; 3786 c = old; 3787 } 3788 3789 return true; 3790 } 3791 3792 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt) 3793 { 3794 bool rc; 3795 3796 smp_mb(); 3797 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0); 3798 smp_mb(); 3799 3800 return rc; 3801 } 3802 3803 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt) 3804 { 3805 bool rc; 3806 3807 smp_mb(); 3808 3809 /* Don't let to refill if credit + cnt > pool_sz */ 3810 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1); 3811 3812 smp_mb(); 3813 3814 return rc; 3815 } 3816 3817 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o) 3818 { 3819 int cur_credit; 3820 3821 smp_mb(); 3822 cur_credit = atomic_read(&o->credit); 3823 3824 return cur_credit; 3825 } 3826 3827 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o, 3828 int cnt) 3829 { 3830 return true; 3831 } 3832 3833 3834 static bool bnx2x_credit_pool_get_entry( 3835 struct bnx2x_credit_pool_obj *o, 3836 int *offset) 3837 { 3838 int idx, vec, i; 3839 3840 *offset = -1; 3841 3842 /* Find "internal cam-offset" then add to base for this object... */ 3843 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) { 3844 3845 /* Skip the current vector if there are no free entries in it */ 3846 if (!o->pool_mirror[vec]) 3847 continue; 3848 3849 /* If we've got here we are going to find a free entry */ 3850 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0; 3851 i < BIT_VEC64_ELEM_SZ; idx++, i++) 3852 3853 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { 3854 /* Got one!! */ 3855 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx); 3856 *offset = o->base_pool_offset + idx; 3857 return true; 3858 } 3859 } 3860 3861 return false; 3862 } 3863 3864 static bool bnx2x_credit_pool_put_entry( 3865 struct bnx2x_credit_pool_obj *o, 3866 int offset) 3867 { 3868 if (offset < o->base_pool_offset) 3869 return false; 3870 3871 offset -= o->base_pool_offset; 3872 3873 if (offset >= o->pool_sz) 3874 return false; 3875 3876 /* Return the entry to the pool */ 3877 BIT_VEC64_SET_BIT(o->pool_mirror, offset); 3878 3879 return true; 3880 } 3881 3882 static bool bnx2x_credit_pool_put_entry_always_true( 3883 struct bnx2x_credit_pool_obj *o, 3884 int offset) 3885 { 3886 return true; 3887 } 3888 3889 static bool bnx2x_credit_pool_get_entry_always_true( 3890 struct bnx2x_credit_pool_obj *o, 3891 int *offset) 3892 { 3893 *offset = -1; 3894 return true; 3895 } 3896 /** 3897 * bnx2x_init_credit_pool - initialize credit pool internals. 3898 * 3899 * @p: 3900 * @base: Base entry in the CAM to use. 3901 * @credit: pool size. 3902 * 3903 * If base is negative no CAM entries handling will be performed. 3904 * If credit is negative pool operations will always succeed (unlimited pool). 
3905 * 3906 */ 3907 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p, 3908 int base, int credit) 3909 { 3910 /* Zero the object first */ 3911 memset(p, 0, sizeof(*p)); 3912 3913 /* Set the table to all 1s */ 3914 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror)); 3915 3916 /* Init a pool as full */ 3917 atomic_set(&p->credit, credit); 3918 3919 /* The total poll size */ 3920 p->pool_sz = credit; 3921 3922 p->base_pool_offset = base; 3923 3924 /* Commit the change */ 3925 smp_mb(); 3926 3927 p->check = bnx2x_credit_pool_check; 3928 3929 /* if pool credit is negative - disable the checks */ 3930 if (credit >= 0) { 3931 p->put = bnx2x_credit_pool_put; 3932 p->get = bnx2x_credit_pool_get; 3933 p->put_entry = bnx2x_credit_pool_put_entry; 3934 p->get_entry = bnx2x_credit_pool_get_entry; 3935 } else { 3936 p->put = bnx2x_credit_pool_always_true; 3937 p->get = bnx2x_credit_pool_always_true; 3938 p->put_entry = bnx2x_credit_pool_put_entry_always_true; 3939 p->get_entry = bnx2x_credit_pool_get_entry_always_true; 3940 } 3941 3942 /* If base is negative - disable entries handling */ 3943 if (base < 0) { 3944 p->put_entry = bnx2x_credit_pool_put_entry_always_true; 3945 p->get_entry = bnx2x_credit_pool_get_entry_always_true; 3946 } 3947 } 3948 3949 void bnx2x_init_mac_credit_pool(struct bnx2x *bp, 3950 struct bnx2x_credit_pool_obj *p, u8 func_id, 3951 u8 func_num) 3952 { 3953 /* TODO: this will be defined in consts as well... */ 3954 #define BNX2X_CAM_SIZE_EMUL 5 3955 3956 int cam_sz; 3957 3958 if (CHIP_IS_E1(bp)) { 3959 /* In E1, Multicast is saved in cam... */ 3960 if (!CHIP_REV_IS_SLOW(bp)) 3961 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST; 3962 else 3963 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI; 3964 3965 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz); 3966 3967 } else if (CHIP_IS_E1H(bp)) { 3968 /* CAM credit is equaly divided between all active functions 3969 * on the PORT!. 3970 */ 3971 if ((func_num > 0)) { 3972 if (!CHIP_REV_IS_SLOW(bp)) 3973 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num)); 3974 else 3975 cam_sz = BNX2X_CAM_SIZE_EMUL; 3976 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz); 3977 } else { 3978 /* this should never happen! Block MAC operations. */ 3979 bnx2x_init_credit_pool(p, 0, 0); 3980 } 3981 3982 } else { 3983 3984 /* 3985 * CAM credit is equaly divided between all active functions 3986 * on the PATH. 3987 */ 3988 if ((func_num > 0)) { 3989 if (!CHIP_REV_IS_SLOW(bp)) 3990 cam_sz = (MAX_MAC_CREDIT_E2 / func_num); 3991 else 3992 cam_sz = BNX2X_CAM_SIZE_EMUL; 3993 3994 /* 3995 * No need for CAM entries handling for 57712 and 3996 * newer. 3997 */ 3998 bnx2x_init_credit_pool(p, -1, cam_sz); 3999 } else { 4000 /* this should never happen! Block MAC operations. */ 4001 bnx2x_init_credit_pool(p, 0, 0); 4002 } 4003 4004 } 4005 } 4006 4007 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, 4008 struct bnx2x_credit_pool_obj *p, 4009 u8 func_id, 4010 u8 func_num) 4011 { 4012 if (CHIP_IS_E1x(bp)) { 4013 /* 4014 * There is no VLAN credit in HW on 57710 and 57711 only 4015 * MAC / MAC-VLAN can be set 4016 */ 4017 bnx2x_init_credit_pool(p, 0, -1); 4018 } else { 4019 /* 4020 * CAM credit is equaly divided between all active functions 4021 * on the PATH. 4022 */ 4023 if (func_num > 0) { 4024 int credit = MAX_VLAN_CREDIT_E2 / func_num; 4025 bnx2x_init_credit_pool(p, func_id * credit, credit); 4026 } else 4027 /* this should never happen! Block VLAN operations. 
*/ 4028 bnx2x_init_credit_pool(p, 0, 0); 4029 } 4030 } 4031 4032 /****************** RSS Configuration ******************/ 4033 /** 4034 * bnx2x_debug_print_ind_table - prints the indirection table configuration. 4035 * 4036 * @bp: driver handle 4037 * @p: pointer to rss configuration 4038 * 4039 * Prints it when NETIF_MSG_IFUP debug level is configured. 4040 */ 4041 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp, 4042 struct bnx2x_config_rss_params *p) 4043 { 4044 int i; 4045 4046 DP(BNX2X_MSG_SP, "Setting indirection table to:\n"); 4047 DP(BNX2X_MSG_SP, "0x0000: "); 4048 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { 4049 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]); 4050 4051 /* Print 4 bytes in a line */ 4052 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) && 4053 (((i + 1) & 0x3) == 0)) { 4054 DP_CONT(BNX2X_MSG_SP, "\n"); 4055 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1); 4056 } 4057 } 4058 4059 DP_CONT(BNX2X_MSG_SP, "\n"); 4060 } 4061 4062 /** 4063 * bnx2x_setup_rss - configure RSS 4064 * 4065 * @bp: device handle 4066 * @p: rss configuration 4067 * 4068 * Sends an RSS UPDATE ramrod for that matter. 4069 */ 4070 static int bnx2x_setup_rss(struct bnx2x *bp, 4071 struct bnx2x_config_rss_params *p) 4072 { 4073 struct bnx2x_rss_config_obj *o = p->rss_obj; 4074 struct bnx2x_raw_obj *r = &o->raw; 4075 struct eth_rss_update_ramrod_data *data = 4076 (struct eth_rss_update_ramrod_data *)(r->rdata); 4077 u8 rss_mode = 0; 4078 int rc; 4079 4080 memset(data, 0, sizeof(*data)); 4081 4082 DP(BNX2X_MSG_SP, "Configuring RSS\n"); 4083 4084 /* Set an echo field */ 4085 data->echo = (r->cid & BNX2X_SWCID_MASK) | 4086 (r->state << BNX2X_SWCID_SHIFT); 4087 4088 /* RSS mode */ 4089 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags)) 4090 rss_mode = ETH_RSS_MODE_DISABLED; 4091 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags)) 4092 rss_mode = ETH_RSS_MODE_REGULAR; 4093 else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags)) 4094 rss_mode = ETH_RSS_MODE_VLAN_PRI; 4095 else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags)) 4096 rss_mode = ETH_RSS_MODE_E1HOV_PRI; 4097 else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags)) 4098 rss_mode = ETH_RSS_MODE_IP_DSCP; 4099 4100 data->rss_mode = rss_mode; 4101 4102 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode); 4103 4104 /* RSS capabilities */ 4105 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags)) 4106 data->capabilities |= 4107 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; 4108 4109 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags)) 4110 data->capabilities |= 4111 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; 4112 4113 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) 4114 data->capabilities |= 4115 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; 4116 4117 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags)) 4118 data->capabilities |= 4119 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; 4120 4121 /* Hashing mask */ 4122 data->rss_result_mask = p->rss_result_mask; 4123 4124 /* RSS engine ID */ 4125 data->rss_engine_id = o->engine_id; 4126 4127 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id); 4128 4129 /* Indirection table */ 4130 memcpy(data->indirection_table, p->ind_table, 4131 T_ETH_INDIRECTION_TABLE_SIZE); 4132 4133 /* Remember the last configuration */ 4134 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); 4135 4136 /* Print the indirection table */ 4137 if (netif_msg_ifup(bp)) 4138 bnx2x_debug_print_ind_table(bp, p); 4139 4140 /* RSS keys */ 4141 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { 4142 memcpy(&data->rss_key[0],
&p->rss_key[0], 4143 sizeof(data->rss_key)); 4144 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; 4145 } 4146 4147 /* 4148 * No need for an explicit memory barrier here: the ordering of 4149 * writing to the SPQ element and updating the SPQ producer must 4150 * be ensured in any case, and the full memory barrier that 4151 * provides that ordering already lives there 4152 * (inside bnx2x_sp_post()). 4153 */ 4154 4155 /* Send a ramrod */ 4156 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid, 4157 U64_HI(r->rdata_mapping), 4158 U64_LO(r->rdata_mapping), 4159 ETH_CONNECTION_TYPE); 4160 4161 if (rc < 0) 4162 return rc; 4163 4164 return 1; 4165 } 4166 4167 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, 4168 u8 *ind_table) 4169 { 4170 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table)); 4171 } 4172 4173 int bnx2x_config_rss(struct bnx2x *bp, 4174 struct bnx2x_config_rss_params *p) 4175 { 4176 int rc; 4177 struct bnx2x_rss_config_obj *o = p->rss_obj; 4178 struct bnx2x_raw_obj *r = &o->raw; 4179 4180 /* Do nothing if only driver cleanup was requested */ 4181 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) 4182 return 0; 4183 4184 r->set_pending(r); 4185 4186 rc = o->config_rss(bp, p); 4187 if (rc < 0) { 4188 r->clear_pending(r); 4189 return rc; 4190 } 4191 4192 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) 4193 rc = r->wait_comp(bp, r); 4194 4195 return rc; 4196 } 4197 4198 4199 void bnx2x_init_rss_config_obj(struct bnx2x *bp, 4200 struct bnx2x_rss_config_obj *rss_obj, 4201 u8 cl_id, u32 cid, u8 func_id, u8 engine_id, 4202 void *rdata, dma_addr_t rdata_mapping, 4203 int state, unsigned long *pstate, 4204 bnx2x_obj_type type) 4205 { 4206 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata, 4207 rdata_mapping, state, pstate, type); 4208 4209 rss_obj->engine_id = engine_id; 4210 rss_obj->config_rss = bnx2x_setup_rss; 4211 } 4212 4213 /********************** Queue state object ***********************************/ 4214 4215 /** 4216 * bnx2x_queue_state_change - perform Queue state change transition 4217 * 4218 * @bp: device handle 4219 * @params: parameters to perform the transition 4220 * 4221 * returns 0 in case of successfully completed transition, negative error 4222 * code in case of failure, positive (EBUSY) value if there is a completion 4223 * that is still pending (possible only if RAMROD_COMP_WAIT is 4224 * not set in params->ramrod_flags for asynchronous commands).
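 *
 * Usage sketch (illustration only; the fastpath field name fp->q_obj is
 * an assumption of this example, not taken from this file):
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *
 *	q_params.q_obj = &fp->q_obj;
 *	q_params.cmd = BNX2X_Q_CMD_HALT;
 *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *	rc = bnx2x_queue_state_change(bp, &q_params);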
4225 * 4226 */ 4227 int bnx2x_queue_state_change(struct bnx2x *bp, 4228 struct bnx2x_queue_state_params *params) 4229 { 4230 struct bnx2x_queue_sp_obj *o = params->q_obj; 4231 int rc, pending_bit; 4232 unsigned long *pending = &o->pending; 4233 4234 /* Check that the requested transition is legal */ 4235 if (o->check_transition(bp, o, params)) 4236 return -EINVAL; 4237 4238 /* Set "pending" bit */ 4239 pending_bit = o->set_pending(o, params); 4240 4241 /* Don't send a command if only driver cleanup was requested */ 4242 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) 4243 o->complete_cmd(bp, o, pending_bit); 4244 else { 4245 /* Send a ramrod */ 4246 rc = o->send_cmd(bp, params); 4247 if (rc) { 4248 o->next_state = BNX2X_Q_STATE_MAX; 4249 clear_bit(pending_bit, pending); 4250 smp_mb__after_clear_bit(); 4251 return rc; 4252 } 4253 4254 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) { 4255 rc = o->wait_comp(bp, o, pending_bit); 4256 if (rc) 4257 return rc; 4258 4259 return 0; 4260 } 4261 } 4262 4263 return !!test_bit(pending_bit, pending); 4264 } 4265 4266 4267 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj, 4268 struct bnx2x_queue_state_params *params) 4269 { 4270 enum bnx2x_queue_cmd cmd = params->cmd, bit; 4271 4272 /* ACTIVATE and DEACTIVATE commands are implemented on top of 4273 * UPDATE command. 4274 */ 4275 if ((cmd == BNX2X_Q_CMD_ACTIVATE) || 4276 (cmd == BNX2X_Q_CMD_DEACTIVATE)) 4277 bit = BNX2X_Q_CMD_UPDATE; 4278 else 4279 bit = cmd; 4280 4281 set_bit(bit, &obj->pending); 4282 return bit; 4283 } 4284 4285 static int bnx2x_queue_wait_comp(struct bnx2x *bp, 4286 struct bnx2x_queue_sp_obj *o, 4287 enum bnx2x_queue_cmd cmd) 4288 { 4289 return bnx2x_state_wait(bp, cmd, &o->pending); 4290 } 4291 4292 /** 4293 * bnx2x_queue_comp_cmd - complete the state change command. 4294 * 4295 * @bp: device handle 4296 * @o: queue state object 4297 * @cmd: command that has completed 4298 * 4299 * Checks that the arrived completion is expected. 4300 */ 4301 static int bnx2x_queue_comp_cmd(struct bnx2x *bp, 4302 struct bnx2x_queue_sp_obj *o, 4303 enum bnx2x_queue_cmd cmd) 4304 { 4305 unsigned long cur_pending = o->pending; 4306 4307 if (!test_and_clear_bit(cmd, &cur_pending)) { 4308 BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n", 4309 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], 4310 o->state, cur_pending, o->next_state); 4311 return -EINVAL; 4312 } 4313 4314 if (o->next_tx_only >= o->max_cos) 4315 /* >= because tx only must always be smaller than cos since the 4316 * primary connection supports COS 0 4317 */ 4318 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d\n", 4319 o->next_tx_only, o->max_cos); 4320 4321 DP(BNX2X_MSG_SP, 4322 "Completing command %d for queue %d, setting state to %d\n", 4323 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state); 4324 4325 if (o->next_tx_only) /* print num tx-only if any exist */ 4326 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n", 4327 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only); 4328 4329 o->state = o->next_state; 4330 o->num_tx_only = o->next_tx_only; 4331 o->next_state = BNX2X_Q_STATE_MAX; 4332 4333 /* It's important that o->state and o->next_state are 4334 * updated before o->pending.
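 * The wmb() below enforces that ordering; readers are expected to
 * check o->pending first and only then look at o->state (see
 * bnx2x_func_get_state() below for the matching read-side ordering
 * on the function object).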
4335 */ 4336 wmb(); 4337 4338 clear_bit(cmd, &o->pending); 4339 smp_mb__after_clear_bit(); 4340 4341 return 0; 4342 } 4343 4344 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp, 4345 struct bnx2x_queue_state_params *cmd_params, 4346 struct client_init_ramrod_data *data) 4347 { 4348 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup; 4349 4350 /* Rx data */ 4351 4352 /* IPv6 TPA supported for E2 and above only */ 4353 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) * 4354 CLIENT_INIT_RX_DATA_TPA_EN_IPV6; 4355 } 4356 4357 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, 4358 struct bnx2x_queue_sp_obj *o, 4359 struct bnx2x_general_setup_params *params, 4360 struct client_init_general_data *gen_data, 4361 unsigned long *flags) 4362 { 4363 gen_data->client_id = o->cl_id; 4364 4365 if (test_bit(BNX2X_Q_FLG_STATS, flags)) { 4366 gen_data->statistics_counter_id = 4367 params->stat_id; 4368 gen_data->statistics_en_flg = 1; 4369 gen_data->statistics_zero_flg = 4370 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags); 4371 } else 4372 gen_data->statistics_counter_id = 4373 DISABLE_STATISTIC_COUNTER_ID_VALUE; 4374 4375 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags); 4376 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags); 4377 gen_data->sp_client_id = params->spcl_id; 4378 gen_data->mtu = cpu_to_le16(params->mtu); 4379 gen_data->func_id = o->func_id; 4380 4381 4382 gen_data->cos = params->cos; 4383 4384 gen_data->traffic_type = 4385 test_bit(BNX2X_Q_FLG_FCOE, flags) ? 4386 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; 4387 4388 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n", 4389 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg); 4390 } 4391 4392 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o, 4393 struct bnx2x_txq_setup_params *params, 4394 struct client_init_tx_data *tx_data, 4395 unsigned long *flags) 4396 { 4397 tx_data->enforce_security_flg = 4398 test_bit(BNX2X_Q_FLG_TX_SEC, flags); 4399 tx_data->default_vlan = 4400 cpu_to_le16(params->default_vlan); 4401 tx_data->default_vlan_flg = 4402 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags); 4403 tx_data->tx_switching_flg = 4404 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags); 4405 tx_data->anti_spoofing_flg = 4406 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); 4407 tx_data->tx_status_block_id = params->fw_sb_id; 4408 tx_data->tx_sb_index_number = params->sb_cq_index; 4409 tx_data->tss_leading_client_id = params->tss_leading_cl_id; 4410 4411 tx_data->tx_bd_page_base.lo = 4412 cpu_to_le32(U64_LO(params->dscr_map)); 4413 tx_data->tx_bd_page_base.hi = 4414 cpu_to_le32(U64_HI(params->dscr_map)); 4415 4416 /* Don't configure any Tx switching mode during queue SETUP */ 4417 tx_data->state = 0; 4418 } 4419 4420 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o, 4421 struct rxq_pause_params *params, 4422 struct client_init_rx_data *rx_data) 4423 { 4424 /* flow control data */ 4425 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo); 4426 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi); 4427 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo); 4428 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi); 4429 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo); 4430 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi); 4431 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map); 4432 } 4433 4434 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o, 4435 struct bnx2x_rxq_setup_params *params, 4436
struct client_init_rx_data *rx_data, 4437 unsigned long *flags) 4438 { 4439 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) * 4440 CLIENT_INIT_RX_DATA_TPA_EN_IPV4; 4441 rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) * 4442 CLIENT_INIT_RX_DATA_TPA_MODE; 4443 rx_data->vmqueue_mode_en_flg = 0; 4444 4445 rx_data->cache_line_alignment_log_size = 4446 params->cache_line_log; 4447 rx_data->enable_dynamic_hc = 4448 test_bit(BNX2X_Q_FLG_DHC, flags); 4449 rx_data->max_sges_for_packet = params->max_sges_pkt; 4450 rx_data->client_qzone_id = params->cl_qzone_id; 4451 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz); 4452 4453 /* Always start in DROP_ALL mode */ 4454 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | 4455 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); 4456 4457 /* We don't set drop flags */ 4458 rx_data->drop_ip_cs_err_flg = 0; 4459 rx_data->drop_tcp_cs_err_flg = 0; 4460 rx_data->drop_ttl0_flg = 0; 4461 rx_data->drop_udp_cs_err_flg = 0; 4462 rx_data->inner_vlan_removal_enable_flg = 4463 test_bit(BNX2X_Q_FLG_VLAN, flags); 4464 rx_data->outer_vlan_removal_enable_flg = 4465 test_bit(BNX2X_Q_FLG_OV, flags); 4466 rx_data->status_block_id = params->fw_sb_id; 4467 rx_data->rx_sb_index_number = params->sb_cq_index; 4468 rx_data->max_tpa_queues = params->max_tpa_queues; 4469 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz); 4470 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz); 4471 rx_data->bd_page_base.lo = 4472 cpu_to_le32(U64_LO(params->dscr_map)); 4473 rx_data->bd_page_base.hi = 4474 cpu_to_le32(U64_HI(params->dscr_map)); 4475 rx_data->sge_page_base.lo = 4476 cpu_to_le32(U64_LO(params->sge_map)); 4477 rx_data->sge_page_base.hi = 4478 cpu_to_le32(U64_HI(params->sge_map)); 4479 rx_data->cqe_page_base.lo = 4480 cpu_to_le32(U64_LO(params->rcq_map)); 4481 rx_data->cqe_page_base.hi = 4482 cpu_to_le32(U64_HI(params->rcq_map)); 4483 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags); 4484 4485 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) { 4486 rx_data->approx_mcast_engine_id = params->mcast_engine_id; 4487 rx_data->is_approx_mcast = 1; 4488 } 4489 4490 rx_data->rss_engine_id = params->rss_engine_id; 4491 4492 /* silent vlan removal */ 4493 rx_data->silent_vlan_removal_flg = 4494 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags); 4495 rx_data->silent_vlan_value = 4496 cpu_to_le16(params->silent_removal_value); 4497 rx_data->silent_vlan_mask = 4498 cpu_to_le16(params->silent_removal_mask); 4499 4500 } 4501 4502 /* initialize the general, tx and rx parts of a queue object */ 4503 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp, 4504 struct bnx2x_queue_state_params *cmd_params, 4505 struct client_init_ramrod_data *data) 4506 { 4507 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, 4508 &cmd_params->params.setup.gen_params, 4509 &data->general, 4510 &cmd_params->params.setup.flags); 4511 4512 bnx2x_q_fill_init_tx_data(cmd_params->q_obj, 4513 &cmd_params->params.setup.txq_params, 4514 &data->tx, 4515 &cmd_params->params.setup.flags); 4516 4517 bnx2x_q_fill_init_rx_data(cmd_params->q_obj, 4518 &cmd_params->params.setup.rxq_params, 4519 &data->rx, 4520 &cmd_params->params.setup.flags); 4521 4522 bnx2x_q_fill_init_pause_data(cmd_params->q_obj, 4523 &cmd_params->params.setup.pause_params, 4524 &data->rx); 4525 } 4526 4527 /* initialize the general and tx parts of a tx-only queue object */ 4528 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp, 4529 struct bnx2x_queue_state_params *cmd_params, 4530 struct tx_queue_init_ramrod_data *data) 4531 { 
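	/*
	 * A tx-only queue has no Rx half: only the general and Tx parts
	 * of the SETUP data are filled in below; the Rx and pause parts
	 * are left out.
	 */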
4532 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, 4533 &cmd_params->params.tx_only.gen_params, 4534 &data->general, 4535 &cmd_params->params.tx_only.flags); 4536 4537 bnx2x_q_fill_init_tx_data(cmd_params->q_obj, 4538 &cmd_params->params.tx_only.txq_params, 4539 &data->tx, 4540 &cmd_params->params.tx_only.flags); 4541 4542 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n", 4543 cmd_params->q_obj->cids[0], 4544 data->tx.tx_bd_page_base.lo, 4545 data->tx.tx_bd_page_base.hi); 4546 } 4547 4548 /** 4549 * bnx2x_q_init - init HW/FW queue 4550 * 4551 * @bp: device handle 4552 * @params: queue state parameters 4553 * 4554 * HW/FW initial Queue configuration: 4555 * - HC: Rx and Tx 4556 * - CDU context validation 4557 * 4558 */ 4559 static inline int bnx2x_q_init(struct bnx2x *bp, 4560 struct bnx2x_queue_state_params *params) 4561 { 4562 struct bnx2x_queue_sp_obj *o = params->q_obj; 4563 struct bnx2x_queue_init_params *init = &params->params.init; 4564 u16 hc_usec; 4565 u8 cos; 4566 4567 /* Tx HC configuration */ 4568 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) && 4569 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) { 4570 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0; 4571 4572 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id, 4573 init->tx.sb_cq_index, 4574 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags), 4575 hc_usec); 4576 } 4577 4578 /* Rx HC configuration */ 4579 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) && 4580 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) { 4581 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0; 4582 4583 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id, 4584 init->rx.sb_cq_index, 4585 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags), 4586 hc_usec); 4587 } 4588 4589 /* Set CDU context validation values */ 4590 for (cos = 0; cos < o->max_cos; cos++) { 4591 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n", 4592 o->cids[cos], cos); 4593 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]); 4594 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]); 4595 } 4596 4597 /* As no ramrod is sent, complete the command immediately */ 4598 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT); 4599 4600 mmiowb(); 4601 smp_mb(); 4602 4603 return 0; 4604 } 4605 4606 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, 4607 struct bnx2x_queue_state_params *params) 4608 { 4609 struct bnx2x_queue_sp_obj *o = params->q_obj; 4610 struct client_init_ramrod_data *rdata = 4611 (struct client_init_ramrod_data *)o->rdata; 4612 dma_addr_t data_mapping = o->rdata_mapping; 4613 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; 4614 4615 /* Clear the ramrod data */ 4616 memset(rdata, 0, sizeof(*rdata)); 4617 4618 /* Fill the ramrod data */ 4619 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); 4620 4621 /* 4622 * No need for an explicit memory barrier here: the ordering of 4623 * writing to the SPQ element and updating the SPQ producer must 4624 * be ensured in any case, and the full memory barrier that 4625 * provides that ordering already lives there 4626 * (inside bnx2x_sp_post()).
4627 */ 4628 4629 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], 4630 U64_HI(data_mapping), 4631 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4632 } 4633 4634 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, 4635 struct bnx2x_queue_state_params *params) 4636 { 4637 struct bnx2x_queue_sp_obj *o = params->q_obj; 4638 struct client_init_ramrod_data *rdata = 4639 (struct client_init_ramrod_data *)o->rdata; 4640 dma_addr_t data_mapping = o->rdata_mapping; 4641 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; 4642 4643 /* Clear the ramrod data */ 4644 memset(rdata, 0, sizeof(*rdata)); 4645 4646 /* Fill the ramrod data */ 4647 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); 4648 bnx2x_q_fill_setup_data_e2(bp, params, rdata); 4649 4650 /* 4651 * No need for an explicit memory barrier here: the ordering of 4652 * writing to the SPQ element and updating the SPQ producer must 4653 * be ensured in any case, and the full memory barrier that 4654 * provides that ordering already lives there 4655 * (inside bnx2x_sp_post()). 4656 */ 4657 4658 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], 4659 U64_HI(data_mapping), 4660 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4661 } 4662 4663 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, 4664 struct bnx2x_queue_state_params *params) 4665 { 4666 struct bnx2x_queue_sp_obj *o = params->q_obj; 4667 struct tx_queue_init_ramrod_data *rdata = 4668 (struct tx_queue_init_ramrod_data *)o->rdata; 4669 dma_addr_t data_mapping = o->rdata_mapping; 4670 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP; 4671 struct bnx2x_queue_setup_tx_only_params *tx_only_params = 4672 &params->params.tx_only; 4673 u8 cid_index = tx_only_params->cid_index; 4674 4675 4676 if (cid_index >= o->max_cos) { 4677 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", 4678 o->cl_id, cid_index); 4679 return -EINVAL; 4680 } 4681 4682 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n", 4683 tx_only_params->gen_params.cos, 4684 tx_only_params->gen_params.spcl_id); 4685 4686 /* Clear the ramrod data */ 4687 memset(rdata, 0, sizeof(*rdata)); 4688 4689 /* Fill the ramrod data */ 4690 bnx2x_q_fill_setup_tx_only(bp, params, rdata); 4691 4692 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n", 4693 o->cids[cid_index], rdata->general.client_id, 4694 rdata->general.sp_client_id, rdata->general.cos); 4695 4696 /* 4697 * No need for an explicit memory barrier here: the ordering of 4698 * writing to the SPQ element and updating the SPQ producer must 4699 * be ensured in any case, and the full memory barrier that 4700 * provides that ordering already lives there 4701 * (inside bnx2x_sp_post()).
4702 */ 4703 4704 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], 4705 U64_HI(data_mapping), 4706 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4707 } 4708 4709 static void bnx2x_q_fill_update_data(struct bnx2x *bp, 4710 struct bnx2x_queue_sp_obj *obj, 4711 struct bnx2x_queue_update_params *params, 4712 struct client_update_ramrod_data *data) 4713 { 4714 /* Client ID of the client to update */ 4715 data->client_id = obj->cl_id; 4716 4717 /* Function ID of the client to update */ 4718 data->func_id = obj->func_id; 4719 4720 /* Default VLAN value */ 4721 data->default_vlan = cpu_to_le16(params->def_vlan); 4722 4723 /* Inner VLAN stripping */ 4724 data->inner_vlan_removal_enable_flg = 4725 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags); 4726 data->inner_vlan_removal_change_flg = 4727 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, 4728 &params->update_flags); 4729 4730 /* Outer VLAN stripping */ 4731 data->outer_vlan_removal_enable_flg = 4732 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags); 4733 data->outer_vlan_removal_change_flg = 4734 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG, 4735 &params->update_flags); 4736 4737 /* Drop packets whose source MAC doesn't belong to this 4738 * Queue. 4739 */ 4740 data->anti_spoofing_enable_flg = 4741 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags); 4742 data->anti_spoofing_change_flg = 4743 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags); 4744 4745 /* Activate/Deactivate */ 4746 data->activate_flg = 4747 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags); 4748 data->activate_change_flg = 4749 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags); 4750 4751 /* Enable default VLAN */ 4752 data->default_vlan_enable_flg = 4753 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags); 4754 data->default_vlan_change_flg = 4755 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, 4756 &params->update_flags); 4757 4758 /* silent vlan removal */ 4759 data->silent_vlan_change_flg = 4760 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 4761 &params->update_flags); 4762 data->silent_vlan_removal_flg = 4763 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags); 4764 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value); 4765 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask); 4766 } 4767 4768 static inline int bnx2x_q_send_update(struct bnx2x *bp, 4769 struct bnx2x_queue_state_params *params) 4770 { 4771 struct bnx2x_queue_sp_obj *o = params->q_obj; 4772 struct client_update_ramrod_data *rdata = 4773 (struct client_update_ramrod_data *)o->rdata; 4774 dma_addr_t data_mapping = o->rdata_mapping; 4775 struct bnx2x_queue_update_params *update_params = 4776 &params->params.update; 4777 u8 cid_index = update_params->cid_index; 4778 4779 if (cid_index >= o->max_cos) { 4780 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", 4781 o->cl_id, cid_index); 4782 return -EINVAL; 4783 } 4784 4785 4786 /* Clear the ramrod data */ 4787 memset(rdata, 0, sizeof(*rdata)); 4788 4789 /* Fill the ramrod data */ 4790 bnx2x_q_fill_update_data(bp, o, update_params, rdata); 4791 4792 /* 4793 * No need for an explicit memory barrier here: the ordering of 4794 * writing to the SPQ element and updating the SPQ producer must 4795 * be ensured in any case, and the full memory barrier that 4796 * provides that ordering already lives there 4797 * (inside bnx2x_sp_post()).
4798 */ 4799 4800 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, 4801 o->cids[cid_index], U64_HI(data_mapping), 4802 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4803 } 4804 4805 /** 4806 * bnx2x_q_send_deactivate - send DEACTIVATE command 4807 * 4808 * @bp: device handle 4809 * @params: queue state parameters 4810 * 4811 * Implemented using the UPDATE command. 4812 */ 4813 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp, 4814 struct bnx2x_queue_state_params *params) 4815 { 4816 struct bnx2x_queue_update_params *update = &params->params.update; 4817 4818 memset(update, 0, sizeof(*update)); 4819 4820 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); 4821 4822 return bnx2x_q_send_update(bp, params); 4823 } 4824 4825 /** 4826 * bnx2x_q_send_activate - send ACTIVATE command 4827 * 4828 * @bp: device handle 4829 * @params: queue state parameters 4830 * 4831 * Implemented using the UPDATE command. 4832 */ 4833 static inline int bnx2x_q_send_activate(struct bnx2x *bp, 4834 struct bnx2x_queue_state_params *params) 4835 { 4836 struct bnx2x_queue_update_params *update = &params->params.update; 4837 4838 memset(update, 0, sizeof(*update)); 4839 4840 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags); 4841 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); 4842 4843 return bnx2x_q_send_update(bp, params); 4844 } 4845 4846 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp, 4847 struct bnx2x_queue_state_params *params) 4848 { 4849 /* TODO: Not implemented yet. */ 4850 return -1; 4851 } 4852 4853 static inline int bnx2x_q_send_halt(struct bnx2x *bp, 4854 struct bnx2x_queue_state_params *params) 4855 { 4856 struct bnx2x_queue_sp_obj *o = params->q_obj; 4857 4858 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 4859 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id, 4860 ETH_CONNECTION_TYPE); 4861 } 4862 4863 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp, 4864 struct bnx2x_queue_state_params *params) 4865 { 4866 struct bnx2x_queue_sp_obj *o = params->q_obj; 4867 u8 cid_idx = params->params.cfc_del.cid_index; 4868 4869 if (cid_idx >= o->max_cos) { 4870 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", 4871 o->cl_id, cid_idx); 4872 return -EINVAL; 4873 } 4874 4875 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, 4876 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE); 4877 } 4878 4879 static inline int bnx2x_q_send_terminate(struct bnx2x *bp, 4880 struct bnx2x_queue_state_params *params) 4881 { 4882 struct bnx2x_queue_sp_obj *o = params->q_obj; 4883 u8 cid_index = params->params.terminate.cid_index; 4884 4885 if (cid_index >= o->max_cos) { 4886 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", 4887 o->cl_id, cid_index); 4888 return -EINVAL; 4889 } 4890 4891 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, 4892 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE); 4893 } 4894 4895 static inline int bnx2x_q_send_empty(struct bnx2x *bp, 4896 struct bnx2x_queue_state_params *params) 4897 { 4898 struct bnx2x_queue_sp_obj *o = params->q_obj; 4899 4900 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY, 4901 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0, 4902 ETH_CONNECTION_TYPE); 4903 } 4904 4905 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp, 4906 struct bnx2x_queue_state_params *params) 4907 { 4908 switch (params->cmd) { 4909 case BNX2X_Q_CMD_INIT: 4910 return bnx2x_q_init(bp, params); 4911 case BNX2X_Q_CMD_SETUP_TX_ONLY: 4912 return bnx2x_q_send_setup_tx_only(bp, params); 4913 case BNX2X_Q_CMD_DEACTIVATE: 4914 return bnx2x_q_send_deactivate(bp, params); 4915 case
BNX2X_Q_CMD_ACTIVATE: 4916 return bnx2x_q_send_activate(bp, params); 4917 case BNX2X_Q_CMD_UPDATE: 4918 return bnx2x_q_send_update(bp, params); 4919 case BNX2X_Q_CMD_UPDATE_TPA: 4920 return bnx2x_q_send_update_tpa(bp, params); 4921 case BNX2X_Q_CMD_HALT: 4922 return bnx2x_q_send_halt(bp, params); 4923 case BNX2X_Q_CMD_CFC_DEL: 4924 return bnx2x_q_send_cfc_del(bp, params); 4925 case BNX2X_Q_CMD_TERMINATE: 4926 return bnx2x_q_send_terminate(bp, params); 4927 case BNX2X_Q_CMD_EMPTY: 4928 return bnx2x_q_send_empty(bp, params); 4929 default: 4930 BNX2X_ERR("Unknown command: %d\n", params->cmd); 4931 return -EINVAL; 4932 } 4933 } 4934 4935 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp, 4936 struct bnx2x_queue_state_params *params) 4937 { 4938 switch (params->cmd) { 4939 case BNX2X_Q_CMD_SETUP: 4940 return bnx2x_q_send_setup_e1x(bp, params); 4941 case BNX2X_Q_CMD_INIT: 4942 case BNX2X_Q_CMD_SETUP_TX_ONLY: 4943 case BNX2X_Q_CMD_DEACTIVATE: 4944 case BNX2X_Q_CMD_ACTIVATE: 4945 case BNX2X_Q_CMD_UPDATE: 4946 case BNX2X_Q_CMD_UPDATE_TPA: 4947 case BNX2X_Q_CMD_HALT: 4948 case BNX2X_Q_CMD_CFC_DEL: 4949 case BNX2X_Q_CMD_TERMINATE: 4950 case BNX2X_Q_CMD_EMPTY: 4951 return bnx2x_queue_send_cmd_cmn(bp, params); 4952 default: 4953 BNX2X_ERR("Unknown command: %d\n", params->cmd); 4954 return -EINVAL; 4955 } 4956 } 4957 4958 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp, 4959 struct bnx2x_queue_state_params *params) 4960 { 4961 switch (params->cmd) { 4962 case BNX2X_Q_CMD_SETUP: 4963 return bnx2x_q_send_setup_e2(bp, params); 4964 case BNX2X_Q_CMD_INIT: 4965 case BNX2X_Q_CMD_SETUP_TX_ONLY: 4966 case BNX2X_Q_CMD_DEACTIVATE: 4967 case BNX2X_Q_CMD_ACTIVATE: 4968 case BNX2X_Q_CMD_UPDATE: 4969 case BNX2X_Q_CMD_UPDATE_TPA: 4970 case BNX2X_Q_CMD_HALT: 4971 case BNX2X_Q_CMD_CFC_DEL: 4972 case BNX2X_Q_CMD_TERMINATE: 4973 case BNX2X_Q_CMD_EMPTY: 4974 return bnx2x_queue_send_cmd_cmn(bp, params); 4975 default: 4976 BNX2X_ERR("Unknown command: %d\n", params->cmd); 4977 return -EINVAL; 4978 } 4979 } 4980 4981 /** 4982 * bnx2x_queue_chk_transition - check state machine of a regular Queue 4983 * 4984 * @bp: device handle 4985 * @o: queue state object 4986 * @params: queue state parameters 4987 * 4988 * (not Forwarding) 4989 * It both checks if the requested command is legal in the current 4990 * state and, if it's legal, sets a `next_state' in the object 4991 * that will be used in the completion flow to set the `state' 4992 * of the object. 4993 * 4994 * returns 0 if a requested command is a legal transition, 4995 * -EINVAL otherwise. 4996 */ 4997 static int bnx2x_queue_chk_transition(struct bnx2x *bp, 4998 struct bnx2x_queue_sp_obj *o, 4999 struct bnx2x_queue_state_params *params) 5000 { 5001 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX; 5002 enum bnx2x_queue_cmd cmd = params->cmd; 5003 struct bnx2x_queue_update_params *update_params = 5004 &params->params.update; 5005 u8 next_tx_only = o->num_tx_only; 5006 5007 /* 5008 * Forget all pending for completion commands if a driver only state 5009 * transition has been requested. 5010 */ 5011 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { 5012 o->pending = 0; 5013 o->next_state = BNX2X_Q_STATE_MAX; 5014 } 5015 5016 /* 5017 * Don't allow a next state transition if we are in the middle of 5018 * the previous one.
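 *
 * For reference, the main arcs of the state machine checked below
 * (the tx-only / MULTI_COS arcs are omitted):
 *
 *	RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *	ACTIVE <--ACTIVATE/DEACTIVATE--> INACTIVE
 *	ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *	TERMINATED --CFC_DEL--> RESET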
5019 */ 5020 if (o->pending) 5021 return -EBUSY; 5022 5023 switch (state) { 5024 case BNX2X_Q_STATE_RESET: 5025 if (cmd == BNX2X_Q_CMD_INIT) 5026 next_state = BNX2X_Q_STATE_INITIALIZED; 5027 5028 break; 5029 case BNX2X_Q_STATE_INITIALIZED: 5030 if (cmd == BNX2X_Q_CMD_SETUP) { 5031 if (test_bit(BNX2X_Q_FLG_ACTIVE, 5032 &params->params.setup.flags)) 5033 next_state = BNX2X_Q_STATE_ACTIVE; 5034 else 5035 next_state = BNX2X_Q_STATE_INACTIVE; 5036 } 5037 5038 break; 5039 case BNX2X_Q_STATE_ACTIVE: 5040 if (cmd == BNX2X_Q_CMD_DEACTIVATE) 5041 next_state = BNX2X_Q_STATE_INACTIVE; 5042 5043 else if ((cmd == BNX2X_Q_CMD_EMPTY) || 5044 (cmd == BNX2X_Q_CMD_UPDATE_TPA)) 5045 next_state = BNX2X_Q_STATE_ACTIVE; 5046 5047 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) { 5048 next_state = BNX2X_Q_STATE_MULTI_COS; 5049 next_tx_only = 1; 5050 } 5051 5052 else if (cmd == BNX2X_Q_CMD_HALT) 5053 next_state = BNX2X_Q_STATE_STOPPED; 5054 5055 else if (cmd == BNX2X_Q_CMD_UPDATE) { 5056 /* If "active" state change is requested, update the 5057 * state accordingly. 5058 */ 5059 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, 5060 &update_params->update_flags) && 5061 !test_bit(BNX2X_Q_UPDATE_ACTIVATE, 5062 &update_params->update_flags)) 5063 next_state = BNX2X_Q_STATE_INACTIVE; 5064 else 5065 next_state = BNX2X_Q_STATE_ACTIVE; 5066 } 5067 5068 break; 5069 case BNX2X_Q_STATE_MULTI_COS: 5070 if (cmd == BNX2X_Q_CMD_TERMINATE) 5071 next_state = BNX2X_Q_STATE_MCOS_TERMINATED; 5072 5073 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) { 5074 next_state = BNX2X_Q_STATE_MULTI_COS; 5075 next_tx_only = o->num_tx_only + 1; 5076 } 5077 5078 else if ((cmd == BNX2X_Q_CMD_EMPTY) || 5079 (cmd == BNX2X_Q_CMD_UPDATE_TPA)) 5080 next_state = BNX2X_Q_STATE_MULTI_COS; 5081 5082 else if (cmd == BNX2X_Q_CMD_UPDATE) { 5083 /* If "active" state change is requested, update the 5084 * state accordingly. 5085 */ 5086 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, 5087 &update_params->update_flags) && 5088 !test_bit(BNX2X_Q_UPDATE_ACTIVATE, 5089 &update_params->update_flags)) 5090 next_state = BNX2X_Q_STATE_INACTIVE; 5091 else 5092 next_state = BNX2X_Q_STATE_MULTI_COS; 5093 } 5094 5095 break; 5096 case BNX2X_Q_STATE_MCOS_TERMINATED: 5097 if (cmd == BNX2X_Q_CMD_CFC_DEL) { 5098 next_tx_only = o->num_tx_only - 1; 5099 if (next_tx_only == 0) 5100 next_state = BNX2X_Q_STATE_ACTIVE; 5101 else 5102 next_state = BNX2X_Q_STATE_MULTI_COS; 5103 } 5104 5105 break; 5106 case BNX2X_Q_STATE_INACTIVE: 5107 if (cmd == BNX2X_Q_CMD_ACTIVATE) 5108 next_state = BNX2X_Q_STATE_ACTIVE; 5109 5110 else if ((cmd == BNX2X_Q_CMD_EMPTY) || 5111 (cmd == BNX2X_Q_CMD_UPDATE_TPA)) 5112 next_state = BNX2X_Q_STATE_INACTIVE; 5113 5114 else if (cmd == BNX2X_Q_CMD_HALT) 5115 next_state = BNX2X_Q_STATE_STOPPED; 5116 5117 else if (cmd == BNX2X_Q_CMD_UPDATE) { 5118 /* If "active" state change is requested, update the 5119 * state accordingly.
5120 */ 5121 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, 5122 &update_params->update_flags) && 5123 test_bit(BNX2X_Q_UPDATE_ACTIVATE, 5124 &update_params->update_flags)) { 5125 if (o->num_tx_only == 0) 5126 next_state = BNX2X_Q_STATE_ACTIVE; 5127 else /* tx only queues exist for this queue */ 5128 next_state = BNX2X_Q_STATE_MULTI_COS; 5129 } else 5130 next_state = BNX2X_Q_STATE_INACTIVE; 5131 } 5132 5133 break; 5134 case BNX2X_Q_STATE_STOPPED: 5135 if (cmd == BNX2X_Q_CMD_TERMINATE) 5136 next_state = BNX2X_Q_STATE_TERMINATED; 5137 5138 break; 5139 case BNX2X_Q_STATE_TERMINATED: 5140 if (cmd == BNX2X_Q_CMD_CFC_DEL) 5141 next_state = BNX2X_Q_STATE_RESET; 5142 5143 break; 5144 default: 5145 BNX2X_ERR("Illegal state: %d\n", state); 5146 } 5147 5148 /* Transition is assured */ 5149 if (next_state != BNX2X_Q_STATE_MAX) { 5150 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n", 5151 state, cmd, next_state); 5152 o->next_state = next_state; 5153 o->next_tx_only = next_tx_only; 5154 return 0; 5155 } 5156 5157 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd); 5158 5159 return -EINVAL; 5160 } 5161 5162 void bnx2x_init_queue_obj(struct bnx2x *bp, 5163 struct bnx2x_queue_sp_obj *obj, 5164 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id, 5165 void *rdata, 5166 dma_addr_t rdata_mapping, unsigned long type) 5167 { 5168 memset(obj, 0, sizeof(*obj)); 5169 5170 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */ 5171 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt); 5172 5173 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt); 5174 obj->max_cos = cid_cnt; 5175 obj->cl_id = cl_id; 5176 obj->func_id = func_id; 5177 obj->rdata = rdata; 5178 obj->rdata_mapping = rdata_mapping; 5179 obj->type = type; 5180 obj->next_state = BNX2X_Q_STATE_MAX; 5181 5182 if (CHIP_IS_E1x(bp)) 5183 obj->send_cmd = bnx2x_queue_send_cmd_e1x; 5184 else 5185 obj->send_cmd = bnx2x_queue_send_cmd_e2; 5186 5187 obj->check_transition = bnx2x_queue_chk_transition; 5188 5189 obj->complete_cmd = bnx2x_queue_comp_cmd; 5190 obj->wait_comp = bnx2x_queue_wait_comp; 5191 obj->set_pending = bnx2x_queue_set_pending; 5192 } 5193 5194 /********************** Function state object *********************************/ 5195 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp, 5196 struct bnx2x_func_sp_obj *o) 5197 { 5198 /* in the middle of a transaction - return INVALID state */ 5199 if (o->pending) 5200 return BNX2X_F_STATE_MAX; 5201 5202 /* 5203 * ensure the order of reading o->pending and o->state: 5204 * o->pending should be read first 5205 */ 5206 rmb(); 5207 5208 return o->state; 5209 } 5210 5211 static int bnx2x_func_wait_comp(struct bnx2x *bp, 5212 struct bnx2x_func_sp_obj *o, 5213 enum bnx2x_func_cmd cmd) 5214 { 5215 return bnx2x_state_wait(bp, cmd, &o->pending); 5216 } 5217 5218 /** 5219 * bnx2x_func_state_change_comp - complete the state machine transition 5220 * 5221 * @bp: device handle 5222 * @o: function state object 5223 * @cmd: command that has completed 5224 * 5225 * Called on state change transition. Completes the state 5226 * machine transition only - no HW interaction.
5227 */ 5228 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp, 5229 struct bnx2x_func_sp_obj *o, 5230 enum bnx2x_func_cmd cmd) 5231 { 5232 unsigned long cur_pending = o->pending; 5233 5234 if (!test_and_clear_bit(cmd, &cur_pending)) { 5235 BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n", 5236 cmd, BP_FUNC(bp), o->state, 5237 cur_pending, o->next_state); 5238 return -EINVAL; 5239 } 5240 5241 DP(BNX2X_MSG_SP, 5242 "Completing command %d for func %d, setting state to %d\n", 5243 cmd, BP_FUNC(bp), o->next_state); 5244 5245 o->state = o->next_state; 5246 o->next_state = BNX2X_F_STATE_MAX; 5247 5248 /* It's important that o->state and o->next_state are 5249 * updated before o->pending. 5250 */ 5251 wmb(); 5252 5253 clear_bit(cmd, &o->pending); 5254 smp_mb__after_clear_bit(); 5255 5256 return 0; 5257 } 5258 5259 /** 5260 * bnx2x_func_comp_cmd - complete the state change command 5261 * 5262 * @bp: device handle 5263 * @o: function state object 5264 * @cmd: command that has completed 5265 * 5266 * Checks that the arrived completion is expected. 5267 */ 5268 static int bnx2x_func_comp_cmd(struct bnx2x *bp, 5269 struct bnx2x_func_sp_obj *o, 5270 enum bnx2x_func_cmd cmd) 5271 { 5272 /* Complete the state machine part first, check if it's a 5273 * legal completion. 5274 */ 5275 int rc = bnx2x_func_state_change_comp(bp, o, cmd); 5276 return rc; 5277 } 5278 5279 /** 5280 * bnx2x_func_chk_transition - check the function state machine transition 5281 * 5282 * @bp: device handle 5283 * @o: function state object 5284 * @params: function state parameters 5285 * 5286 * It both checks if the requested command is legal in the current 5287 * state and, if it's legal, sets a `next_state' in the object 5288 * that will be used in the completion flow to set the `state' 5289 * of the object. 5290 * 5291 * returns 0 if a requested command is a legal transition, 5292 * -EINVAL otherwise. 5293 */ 5294 static int bnx2x_func_chk_transition(struct bnx2x *bp, 5295 struct bnx2x_func_sp_obj *o, 5296 struct bnx2x_func_state_params *params) 5297 { 5298 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX; 5299 enum bnx2x_func_cmd cmd = params->cmd; 5300 5301 /* 5302 * Forget all pending for completion commands if a driver only state 5303 * transition has been requested. 5304 */ 5305 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { 5306 o->pending = 0; 5307 o->next_state = BNX2X_F_STATE_MAX; 5308 } 5309 5310 /* 5311 * Don't allow a next state transition if we are in the middle of 5312 * the previous one.
5313 */ 5314 if (o->pending) 5315 return -EBUSY; 5316 5317 switch (state) { 5318 case BNX2X_F_STATE_RESET: 5319 if (cmd == BNX2X_F_CMD_HW_INIT) 5320 next_state = BNX2X_F_STATE_INITIALIZED; 5321 5322 break; 5323 case BNX2X_F_STATE_INITIALIZED: 5324 if (cmd == BNX2X_F_CMD_START) 5325 next_state = BNX2X_F_STATE_STARTED; 5326 5327 else if (cmd == BNX2X_F_CMD_HW_RESET) 5328 next_state = BNX2X_F_STATE_RESET; 5329 5330 break; 5331 case BNX2X_F_STATE_STARTED: 5332 if (cmd == BNX2X_F_CMD_STOP) 5333 next_state = BNX2X_F_STATE_INITIALIZED; 5334 else if (cmd == BNX2X_F_CMD_TX_STOP) 5335 next_state = BNX2X_F_STATE_TX_STOPPED; 5336 5337 break; 5338 case BNX2X_F_STATE_TX_STOPPED: 5339 if (cmd == BNX2X_F_CMD_TX_START) 5340 next_state = BNX2X_F_STATE_STARTED; 5341 5342 break; 5343 default: 5344 BNX2X_ERR("Unknown state: %d\n", state); 5345 } 5346 5347 /* Transition is assured */ 5348 if (next_state != BNX2X_F_STATE_MAX) { 5349 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n", 5350 state, cmd, next_state); 5351 o->next_state = next_state; 5352 return 0; 5353 } 5354 5355 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n", 5356 state, cmd); 5357 5358 return -EINVAL; 5359 } 5360 5361 /** 5362 * bnx2x_func_init_func - performs HW init at function stage 5363 * 5364 * @bp: device handle 5365 * @drv: driver HW-specific operations 5366 * 5367 * Init HW when the current phase is 5368 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only 5369 * HW blocks. 5370 */ 5371 static inline int bnx2x_func_init_func(struct bnx2x *bp, 5372 const struct bnx2x_func_sp_drv_ops *drv) 5373 { 5374 return drv->init_hw_func(bp); 5375 } 5376 5377 /** 5378 * bnx2x_func_init_port - performs HW init at port stage 5379 * 5380 * @bp: device handle 5381 * @drv: driver HW-specific operations 5382 * 5383 * Init HW when the current phase is 5384 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and 5385 * FUNCTION-only HW blocks. 5386 * 5387 */ 5388 static inline int bnx2x_func_init_port(struct bnx2x *bp, 5389 const struct bnx2x_func_sp_drv_ops *drv) 5390 { 5391 int rc = drv->init_hw_port(bp); 5392 if (rc) 5393 return rc; 5394 5395 return bnx2x_func_init_func(bp, drv); 5396 } 5397 5398 /** 5399 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage 5400 * 5401 * @bp: device handle 5402 * @drv: driver HW-specific operations 5403 * 5404 * Init HW when the current phase is 5405 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP, 5406 * PORT-only and FUNCTION-only HW blocks. 5407 */ 5408 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp, 5409 const struct bnx2x_func_sp_drv_ops *drv) 5410 { 5411 int rc = drv->init_hw_cmn_chip(bp); 5412 if (rc) 5413 return rc; 5414 5415 return bnx2x_func_init_port(bp, drv); 5416 } 5417 5418 /** 5419 * bnx2x_func_init_cmn - performs HW init at common stage 5420 * 5421 * @bp: device handle 5422 * @drv: driver HW-specific operations 5423 * 5424 * Init HW when the current phase is 5425 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON, 5426 * PORT-only and FUNCTION-only HW blocks.
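 *
 * Note that the load phases nest: COMMON (or COMMON_CHIP) init falls
 * through to PORT init, which in turn falls through to FUNCTION init,
 * so each phase also performs all the narrower phases below it.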
5427 */ 5428 static inline int bnx2x_func_init_cmn(struct bnx2x *bp, 5429 const struct bnx2x_func_sp_drv_ops *drv) 5430 { 5431 int rc = drv->init_hw_cmn(bp); 5432 if (rc) 5433 return rc; 5434 5435 return bnx2x_func_init_port(bp, drv); 5436 } 5437 5438 static int bnx2x_func_hw_init(struct bnx2x *bp, 5439 struct bnx2x_func_state_params *params) 5440 { 5441 u32 load_code = params->params.hw_init.load_phase; 5442 struct bnx2x_func_sp_obj *o = params->f_obj; 5443 const struct bnx2x_func_sp_drv_ops *drv = o->drv; 5444 int rc = 0; 5445 5446 DP(BNX2X_MSG_SP, "function %d load_code %x\n", 5447 BP_ABS_FUNC(bp), load_code); 5448 5449 /* Prepare buffers for unzipping the FW */ 5450 rc = drv->gunzip_init(bp); 5451 if (rc) 5452 return rc; 5453 5454 /* Prepare FW */ 5455 rc = drv->init_fw(bp); 5456 if (rc) { 5457 BNX2X_ERR("Error loading firmware\n"); 5458 goto init_err; 5459 } 5460 5461 /* Handle the beginning of COMMON_XXX phases separately... */ 5462 switch (load_code) { 5463 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 5464 rc = bnx2x_func_init_cmn_chip(bp, drv); 5465 if (rc) 5466 goto init_err; 5467 5468 break; 5469 case FW_MSG_CODE_DRV_LOAD_COMMON: 5470 rc = bnx2x_func_init_cmn(bp, drv); 5471 if (rc) 5472 goto init_err; 5473 5474 break; 5475 case FW_MSG_CODE_DRV_LOAD_PORT: 5476 rc = bnx2x_func_init_port(bp, drv); 5477 if (rc) 5478 goto init_err; 5479 5480 break; 5481 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 5482 rc = bnx2x_func_init_func(bp, drv); 5483 if (rc) 5484 goto init_err; 5485 5486 break; 5487 default: 5488 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); 5489 rc = -EINVAL; 5490 } 5491 5492 init_err: 5493 drv->gunzip_end(bp); 5494 5495 /* In case of success, complete the command immediately: no ramrods 5496 * have been sent. 5497 */ 5498 if (!rc) 5499 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT); 5500 5501 return rc; 5502 } 5503 5504 /** 5505 * bnx2x_func_reset_func - reset HW at function stage 5506 * 5507 * @bp: device handle 5508 * @drv: driver HW-specific operations 5509 * 5510 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only 5511 * FUNCTION-only HW blocks. 5512 */ 5513 static inline void bnx2x_func_reset_func(struct bnx2x *bp, 5514 const struct bnx2x_func_sp_drv_ops *drv) 5515 { 5516 drv->reset_hw_func(bp); 5517 } 5518 5519 /** 5520 * bnx2x_func_reset_port - reset HW at port stage 5521 * 5522 * @bp: device handle 5523 * @drv: driver HW-specific operations 5524 * 5525 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset 5526 * FUNCTION-only and PORT-only HW blocks. 5527 * 5528 * !!!IMPORTANT!!! 5529 * 5530 * It's important to call reset_port before reset_func() as the last thing 5531 * reset_func() does is pf_disable(), which disables PGLUE_B and thus 5532 * makes any further DMAE transactions impossible. 5533 */ 5534 static inline void bnx2x_func_reset_port(struct bnx2x *bp, 5535 const struct bnx2x_func_sp_drv_ops *drv) 5536 { 5537 drv->reset_hw_port(bp); 5538 bnx2x_func_reset_func(bp, drv); 5539 } 5540 5541 /** 5542 * bnx2x_func_reset_cmn - reset HW at common stage 5543 * 5544 * @bp: device handle 5545 * @drv: driver HW-specific operations 5546 * 5547 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and 5548 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON, 5549 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
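 *
 * Like the init path, the reset phases nest: a COMMON reset falls
 * through to the PORT reset, which in turn falls through to the
 * FUNCTION reset (in the order required by the IMPORTANT note at
 * bnx2x_func_reset_port()).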
5550 */ 5551 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp, 5552 const struct bnx2x_func_sp_drv_ops *drv) 5553 { 5554 bnx2x_func_reset_port(bp, drv); 5555 drv->reset_hw_cmn(bp); 5556 } 5557 5558 5559 static inline int bnx2x_func_hw_reset(struct bnx2x *bp, 5560 struct bnx2x_func_state_params *params) 5561 { 5562 u32 reset_phase = params->params.hw_reset.reset_phase; 5563 struct bnx2x_func_sp_obj *o = params->f_obj; 5564 const struct bnx2x_func_sp_drv_ops *drv = o->drv; 5565 5566 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp), 5567 reset_phase); 5568 5569 switch (reset_phase) { 5570 case FW_MSG_CODE_DRV_UNLOAD_COMMON: 5571 bnx2x_func_reset_cmn(bp, drv); 5572 break; 5573 case FW_MSG_CODE_DRV_UNLOAD_PORT: 5574 bnx2x_func_reset_port(bp, drv); 5575 break; 5576 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: 5577 bnx2x_func_reset_func(bp, drv); 5578 break; 5579 default: 5580 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n", 5581 reset_phase); 5582 break; 5583 } 5584 5585 /* Complete the command immediately: no ramrods have been sent. */ 5586 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); 5587 5588 return 0; 5589 } 5590 5591 static inline int bnx2x_func_send_start(struct bnx2x *bp, 5592 struct bnx2x_func_state_params *params) 5593 { 5594 struct bnx2x_func_sp_obj *o = params->f_obj; 5595 struct function_start_data *rdata = 5596 (struct function_start_data *)o->rdata; 5597 dma_addr_t data_mapping = o->rdata_mapping; 5598 struct bnx2x_func_start_params *start_params = &params->params.start; 5599 5600 memset(rdata, 0, sizeof(*rdata)); 5601 5602 /* Fill the ramrod data with provided parameters */ 5603 rdata->function_mode = cpu_to_le16(start_params->mf_mode); 5604 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); 5605 rdata->path_id = BP_PATH(bp); 5606 rdata->network_cos_mode = start_params->network_cos_mode; 5607 5608 /* 5609 * No need for an explicit memory barrier here: the ordering of 5610 * writing to the SPQ element and updating the SPQ producer must 5611 * be ensured in any case, and the full memory barrier that 5612 * provides that ordering already lives there 5613 * (inside bnx2x_sp_post()).
5614 */ 5615 5616 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 5617 U64_HI(data_mapping), 5618 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5619 } 5620 5621 static inline int bnx2x_func_send_stop(struct bnx2x *bp, 5622 struct bnx2x_func_state_params *params) 5623 { 5624 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 5625 NONE_CONNECTION_TYPE); 5626 } 5627 5628 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp, 5629 struct bnx2x_func_state_params *params) 5630 { 5631 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0, 5632 NONE_CONNECTION_TYPE); 5633 } 5634 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp, 5635 struct bnx2x_func_state_params *params) 5636 { 5637 struct bnx2x_func_sp_obj *o = params->f_obj; 5638 struct flow_control_configuration *rdata = 5639 (struct flow_control_configuration *)o->rdata; 5640 dma_addr_t data_mapping = o->rdata_mapping; 5641 struct bnx2x_func_tx_start_params *tx_start_params = 5642 &params->params.tx_start; 5643 int i; 5644 5645 memset(rdata, 0, sizeof(*rdata)); 5646 5647 rdata->dcb_enabled = tx_start_params->dcb_enabled; 5648 rdata->dcb_version = tx_start_params->dcb_version; 5649 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en; 5650 5651 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++) 5652 rdata->traffic_type_to_priority_cos[i] = 5653 tx_start_params->traffic_type_to_priority_cos[i]; 5654 5655 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, 5656 U64_HI(data_mapping), 5657 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5658 } 5659 5660 static int bnx2x_func_send_cmd(struct bnx2x *bp, 5661 struct bnx2x_func_state_params *params) 5662 { 5663 switch (params->cmd) { 5664 case BNX2X_F_CMD_HW_INIT: 5665 return bnx2x_func_hw_init(bp, params); 5666 case BNX2X_F_CMD_START: 5667 return bnx2x_func_send_start(bp, params); 5668 case BNX2X_F_CMD_STOP: 5669 return bnx2x_func_send_stop(bp, params); 5670 case BNX2X_F_CMD_HW_RESET: 5671 return bnx2x_func_hw_reset(bp, params); 5672 case BNX2X_F_CMD_TX_STOP: 5673 return bnx2x_func_send_tx_stop(bp, params); 5674 case BNX2X_F_CMD_TX_START: 5675 return bnx2x_func_send_tx_start(bp, params); 5676 default: 5677 BNX2X_ERR("Unknown command: %d\n", params->cmd); 5678 return -EINVAL; 5679 } 5680 } 5681 5682 void bnx2x_init_func_obj(struct bnx2x *bp, 5683 struct bnx2x_func_sp_obj *obj, 5684 void *rdata, dma_addr_t rdata_mapping, 5685 struct bnx2x_func_sp_drv_ops *drv_iface) 5686 { 5687 memset(obj, 0, sizeof(*obj)); 5688 5689 mutex_init(&obj->one_pending_mutex); 5690 5691 obj->rdata = rdata; 5692 obj->rdata_mapping = rdata_mapping; 5693 5694 obj->send_cmd = bnx2x_func_send_cmd; 5695 obj->check_transition = bnx2x_func_chk_transition; 5696 obj->complete_cmd = bnx2x_func_comp_cmd; 5697 obj->wait_comp = bnx2x_func_wait_comp; 5698 5699 obj->drv = drv_iface; 5700 } 5701 5702 /** 5703 * bnx2x_func_state_change - perform Function state change transition 5704 * 5705 * @bp: device handle 5706 * @params: parameters to perform the transition 5707 * 5708 * returns 0 in case of successfully completed transition, 5709 * negative error code in case of failure, positive 5710 * (EBUSY) value if there is a completion that is 5711 * still pending (possible only if RAMROD_COMP_WAIT is 5712 * not set in params->ramrod_flags for asynchronous 5713 * commands).
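 *
 * Usage sketch (illustration only; the field name bp->func_obj is an
 * assumption of this example, not taken from this file):
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_TX_STOP;
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	rc = bnx2x_func_state_change(bp, &func_params);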
5714 */ 5715 int bnx2x_func_state_change(struct bnx2x *bp, 5716 struct bnx2x_func_state_params *params) 5717 { 5718 struct bnx2x_func_sp_obj *o = params->f_obj; 5719 int rc; 5720 enum bnx2x_func_cmd cmd = params->cmd; 5721 unsigned long *pending = &o->pending; 5722 5723 mutex_lock(&o->one_pending_mutex); 5724 5725 /* Check that the requested transition is legal */ 5726 if (o->check_transition(bp, o, params)) { 5727 mutex_unlock(&o->one_pending_mutex); 5728 return -EINVAL; 5729 } 5730 5731 /* Set "pending" bit */ 5732 set_bit(cmd, pending); 5733 5734 /* Don't send a command if only driver cleanup was requested */ 5735 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) { 5736 bnx2x_func_state_change_comp(bp, o, cmd); 5737 mutex_unlock(&o->one_pending_mutex); 5738 } else { 5739 /* Send a ramrod */ 5740 rc = o->send_cmd(bp, params); 5741 5742 mutex_unlock(&o->one_pending_mutex); 5743 5744 if (rc) { 5745 o->next_state = BNX2X_F_STATE_MAX; 5746 clear_bit(cmd, pending); 5747 smp_mb__after_clear_bit(); 5748 return rc; 5749 } 5750 5751 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) { 5752 rc = o->wait_comp(bp, o, cmd); 5753 if (rc) 5754 return rc; 5755 5756 return 0; 5757 } 5758 } 5759 5760 return !!test_bit(cmd, pending); 5761 }
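/*
 * Usage sketch (not part of the driver): one way a caller might drive
 * bnx2x_config_rss() above. The rss object pointer (bp->rss_conf_obj)
 * and the 0x7f result mask are illustrative assumptions, not values
 * defined in this file. Kept under #if 0 so it is never compiled.
 */
#if 0
static int example_config_rss(struct bnx2x *bp, u8 *ind_table)
{
	struct bnx2x_config_rss_params params = {NULL};

	params.rss_obj = &bp->rss_conf_obj;

	/* Wait for the RSS UPDATE ramrod completion */
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	/* Regular hashing on IPv4/IPv6 and on TCP over both */
	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);

	/* Low 7 bits of the hash select an indirection table entry */
	params.rss_result_mask = 0x7f;

	memcpy(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	return bnx2x_config_rss(bp, &params);
}
#endif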