/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2011-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI		16

#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)

/**** Exe Queue interfaces ****/

/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @bp:		driver handle
 * @o:		pointer to the object
 * @exe_len:	length
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @remove:	remove function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
					struct bnx2x_exe_queue_obj *o,
					int exe_len,
					union bnx2x_qable_obj *owner,
					exe_q_validate validate,
					exe_q_remove remove,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	memset(o, 0, sizeof(*o));

	INIT_LIST_HEAD(&o->exe_queue);
	INIT_LIST_HEAD(&o->pending_comp);

	spin_lock_init(&o->lock);

	o->exe_chunk_len = exe_len;
	o->owner	 = owner;

	/* Owner specific callbacks */
	o->validate	 = validate;
	o->remove	 = remove;
	o->optimize	 = optimize;
	o->execute	 = exec;
	o->get		 = get;

	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
	   exe_len);
}

static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}

static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;
	int cnt = 0;

	spin_lock_bh(&o->lock);

	list_for_each_entry(elem, &o->exe_queue, link)
		cnt++;

	spin_unlock_bh(&o->lock);

	return cnt;
}
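/*
 * Illustrative lifecycle (a sketch, not driver code): an owner typically
 * allocates an element, queues it and then steps the queue. All names below
 * exist in this file; 'q' stands for any initialized
 * struct bnx2x_exe_queue_obj and 'flags' is a hypothetical caller-owned
 * ramrod_flags bitmap.
 *
 *	struct bnx2x_exeq_elem *elem = bnx2x_exe_queue_alloc_elem(bp);
 *	unsigned long flags = 0;
 *
 *	if (elem) {
 *		elem->cmd_len = 1;	(cmd_data is filled by the owner)
 *		if (!bnx2x_exe_queue_add(bp, q, elem, false))
 *			bnx2x_exe_queue_step(bp, q, &flags);
 *	}
 */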
/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp:		driver handle
 * @o:		queue
 * @elem:	new command to add
 * @restore:	true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
				      struct bnx2x_exe_queue_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      bool restore)
{
	int rc;

	spin_lock_bh(&o->lock);

	if (!restore) {
		/* Try to cancel this element (optimize it away) */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock_bh(&o->lock);

	return 0;

free_and_exit:
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock_bh(&o->lock);

	return rc;
}

static inline void __bnx2x_exe_queue_reset_pending(
	struct bnx2x *bp,
	struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;

	while (!list_empty(&o->pending_comp)) {
		elem = list_first_entry(&o->pending_comp,
					struct bnx2x_exeq_elem, link);

		list_del(&elem->link);
		bnx2x_exe_queue_free_elem(bp, elem);
	}
}

static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
						 struct bnx2x_exe_queue_obj *o)
{
	spin_lock_bh(&o->lock);

	__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
}

/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:			driver handle
 * @o:			queue
 * @ramrod_flags:	flags
 *
 * (Atomicity is ensured using the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	spin_lock_bh(&o->lock);

	/*
	 * Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the
	 * FW, which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			spin_unlock_bh(&o->lock);
			return 1;
		}
	}

	/*
	 * Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/*
			 * Prevent both lists from being empty when moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_move_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len) {
		spin_unlock_bh(&o->lock);
		return 0;
	}

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/*
		 * In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/*
		 * If zero is returned, it means there are no outstanding
		 * pending completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
	return rc;
}
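/*
 * Note on the 'spacer' above (explanatory, not a functional change): while
 * an element migrates from exe_queue to pending_comp, the spacer guarantees
 * that at no instant both lists are empty, so a lockless
 * bnx2x_exe_queue_empty() below cannot transiently observe a drained queue
 * that in fact still holds work. The mb() orders the spacer insertion
 * against the subsequent list_move_tail() for such lockless readers.
 */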
static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && list_empty(&o->pending_comp);
}

static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
	struct bnx2x *bp)
{
	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}

/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
	return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}
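/*
 * Illustrative pending-bit protocol (a sketch, not driver code): a raw
 * object marks a ramrod in flight with set_pending(), the completion path
 * calls clear_pending(), and a waiter polls the bit via bnx2x_state_wait()
 * below. 'raw' stands for any initialized struct bnx2x_raw_obj.
 *
 *	raw->set_pending(raw);
 *	... post the ramrod to the FW ...
 *	rc = raw->wait_comp(bp, raw);	(returns once the completion
 *					 handler has cleared the bit)
 */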
/**
 * bnx2x_state_wait - wait until the given bit(state) is cleared
 *
 * @bp:		device handle
 * @state:	state which is to be cleared
 * @pstate:	state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 1000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get(vp, 1);
}

static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->get(mp, 1))
		return false;

	if (!vp->get(vp, 1)) {
		mp->put(mp, 1);
		return false;
	}

	return true;
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put(vp, 1);
}

static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->put(mp, 1))
		return false;

	if (!vp->put(vp, 1)) {
		mp->get(mp, 1);
		return false;
	}

	return true;
}

static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
				int n, u8 *buf)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	u8 *next = buf;
	int counter = 0;

	/* traverse list */
	list_for_each_entry(pos, &o->head, link) {
		if (counter < n) {
			/* place leading zeroes in buffer */
			memset(next, 0, MAC_LEADING_ZERO_CNT);

			/* place mac after leading zeroes */
			memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
			       ETH_ALEN);

			/* calculate address of next element and
			 * advance counter
			 */
			counter++;
			next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));

			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
			   counter, next, pos->u.mac.mac);
		}
	}
	return counter * ETH_ALEN;
}
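/*
 * Worked example (illustrative): with ETH_ALEN == 6 and sizeof(u32) == 4,
 * ALIGN(6, 4) == 8, so MAC_LEADING_ZERO_CNT == 2 and every entry that
 * bnx2x_get_n_elements() writes occupies 8 bytes in 'buf':
 *
 *	offset 0:  00 00 m0 m1 m2 m3 m4 m5	(first MAC)
 *	offset 8:  00 00 m0 m1 m2 m3 m4 m5	(second MAC)
 *	...
 *
 * The return value counts only the MAC payload (counter * ETH_ALEN), not
 * the padded stride.
 */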
/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o,
			       union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);

	if (!is_valid_ether_addr(data->mac.mac))
		return -EINVAL;

	/* Check if a requested MAC already exists */
	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_add(struct bnx2x *bp,
				struct bnx2x_vlan_mac_obj *o,
				union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
				    struct bnx2x_vlan_mac_obj *o,
				    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
	   data->vlan_mac.mac, data->vlan_mac.vlan);

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return -EEXIST;

	return 0;
}

/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_mac_del(struct bnx2x *bp,
			    struct bnx2x_vlan_mac_obj *o,
			    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);

	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_del(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_mac_del(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
	   data->vlan_mac.mac, data->vlan_mac.vlan);

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return pos;

	return NULL;
}

/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *src_o,
			     struct bnx2x_vlan_mac_obj *dst_o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(bp, src_o, data);

	/* check if configuration can be added */
	rc = dst_o->check_add(bp, dst_o, data);

	/* If this classification cannot be added (is already set)
	 * or cannot be deleted - return an error.
	 */
	if (rc || !pos)
		return false;

	return true;
}

static bool bnx2x_check_move_always_err(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}
static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	u8 rx_tx_flag = 0;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}

void bnx2x_set_mac_in_nig(struct bnx2x *bp,
			  bool add, unsigned char *dev_addr, int index)
{
	u32 wb_data[2];
	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
		return;

	if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
	   (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register */
		reg_offset += 8*index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) |  dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}

/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue for which we want to configure this rule
 * @add:	if true the command is an ADD command, DEL otherwise
 * @opcode:	CLASSIFY_RULE_OPCODE_XXX
 * @hdr:	pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
	struct eth_classify_cmd_header *hdr)
{
	struct bnx2x_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx and/or Tx (internal switching) configuration? */
	hdr->cmd_general_data |=
		bnx2x_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}
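/*
 * Illustrative composition of cmd_general_data (a sketch; the flag names
 * are the real ones used above): for an ADD of a MAC rule on an RX+TX
 * object the header ends up carrying
 *
 *	ETH_CLASSIFY_CMD_HEADER_RX_CMD | ETH_CLASSIFY_CMD_HEADER_TX_CMD |
 *	ETH_CLASSIFY_CMD_HEADER_IS_ADD |
 *	(CLASSIFY_RULE_OPCODE_MAC << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT)
 */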
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:	connection id
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @hdr:	pointer to header to setup
 * @rule_cnt:	number of rules in this ramrod data
 *
 * We currently always configure one rule; the echo field is set to contain
 * a CID and an opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
	hdr->rule_cnt = (u8)rule_cnt;
}

/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/*
	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When a PF must configure multiple unicast ETH MACs in switch
	 * independent mode (NetQ, multiple netdev MACs, etc.), consider
	 * better utilisation of the 8 per-function MAC entries in the LLH
	 * register. There are also NIG_REG_P[01]_LLH_FUNC_MEM2 registers
	 * that complete the total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
	   (add ? "add" : "delete"), mac, raw->cl_id);

	/* Set a MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set a MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
"add" : "delete"), mac, raw->cl_id); 767 768 /* Set a MAC itself */ 769 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, 770 &rule_entry->mac.mac_mid, 771 &rule_entry->mac.mac_lsb, mac); 772 773 /* MOVE: Add a rule that will add this MAC to the target Queue */ 774 if (cmd == BNX2X_VLAN_MAC_MOVE) { 775 rule_entry++; 776 rule_cnt++; 777 778 /* Setup ramrod data */ 779 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, 780 elem->cmd_data.vlan_mac.target_obj, 781 true, CLASSIFY_RULE_OPCODE_MAC, 782 &rule_entry->mac.header); 783 784 /* Set a MAC itself */ 785 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, 786 &rule_entry->mac.mac_mid, 787 &rule_entry->mac.mac_lsb, mac); 788 } 789 790 /* Set the ramrod data header */ 791 /* TODO: take this to the higher level in order to prevent multiple 792 writing */ 793 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, 794 rule_cnt); 795 } 796 797 /** 798 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod 799 * 800 * @bp: device handle 801 * @o: queue 802 * @type: 803 * @cam_offset: offset in cam memory 804 * @hdr: pointer to a header to setup 805 * 806 * E1/E1H 807 */ 808 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp, 809 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, 810 struct mac_configuration_hdr *hdr) 811 { 812 struct bnx2x_raw_obj *r = &o->raw; 813 814 hdr->length = 1; 815 hdr->offset = (u8)cam_offset; 816 hdr->client_id = 0xff; 817 hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT)); 818 } 819 820 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp, 821 struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac, 822 u16 vlan_id, struct mac_configuration_entry *cfg_entry) 823 { 824 struct bnx2x_raw_obj *r = &o->raw; 825 u32 cl_bit_vec = (1 << r->cl_id); 826 827 cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec); 828 cfg_entry->pf_id = r->func_id; 829 cfg_entry->vlan_id = cpu_to_le16(vlan_id); 830 831 if (add) { 832 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 833 T_ETH_MAC_COMMAND_SET); 834 SET_FLAG(cfg_entry->flags, 835 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode); 836 837 /* Set a MAC in a ramrod data */ 838 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr, 839 &cfg_entry->middle_mac_addr, 840 &cfg_entry->lsb_mac_addr, mac); 841 } else 842 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 843 T_ETH_MAC_COMMAND_INVALIDATE); 844 } 845 846 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp, 847 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add, 848 u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config) 849 { 850 struct mac_configuration_entry *cfg_entry = &config->config_table[0]; 851 struct bnx2x_raw_obj *raw = &o->raw; 852 853 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset, 854 &config->hdr); 855 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id, 856 cfg_entry); 857 858 DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n", 859 (add ? 
"setting" : "clearing"), 860 mac, raw->cl_id, cam_offset); 861 } 862 863 /** 864 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data 865 * 866 * @bp: device handle 867 * @o: bnx2x_vlan_mac_obj 868 * @elem: bnx2x_exeq_elem 869 * @rule_idx: rule_idx 870 * @cam_offset: cam_offset 871 */ 872 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, 873 struct bnx2x_vlan_mac_obj *o, 874 struct bnx2x_exeq_elem *elem, int rule_idx, 875 int cam_offset) 876 { 877 struct bnx2x_raw_obj *raw = &o->raw; 878 struct mac_configuration_cmd *config = 879 (struct mac_configuration_cmd *)(raw->rdata); 880 /* 881 * 57710 and 57711 do not support MOVE command, 882 * so it's either ADD or DEL 883 */ 884 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 885 true : false; 886 887 /* Reset the ramrod data buffer */ 888 memset(config, 0, sizeof(*config)); 889 890 bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state, 891 cam_offset, add, 892 elem->cmd_data.vlan_mac.u.mac.mac, 0, 893 ETH_VLAN_FILTER_ANY_VLAN, config); 894 } 895 896 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, 897 struct bnx2x_vlan_mac_obj *o, 898 struct bnx2x_exeq_elem *elem, int rule_idx, 899 int cam_offset) 900 { 901 struct bnx2x_raw_obj *raw = &o->raw; 902 struct eth_classify_rules_ramrod_data *data = 903 (struct eth_classify_rules_ramrod_data *)(raw->rdata); 904 int rule_cnt = rule_idx + 1; 905 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; 906 int cmd = elem->cmd_data.vlan_mac.cmd; 907 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; 908 u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; 909 910 /* Reset the ramrod data buffer for the first rule */ 911 if (rule_idx == 0) 912 memset(data, 0, sizeof(*data)); 913 914 /* Set a rule header */ 915 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN, 916 &rule_entry->vlan.header); 917 918 DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"), 919 vlan); 920 921 /* Set a VLAN itself */ 922 rule_entry->vlan.vlan = cpu_to_le16(vlan); 923 924 /* MOVE: Add a rule that will add this MAC to the target Queue */ 925 if (cmd == BNX2X_VLAN_MAC_MOVE) { 926 rule_entry++; 927 rule_cnt++; 928 929 /* Setup ramrod data */ 930 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, 931 elem->cmd_data.vlan_mac.target_obj, 932 true, CLASSIFY_RULE_OPCODE_VLAN, 933 &rule_entry->vlan.header); 934 935 /* Set a VLAN itself */ 936 rule_entry->vlan.vlan = cpu_to_le16(vlan); 937 } 938 939 /* Set the ramrod data header */ 940 /* TODO: take this to the higher level in order to prevent multiple 941 writing */ 942 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, 943 rule_cnt); 944 } 945 946 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, 947 struct bnx2x_vlan_mac_obj *o, 948 struct bnx2x_exeq_elem *elem, 949 int rule_idx, int cam_offset) 950 { 951 struct bnx2x_raw_obj *raw = &o->raw; 952 struct eth_classify_rules_ramrod_data *data = 953 (struct eth_classify_rules_ramrod_data *)(raw->rdata); 954 int rule_cnt = rule_idx + 1; 955 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; 956 int cmd = elem->cmd_data.vlan_mac.cmd; 957 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? 
static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
				      struct bnx2x_vlan_mac_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = cpu_to_le16(vlan);
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);

	/* MOVE: Add a rule that will add this pair to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_PAIR,
					      &rule_entry->pair.header);

		/* Set VLAN and MAC themselves */
		rule_entry->pair.vlan = cpu_to_le16(vlan);
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
				       struct bnx2x_vlan_mac_obj *o,
				       struct bnx2x_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}

#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)

/**
 * bnx2x_vlan_mac_restore - reconfigure the next MAC/VLAN/VLAN-MAC element
 *
 * @bp:		device handle
 * @p:		command parameters
 * @ppos:	pointer to the cookie
 *
 * Reconfigure the next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags
 * is taken into account.
 *
 * The cookie should be given back in the next call to make the function
 * handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If the returned *ppos == NULL, this means that the last element
 * has been handled.
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_ramrod_params *p,
				  struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}
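/*
 * Illustrative restore loop (a sketch, not driver code): 'p' is a
 * caller-prepared bnx2x_vlan_mac_ramrod_params with RAMROD_COMP_WAIT set,
 * and the NULL-initialized 'pos' is the iterator cookie described above.
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *	} while (!rc && pos);
 */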
/*
 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
 * pointer to an element matching specific criteria, and NULL if no such
 * element has been found.
 */
static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_mac_ramrod_data *data =
		&elem->cmd_data.vlan_mac.u.vlan_mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
		return rc;
	}

	/*
	 * Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
		return -EEXIST;
	}

	/*
	 * TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return -EINVAL;

	return 0;
}
/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp:		device handle
 * @qo:		qable object to check
 * @elem:	element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem query_elem;

	/* If this classification cannot be deleted (doesn't exist)
	 * - return -EEXIST.
	 */
	pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
		return -EEXIST;
	}

	/*
	 * Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		BNX2X_ERR("There is a pending MOVE command already\n");
		return -EINVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
		return -EEXIST;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		BNX2X_ERR("Failed to return a credit\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp:		device handle
 * @qo:		qable object to check (source)
 * @elem:	element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/*
	 * Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(bp, src_o, dest_o,
			       &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
		return -EINVAL;
	}

	/*
	 * Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
		return -EINVAL;
	}

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return -EINVAL;

	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}

static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		return -EINVAL;
	}
}

static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
				 union bnx2x_qable_obj *qo,
				 struct bnx2x_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return 0;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
	case BNX2X_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case BNX2X_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return -EINVAL;
	}

	if (rc != true)
		return -EINVAL;

	return 0;
}
/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 *
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(bp, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 1000);
		else
			return 0;
	}

	return -EBUSY;
}

/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp:			device handle
 * @o:			bnx2x_vlan_mac_obj
 * @cqe:		completion element
 * @ramrod_flags:	if RAMROD_CONT is set, schedule next execution chunk
 *
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}

/**
 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem query, *pos;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

	memcpy(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
		break;
	case BNX2X_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {

		/* Return the credit of the optimized command */
		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
				BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
				return -EINVAL;
			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
				BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
				return -EINVAL;
			}
		}

		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
		   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		   "ADD" : "DEL");

		list_del(&pos->link);
		bnx2x_exe_queue_free_elem(bp, pos);
		return 1;
	}

	return 0;
}
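/*
 * Example of the optimization above (illustrative): if a DEL for MAC X is
 * queued while an ADD for the same X is still pending in the exe_queue,
 * the pair cancels out - the pending ADD is removed, its CAM credit is
 * returned, and bnx2x_optimize_vlan_mac() returns 1 so the new DEL element
 * is freed by bnx2x_exe_queue_add() instead of ever reaching the FW.
 */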
1507 "ADD" : "DEL"); 1508 1509 list_del(&pos->link); 1510 bnx2x_exe_queue_free_elem(bp, pos); 1511 return 1; 1512 } 1513 1514 return 0; 1515 } 1516 1517 /** 1518 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element 1519 * 1520 * @bp: device handle 1521 * @o: 1522 * @elem: 1523 * @restore: 1524 * @re: 1525 * 1526 * prepare a registry element according to the current command request. 1527 */ 1528 static inline int bnx2x_vlan_mac_get_registry_elem( 1529 struct bnx2x *bp, 1530 struct bnx2x_vlan_mac_obj *o, 1531 struct bnx2x_exeq_elem *elem, 1532 bool restore, 1533 struct bnx2x_vlan_mac_registry_elem **re) 1534 { 1535 int cmd = elem->cmd_data.vlan_mac.cmd; 1536 struct bnx2x_vlan_mac_registry_elem *reg_elem; 1537 1538 /* Allocate a new registry element if needed. */ 1539 if (!restore && 1540 ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) { 1541 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC); 1542 if (!reg_elem) 1543 return -ENOMEM; 1544 1545 /* Get a new CAM offset */ 1546 if (!o->get_cam_offset(o, ®_elem->cam_offset)) { 1547 /* 1548 * This shell never happen, because we have checked the 1549 * CAM availiability in the 'validate'. 1550 */ 1551 WARN_ON(1); 1552 kfree(reg_elem); 1553 return -EINVAL; 1554 } 1555 1556 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset); 1557 1558 /* Set a VLAN-MAC data */ 1559 memcpy(®_elem->u, &elem->cmd_data.vlan_mac.u, 1560 sizeof(reg_elem->u)); 1561 1562 /* Copy the flags (needed for DEL and RESTORE flows) */ 1563 reg_elem->vlan_mac_flags = 1564 elem->cmd_data.vlan_mac.vlan_mac_flags; 1565 } else /* DEL, RESTORE */ 1566 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); 1567 1568 *re = reg_elem; 1569 return 0; 1570 } 1571 1572 /** 1573 * bnx2x_execute_vlan_mac - execute vlan mac command 1574 * 1575 * @bp: device handle 1576 * @qo: 1577 * @exe_chunk: 1578 * @ramrod_flags: 1579 * 1580 * go and send a ramrod! 1581 */ 1582 static int bnx2x_execute_vlan_mac(struct bnx2x *bp, 1583 union bnx2x_qable_obj *qo, 1584 struct list_head *exe_chunk, 1585 unsigned long *ramrod_flags) 1586 { 1587 struct bnx2x_exeq_elem *elem; 1588 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; 1589 struct bnx2x_raw_obj *r = &o->raw; 1590 int rc, idx = 0; 1591 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags); 1592 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags); 1593 struct bnx2x_vlan_mac_registry_elem *reg_elem; 1594 int cmd; 1595 1596 /* 1597 * If DRIVER_ONLY execution is requested, cleanup a registry 1598 * and exit. Otherwise send a ramrod to FW. 1599 */ 1600 if (!drv_only) { 1601 WARN_ON(r->check_pending(r)); 1602 1603 /* Set pending */ 1604 r->set_pending(r); 1605 1606 /* Fill tha ramrod data */ 1607 list_for_each_entry(elem, exe_chunk, link) { 1608 cmd = elem->cmd_data.vlan_mac.cmd; 1609 /* 1610 * We will add to the target object in MOVE command, so 1611 * change the object for a CAM search. 
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			WARN_ON(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
			    (cmd == BNX2X_VLAN_MAC_MOVE)))
				list_add(&reg_elem->link, &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(bp, o, elem, idx,
					reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/*
		 * No need for an explicit memory barrier here as long as we
		 * ensure the ordering of writing to the SPQ element and
		 * updating of the SPQ producer, which involves a memory read;
		 * we will have to put a full memory barrier there (inside
		 * bnx2x_sp_post()).
		 */

		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
				   U64_HI(r->rdata_mapping),
				   U64_LO(r->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(bp, o,
						&elem->cmd_data.vlan_mac.u);

			WARN_ON(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			list_del(&reg_elem->link);
			kfree(reg_elem);
		}
	}

	if (!drv_only)
		return 1;
	else
		return 0;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == BNX2X_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(bp, cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				list_del(&reg_elem->link);
				kfree(reg_elem);
			}
		}
	}

	return rc;
}

static inline int bnx2x_vlan_mac_push_new_cmd(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);

	/* Allocate the execution queue element */
	elem = bnx2x_exe_queue_alloc_elem(bp);
	if (!elem)
		return -ENOMEM;

	/* Set the command 'length' */
	switch (p->user_req.cmd) {
	case BNX2X_VLAN_MAC_MOVE:
		elem->cmd_len = 2;
		break;
	default:
		elem->cmd_len = 1;
	}

	/* Fill the object specific info */
	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));

	/* Try to add a new command to the pending list */
	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
}
/**
 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @bp:		device handle
 * @p:		command parameters
 *
 */
int bnx2x_config_vlan_mac(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	int rc = 0;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
	struct bnx2x_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
		if (rc)
			return rc;
	}

	/*
	 * If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		rc = 1;

	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/*
	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then the user wants to wait until the last command is done.
	 */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/*
		 * Wait at most for the current exe_queue length iterations
		 * plus one (for the current pending command).
		 */
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(bp, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
						  ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return 0;
	}

	return rc;
}
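/*
 * Illustrative call (a sketch, not driver code): adding a unicast MAC and
 * waiting for completion. 'mac_obj' is an initialized bnx2x_vlan_mac_obj
 * and 'addr' a caller-owned MAC address.
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *	int rc;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = bnx2x_config_vlan_mac(bp, &p);
 */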
/**
 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @bp:			device handle
 * @o:			vlan_mac object
 * @vlan_mac_flags:	specification of the elements to delete
 * @ramrod_flags:	execution flags to be used for this deletion
 *
 * Returns 0 if the last operation has completed successfully and there are
 * no more elements left, a positive value if the last operation has
 * completed successfully and there are more previously configured elements,
 * and a negative value if the current operation has failed.
 */
static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
	int rc = 0;
	struct bnx2x_vlan_mac_ramrod_params p;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;

	/* Clear pending commands first */

	spin_lock_bh(&exeq->lock);

	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
		    *vlan_mac_flags) {
			rc = exeq->remove(bp, exeq->owner, exeq_pos);
			if (rc) {
				BNX2X_ERR("Failed to remove command\n");
				spin_unlock_bh(&exeq->lock);
				return rc;
			}
			list_del(&exeq_pos->link);
		}
	}

	spin_unlock_bh(&exeq->lock);

	/* Prepare a command request */
	memset(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	/*
	 * Add all but the last VLAN-MAC to the execution queue without
	 * actually executing anything.
	 */
	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
	__clear_bit(RAMROD_CONT, &p.ramrod_flags);

	list_for_each_entry(pos, &o->head, link) {
		if (pos->vlan_mac_flags == *vlan_mac_flags) {
			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
			rc = bnx2x_config_vlan_mac(bp, &p);
			if (rc < 0) {
				BNX2X_ERR("Failed to add a new DEL command\n");
				return rc;
			}
		}
	}

	p.ramrod_flags = *ramrod_flags;
	__set_bit(RAMROD_CONT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}

static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
	unsigned long *pstate, bnx2x_obj_type type)
{
	raw->func_id = func_id;
	raw->cid = cid;
	raw->cl_id = cl_id;
	raw->rdata = rdata;
	raw->rdata_mapping = rdata_mapping;
	raw->state = state;
	raw->pstate = pstate;
	raw->obj_type = type;
	raw->check_pending = bnx2x_raw_check_pending;
	raw->clear_pending = bnx2x_raw_clear_pending;
	raw->set_pending = bnx2x_raw_set_pending;
	raw->wait_comp = bnx2x_raw_wait;
}

static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
	int state, unsigned long *pstate, bnx2x_obj_type type,
	struct bnx2x_credit_pool_obj *macs_pool,
	struct bnx2x_credit_pool_obj *vlans_pool)
{
	INIT_LIST_HEAD(&o->head);

	o->macs_pool = macs_pool;
	o->vlans_pool = vlans_pool;

	o->delete_all = bnx2x_vlan_mac_del_all;
	o->restore = bnx2x_vlan_mac_restore;
	o->complete = bnx2x_complete_vlan_mac;
	o->wait = bnx2x_wait_vlan_mac;

	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
			   state, pstate, type);
}
void bnx2x_init_mac_obj(struct bnx2x *bp,
			struct bnx2x_vlan_mac_obj *mac_obj,
			u8 cl_id, u32 cid, u8 func_id, void *rdata,
			dma_addr_t rdata_mapping, int state,
			unsigned long *pstate, bnx2x_obj_type type,
			struct bnx2x_credit_pool_obj *macs_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;

	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, NULL);

	/* CAM credit pool handling */
	mac_obj->get_credit = bnx2x_get_credit_mac;
	mac_obj->put_credit = bnx2x_put_credit_mac;
	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;

	if (CHIP_IS_E1x(bp)) {
		mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
		mac_obj->check_del = bnx2x_check_mac_del;
		mac_obj->check_add = bnx2x_check_mac_add;
		mac_obj->check_move = bnx2x_check_move_always_err;
		mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, 1, qable_obj,
				     bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
	} else {
		mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
		mac_obj->check_del = bnx2x_check_mac_del;
		mac_obj->check_add = bnx2x_check_mac_add;
		mac_obj->check_move = bnx2x_check_move;
		mac_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
		mac_obj->get_n_elements = bnx2x_get_n_elements;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
	}
}

void bnx2x_init_vlan_obj(struct bnx2x *bp,
			 struct bnx2x_vlan_mac_obj *vlan_obj,
			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
			 dma_addr_t rdata_mapping, int state,
			 unsigned long *pstate, bnx2x_obj_type type,
			 struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;

	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type, NULL,
				   vlans_pool);

	vlan_obj->get_credit = bnx2x_get_credit_vlan;
	vlan_obj->put_credit = bnx2x_put_credit_vlan;
	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Chips other than E2 and newer are not supported\n");
		BUG();
	} else {
		vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
		vlan_obj->check_del = bnx2x_check_vlan_del;
		vlan_obj->check_add = bnx2x_check_vlan_add;
		vlan_obj->check_move = bnx2x_check_move;
		vlan_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan);
	}
}

void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
			     dma_addr_t rdata_mapping, int state,
			     unsigned long *pstate, bnx2x_obj_type type,
			     struct bnx2x_credit_pool_obj *macs_pool,
			     struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj =
		(union bnx2x_qable_obj *)vlan_mac_obj;

	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, vlans_pool);

	/* CAM pool handling */
vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; 2042 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; 2043 /* 2044 * CAM offset is relevant for 57710 and 57711 chips only, which have a 2045 * single CAM for both MACs and VLAN-MAC pairs. So the offset 2046 * will be taken from the MACs' pool object only. 2047 */ 2048 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; 2049 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; 2050 2051 if (CHIP_IS_E1(bp)) { 2052 BNX2X_ERR("Do not support chips other than E2\n"); 2053 BUG(); 2054 } else if (CHIP_IS_E1H(bp)) { 2055 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h; 2056 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; 2057 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; 2058 vlan_mac_obj->check_move = bnx2x_check_move_always_err; 2059 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; 2060 2061 /* Exe Queue */ 2062 bnx2x_exe_queue_init(bp, 2063 &vlan_mac_obj->exe_queue, 1, qable_obj, 2064 bnx2x_validate_vlan_mac, 2065 bnx2x_remove_vlan_mac, 2066 bnx2x_optimize_vlan_mac, 2067 bnx2x_execute_vlan_mac, 2068 bnx2x_exeq_get_vlan_mac); 2069 } else { 2070 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2; 2071 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; 2072 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; 2073 vlan_mac_obj->check_move = bnx2x_check_move; 2074 vlan_mac_obj->ramrod_cmd = 2075 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; 2076 2077 /* Exe Queue */ 2078 bnx2x_exe_queue_init(bp, 2079 &vlan_mac_obj->exe_queue, 2080 CLASSIFY_RULES_COUNT, 2081 qable_obj, bnx2x_validate_vlan_mac, 2082 bnx2x_remove_vlan_mac, 2083 bnx2x_optimize_vlan_mac, 2084 bnx2x_execute_vlan_mac, 2085 bnx2x_exeq_get_vlan_mac); 2086 } 2087 2088 } 2089 2090 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ 2091 static inline void __storm_memset_mac_filters(struct bnx2x *bp, 2092 struct tstorm_eth_mac_filter_config *mac_filters, 2093 u16 pf_id) 2094 { 2095 size_t size = sizeof(struct tstorm_eth_mac_filter_config); 2096 2097 u32 addr = BAR_TSTRORM_INTMEM + 2098 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id); 2099 2100 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters); 2101 } 2102 2103 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, 2104 struct bnx2x_rx_mode_ramrod_params *p) 2105 { 2106 /* update the bp MAC filter structure */ 2107 u32 mask = (1 << p->cl_id); 2108 2109 struct tstorm_eth_mac_filter_config *mac_filters = 2110 (struct tstorm_eth_mac_filter_config *)p->rdata; 2111 2112 /* initial setting is drop-all */ 2113 u8 drop_all_ucast = 1, drop_all_mcast = 1; 2114 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; 2115 u8 unmatched_unicast = 0; 2116 2117 /* In E1x we only take into account the RX accept flag, since TX switching 2118 * isn't enabled.
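 * For illustration (editor's note, not in the original comment): each mac_filters field below keeps one bit per client, with mask = (1 << p->cl_id); the ternaries below either OR the mask in (set this client's bit) or AND with ~mask (clear only this client's bit), so other clients' settings are never touched.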
*/ 2119 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) 2120 /* accept matched ucast */ 2121 drop_all_ucast = 0; 2122 2123 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags)) 2124 /* accept matched mcast */ 2125 drop_all_mcast = 0; 2126 2127 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) { 2128 /* accept all ucast */ 2129 drop_all_ucast = 0; 2130 accp_all_ucast = 1; 2131 } 2132 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) { 2133 /* accept all mcast */ 2134 drop_all_mcast = 0; 2135 accp_all_mcast = 1; 2136 } 2137 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags)) 2138 /* accept (all) bcast */ 2139 accp_all_bcast = 1; 2140 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags)) 2141 /* accept unmatched unicasts */ 2142 unmatched_unicast = 1; 2143 2144 mac_filters->ucast_drop_all = drop_all_ucast ? 2145 mac_filters->ucast_drop_all | mask : 2146 mac_filters->ucast_drop_all & ~mask; 2147 2148 mac_filters->mcast_drop_all = drop_all_mcast ? 2149 mac_filters->mcast_drop_all | mask : 2150 mac_filters->mcast_drop_all & ~mask; 2151 2152 mac_filters->ucast_accept_all = accp_all_ucast ? 2153 mac_filters->ucast_accept_all | mask : 2154 mac_filters->ucast_accept_all & ~mask; 2155 2156 mac_filters->mcast_accept_all = accp_all_mcast ? 2157 mac_filters->mcast_accept_all | mask : 2158 mac_filters->mcast_accept_all & ~mask; 2159 2160 mac_filters->bcast_accept_all = accp_all_bcast ? 2161 mac_filters->bcast_accept_all | mask : 2162 mac_filters->bcast_accept_all & ~mask; 2163 2164 mac_filters->unmatched_unicast = unmatched_unicast ? 2165 mac_filters->unmatched_unicast | mask : 2166 mac_filters->unmatched_unicast & ~mask; 2167 2168 DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n" 2169 "accp_mcast 0x%x\naccp_bcast 0x%x\n", 2170 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all, 2171 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all, 2172 mac_filters->bcast_accept_all); 2173 2174 /* write the MAC filter structure */ 2175 __storm_memset_mac_filters(bp, mac_filters, p->func_id); 2176 2177 /* The operation is completed */ 2178 clear_bit(p->state, p->pstate); 2179 smp_mb__after_clear_bit(); 2180 2181 return 0; 2182 } 2183 2184 /* Setup ramrod data */ 2185 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid, 2186 struct eth_classify_header *hdr, 2187 u8 rule_cnt) 2188 { 2189 hdr->echo = cid; 2190 hdr->rule_cnt = rule_cnt; 2191 } 2192 2193 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, 2194 unsigned long accept_flags, 2195 struct eth_filter_rules_cmd *cmd, 2196 bool clear_accept_all) 2197 { 2198 u16 state; 2199 2200 /* start with 'drop-all' */ 2201 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | 2202 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; 2203 2204 if (accept_flags) { 2205 if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags)) 2206 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; 2207 2208 if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags)) 2209 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; 2210 2211 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) { 2212 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; 2213 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; 2214 } 2215 2216 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) { 2217 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; 2218 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; 2219 } 2220 if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags)) 2221 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; 2222 2223 if (test_bit(BNX2X_ACCEPT_UNMATCHED,
&accept_flags)) { 2224 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; 2225 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; 2226 } 2227 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags)) 2228 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; 2229 } 2230 2231 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */ 2232 if (clear_accept_all) { 2233 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; 2234 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; 2235 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; 2236 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; 2237 } 2238 2239 cmd->state = cpu_to_le16(state); 2240 2241 } 2242 2243 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, 2244 struct bnx2x_rx_mode_ramrod_params *p) 2245 { 2246 struct eth_filter_rules_ramrod_data *data = p->rdata; 2247 int rc; 2248 u8 rule_idx = 0; 2249 2250 /* Reset the ramrod data buffer */ 2251 memset(data, 0, sizeof(*data)); 2252 2253 /* Setup ramrod data */ 2254 2255 /* Tx (internal switching) */ 2256 if (test_bit(RAMROD_TX, &p->ramrod_flags)) { 2257 data->rules[rule_idx].client_id = p->cl_id; 2258 data->rules[rule_idx].func_id = p->func_id; 2259 2260 data->rules[rule_idx].cmd_general_data = 2261 ETH_FILTER_RULES_CMD_TX_CMD; 2262 2263 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, 2264 &(data->rules[rule_idx++]), false); 2265 } 2266 2267 /* Rx */ 2268 if (test_bit(RAMROD_RX, &p->ramrod_flags)) { 2269 data->rules[rule_idx].client_id = p->cl_id; 2270 data->rules[rule_idx].func_id = p->func_id; 2271 2272 data->rules[rule_idx].cmd_general_data = 2273 ETH_FILTER_RULES_CMD_RX_CMD; 2274 2275 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, 2276 &(data->rules[rule_idx++]), false); 2277 } 2278 2279 2280 /* 2281 * If FCoE Queue configuration has been requested, configure the Rx and 2282 * internal switching modes for this queue in separate rules. 2283 * 2284 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort: 2285 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED. 2286 */ 2287 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) { 2288 /* Tx (internal switching) */ 2289 if (test_bit(RAMROD_TX, &p->ramrod_flags)) { 2290 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); 2291 data->rules[rule_idx].func_id = p->func_id; 2292 2293 data->rules[rule_idx].cmd_general_data = 2294 ETH_FILTER_RULES_CMD_TX_CMD; 2295 2296 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, 2297 &(data->rules[rule_idx++]), 2298 true); 2299 } 2300 2301 /* Rx */ 2302 if (test_bit(RAMROD_RX, &p->ramrod_flags)) { 2303 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); 2304 data->rules[rule_idx].func_id = p->func_id; 2305 2306 data->rules[rule_idx].cmd_general_data = 2307 ETH_FILTER_RULES_CMD_RX_CMD; 2308 2309 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, 2310 &(data->rules[rule_idx++]), 2311 true); 2312 } 2313 } 2314 2315 /* 2316 * Set the ramrod header (most importantly - the number of rules to 2317 * configure). 2318 */ 2319 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); 2320 2321 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n", 2322 data->header.rule_cnt, p->rx_accept_flags, 2323 p->tx_accept_flags); 2324 2325 /* 2326 * No need for an explicit memory barrier here: we would anyway 2327 * have to order the write to the SPQ element against the update 2328 * of the SPQ producer, which involves a memory read, and the 2329 * full memory barrier for that is put inside 2330 * bnx2x_sp_post().
2331 */ 2332 2333 /* Send a ramrod */ 2334 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid, 2335 U64_HI(p->rdata_mapping), 2336 U64_LO(p->rdata_mapping), 2337 ETH_CONNECTION_TYPE); 2338 if (rc) 2339 return rc; 2340 2341 /* Ramrod completion is pending */ 2342 return 1; 2343 } 2344 2345 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp, 2346 struct bnx2x_rx_mode_ramrod_params *p) 2347 { 2348 return bnx2x_state_wait(bp, p->state, p->pstate); 2349 } 2350 2351 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp, 2352 struct bnx2x_rx_mode_ramrod_params *p) 2353 { 2354 /* Do nothing */ 2355 return 0; 2356 } 2357 2358 int bnx2x_config_rx_mode(struct bnx2x *bp, 2359 struct bnx2x_rx_mode_ramrod_params *p) 2360 { 2361 int rc; 2362 2363 /* Configure the new classification in the chip */ 2364 rc = p->rx_mode_obj->config_rx_mode(bp, p); 2365 if (rc < 0) 2366 return rc; 2367 2368 /* Wait for a ramrod completion if it was requested */ 2369 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { 2370 rc = p->rx_mode_obj->wait_comp(bp, p); 2371 if (rc) 2372 return rc; 2373 } 2374 2375 return rc; 2376 } 2377 2378 void bnx2x_init_rx_mode_obj(struct bnx2x *bp, 2379 struct bnx2x_rx_mode_obj *o) 2380 { 2381 if (CHIP_IS_E1x(bp)) { 2382 o->wait_comp = bnx2x_empty_rx_mode_wait; 2383 o->config_rx_mode = bnx2x_set_rx_mode_e1x; 2384 } else { 2385 o->wait_comp = bnx2x_wait_rx_mode_comp_e2; 2386 o->config_rx_mode = bnx2x_set_rx_mode_e2; 2387 } 2388 } 2389 2390 /********************* Multicast verbs: SET, CLEAR ****************************/ 2391 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac) 2392 { 2393 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff; 2394 } 2395 2396 struct bnx2x_mcast_mac_elem { 2397 struct list_head link; 2398 u8 mac[ETH_ALEN]; 2399 u8 pad[2]; /* For a natural alignment of the following buffer */ 2400 }; 2401 2402 struct bnx2x_pending_mcast_cmd { 2403 struct list_head link; 2404 int type; /* BNX2X_MCAST_CMD_X */ 2405 union { 2406 struct list_head macs_head; 2407 u32 macs_num; /* Needed for DEL command */ 2408 int next_bin; /* Needed for RESTORE flow with aprox match */ 2409 } data; 2410 2411 bool done; /* set to true when the command has been handled; 2412 * used in practice only in 57712 handling, where one pending 2413 * command may be handled in a few operations. Since for 2414 * other chips every operation is completed in a 2415 * single ramrod, there is no need to utilize this field. 2416 */ 2417 }; 2418 2419 static int bnx2x_mcast_wait(struct bnx2x *bp, 2420 struct bnx2x_mcast_obj *o) 2421 { 2422 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) || 2423 o->raw.wait_comp(bp, &o->raw)) 2424 return -EBUSY; 2425 2426 return 0; 2427 } 2428 2429 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, 2430 struct bnx2x_mcast_obj *o, 2431 struct bnx2x_mcast_ramrod_params *p, 2432 int cmd) 2433 { 2434 int total_sz; 2435 struct bnx2x_pending_mcast_cmd *new_cmd; 2436 struct bnx2x_mcast_mac_elem *cur_mac = NULL; 2437 struct bnx2x_mcast_list_elem *pos; 2438 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2439 p->mcast_list_len : 0); 2440 2441 /* If the command is empty ("handle pending commands only"), break */ 2442 if (!p->mcast_list_len) 2443 return 0; 2444 2445 total_sz = sizeof(*new_cmd) + 2446 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem); 2447 2448 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ 2449 new_cmd = kzalloc(total_sz, GFP_ATOMIC); 2450 2451 if (!new_cmd) 2452 return -ENOMEM; 2453 2454 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n", 2455 cmd, macs_list_len); 2456 2457 INIT_LIST_HEAD(&new_cmd->data.macs_head); 2458 2459 new_cmd->type = cmd; 2460 new_cmd->done = false; 2461 2462 switch (cmd) { 2463 case BNX2X_MCAST_CMD_ADD: 2464 cur_mac = (struct bnx2x_mcast_mac_elem *) 2465 ((u8 *)new_cmd + sizeof(*new_cmd)); 2466 2467 /* Push the MACs of the current command into the pending command 2468 * MACs list: FIFO 2469 */ 2470 list_for_each_entry(pos, &p->mcast_list, link) { 2471 memcpy(cur_mac->mac, pos->mac, ETH_ALEN); 2472 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head); 2473 cur_mac++; 2474 } 2475 2476 break; 2477 2478 case BNX2X_MCAST_CMD_DEL: 2479 new_cmd->data.macs_num = p->mcast_list_len; 2480 break; 2481 2482 case BNX2X_MCAST_CMD_RESTORE: 2483 new_cmd->data.next_bin = 0; 2484 break; 2485 2486 default: 2487 kfree(new_cmd); 2488 BNX2X_ERR("Unknown command: %d\n", cmd); 2489 return -EINVAL; 2490 } 2491 2492 /* Push the new pending command to the tail of the pending list: FIFO */ 2493 list_add_tail(&new_cmd->link, &o->pending_cmds_head); 2494 2495 o->set_sched(o); 2496 2497 return 1; 2498 } 2499 2500 /** 2501 * bnx2x_mcast_get_next_bin - get the next set bin (index) 2502 * 2503 * @o: 2504 * @last: index to start looking from (including) 2505 * 2506 * returns the next found (set) bin or a negative value if none is found. 2507 */ 2508 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last) 2509 { 2510 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ; 2511 2512 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) { 2513 if (o->registry.aprox_match.vec[i]) 2514 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) { 2515 int cur_bit = j + BIT_VEC64_ELEM_SZ * i; 2516 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2517 vec, cur_bit)) { 2518 return cur_bit; 2519 } 2520 } 2521 inner_start = 0; 2522 } 2523 2524 /* None found */ 2525 return -1; 2526 } 2527 2528 /** 2529 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it 2530 * 2531 * @o: 2532 * 2533 * returns the index of the found bin or -1 if none is found 2534 */ 2535 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o) 2536 { 2537 int cur_bit = bnx2x_mcast_get_next_bin(o, 0); 2538 2539 if (cur_bit >= 0) 2540 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit); 2541 2542 return cur_bit; 2543 } 2544 2545 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o) 2546 { 2547 struct bnx2x_raw_obj *raw = &o->raw; 2548 u8 rx_tx_flag = 0; 2549 2550 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || 2551 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) 2552 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD; 2553 2554 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || 2555 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) 2556 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD; 2557 2558 return rx_tx_flag; 2559 } 2560 2561 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, 2562 struct bnx2x_mcast_obj *o, int idx, 2563 union bnx2x_mcast_config_data *cfg_data, 2564 int cmd) 2565 { 2566 struct bnx2x_raw_obj *r = &o->raw; 2567 struct eth_multicast_rules_ramrod_data *data = 2568 (struct eth_multicast_rules_ramrod_data *)(r->rdata); 2569 u8 func_id = r->func_id; 2570 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o); 2571 int bin; 2572 2573 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) 2574 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; 2575 2576 data->rules[idx].cmd_general_data |= rx_tx_add_flag; 2577 2578 /* Get a bin and update the bins vector */ 2579 switch (cmd) { 2580 case BNX2X_MCAST_CMD_ADD: 2581 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac); 2582 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); 2583 break; 2584 2585 case BNX2X_MCAST_CMD_DEL: 2586 /* If there were no more bins to clear 2587 * (bnx2x_mcast_clear_first_bin() returns -1) then we would 2588 * end up clearing a bogus (0xff) bin. 2589 * See bnx2x_mcast_validate_e2() for an explanation of when 2590 * this may happen. 2591 */ 2592 bin = bnx2x_mcast_clear_first_bin(o); 2593 break; 2594 2595 case BNX2X_MCAST_CMD_RESTORE: 2596 bin = cfg_data->bin; 2597 break; 2598 2599 default: 2600 BNX2X_ERR("Unknown command: %d\n", cmd); 2601 return; 2602 } 2603 2604 DP(BNX2X_MSG_SP, "%s bin %d\n", 2605 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2606 "Setting" : "Clearing"), bin); 2607 2608 data->rules[idx].bin_id = (u8)bin; 2609 data->rules[idx].func_id = func_id; 2610 data->rules[idx].engine_id = o->engine_id; 2611 } 2612 2613 /** 2614 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry 2615 * 2616 * @bp: device handle 2617 * @o: 2618 * @start_bin: index in the registry to start from (including) 2619 * @rdata_idx: index in the ramrod data to start from 2620 * 2621 * returns last handled bin index or -1 if all bins have been handled 2622 */ 2623 static inline int bnx2x_mcast_handle_restore_cmd_e2( 2624 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin, 2625 int *rdata_idx) 2626 { 2627 int cur_bin, cnt = *rdata_idx; 2628 union bnx2x_mcast_config_data cfg_data = {0}; 2629 2630 /* go through the registry and configure the bins from it */ 2631 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0; 2632 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) { 2633 2634 cfg_data.bin = (u8)cur_bin; 2635 o->set_one_rule(bp, o, cnt, &cfg_data, 2636 BNX2X_MCAST_CMD_RESTORE); 2637 2638 cnt++; 2639 2640 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin); 2641 2642 /* Break if we reached the maximum number 2643 * of rules. 2644 */ 2645 if (cnt >= o->max_cmd_len) 2646 break; 2647 } 2648 2649 *rdata_idx = cnt; 2650 2651 return cur_bin; 2652 } 2653 2654 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp, 2655 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, 2656 int *line_idx) 2657 { 2658 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n; 2659 int cnt = *line_idx; 2660 union bnx2x_mcast_config_data cfg_data = {0}; 2661 2662 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head, 2663 link) { 2664 2665 cfg_data.mac = &pmac_pos->mac[0]; 2666 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); 2667 2668 cnt++; 2669 2670 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", 2671 pmac_pos->mac); 2672 2673 list_del(&pmac_pos->link); 2674 2675 /* Break if we reached the maximum number 2676 * of rules. 2677 */ 2678 if (cnt >= o->max_cmd_len) 2679 break; 2680 } 2681 2682 *line_idx = cnt; 2683 2684 /* if no more MACs to configure - we are done */ 2685 if (list_empty(&cmd_pos->data.macs_head)) 2686 cmd_pos->done = true; 2687 } 2688 2689 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp, 2690 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, 2691 int *line_idx) 2692 { 2693 int cnt = *line_idx; 2694 2695 while (cmd_pos->data.macs_num) { 2696 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type); 2697 2698 cnt++; 2699 2700 cmd_pos->data.macs_num--; 2701 2702 DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n", 2703 cmd_pos->data.macs_num, cnt); 2704 2705 /* Break if we reached the maximum 2706 * number of rules. 
2707 */ 2708 if (cnt >= o->max_cmd_len) 2709 break; 2710 } 2711 2712 *line_idx = cnt; 2713 2714 /* If we cleared all bins - we are done */ 2715 if (!cmd_pos->data.macs_num) 2716 cmd_pos->done = true; 2717 } 2718 2719 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp, 2720 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, 2721 int *line_idx) 2722 { 2723 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin, 2724 line_idx); 2725 2726 if (cmd_pos->data.next_bin < 0) 2727 /* If o->set_restore returned -1 we are done */ 2728 cmd_pos->done = true; 2729 else 2730 /* Start from the next bin next time */ 2731 cmd_pos->data.next_bin++; 2732 } 2733 2734 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp, 2735 struct bnx2x_mcast_ramrod_params *p) 2736 { 2737 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n; 2738 int cnt = 0; 2739 struct bnx2x_mcast_obj *o = p->mcast_obj; 2740 2741 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head, 2742 link) { 2743 switch (cmd_pos->type) { 2744 case BNX2X_MCAST_CMD_ADD: 2745 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt); 2746 break; 2747 2748 case BNX2X_MCAST_CMD_DEL: 2749 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt); 2750 break; 2751 2752 case BNX2X_MCAST_CMD_RESTORE: 2753 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos, 2754 &cnt); 2755 break; 2756 2757 default: 2758 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); 2759 return -EINVAL; 2760 } 2761 2762 /* If the command has been completed - remove it from the list 2763 * and free the memory 2764 */ 2765 if (cmd_pos->done) { 2766 list_del(&cmd_pos->link); 2767 kfree(cmd_pos); 2768 } 2769 2770 /* Break if we reached the maximum number of rules */ 2771 if (cnt >= o->max_cmd_len) 2772 break; 2773 } 2774 2775 return cnt; 2776 } 2777 2778 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp, 2779 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, 2780 int *line_idx) 2781 { 2782 struct bnx2x_mcast_list_elem *mlist_pos; 2783 union bnx2x_mcast_config_data cfg_data = {0}; 2784 int cnt = *line_idx; 2785 2786 list_for_each_entry(mlist_pos, &p->mcast_list, link) { 2787 cfg_data.mac = mlist_pos->mac; 2788 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD); 2789 2790 cnt++; 2791 2792 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", 2793 mlist_pos->mac); 2794 } 2795 2796 *line_idx = cnt; 2797 } 2798 2799 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp, 2800 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, 2801 int *line_idx) 2802 { 2803 int cnt = *line_idx, i; 2804 2805 for (i = 0; i < p->mcast_list_len; i++) { 2806 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL); 2807 2808 cnt++; 2809 2810 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n", 2811 p->mcast_list_len - i - 1); 2812 } 2813 2814 *line_idx = cnt; 2815 } 2816 2817 /** 2818 * bnx2x_mcast_handle_current_cmd - 2819 * 2820 * @bp: device handle 2821 * @p: 2822 * @cmd: 2823 * @start_cnt: first line in the ramrod data that may be used 2824 * 2825 * This function is called iff there is enough place for the current command in 2826 * the ramrod data. 2827 * Returns number of lines filled in the ramrod data in total. 
2828 */ 2829 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp, 2830 struct bnx2x_mcast_ramrod_params *p, int cmd, 2831 int start_cnt) 2832 { 2833 struct bnx2x_mcast_obj *o = p->mcast_obj; 2834 int cnt = start_cnt; 2835 2836 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len); 2837 2838 switch (cmd) { 2839 case BNX2X_MCAST_CMD_ADD: 2840 bnx2x_mcast_hdl_add(bp, o, p, &cnt); 2841 break; 2842 2843 case BNX2X_MCAST_CMD_DEL: 2844 bnx2x_mcast_hdl_del(bp, o, p, &cnt); 2845 break; 2846 2847 case BNX2X_MCAST_CMD_RESTORE: 2848 o->hdl_restore(bp, o, 0, &cnt); 2849 break; 2850 2851 default: 2852 BNX2X_ERR("Unknown command: %d\n", cmd); 2853 return -EINVAL; 2854 } 2855 2856 /* The current command has been handled */ 2857 p->mcast_list_len = 0; 2858 2859 return cnt; 2860 } 2861 2862 static int bnx2x_mcast_validate_e2(struct bnx2x *bp, 2863 struct bnx2x_mcast_ramrod_params *p, 2864 int cmd) 2865 { 2866 struct bnx2x_mcast_obj *o = p->mcast_obj; 2867 int reg_sz = o->get_registry_size(o); 2868 2869 switch (cmd) { 2870 /* DEL command deletes all currently configured MACs */ 2871 case BNX2X_MCAST_CMD_DEL: 2872 o->set_registry_size(o, 0); 2873 /* Don't break */ 2874 2875 /* RESTORE command will restore the entire multicast configuration */ 2876 case BNX2X_MCAST_CMD_RESTORE: 2877 /* Here we set the approximate amount of work to do, which may 2878 * in fact be less, as some MACs in postponed ADD 2879 * command(s) scheduled before this command may fall into 2880 * the same bin and the actual number of bins set in the 2881 * registry would be less than we estimated here. See 2882 * bnx2x_mcast_set_one_rule_e2() for further details. 2883 */ 2884 p->mcast_list_len = reg_sz; 2885 break; 2886 2887 case BNX2X_MCAST_CMD_ADD: 2888 case BNX2X_MCAST_CMD_CONT: 2889 /* Here we assume that all new MACs will fall into new bins. 2890 * However we will correct the real registry size after we 2891 * handle all pending commands. 2892 */ 2893 o->set_registry_size(o, reg_sz + p->mcast_list_len); 2894 break; 2895 2896 default: 2897 BNX2X_ERR("Unknown command: %d\n", cmd); 2898 return -EINVAL; 2899 2900 } 2901 2902 /* Increase the total number of MACs pending to be configured */ 2903 o->total_pending_num += p->mcast_list_len; 2904 2905 return 0; 2906 } 2907 2908 static void bnx2x_mcast_revert_e2(struct bnx2x *bp, 2909 struct bnx2x_mcast_ramrod_params *p, 2910 int old_num_bins) 2911 { 2912 struct bnx2x_mcast_obj *o = p->mcast_obj; 2913 2914 o->set_registry_size(o, old_num_bins); 2915 o->total_pending_num -= p->mcast_list_len; 2916 } 2917 2918 /** 2919 * bnx2x_mcast_set_rdata_hdr_e2 - set the header values 2920 * 2921 * @bp: device handle 2922 * @p: 2923 * @len: number of rules to handle 2924 */ 2925 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp, 2926 struct bnx2x_mcast_ramrod_params *p, 2927 u8 len) 2928 { 2929 struct bnx2x_raw_obj *r = &p->mcast_obj->raw; 2930 struct eth_multicast_rules_ramrod_data *data = 2931 (struct eth_multicast_rules_ramrod_data *)(r->rdata); 2932 2933 data->header.echo = ((r->cid & BNX2X_SWCID_MASK) | 2934 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); 2935 data->header.rule_cnt = len; 2936 } 2937 2938 /** 2939 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins 2940 * 2941 * @bp: device handle 2942 * @o: 2943 * 2944 * Recalculate the actual number of set bins in the registry using Brian 2945 * Kernighan's algorithm: its execution complexity is proportional to the number of set bins.
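 * Worked example (editor's note): for elem = 0b101100, "elem &= elem - 1" clears the lowest set bit on each pass (0b101100, 0b101000, 0b100000, 0), so cnt grows by exactly 3, the number of set bits.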
* 2946 * 2947 * returns 0 for compliance with bnx2x_mcast_refresh_registry_e1(). 2948 */ 2949 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp, 2950 struct bnx2x_mcast_obj *o) 2951 { 2952 int i, cnt = 0; 2953 u64 elem; 2954 2955 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) { 2956 elem = o->registry.aprox_match.vec[i]; 2957 for (; elem; cnt++) 2958 elem &= elem - 1; 2959 } 2960 2961 o->set_registry_size(o, cnt); 2962 2963 return 0; 2964 } 2965 2966 static int bnx2x_mcast_setup_e2(struct bnx2x *bp, 2967 struct bnx2x_mcast_ramrod_params *p, 2968 int cmd) 2969 { 2970 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw; 2971 struct bnx2x_mcast_obj *o = p->mcast_obj; 2972 struct eth_multicast_rules_ramrod_data *data = 2973 (struct eth_multicast_rules_ramrod_data *)(raw->rdata); 2974 int cnt = 0, rc; 2975 2976 /* Reset the ramrod data buffer */ 2977 memset(data, 0, sizeof(*data)); 2978 2979 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p); 2980 2981 /* If there are no more pending commands - clear SCHEDULED state */ 2982 if (list_empty(&o->pending_cmds_head)) 2983 o->clear_sched(o); 2984 2985 /* The below may be true iff there was enough room in ramrod 2986 * data for all pending commands and for the current 2987 * command. Otherwise the current command would have been added 2988 * to the pending commands and p->mcast_list_len would have been 2989 * zeroed. 2990 */ 2991 if (p->mcast_list_len > 0) 2992 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt); 2993 2994 /* We've pulled out some MACs - update the total number of 2995 * outstanding. 2996 */ 2997 o->total_pending_num -= cnt; 2998 2999 /* send a ramrod */ 3000 WARN_ON(o->total_pending_num < 0); 3001 WARN_ON(cnt > o->max_cmd_len); 3002 3003 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt); 3004 3005 /* Update the registry size if there are no more pending operations. 3006 * 3007 * We don't want to change the value of the registry size if there are 3008 * pending operations because we want it to always be equal to the 3009 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of 3010 * set bins after the last requested operation in order to properly 3011 * evaluate the size of the next DEL/RESTORE operation. 3012 * 3013 * Note that we update the registry itself during command(s) handling 3014 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we 3015 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but 3016 * with a limited amount of update commands (per MAC/bin) and we don't 3017 * know in this scope what the actual state of bins configuration is 3018 * going to be after this ramrod. 3019 */ 3020 if (!o->total_pending_num) 3021 bnx2x_mcast_refresh_registry_e2(bp, o); 3022 3023 /* 3024 * If CLEAR_ONLY was requested - don't send a ramrod and clear 3025 * RAMROD_PENDING status immediately. 3026 */ 3027 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 3028 raw->clear_pending(raw); 3029 return 0; 3030 } else { 3031 /* 3032 * No need for an explicit memory barrier here: we would anyway 3033 * have to order the write to the SPQ element against the update 3034 * of the SPQ producer, which involves a memory read, and the 3035 * full memory barrier for that is put inside 3036 * bnx2x_sp_post().
3037 */ 3038 3039 /* Send a ramrod */ 3040 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES, 3041 raw->cid, U64_HI(raw->rdata_mapping), 3042 U64_LO(raw->rdata_mapping), 3043 ETH_CONNECTION_TYPE); 3044 if (rc) 3045 return rc; 3046 3047 /* Ramrod completion is pending */ 3048 return 1; 3049 } 3050 } 3051 3052 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp, 3053 struct bnx2x_mcast_ramrod_params *p, 3054 int cmd) 3055 { 3056 /* Mark that there is work to do */ 3057 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE)) 3058 p->mcast_list_len = 1; 3059 3060 return 0; 3061 } 3062 3063 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp, 3064 struct bnx2x_mcast_ramrod_params *p, 3065 int old_num_bins) 3066 { 3067 /* Do nothing */ 3068 } 3069 3070 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \ 3071 do { \ 3072 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \ 3073 } while (0) 3074 3075 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp, 3076 struct bnx2x_mcast_obj *o, 3077 struct bnx2x_mcast_ramrod_params *p, 3078 u32 *mc_filter) 3079 { 3080 struct bnx2x_mcast_list_elem *mlist_pos; 3081 int bit; 3082 3083 list_for_each_entry(mlist_pos, &p->mcast_list, link) { 3084 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac); 3085 BNX2X_57711_SET_MC_FILTER(mc_filter, bit); 3086 3087 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n", 3088 mlist_pos->mac, bit); 3089 3090 /* bookkeeping... */ 3091 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, 3092 bit); 3093 } 3094 } 3095 3096 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp, 3097 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, 3098 u32 *mc_filter) 3099 { 3100 int bit; 3101 3102 for (bit = bnx2x_mcast_get_next_bin(o, 0); 3103 bit >= 0; 3104 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) { 3105 BNX2X_57711_SET_MC_FILTER(mc_filter, bit); 3106 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit); 3107 } 3108 } 3109 3110 /* On 57711 we write the multicast MACs' approximate match 3111 * table directly into the TSTORM's internal RAM, so we don't 3112 * really need any tricks to make it work. 3113 */ 3114 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp, 3115 struct bnx2x_mcast_ramrod_params *p, 3116 int cmd) 3117 { 3118 int i; 3119 struct bnx2x_mcast_obj *o = p->mcast_obj; 3120 struct bnx2x_raw_obj *r = &o->raw; 3121 3122 /* Unless CLEAR_ONLY has been requested, build and write the new 3123 * filter; for CLEAR_ONLY, only clear the registry and the pending bit. 3124 */ 3125 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 3126 u32 mc_filter[MC_HASH_SIZE] = {0}; 3127 3128 /* Set the multicast filter bits before writing it into 3129 * the internal memory.
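 * (Editor's illustration: BNX2X_57711_SET_MC_FILTER maps bin 37 to mc_filter[37 >> 5] = mc_filter[1] and sets bit 37 & 0x1f = 5 there, i.e. mc_filter[1] |= 1 << 5.)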
3130 */ 3131 switch (cmd) { 3132 case BNX2X_MCAST_CMD_ADD: 3133 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter); 3134 break; 3135 3136 case BNX2X_MCAST_CMD_DEL: 3137 DP(BNX2X_MSG_SP, 3138 "Invalidating multicast MACs configuration\n"); 3139 3140 /* clear the registry */ 3141 memset(o->registry.aprox_match.vec, 0, 3142 sizeof(o->registry.aprox_match.vec)); 3143 break; 3144 3145 case BNX2X_MCAST_CMD_RESTORE: 3146 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter); 3147 break; 3148 3149 default: 3150 BNX2X_ERR("Unknown command: %d\n", cmd); 3151 return -EINVAL; 3152 } 3153 3154 /* Set the mcast filter in the internal memory */ 3155 for (i = 0; i < MC_HASH_SIZE; i++) 3156 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]); 3157 } else 3158 /* clear the registry */ 3159 memset(o->registry.aprox_match.vec, 0, 3160 sizeof(o->registry.aprox_match.vec)); 3161 3162 /* We are done */ 3163 r->clear_pending(r); 3164 3165 return 0; 3166 } 3167 3168 static int bnx2x_mcast_validate_e1(struct bnx2x *bp, 3169 struct bnx2x_mcast_ramrod_params *p, 3170 int cmd) 3171 { 3172 struct bnx2x_mcast_obj *o = p->mcast_obj; 3173 int reg_sz = o->get_registry_size(o); 3174 3175 switch (cmd) { 3176 /* DEL command deletes all currently configured MACs */ 3177 case BNX2X_MCAST_CMD_DEL: 3178 o->set_registry_size(o, 0); 3179 /* Don't break */ 3180 3181 /* RESTORE command will restore the entire multicast configuration */ 3182 case BNX2X_MCAST_CMD_RESTORE: 3183 p->mcast_list_len = reg_sz; 3184 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n", 3185 cmd, p->mcast_list_len); 3186 break; 3187 3188 case BNX2X_MCAST_CMD_ADD: 3189 case BNX2X_MCAST_CMD_CONT: 3190 /* Multicast MACs on 57710 are configured as unicast MACs and 3191 * there is only a limited number of CAM entries for that 3192 * matter. 3193 */ 3194 if (p->mcast_list_len > o->max_cmd_len) { 3195 BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n", 3196 o->max_cmd_len); 3197 return -EINVAL; 3198 } 3199 /* Every configured MAC should be cleared if DEL command is 3200 * called. Only the last ADD command is relevant as long as 3201 * every ADD commands overrides the previous configuration. 3202 */ 3203 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len); 3204 if (p->mcast_list_len > 0) 3205 o->set_registry_size(o, p->mcast_list_len); 3206 3207 break; 3208 3209 default: 3210 BNX2X_ERR("Unknown command: %d\n", cmd); 3211 return -EINVAL; 3212 3213 } 3214 3215 /* We want to ensure that commands are executed one by one for 57710. 3216 * Therefore each none-empty command will consume o->max_cmd_len. 3217 */ 3218 if (p->mcast_list_len) 3219 o->total_pending_num += o->max_cmd_len; 3220 3221 return 0; 3222 } 3223 3224 static void bnx2x_mcast_revert_e1(struct bnx2x *bp, 3225 struct bnx2x_mcast_ramrod_params *p, 3226 int old_num_macs) 3227 { 3228 struct bnx2x_mcast_obj *o = p->mcast_obj; 3229 3230 o->set_registry_size(o, old_num_macs); 3231 3232 /* If current command hasn't been handled yet and we are 3233 * here means that it's meant to be dropped and we have to 3234 * update the number of outstandling MACs accordingly. 
3235 */ 3236 if (p->mcast_list_len) 3237 o->total_pending_num -= o->max_cmd_len; 3238 } 3239 3240 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp, 3241 struct bnx2x_mcast_obj *o, int idx, 3242 union bnx2x_mcast_config_data *cfg_data, 3243 int cmd) 3244 { 3245 struct bnx2x_raw_obj *r = &o->raw; 3246 struct mac_configuration_cmd *data = 3247 (struct mac_configuration_cmd *)(r->rdata); 3248 3249 /* copy mac */ 3250 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) { 3251 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr, 3252 &data->config_table[idx].middle_mac_addr, 3253 &data->config_table[idx].lsb_mac_addr, 3254 cfg_data->mac); 3255 3256 data->config_table[idx].vlan_id = 0; 3257 data->config_table[idx].pf_id = r->func_id; 3258 data->config_table[idx].clients_bit_vector = 3259 cpu_to_le32(1 << r->cl_id); 3260 3261 SET_FLAG(data->config_table[idx].flags, 3262 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 3263 T_ETH_MAC_COMMAND_SET); 3264 } 3265 } 3266 3267 /** 3268 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd 3269 * 3270 * @bp: device handle 3271 * @p: 3272 * @len: number of rules to handle 3273 */ 3274 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp, 3275 struct bnx2x_mcast_ramrod_params *p, 3276 u8 len) 3277 { 3278 struct bnx2x_raw_obj *r = &p->mcast_obj->raw; 3279 struct mac_configuration_cmd *data = 3280 (struct mac_configuration_cmd *)(r->rdata); 3281 3282 u8 offset = (CHIP_REV_IS_SLOW(bp) ? 3283 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) : 3284 BNX2X_MAX_MULTICAST*(1 + r->func_id)); 3285 3286 data->hdr.offset = offset; 3287 data->hdr.client_id = 0xff; 3288 data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) | 3289 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); 3290 data->hdr.length = len; 3291 } 3292 3293 /** 3294 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710 3295 * 3296 * @bp: device handle 3297 * @o: 3298 * @start_idx: index in the registry to start from 3299 * @rdata_idx: index in the ramrod data to start from 3300 * 3301 * The restore command for 57710 is, like all other commands, always a 3302 * standalone command, so start_idx and rdata_idx will always be 0. This 3303 * function always succeeds. 3304 * Returns -1 to comply with the 57712 variant. 3305 */ 3306 static inline int bnx2x_mcast_handle_restore_cmd_e1( 3307 struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx, 3308 int *rdata_idx) 3309 { 3310 struct bnx2x_mcast_mac_elem *elem; 3311 int i = 0; 3312 union bnx2x_mcast_config_data cfg_data = {0}; 3313 3314 /* go through the registry and configure the MACs from it.
*/ 3315 list_for_each_entry(elem, &o->registry.exact_match.macs, link) { 3316 cfg_data.mac = &elem->mac[0]; 3317 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE); 3318 3319 i++; 3320 3321 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", 3322 cfg_data.mac); 3323 } 3324 3325 *rdata_idx = i; 3326 3327 return -1; 3328 } 3329 3330 3331 static inline int bnx2x_mcast_handle_pending_cmds_e1( 3332 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) 3333 { 3334 struct bnx2x_pending_mcast_cmd *cmd_pos; 3335 struct bnx2x_mcast_mac_elem *pmac_pos; 3336 struct bnx2x_mcast_obj *o = p->mcast_obj; 3337 union bnx2x_mcast_config_data cfg_data = {0}; 3338 int cnt = 0; 3339 3340 3341 /* If nothing to be done - return */ 3342 if (list_empty(&o->pending_cmds_head)) 3343 return 0; 3344 3345 /* Handle the first command */ 3346 cmd_pos = list_first_entry(&o->pending_cmds_head, 3347 struct bnx2x_pending_mcast_cmd, link); 3348 3349 switch (cmd_pos->type) { 3350 case BNX2X_MCAST_CMD_ADD: 3351 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) { 3352 cfg_data.mac = &pmac_pos->mac[0]; 3353 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); 3354 3355 cnt++; 3356 3357 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", 3358 pmac_pos->mac); 3359 } 3360 break; 3361 3362 case BNX2X_MCAST_CMD_DEL: 3363 cnt = cmd_pos->data.macs_num; 3364 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt); 3365 break; 3366 3367 case BNX2X_MCAST_CMD_RESTORE: 3368 o->hdl_restore(bp, o, 0, &cnt); 3369 break; 3370 3371 default: 3372 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); 3373 return -EINVAL; 3374 } 3375 3376 list_del(&cmd_pos->link); 3377 kfree(cmd_pos); 3378 3379 return cnt; 3380 } 3381 3382 /** 3383 * bnx2x_get_fw_mac_addr - reverse bnx2x_set_fw_mac_addr(). 3384 * 3385 * @fw_hi: 3386 * @fw_mid: 3387 * @fw_lo: 3388 * @mac: 3389 */ 3390 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, 3391 __le16 *fw_lo, u8 *mac) 3392 { 3393 mac[1] = ((u8 *)fw_hi)[0]; 3394 mac[0] = ((u8 *)fw_hi)[1]; 3395 mac[3] = ((u8 *)fw_mid)[0]; 3396 mac[2] = ((u8 *)fw_mid)[1]; 3397 mac[5] = ((u8 *)fw_lo)[0]; 3398 mac[4] = ((u8 *)fw_lo)[1]; 3399 } 3400 3401 /** 3402 * bnx2x_mcast_refresh_registry_e1 - update the exact match registry 3403 * 3404 * @bp: device handle 3405 * @cnt: 3406 * 3407 * Check the first entry's flag in the ramrod data to see whether it was 3408 * a DELETE or an ADD command and update the registry accordingly: if ADD, 3409 * allocate memory and add the entries to the registry (list); if DELETE, 3410 * clear the registry and free the memory.
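 * (Editor's note: the ADD branch below allocates all len registry entries with a single kcalloc(), which is why the DEL branch can free the whole array by kfree()ing only the first list entry.)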
3411 */ 3412 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, 3413 struct bnx2x_mcast_obj *o) 3414 { 3415 struct bnx2x_raw_obj *raw = &o->raw; 3416 struct bnx2x_mcast_mac_elem *elem; 3417 struct mac_configuration_cmd *data = 3418 (struct mac_configuration_cmd *)(raw->rdata); 3419 3420 /* If first entry contains a SET bit - the command was ADD, 3421 * otherwise - DEL_ALL 3422 */ 3423 if (GET_FLAG(data->config_table[0].flags, 3424 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) { 3425 int i, len = data->hdr.length; 3426 3427 /* Break if it was a RESTORE command */ 3428 if (!list_empty(&o->registry.exact_match.macs)) 3429 return 0; 3430 3431 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC); 3432 if (!elem) { 3433 BNX2X_ERR("Failed to allocate registry memory\n"); 3434 return -ENOMEM; 3435 } 3436 3437 for (i = 0; i < len; i++, elem++) { 3438 bnx2x_get_fw_mac_addr( 3439 &data->config_table[i].msb_mac_addr, 3440 &data->config_table[i].middle_mac_addr, 3441 &data->config_table[i].lsb_mac_addr, 3442 elem->mac); 3443 DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n", 3444 elem->mac); 3445 list_add_tail(&elem->link, 3446 &o->registry.exact_match.macs); 3447 } 3448 } else { 3449 elem = list_first_entry(&o->registry.exact_match.macs, 3450 struct bnx2x_mcast_mac_elem, link); 3451 DP(BNX2X_MSG_SP, "Deleting a registry\n"); 3452 kfree(elem); 3453 INIT_LIST_HEAD(&o->registry.exact_match.macs); 3454 } 3455 3456 return 0; 3457 } 3458 3459 static int bnx2x_mcast_setup_e1(struct bnx2x *bp, 3460 struct bnx2x_mcast_ramrod_params *p, 3461 int cmd) 3462 { 3463 struct bnx2x_mcast_obj *o = p->mcast_obj; 3464 struct bnx2x_raw_obj *raw = &o->raw; 3465 struct mac_configuration_cmd *data = 3466 (struct mac_configuration_cmd *)(raw->rdata); 3467 int cnt = 0, i, rc; 3468 3469 /* Reset the ramrod data buffer */ 3470 memset(data, 0, sizeof(*data)); 3471 3472 /* First set all entries as invalid */ 3473 for (i = 0; i < o->max_cmd_len ; i++) 3474 SET_FLAG(data->config_table[i].flags, 3475 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 3476 T_ETH_MAC_COMMAND_INVALIDATE); 3477 3478 /* Handle pending commands first */ 3479 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p); 3480 3481 /* If there are no more pending commands - clear SCHEDULED state */ 3482 if (list_empty(&o->pending_cmds_head)) 3483 o->clear_sched(o); 3484 3485 /* The below may be true iff there were no pending commands */ 3486 if (!cnt) 3487 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0); 3488 3489 /* For 57710 every command has o->max_cmd_len length to ensure that 3490 * commands are done one at a time. 3491 */ 3492 o->total_pending_num -= o->max_cmd_len; 3493 3494 /* send a ramrod */ 3495 3496 WARN_ON(cnt > o->max_cmd_len); 3497 3498 /* Set ramrod header (in particular, a number of entries to update) */ 3499 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt); 3500 3501 /* update a registry: we need the registry contents to be always up 3502 * to date in order to be able to execute a RESTORE opcode. Here 3503 * we use the fact that for 57710 we sent one command at a time 3504 * hence we may take the registry update out of the command handling 3505 * and do it in a simpler way here. 3506 */ 3507 rc = bnx2x_mcast_refresh_registry_e1(bp, o); 3508 if (rc) 3509 return rc; 3510 3511 /* 3512 * If CLEAR_ONLY was requested - don't send a ramrod and clear 3513 * RAMROD_PENDING status immediately. 
3514 */ 3515 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 3516 raw->clear_pending(raw); 3517 return 0; 3518 } else { 3519 /* 3520 * No need for an explicit memory barrier here: we would anyway 3521 * have to order the write to the SPQ element against the update 3522 * of the SPQ producer, which involves a memory read, and the 3523 * full memory barrier for that is put inside 3524 * bnx2x_sp_post(). 3525 */ 3526 3527 /* Send a ramrod */ 3528 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid, 3529 U64_HI(raw->rdata_mapping), 3530 U64_LO(raw->rdata_mapping), 3531 ETH_CONNECTION_TYPE); 3532 if (rc) 3533 return rc; 3534 3535 /* Ramrod completion is pending */ 3536 return 1; 3537 } 3538 3539 } 3540 3541 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o) 3542 { 3543 return o->registry.exact_match.num_macs_set; 3544 } 3545 3546 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o) 3547 { 3548 return o->registry.aprox_match.num_bins_set; 3549 } 3550 3551 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o, 3552 int n) 3553 { 3554 o->registry.exact_match.num_macs_set = n; 3555 } 3556 3557 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o, 3558 int n) 3559 { 3560 o->registry.aprox_match.num_bins_set = n; 3561 } 3562 3563 int bnx2x_config_mcast(struct bnx2x *bp, 3564 struct bnx2x_mcast_ramrod_params *p, 3565 int cmd) 3566 { 3567 struct bnx2x_mcast_obj *o = p->mcast_obj; 3568 struct bnx2x_raw_obj *r = &o->raw; 3569 int rc = 0, old_reg_size; 3570 3571 /* This is needed to recover the number of currently configured 3572 * mcast MACs in case of failure. 3573 */ 3574 old_reg_size = o->get_registry_size(o); 3575 3576 /* Do some calculations and checks */ 3577 rc = o->validate(bp, p, cmd); 3578 if (rc) 3579 return rc; 3580 3581 /* Return if there is no work to do */ 3582 if ((!p->mcast_list_len) && (!o->check_sched(o))) 3583 return 0; 3584 3585 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n", 3586 o->total_pending_num, p->mcast_list_len, o->max_cmd_len); 3587 3588 /* Enqueue the current command to the pending list if we can't complete 3589 * it in the current iteration 3590 */ 3591 if (r->check_pending(r) || 3592 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) { 3593 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd); 3594 if (rc < 0) 3595 goto error_exit1; 3596 3597 /* Once the current command is on the pending list, we 3598 * don't need to handle it separately.
3599 */ 3600 p->mcast_list_len = 0; 3601 } 3602 3603 if (!r->check_pending(r)) { 3604 3605 /* Set 'pending' state */ 3606 r->set_pending(r); 3607 3608 /* Configure the new classification in the chip */ 3609 rc = o->config_mcast(bp, p, cmd); 3610 if (rc < 0) 3611 goto error_exit2; 3612 3613 /* Wait for a ramrod completion if was requested */ 3614 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) 3615 rc = o->wait_comp(bp, o); 3616 } 3617 3618 return rc; 3619 3620 error_exit2: 3621 r->clear_pending(r); 3622 3623 error_exit1: 3624 o->revert(bp, p, old_reg_size); 3625 3626 return rc; 3627 } 3628 3629 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o) 3630 { 3631 smp_mb__before_clear_bit(); 3632 clear_bit(o->sched_state, o->raw.pstate); 3633 smp_mb__after_clear_bit(); 3634 } 3635 3636 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o) 3637 { 3638 smp_mb__before_clear_bit(); 3639 set_bit(o->sched_state, o->raw.pstate); 3640 smp_mb__after_clear_bit(); 3641 } 3642 3643 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o) 3644 { 3645 return !!test_bit(o->sched_state, o->raw.pstate); 3646 } 3647 3648 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o) 3649 { 3650 return o->raw.check_pending(&o->raw) || o->check_sched(o); 3651 } 3652 3653 void bnx2x_init_mcast_obj(struct bnx2x *bp, 3654 struct bnx2x_mcast_obj *mcast_obj, 3655 u8 mcast_cl_id, u32 mcast_cid, u8 func_id, 3656 u8 engine_id, void *rdata, dma_addr_t rdata_mapping, 3657 int state, unsigned long *pstate, bnx2x_obj_type type) 3658 { 3659 memset(mcast_obj, 0, sizeof(*mcast_obj)); 3660 3661 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id, 3662 rdata, rdata_mapping, state, pstate, type); 3663 3664 mcast_obj->engine_id = engine_id; 3665 3666 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head); 3667 3668 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED; 3669 mcast_obj->check_sched = bnx2x_mcast_check_sched; 3670 mcast_obj->set_sched = bnx2x_mcast_set_sched; 3671 mcast_obj->clear_sched = bnx2x_mcast_clear_sched; 3672 3673 if (CHIP_IS_E1(bp)) { 3674 mcast_obj->config_mcast = bnx2x_mcast_setup_e1; 3675 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; 3676 mcast_obj->hdl_restore = 3677 bnx2x_mcast_handle_restore_cmd_e1; 3678 mcast_obj->check_pending = bnx2x_mcast_check_pending; 3679 3680 if (CHIP_REV_IS_SLOW(bp)) 3681 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI; 3682 else 3683 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST; 3684 3685 mcast_obj->wait_comp = bnx2x_mcast_wait; 3686 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1; 3687 mcast_obj->validate = bnx2x_mcast_validate_e1; 3688 mcast_obj->revert = bnx2x_mcast_revert_e1; 3689 mcast_obj->get_registry_size = 3690 bnx2x_mcast_get_registry_size_exact; 3691 mcast_obj->set_registry_size = 3692 bnx2x_mcast_set_registry_size_exact; 3693 3694 /* 57710 is the only chip that uses the exact match for mcast 3695 * at the moment. 3696 */ 3697 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs); 3698 3699 } else if (CHIP_IS_E1H(bp)) { 3700 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h; 3701 mcast_obj->enqueue_cmd = NULL; 3702 mcast_obj->hdl_restore = NULL; 3703 mcast_obj->check_pending = bnx2x_mcast_check_pending; 3704 3705 /* 57711 doesn't send a ramrod, so it has unlimited credit 3706 * for one command. 
3707 */ 3708 mcast_obj->max_cmd_len = -1; 3709 mcast_obj->wait_comp = bnx2x_mcast_wait; 3710 mcast_obj->set_one_rule = NULL; 3711 mcast_obj->validate = bnx2x_mcast_validate_e1h; 3712 mcast_obj->revert = bnx2x_mcast_revert_e1h; 3713 mcast_obj->get_registry_size = 3714 bnx2x_mcast_get_registry_size_aprox; 3715 mcast_obj->set_registry_size = 3716 bnx2x_mcast_set_registry_size_aprox; 3717 } else { 3718 mcast_obj->config_mcast = bnx2x_mcast_setup_e2; 3719 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; 3720 mcast_obj->hdl_restore = 3721 bnx2x_mcast_handle_restore_cmd_e2; 3722 mcast_obj->check_pending = bnx2x_mcast_check_pending; 3723 /* TODO: There should be a proper HSI define for this number!!! 3724 */ 3725 mcast_obj->max_cmd_len = 16; 3726 mcast_obj->wait_comp = bnx2x_mcast_wait; 3727 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2; 3728 mcast_obj->validate = bnx2x_mcast_validate_e2; 3729 mcast_obj->revert = bnx2x_mcast_revert_e2; 3730 mcast_obj->get_registry_size = 3731 bnx2x_mcast_get_registry_size_aprox; 3732 mcast_obj->set_registry_size = 3733 bnx2x_mcast_set_registry_size_aprox; 3734 } 3735 } 3736 3737 /*************************** Credit handling **********************************/ 3738 3739 /** 3740 * __atomic_add_ifless - add if the result is less than a given value. 3741 * 3742 * @v: pointer of type atomic_t 3743 * @a: the amount to add to v... 3744 * @u: ...if (v + a) is less than u. 3745 * 3746 * returns true if (v + a) was less than u, and false otherwise. 3747 * 3748 */ 3749 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u) 3750 { 3751 int c, old; 3752 3753 c = atomic_read(v); 3754 for (;;) { 3755 if (unlikely(c + a >= u)) 3756 return false; 3757 3758 old = atomic_cmpxchg((v), c, c + a); 3759 if (likely(old == c)) 3760 break; 3761 c = old; 3762 } 3763 3764 return true; 3765 } 3766 3767 /** 3768 * __atomic_dec_ifmoe - decrement if the result is greater than or equal to a given value. 3769 * 3770 * @v: pointer of type atomic_t 3771 * @a: the amount to subtract from v... 3772 * @u: ...if (v - a) is greater than or equal to u. 3773 * 3774 * returns true if (v - a) was greater than or equal to u, and false 3775 * otherwise.
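 * Usage sketch (editor's illustration with hypothetical values): with the counter at 5, __atomic_dec_ifmoe(&v, 2, 0) succeeds and leaves 3, while a following __atomic_dec_ifmoe(&v, 4, 0) fails and leaves the counter untouched; this is exactly how bnx2x_credit_pool_get() below consumes credits.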
3776 */ 3777 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u) 3778 { 3779 int c, old; 3780 3781 c = atomic_read(v); 3782 for (;;) { 3783 if (unlikely(c - a < u)) 3784 return false; 3785 3786 old = atomic_cmpxchg((v), c, c - a); 3787 if (likely(old == c)) 3788 break; 3789 c = old; 3790 } 3791 3792 return true; 3793 } 3794 3795 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt) 3796 { 3797 bool rc; 3798 3799 smp_mb(); 3800 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0); 3801 smp_mb(); 3802 3803 return rc; 3804 } 3805 3806 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt) 3807 { 3808 bool rc; 3809 3810 smp_mb(); 3811 3812 /* Don't allow a refill if credit + cnt > pool_sz */ 3813 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1); 3814 3815 smp_mb(); 3816 3817 return rc; 3818 } 3819 3820 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o) 3821 { 3822 int cur_credit; 3823 3824 smp_mb(); 3825 cur_credit = atomic_read(&o->credit); 3826 3827 return cur_credit; 3828 } 3829 3830 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o, 3831 int cnt) 3832 { 3833 return true; 3834 } 3835 3836 3837 static bool bnx2x_credit_pool_get_entry( 3838 struct bnx2x_credit_pool_obj *o, 3839 int *offset) 3840 { 3841 int idx, vec, i; 3842 3843 *offset = -1; 3844 3845 /* Find the "internal cam-offset" then add to base for this object... */ 3846 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) { 3847 3848 /* Skip the current vector if there are no free entries in it */ 3849 if (!o->pool_mirror[vec]) 3850 continue; 3851 3852 /* If we've got here we are going to find a free entry */ 3853 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0; 3854 i < BIT_VEC64_ELEM_SZ; idx++, i++) 3855 3856 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { 3857 /* Got one!! */ 3858 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx); 3859 *offset = o->base_pool_offset + idx; 3860 return true; 3861 } 3862 } 3863 3864 return false; 3865 } 3866 3867 static bool bnx2x_credit_pool_put_entry( 3868 struct bnx2x_credit_pool_obj *o, 3869 int offset) 3870 { 3871 if (offset < o->base_pool_offset) 3872 return false; 3873 3874 offset -= o->base_pool_offset; 3875 3876 if (offset >= o->pool_sz) 3877 return false; 3878 3879 /* Return the entry to the pool */ 3880 BIT_VEC64_SET_BIT(o->pool_mirror, offset); 3881 3882 return true; 3883 } 3884 3885 static bool bnx2x_credit_pool_put_entry_always_true( 3886 struct bnx2x_credit_pool_obj *o, 3887 int offset) 3888 { 3889 return true; 3890 } 3891 3892 static bool bnx2x_credit_pool_get_entry_always_true( 3893 struct bnx2x_credit_pool_obj *o, 3894 int *offset) 3895 { 3896 *offset = -1; 3897 return true; 3898 } 3899 /** 3900 * bnx2x_init_credit_pool - initialize credit pool internals. 3901 * 3902 * @p: 3903 * @base: Base entry in the CAM to use. 3904 * @credit: pool size. 3905 * 3906 * If base is negative no CAM entry handling will be performed. 3907 * If credit is negative pool operations will always succeed (unlimited pool).
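 * For example (editor's note): bnx2x_init_credit_pool(p, -1, cam_sz), as used for 57712 and newer below, enables credit accounting but disables CAM offset handling, while bnx2x_init_credit_pool(p, 0, -1), used for the E1x VLAN pool, creates an unlimited pool whose get/put always succeed.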
3908 * 3909 */ 3910 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p, 3911 int base, int credit) 3912 { 3913 /* Zero the object first */ 3914 memset(p, 0, sizeof(*p)); 3915 3916 /* Set the table to all 1s */ 3917 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror)); 3918 3919 /* Init a pool as full */ 3920 atomic_set(&p->credit, credit); 3921 3922 /* The total pool size */ 3923 p->pool_sz = credit; 3924 3925 p->base_pool_offset = base; 3926 3927 /* Commit the change */ 3928 smp_mb(); 3929 3930 p->check = bnx2x_credit_pool_check; 3931 3932 /* if pool credit is negative - disable the checks */ 3933 if (credit >= 0) { 3934 p->put = bnx2x_credit_pool_put; 3935 p->get = bnx2x_credit_pool_get; 3936 p->put_entry = bnx2x_credit_pool_put_entry; 3937 p->get_entry = bnx2x_credit_pool_get_entry; 3938 } else { 3939 p->put = bnx2x_credit_pool_always_true; 3940 p->get = bnx2x_credit_pool_always_true; 3941 p->put_entry = bnx2x_credit_pool_put_entry_always_true; 3942 p->get_entry = bnx2x_credit_pool_get_entry_always_true; 3943 } 3944 3945 /* If base is negative - disable entries handling */ 3946 if (base < 0) { 3947 p->put_entry = bnx2x_credit_pool_put_entry_always_true; 3948 p->get_entry = bnx2x_credit_pool_get_entry_always_true; 3949 } 3950 } 3951 3952 void bnx2x_init_mac_credit_pool(struct bnx2x *bp, 3953 struct bnx2x_credit_pool_obj *p, u8 func_id, 3954 u8 func_num) 3955 { 3956 /* TODO: this will be defined in consts as well... */ 3957 #define BNX2X_CAM_SIZE_EMUL 5 3958 3959 int cam_sz; 3960 3961 if (CHIP_IS_E1(bp)) { 3962 /* In E1, Multicast is saved in cam... */ 3963 if (!CHIP_REV_IS_SLOW(bp)) 3964 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST; 3965 else 3966 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI; 3967 3968 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz); 3969 3970 } else if (CHIP_IS_E1H(bp)) { 3971 /* CAM credit is equally divided between all active functions 3972 * on the PORT. 3973 */ 3974 if (func_num > 0) { 3975 if (!CHIP_REV_IS_SLOW(bp)) 3976 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num)); 3977 else 3978 cam_sz = BNX2X_CAM_SIZE_EMUL; 3979 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz); 3980 } else { 3981 /* this should never happen! Block MAC operations. */ 3982 bnx2x_init_credit_pool(p, 0, 0); 3983 } 3984 3985 } else { 3986 3987 /* 3988 * CAM credit is equally divided between all active functions 3989 * on the PATH. 3990 */ 3991 if (func_num > 0) { 3992 if (!CHIP_REV_IS_SLOW(bp)) 3993 cam_sz = (MAX_MAC_CREDIT_E2 / func_num); 3994 else 3995 cam_sz = BNX2X_CAM_SIZE_EMUL; 3996 3997 /* 3998 * No need for CAM entries handling for 57712 and 3999 * newer. 4000 */ 4001 bnx2x_init_credit_pool(p, -1, cam_sz); 4002 } else { 4003 /* this should never happen! Block MAC operations. */ 4004 bnx2x_init_credit_pool(p, 0, 0); 4005 } 4006 4007 } 4008 } 4009 4010 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, 4011 struct bnx2x_credit_pool_obj *p, 4012 u8 func_id, 4013 u8 func_num) 4014 { 4015 if (CHIP_IS_E1x(bp)) { 4016 /* 4017 * There is no VLAN credit in HW on 57710 and 57711; only 4018 * MAC / MAC-VLAN can be set. 4019 */ 4020 bnx2x_init_credit_pool(p, 0, -1); 4021 } else { 4022 /* 4023 * CAM credit is equally divided between all active functions 4024 * on the PATH. 4025 */ 4026 if (func_num > 0) { 4027 int credit = MAX_VLAN_CREDIT_E2 / func_num; 4028 bnx2x_init_credit_pool(p, func_id * credit, credit); 4029 } else 4030 /* this should never happen! Block VLAN operations.
void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
				 struct bnx2x_credit_pool_obj *p,
				 u8 func_id,
				 u8 func_num)
{
	if (CHIP_IS_E1x(bp)) {
		/*
		 * There is no VLAN credit in HW on 57710 and 57711:
		 * only MAC / MAC-VLAN pairs can be set.
		 */
		bnx2x_init_credit_pool(p, 0, -1);
	} else {
		/*
		 * CAM credit is equally divided between all active functions
		 * on the PATH.
		 */
		if (func_num > 0) {
			int credit = MAX_VLAN_CREDIT_E2 / func_num;
			bnx2x_init_credit_pool(p, func_id * credit, credit);
		} else
			/* This should never happen! Block VLAN operations. */
			bnx2x_init_credit_pool(p, 0, 0);
	}
}

/****************** RSS Configuration ******************/
/**
 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
 *
 * @bp:		driver handle
 * @p:		pointer to rss configuration
 *
 * Prints it when NETIF_MSG_IFUP debug level is configured.
 */
static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
					struct bnx2x_config_rss_params *p)
{
	int i;

	DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
	DP(BNX2X_MSG_SP, "0x0000: ");
	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);

		/* Print 4 bytes per line */
		if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
		    (((i + 1) & 0x3) == 0)) {
			DP_CONT(BNX2X_MSG_SP, "\n");
			DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
		}
	}

	DP_CONT(BNX2X_MSG_SP, "\n");
}

/**
 * bnx2x_setup_rss - configure RSS
 *
 * @bp:		device handle
 * @p:		rss configuration
 *
 * Sends an RSS UPDATE ramrod for that matter.
 */
static int bnx2x_setup_rss(struct bnx2x *bp,
			   struct bnx2x_config_rss_params *p)
{
	struct bnx2x_rss_config_obj *o = p->rss_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	struct eth_rss_update_ramrod_data *data =
		(struct eth_rss_update_ramrod_data *)(r->rdata);
	u8 rss_mode = 0;
	int rc;

	memset(data, 0, sizeof(*data));

	DP(BNX2X_MSG_SP, "Configuring RSS\n");

	/* Set an echo field */
	data->echo = (r->cid & BNX2X_SWCID_MASK) |
		     (r->state << BNX2X_SWCID_SHIFT);

	/* RSS mode */
	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_DISABLED;
	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_REGULAR;

	data->rss_mode = rss_mode;

	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);

	/* RSS capabilities */
	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;

	/* Hashing mask */
	data->rss_result_mask = p->rss_result_mask;

	/* RSS engine ID */
	data->rss_engine_id = o->engine_id;

	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);

	/* Indirection table */
	memcpy(data->indirection_table, p->ind_table,
	       T_ETH_INDIRECTION_TABLE_SIZE);

	/* Remember the last configuration */
	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	/* Print the indirection table */
	if (netif_msg_ifup(bp))
		bnx2x_debug_print_ind_table(bp, p);

	/* RSS keys */
	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
		memcpy(&data->rss_key[0], &p->rss_key[0],
		       sizeof(data->rss_key));
		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
	}

	/*
	 * No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */

	/* Send a ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
			   U64_HI(r->rdata_mapping),
			   U64_LO(r->rdata_mapping),
			   ETH_CONNECTION_TYPE);

	if (rc < 0)
		return rc;

	return 1;
}
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
			     u8 *ind_table)
{
	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
}

int bnx2x_config_rss(struct bnx2x *bp,
		     struct bnx2x_config_rss_params *p)
{
	int rc;
	struct bnx2x_rss_config_obj *o = p->rss_obj;
	struct bnx2x_raw_obj *r = &o->raw;

	/* Do nothing if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
		return 0;

	r->set_pending(r);

	rc = o->config_rss(bp, p);
	if (rc < 0) {
		r->clear_pending(r);
		return rc;
	}

	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
		rc = r->wait_comp(bp, r);

	return rc;
}

void bnx2x_init_rss_config_obj(struct bnx2x *bp,
			       struct bnx2x_rss_config_obj *rss_obj,
			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
			       void *rdata, dma_addr_t rdata_mapping,
			       int state, unsigned long *pstate,
			       bnx2x_obj_type type)
{
	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
			   rdata_mapping, state, pstate, type);

	rss_obj->engine_id = engine_id;
	rss_obj->config_rss = bnx2x_setup_rss;
}
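/*
 * Example (illustrative sketch only): a caller might configure regular
 * IPv4/TCP hashing and wait for the ramrod completion like this. The
 * object pointer, mask and table contents are hypothetical placeholders.
 */
#if 0
static int example_config_rss(struct bnx2x *bp,
			      struct bnx2x_rss_config_obj *rss_obj)
{
	struct bnx2x_config_rss_params params = {0};
	int i;

	params.rss_obj = rss_obj;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);

	/* Hash result mask: e.g. 7 selects an 8-entry result space */
	params.rss_result_mask = 7;

	/* Even spread over 8 hypothetical client IDs (0..7) */
	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
		params.ind_table[i] = i & params.rss_result_mask;

	return bnx2x_config_rss(bp, &params);
}
#endif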
/********************** Queue state object ***********************************/

/**
 * bnx2x_queue_state_change - perform Queue state change transition
 *
 * @bp:		device handle
 * @params:	parameters to perform the transition
 *
 * returns 0 in case of a successfully completed transition, negative error
 * code in case of failure, positive (EBUSY) value if there is a completion
 * for it that is still pending (possible only if RAMROD_COMP_WAIT is
 * not set in params->ramrod_flags for asynchronous commands).
 */
int bnx2x_queue_state_change(struct bnx2x *bp,
			     struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	int rc, pending_bit;
	unsigned long *pending = &o->pending;

	/* Check that the requested transition is legal */
	if (o->check_transition(bp, o, params))
		return -EINVAL;

	/* Set the "pending" bit */
	pending_bit = o->set_pending(o, params);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
		o->complete_cmd(bp, o, pending_bit);
	else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);
		if (rc) {
			o->next_state = BNX2X_Q_STATE_MAX;
			clear_bit(pending_bit, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, pending_bit);
			if (rc)
				return rc;

			return 0;
		}
	}

	return !!test_bit(pending_bit, pending);
}
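/*
 * Example (illustrative sketch only): driving a queue through INIT
 * synchronously. The queue object here is a hypothetical placeholder;
 * real callers also fill params.params.init (HC settings, contexts).
 */
#if 0
static int example_queue_init(struct bnx2x *bp,
			      struct bnx2x_queue_sp_obj *q_obj)
{
	struct bnx2x_queue_state_params params = {NULL};

	params.q_obj = q_obj;
	params.cmd = BNX2X_Q_CMD_INIT;

	/* Block until the command is completed */
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	/* Returns 0 on a completed transition, -EINVAL if illegal */
	return bnx2x_queue_state_change(bp, &params);
}
#endif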
static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
				   struct bnx2x_queue_state_params *params)
{
	enum bnx2x_queue_cmd cmd = params->cmd, bit;

	/* ACTIVATE and DEACTIVATE commands are implemented on top of the
	 * UPDATE command.
	 */
	if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
	    (cmd == BNX2X_Q_CMD_DEACTIVATE))
		bit = BNX2X_Q_CMD_UPDATE;
	else
		bit = cmd;

	set_bit(bit, &obj->pending);
	return bit;
}

static int bnx2x_queue_wait_comp(struct bnx2x *bp,
				 struct bnx2x_queue_sp_obj *o,
				 enum bnx2x_queue_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}

/**
 * bnx2x_queue_comp_cmd - complete the state change command.
 *
 * @bp:		device handle
 * @o:		queue object
 * @cmd:	command to complete
 *
 * Checks that the arrived completion is expected.
 */
static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *o,
				enum bnx2x_queue_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
			  o->state, cur_pending, o->next_state);
		return -EINVAL;
	}

	if (o->next_tx_only >= o->max_cos)
		/* >= because tx only must always be smaller than cos since the
		 * primary connection supports COS 0
		 */
		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
			  o->next_tx_only, o->max_cos);

	DP(BNX2X_MSG_SP,
	   "Completing command %d for queue %d, setting state to %d\n",
	   cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);

	if (o->next_tx_only)	/* print the number of tx-only queues, if any */
		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
		   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);

	o->state = o->next_state;
	o->num_tx_only = o->next_tx_only;
	o->next_state = BNX2X_Q_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}

static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
				struct bnx2x_queue_state_params *cmd_params,
				struct client_init_ramrod_data *data)
{
	struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;

	/* Rx data */

	/* IPv6 TPA is supported for E2 and above only */
	data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
}

static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *o,
				struct bnx2x_general_setup_params *params,
				struct client_init_general_data *gen_data,
				unsigned long *flags)
{
	gen_data->client_id = o->cl_id;

	if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
		gen_data->statistics_counter_id =
					params->stat_id;
		gen_data->statistics_en_flg = 1;
		gen_data->statistics_zero_flg =
			test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
	} else
		gen_data->statistics_counter_id =
					DISABLE_STATISTIC_COUNTER_ID_VALUE;

	gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
	gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
	gen_data->sp_client_id = params->spcl_id;
	gen_data->mtu = cpu_to_le16(params->mtu);
	gen_data->func_id = o->func_id;

	gen_data->cos = params->cos;

	gen_data->traffic_type =
		test_bit(BNX2X_Q_FLG_FCOE, flags) ?
		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;

	DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
	   gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
}

static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_txq_setup_params *params,
				struct client_init_tx_data *tx_data,
				unsigned long *flags)
{
	tx_data->enforce_security_flg =
		test_bit(BNX2X_Q_FLG_TX_SEC, flags);
	tx_data->default_vlan =
		cpu_to_le16(params->default_vlan);
	tx_data->default_vlan_flg =
		test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
	tx_data->tx_switching_flg =
		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
	tx_data->anti_spoofing_flg =
		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
	tx_data->force_default_pri_flg =
		test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);

	tx_data->tx_status_block_id = params->fw_sb_id;
	tx_data->tx_sb_index_number = params->sb_cq_index;
	tx_data->tss_leading_client_id = params->tss_leading_cl_id;

	tx_data->tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	tx_data->tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));

	/* Don't configure any Tx switching mode during queue SETUP */
	tx_data->state = 0;
}

static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
				struct rxq_pause_params *params,
				struct client_init_rx_data *rx_data)
{
	/* flow control data */
	rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
	rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
	rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
	rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
	rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
	rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
	rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
}
static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_rxq_setup_params *params,
				struct client_init_rx_data *rx_data,
				unsigned long *flags)
{
	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
	rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
				CLIENT_INIT_RX_DATA_TPA_MODE;
	rx_data->vmqueue_mode_en_flg = 0;

	rx_data->cache_line_alignment_log_size =
		params->cache_line_log;
	rx_data->enable_dynamic_hc =
		test_bit(BNX2X_Q_FLG_DHC, flags);
	rx_data->max_sges_for_packet = params->max_sges_pkt;
	rx_data->client_qzone_id = params->cl_qzone_id;
	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);

	/* Always start in DROP_ALL mode */
	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);

	/* We don't set drop flags */
	rx_data->drop_ip_cs_err_flg = 0;
	rx_data->drop_tcp_cs_err_flg = 0;
	rx_data->drop_ttl0_flg = 0;
	rx_data->drop_udp_cs_err_flg = 0;
	rx_data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_VLAN, flags);
	rx_data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_OV, flags);
	rx_data->status_block_id = params->fw_sb_id;
	rx_data->rx_sb_index_number = params->sb_cq_index;
	rx_data->max_tpa_queues = params->max_tpa_queues;
	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
	rx_data->bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	rx_data->bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));
	rx_data->sge_page_base.lo =
		cpu_to_le32(U64_LO(params->sge_map));
	rx_data->sge_page_base.hi =
		cpu_to_le32(U64_HI(params->sge_map));
	rx_data->cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rcq_map));
	rx_data->cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rcq_map));
	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);

	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
		rx_data->is_approx_mcast = 1;
	}

	rx_data->rss_engine_id = params->rss_engine_id;

	/* silent vlan removal */
	rx_data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
	rx_data->silent_vlan_value =
		cpu_to_le16(params->silent_removal_value);
	rx_data->silent_vlan_mask =
		cpu_to_le16(params->silent_removal_mask);
}

/* initialize the general, tx and rx parts of a queue object */
static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
				struct bnx2x_queue_state_params *cmd_params,
				struct client_init_ramrod_data *data)
{
	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
				       &cmd_params->params.setup.gen_params,
				       &data->general,
				       &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
				  &cmd_params->params.setup.txq_params,
				  &data->tx,
				  &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
				  &cmd_params->params.setup.rxq_params,
				  &data->rx,
				  &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
				     &cmd_params->params.setup.pause_params,
				     &data->rx);
}
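/*
 * The fill helpers above rely on test_bit() returning exactly 0 or 1,
 * which lets a flag scale a constant into a bitmask without a branch.
 * A minimal stand-alone sketch of the idiom (hypothetical values):
 */
#if 0
	unsigned long flags = 0;
	u8 tpa_en;

	__set_bit(BNX2X_Q_FLG_TPA, &flags);

	/* 1 * MASK when the bit is set, 0 otherwise */
	tpa_en = test_bit(BNX2X_Q_FLG_TPA, &flags) *
			CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
#endif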
/* initialize the general and tx parts of a tx-only queue object */
static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
				struct bnx2x_queue_state_params *cmd_params,
				struct tx_queue_init_ramrod_data *data)
{
	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
				       &cmd_params->params.tx_only.gen_params,
				       &data->general,
				       &cmd_params->params.tx_only.flags);

	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
				  &cmd_params->params.tx_only.txq_params,
				  &data->tx,
				  &cmd_params->params.tx_only.flags);

	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
	   cmd_params->q_obj->cids[0],
	   data->tx.tx_bd_page_base.lo,
	   data->tx.tx_bd_page_base.hi);
}

/**
 * bnx2x_q_init - init HW/FW queue
 *
 * @bp:		device handle
 * @params:	queue state parameters
 *
 * HW/FW initial Queue configuration:
 *      - HC: Rx and Tx
 *      - CDU context validation
 *
 */
static inline int bnx2x_q_init(struct bnx2x *bp,
			       struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct bnx2x_queue_init_params *init = &params->params.init;
	u16 hc_usec;
	u8 cos;

	/* Tx HC configuration */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;

		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
			init->tx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
			hc_usec);
	}

	/* Rx HC configuration */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;

		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
			init->rx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
			hc_usec);
	}

	/* Set CDU context validation values */
	for (cos = 0; cos < o->max_cos; cos++) {
		DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
		   o->cids[cos], cos);
		DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
		bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
	}

	/* As no ramrod is sent, complete the command immediately */
	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);

	mmiowb();
	smp_mb();

	return 0;
}
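/*
 * Worked example: the host-coalescing rate above is expressed in
 * interrupts per second, so hc_usec is simply its period. With a
 * (hypothetical) hc_rate of 50000 ints/s, hc_usec = 1000000 / 50000
 * = 20 usec between status-block updates; an hc_rate of 0 disables
 * the timeout (hc_usec = 0).
 */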
static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_init_ramrod_data *rdata =
		(struct client_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);

	/*
	 * No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */

	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}

static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_init_ramrod_data *rdata =
		(struct client_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
	bnx2x_q_fill_setup_data_e2(bp, params, rdata);

	/*
	 * No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */

	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}

static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct tx_queue_init_ramrod_data *rdata =
		(struct tx_queue_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
		&params->params.tx_only;
	u8 cid_index = tx_only_params->cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
	   tx_only_params->gen_params.cos,
	   tx_only_params->gen_params.spcl_id);

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_tx_only(bp, params, rdata);

	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
	   o->cids[cid_index], rdata->general.client_id,
	   rdata->general.sp_client_id, rdata->general.cos);

	/*
	 * No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */

	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}
static void bnx2x_q_fill_update_data(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *obj,
				struct bnx2x_queue_update_params *params,
				struct client_update_ramrod_data *data)
{
	/* Client ID of the client to update */
	data->client_id = obj->cl_id;

	/* Function ID of the client to update */
	data->func_id = obj->func_id;

	/* Default VLAN value */
	data->default_vlan = cpu_to_le16(params->def_vlan);

	/* Inner VLAN stripping */
	data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
	data->inner_vlan_removal_change_flg =
		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
			 &params->update_flags);

	/* Outer VLAN stripping */
	data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
	data->outer_vlan_removal_change_flg =
		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
			 &params->update_flags);

	/* Drop packets that have a source MAC that doesn't belong to this
	 * Queue.
	 */
	data->anti_spoofing_enable_flg =
		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
	data->anti_spoofing_change_flg =
		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);

	/* Activate/Deactivate */
	data->activate_flg =
		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
	data->activate_change_flg =
		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);

	/* Enable default VLAN */
	data->default_vlan_enable_flg =
		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
	data->default_vlan_change_flg =
		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			 &params->update_flags);

	/* silent vlan removal */
	data->silent_vlan_change_flg =
		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			 &params->update_flags);
	data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
}

static inline int bnx2x_q_send_update(struct bnx2x *bp,
				      struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_update_ramrod_data *rdata =
		(struct client_update_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_queue_update_params *update_params =
		&params->params.update;
	u8 cid_index = update_params->cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_update_data(bp, o, update_params, rdata);

	/*
	 * No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
			     o->cids[cid_index], U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}
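/*
 * Each UPDATE attribute travels as a value/change pair: the *_CHNG bit
 * tells the FW the field is meaningful, the value bit carries the new
 * setting. Example (illustrative sketch of a hypothetical caller)
 * enabling a default VLAN on sub-queue 0:
 */
#if 0
static int example_enable_def_vlan(struct bnx2x *bp,
				   struct bnx2x_queue_state_params *params,
				   u16 vlan)
{
	struct bnx2x_queue_update_params *update = &params->params.update;

	memset(update, 0, sizeof(*update));

	update->cid_index = 0;
	update->def_vlan = vlan;
	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &update->update_flags);
	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, &update->update_flags);

	params->cmd = BNX2X_Q_CMD_UPDATE;
	return bnx2x_queue_state_change(bp, params);
}
#endif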
/**
 * bnx2x_q_send_deactivate - send DEACTIVATE command
 *
 * @bp:		device handle
 * @params:	queue state parameters
 *
 * implemented using the UPDATE command.
 */
static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_update_params *update = &params->params.update;

	memset(update, 0, sizeof(*update));

	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);

	return bnx2x_q_send_update(bp, params);
}

/**
 * bnx2x_q_send_activate - send ACTIVATE command
 *
 * @bp:		device handle
 * @params:	queue state parameters
 *
 * implemented using the UPDATE command.
 */
static inline int bnx2x_q_send_activate(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_update_params *update = &params->params.update;

	memset(update, 0, sizeof(*update));

	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);

	return bnx2x_q_send_update(bp, params);
}

static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	/* TODO: Not implemented yet. */
	return -1;
}

static inline int bnx2x_q_send_halt(struct bnx2x *bp,
				    struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
			     ETH_CONNECTION_TYPE);
}

static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
				       struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	u8 cid_idx = params->params.cfc_del.cid_index;

	if (cid_idx >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_idx);
		return -EINVAL;
	}

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
			     o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
}

static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	u8 cid_index = params->params.terminate.cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
			     o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
}

static inline int bnx2x_q_send_empty(struct bnx2x *bp,
				     struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
			     ETH_CONNECTION_TYPE);
}

static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_INIT:
		return bnx2x_q_init(bp, params);
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
		return bnx2x_q_send_setup_tx_only(bp, params);
	case BNX2X_Q_CMD_DEACTIVATE:
		return bnx2x_q_send_deactivate(bp, params);
	case BNX2X_Q_CMD_ACTIVATE:
		return bnx2x_q_send_activate(bp, params);
	case BNX2X_Q_CMD_UPDATE:
		return bnx2x_q_send_update(bp, params);
	case BNX2X_Q_CMD_UPDATE_TPA:
		return bnx2x_q_send_update_tpa(bp, params);
	case BNX2X_Q_CMD_HALT:
		return bnx2x_q_send_halt(bp, params);
	case BNX2X_Q_CMD_CFC_DEL:
		return bnx2x_q_send_cfc_del(bp, params);
	case BNX2X_Q_CMD_TERMINATE:
		return bnx2x_q_send_terminate(bp, params);
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_q_send_empty(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
				    struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e1x(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
				   struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e2(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

/**
 * bnx2x_queue_chk_transition - check the state machine of a regular Queue
 *
 * @bp:		device handle
 * @o:		queue object
 * @params:	queue state parameters
 *
 * (not Forwarding)
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         -EINVAL otherwise.
 */
static int bnx2x_queue_chk_transition(struct bnx2x *bp,
				      struct bnx2x_queue_sp_obj *o,
				      struct bnx2x_queue_state_params *params)
{
	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
	enum bnx2x_queue_cmd cmd = params->cmd;
	struct bnx2x_queue_update_params *update_params =
		 &params->params.update;
	u8 next_tx_only = o->num_tx_only;

	/*
	 * Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = BNX2X_Q_STATE_MAX;
	}

	/*
	 * Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending)
		return -EBUSY;
	switch (state) {
	case BNX2X_Q_STATE_RESET:
		if (cmd == BNX2X_Q_CMD_INIT)
			next_state = BNX2X_Q_STATE_INITIALIZED;

		break;
	case BNX2X_Q_STATE_INITIALIZED:
		if (cmd == BNX2X_Q_CMD_SETUP) {
			if (test_bit(BNX2X_Q_FLG_ACTIVE,
				     &params->params.setup.flags))
				next_state = BNX2X_Q_STATE_ACTIVE;
			else
				next_state = BNX2X_Q_STATE_INACTIVE;
		}

		break;
	case BNX2X_Q_STATE_ACTIVE:
		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
			next_state = BNX2X_Q_STATE_INACTIVE;

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_ACTIVE;

		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
			next_state = BNX2X_Q_STATE_MULTI_COS;
			next_tx_only = 1;
		}

		else if (cmd == BNX2X_Q_CMD_HALT)
			next_state = BNX2X_Q_STATE_STOPPED;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				      &update_params->update_flags))
				next_state = BNX2X_Q_STATE_INACTIVE;
			else
				next_state = BNX2X_Q_STATE_ACTIVE;
		}

		break;
	case BNX2X_Q_STATE_MULTI_COS:
		if (cmd == BNX2X_Q_CMD_TERMINATE)
			next_state = BNX2X_Q_STATE_MCOS_TERMINATED;

		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
			next_state = BNX2X_Q_STATE_MULTI_COS;
			next_tx_only = o->num_tx_only + 1;
		}

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_MULTI_COS;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				      &update_params->update_flags))
				next_state = BNX2X_Q_STATE_INACTIVE;
			else
				next_state = BNX2X_Q_STATE_MULTI_COS;
		}

		break;
	case BNX2X_Q_STATE_MCOS_TERMINATED:
		if (cmd == BNX2X_Q_CMD_CFC_DEL) {
			next_tx_only = o->num_tx_only - 1;
			if (next_tx_only == 0)
				next_state = BNX2X_Q_STATE_ACTIVE;
			else
				next_state = BNX2X_Q_STATE_MULTI_COS;
		}

		break;
	case BNX2X_Q_STATE_INACTIVE:
		if (cmd == BNX2X_Q_CMD_ACTIVATE)
			next_state = BNX2X_Q_STATE_ACTIVE;

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_INACTIVE;

		else if (cmd == BNX2X_Q_CMD_HALT)
			next_state = BNX2X_Q_STATE_STOPPED;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				     &update_params->update_flags)) {
				if (o->num_tx_only == 0)
					next_state = BNX2X_Q_STATE_ACTIVE;
				else /* tx only queues exist for this queue */
					next_state = BNX2X_Q_STATE_MULTI_COS;
			} else
				next_state = BNX2X_Q_STATE_INACTIVE;
		}

		break;
	case BNX2X_Q_STATE_STOPPED:
		if (cmd == BNX2X_Q_CMD_TERMINATE)
			next_state = BNX2X_Q_STATE_TERMINATED;

		break;
	case BNX2X_Q_STATE_TERMINATED:
		if (cmd == BNX2X_Q_CMD_CFC_DEL)
			next_state = BNX2X_Q_STATE_RESET;

		break;
	default:
		BNX2X_ERR("Illegal state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != BNX2X_Q_STATE_MAX) {
		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
		   state, cmd, next_state);
		o->next_state = next_state;
		o->next_tx_only = next_tx_only;
		return 0;
	}

	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);

	return -EINVAL;
}

void bnx2x_init_queue_obj(struct bnx2x *bp,
			  struct bnx2x_queue_sp_obj *obj,
			  u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
			  void *rdata,
			  dma_addr_t rdata_mapping, unsigned long type)
{
	memset(obj, 0, sizeof(*obj));

	/* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
	BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);

	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
	obj->max_cos = cid_cnt;
	obj->cl_id = cl_id;
	obj->func_id = func_id;
	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;
	obj->type = type;
	obj->next_state = BNX2X_Q_STATE_MAX;

	if (CHIP_IS_E1x(bp))
		obj->send_cmd = bnx2x_queue_send_cmd_e1x;
	else
		obj->send_cmd = bnx2x_queue_send_cmd_e2;

	obj->check_transition = bnx2x_queue_chk_transition;

	obj->complete_cmd = bnx2x_queue_comp_cmd;
	obj->wait_comp = bnx2x_queue_wait_comp;
	obj->set_pending = bnx2x_queue_set_pending;
}
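/*
 * A condensed view of the transitions checked by
 * bnx2x_queue_chk_transition() above (the code is authoritative):
 *
 *   RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *   ACTIVE <--ACTIVATE/DEACTIVATE--> INACTIVE
 *   ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *   TERMINATED --CFC_DEL--> RESET
 *   ACTIVE --SETUP_TX_ONLY--> MULTI_COS (one per extra CoS queue)
 *   MULTI_COS --TERMINATE--> MCOS_TERMINATED --CFC_DEL--> ACTIVE
 *       (or back to MULTI_COS while tx-only queues remain)
 */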
/********************** Function state object *********************************/
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
					   struct bnx2x_func_sp_obj *o)
{
	/* in the middle of a transaction - return INVALID state */
	if (o->pending)
		return BNX2X_F_STATE_MAX;

	/*
	 * Ensure the order of reading of o->pending and o->state:
	 * o->pending should be read first.
	 */
	rmb();

	return o->state;
}

static int bnx2x_func_wait_comp(struct bnx2x *bp,
				struct bnx2x_func_sp_obj *o,
				enum bnx2x_func_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}

/**
 * bnx2x_func_state_change_comp - complete the state machine transition
 *
 * @bp:		device handle
 * @o:		function object
 * @cmd:	command to complete
 *
 * Called on state change transition. Completes the state
 * machine transition only - no HW interaction.
 */
static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
					       struct bnx2x_func_sp_obj *o,
					       enum bnx2x_func_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, BP_FUNC(bp), o->state,
			  cur_pending, o->next_state);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP,
	   "Completing command %d for func %d, setting state to %d\n",
	   cmd, BP_FUNC(bp), o->next_state);

	o->state = o->next_state;
	o->next_state = BNX2X_F_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}

/**
 * bnx2x_func_comp_cmd - complete the state change command
 *
 * @bp:		device handle
 * @o:		function object
 * @cmd:	command to complete
 *
 * Checks that the arrived completion is expected.
 */
static int bnx2x_func_comp_cmd(struct bnx2x *bp,
			       struct bnx2x_func_sp_obj *o,
			       enum bnx2x_func_cmd cmd)
{
	/* Complete the state machine part first, check if it's a
	 * legal completion.
	 */
	int rc = bnx2x_func_state_change_comp(bp, o, cmd);
	return rc;
}

/**
 * bnx2x_func_chk_transition - check the function state machine transition
 *
 * @bp:		device handle
 * @o:		function object
 * @params:	function state parameters
 *
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         -EINVAL otherwise.
 */
static int bnx2x_func_chk_transition(struct bnx2x *bp,
				     struct bnx2x_func_sp_obj *o,
				     struct bnx2x_func_state_params *params)
{
	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
	enum bnx2x_func_cmd cmd = params->cmd;

	/*
	 * Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = BNX2X_F_STATE_MAX;
	}

	/*
	 * Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending)
		return -EBUSY;
	switch (state) {
	case BNX2X_F_STATE_RESET:
		if (cmd == BNX2X_F_CMD_HW_INIT)
			next_state = BNX2X_F_STATE_INITIALIZED;

		break;
	case BNX2X_F_STATE_INITIALIZED:
		if (cmd == BNX2X_F_CMD_START)
			next_state = BNX2X_F_STATE_STARTED;

		else if (cmd == BNX2X_F_CMD_HW_RESET)
			next_state = BNX2X_F_STATE_RESET;

		break;
	case BNX2X_F_STATE_STARTED:
		if (cmd == BNX2X_F_CMD_STOP)
			next_state = BNX2X_F_STATE_INITIALIZED;
		/* afex ramrods can be sent only in started mode, and only
		 * if not pending for function_stop ramrod completion;
		 * for these events the next state remains STARTED.
		 */
		else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		/* Switch_update ramrod can be sent in either started or
		 * tx_stopped state, and it doesn't change the state.
		 */
		else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		else if (cmd == BNX2X_F_CMD_TX_STOP)
			next_state = BNX2X_F_STATE_TX_STOPPED;

		break;
	case BNX2X_F_STATE_TX_STOPPED:
		if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
		    (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_TX_STOPPED;

		else if (cmd == BNX2X_F_CMD_TX_START)
			next_state = BNX2X_F_STATE_STARTED;

		break;
	default:
		BNX2X_ERR("Unknown state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != BNX2X_F_STATE_MAX) {
		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
		   state, cmd, next_state);
		o->next_state = next_state;
		return 0;
	}

	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
	   state, cmd);

	return -EINVAL;
}

/**
 * bnx2x_func_init_func - performs HW init at function stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW ops
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
 * HW blocks.
 */
static inline int bnx2x_func_init_func(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	return drv->init_hw_func(bp);
}

/**
 * bnx2x_func_init_port - performs HW init at port stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW ops
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
 * FUNCTION-only HW blocks.
 *
 */
static inline int bnx2x_func_init_port(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_port(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_func(bp, drv);
}

/**
 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW ops
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_cmn_chip(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_port(bp, drv);
}

/**
 * bnx2x_func_init_cmn - performs HW init at common stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW ops
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
				      const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_cmn(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_port(bp, drv);
}
static int bnx2x_func_hw_init(struct bnx2x *bp,
			      struct bnx2x_func_state_params *params)
{
	u32 load_code = params->params.hw_init.load_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
	int rc = 0;

	DP(BNX2X_MSG_SP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	/* Prepare buffers for unzipping the FW */
	rc = drv->gunzip_init(bp);
	if (rc)
		return rc;

	/* Prepare FW */
	rc = drv->init_fw(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		goto init_err;
	}

	/* Handle the beginning of the COMMON_XXX phases separately... */
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_func_init_cmn_chip(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_func_init_cmn(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_func_init_port(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_func_init_func(bp, drv);
		if (rc)
			goto init_err;

		break;
	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		rc = -EINVAL;
	}

init_err:
	drv->gunzip_end(bp);

	/* In case of success, complete the command immediately: no ramrods
	 * have been sent.
	 */
	if (!rc)
		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);

	return rc;
}

/**
 * bnx2x_func_reset_func - reset HW at function stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW ops
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
 * FUNCTION-only HW blocks.
 */
static inline void bnx2x_func_reset_func(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_func(bp);
}

/**
 * bnx2x_func_reset_port - reset HW at port stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW ops
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
 * FUNCTION-only and PORT-only HW blocks.
 *
 * !!!IMPORTANT!!!
 *
 * It's important to call reset_port before reset_func() as the last thing
 * reset_func does is pf_disable() thus disabling PGLUE_B, which
 * makes any DMAE transactions impossible.
 */
static inline void bnx2x_func_reset_port(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_port(bp);
	bnx2x_func_reset_func(bp, drv);
}

/**
 * bnx2x_func_reset_cmn - reset HW at common stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW ops
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
 */
static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	bnx2x_func_reset_port(bp, drv);
	drv->reset_hw_cmn(bp);
}
static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
				      struct bnx2x_func_state_params *params)
{
	u32 reset_phase = params->params.hw_reset.reset_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;

	DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
	   reset_phase);

	switch (reset_phase) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_func_reset_cmn(bp, drv);
		break;
	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_func_reset_port(bp, drv);
		break;
	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_func_reset_func(bp, drv);
		break;
	default:
		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
			  reset_phase);
		break;
	}

	/* Complete the command immediately: no ramrods have been sent. */
	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);

	return 0;
}

static inline int bnx2x_func_send_start(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_start_data *rdata =
		(struct function_start_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_start_params *start_params = &params->params.start;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->function_mode = (u8)start_params->mf_mode;
	rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
	rdata->path_id = BP_PATH(bp);
	rdata->network_cos_mode = start_params->network_cos_mode;

	/*
	 * No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_update_data *rdata =
		(struct function_update_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_switch_update_params *switch_update_params =
		&params->params.switch_update;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->tx_switch_suspend_change_flg = 1;
	rdata->tx_switch_suspend = switch_update_params->suspend;
	rdata->echo = SWITCH_UPDATE;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_update_data *rdata =
		(struct function_update_data *)o->afex_rdata;
	dma_addr_t data_mapping = o->afex_rdata_mapping;
	struct bnx2x_func_afex_update_params *afex_update_params =
		&params->params.afex_update;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_id_change_flg = 1;
	rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
	rdata->afex_default_vlan_change_flg = 1;
	rdata->afex_default_vlan =
		cpu_to_le16(afex_update_params->afex_default_vlan);
	rdata->allowed_priorities_change_flg = 1;
	rdata->allowed_priorities = afex_update_params->allowed_priorities;
	rdata->echo = AFEX_UPDATE;

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */
	DP(BNX2X_MSG_SP,
	   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
	   rdata->vif_id,
	   rdata->afex_default_vlan, rdata->allowed_priorities);

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
static inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct afex_vif_list_ramrod_data *rdata =
		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
	struct bnx2x_func_afex_viflists_params *afex_viflist_params =
		&params->params.afex_viflists;
	u64 *p_rdata = (u64 *)rdata;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_list_index = afex_viflist_params->vif_list_index;
	rdata->func_bit_map = afex_viflist_params->func_bit_map;
	rdata->afex_vif_list_command =
		afex_viflist_params->afex_vif_list_command;
	rdata->func_to_clear = afex_viflist_params->func_to_clear;

	/* send the sub-command type in the echo field */
	rdata->echo = afex_viflist_params->afex_vif_list_command;

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */

	DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
	   rdata->afex_vif_list_command, rdata->vif_list_index,
	   rdata->func_bit_map, rdata->func_to_clear);

	/* this ramrod sends data directly and not through DMA mapping */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
			     U64_HI(*p_rdata), U64_LO(*p_rdata),
			     NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
					  struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
					   struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct flow_control_configuration *rdata =
		(struct flow_control_configuration *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_tx_start_params *tx_start_params =
		&params->params.tx_start;
	int i;

	memset(rdata, 0, sizeof(*rdata));

	rdata->dcb_enabled = tx_start_params->dcb_enabled;
	rdata->dcb_version = tx_start_params->dcb_version;
	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;

	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
		rdata->traffic_type_to_priority_cos[i] =
			tx_start_params->traffic_type_to_priority_cos[i];

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
static int bnx2x_func_send_cmd(struct bnx2x *bp,
			       struct bnx2x_func_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_F_CMD_HW_INIT:
		return bnx2x_func_hw_init(bp, params);
	case BNX2X_F_CMD_START:
		return bnx2x_func_send_start(bp, params);
	case BNX2X_F_CMD_STOP:
		return bnx2x_func_send_stop(bp, params);
	case BNX2X_F_CMD_HW_RESET:
		return bnx2x_func_hw_reset(bp, params);
	case BNX2X_F_CMD_AFEX_UPDATE:
		return bnx2x_func_send_afex_update(bp, params);
	case BNX2X_F_CMD_AFEX_VIFLISTS:
		return bnx2x_func_send_afex_viflists(bp, params);
	case BNX2X_F_CMD_TX_STOP:
		return bnx2x_func_send_tx_stop(bp, params);
	case BNX2X_F_CMD_TX_START:
		return bnx2x_func_send_tx_start(bp, params);
	case BNX2X_F_CMD_SWITCH_UPDATE:
		return bnx2x_func_send_switch_update(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

void bnx2x_init_func_obj(struct bnx2x *bp,
			 struct bnx2x_func_sp_obj *obj,
			 void *rdata, dma_addr_t rdata_mapping,
			 void *afex_rdata, dma_addr_t afex_rdata_mapping,
			 struct bnx2x_func_sp_drv_ops *drv_iface)
{
	memset(obj, 0, sizeof(*obj));

	mutex_init(&obj->one_pending_mutex);

	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;
	obj->afex_rdata = afex_rdata;
	obj->afex_rdata_mapping = afex_rdata_mapping;
	obj->send_cmd = bnx2x_func_send_cmd;
	obj->check_transition = bnx2x_func_chk_transition;
	obj->complete_cmd = bnx2x_func_comp_cmd;
	obj->wait_comp = bnx2x_func_wait_comp;

	obj->drv = drv_iface;
}
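/*
 * Example (illustrative sketch only): the drv_iface passed to
 * bnx2x_init_func_obj() supplies the chip-specific callbacks used by
 * the HW init/reset flows above. A caller would wire it roughly like
 * this; the example_* callees named here are hypothetical placeholders.
 */
#if 0
static struct bnx2x_func_sp_drv_ops example_drv_ops = {
	/* HW init stages, invoked according to the MCP load phase */
	.init_hw_cmn_chip = example_init_hw_cmn_chip,
	.init_hw_cmn = example_init_hw_cmn,
	.init_hw_port = example_init_hw_port,
	.init_hw_func = example_init_hw_func,

	/* HW reset stages, invoked according to the MCP unload phase */
	.reset_hw_cmn = example_reset_hw_cmn,
	.reset_hw_port = example_reset_hw_port,
	.reset_hw_func = example_reset_hw_func,

	/* FW handling used by bnx2x_func_hw_init() */
	.gunzip_init = example_gunzip_init,
	.gunzip_end = example_gunzip_end,
	.init_fw = example_init_fw,
};
#endif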
/**
 * bnx2x_func_state_change - perform Function state change transition
 *
 * @bp:		device handle
 * @params:	parameters to perform the transition
 *
 * returns 0 in case of a successfully completed transition,
 *         negative error code in case of failure, positive
 *         (EBUSY) value if there is a completion for it that is
 *         still pending (possible only if RAMROD_COMP_WAIT is
 *         not set in params->ramrod_flags for asynchronous
 *         commands).
 */
int bnx2x_func_state_change(struct bnx2x *bp,
			    struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	int rc, cnt = 300;
	enum bnx2x_func_cmd cmd = params->cmd;
	unsigned long *pending = &o->pending;

	mutex_lock(&o->one_pending_mutex);

	/* Check that the requested transition is legal */
	rc = o->check_transition(bp, o, params);
	if ((rc == -EBUSY) &&
	    (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
		while ((rc == -EBUSY) && (--cnt > 0)) {
			mutex_unlock(&o->one_pending_mutex);
			msleep(10);
			mutex_lock(&o->one_pending_mutex);
			rc = o->check_transition(bp, o, params);
		}
		if (rc == -EBUSY) {
			mutex_unlock(&o->one_pending_mutex);
			BNX2X_ERR("timeout waiting for previous ramrod completion\n");
			return rc;
		}
	} else if (rc) {
		mutex_unlock(&o->one_pending_mutex);
		return rc;
	}

	/* Set the "pending" bit */
	set_bit(cmd, pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		bnx2x_func_state_change_comp(bp, o, cmd);
		mutex_unlock(&o->one_pending_mutex);
	} else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);

		mutex_unlock(&o->one_pending_mutex);

		if (rc) {
			o->next_state = BNX2X_F_STATE_MAX;
			clear_bit(cmd, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, cmd);
			if (rc)
				return rc;

			return 0;
		}
	}

	return !!test_bit(cmd, pending);
}
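/*
 * Example (illustrative sketch only): stopping Tx traffic on the
 * function synchronously via the state machine entry point above.
 * The function object pointer is a hypothetical placeholder.
 */
#if 0
static int example_func_tx_stop(struct bnx2x *bp,
				struct bnx2x_func_sp_obj *f_obj)
{
	struct bnx2x_func_state_params params = {NULL};

	params.f_obj = f_obj;
	params.cmd = BNX2X_F_CMD_TX_STOP;

	/* Retry while a previous command is pending, then wait for
	 * this command's completion.
	 */
	__set_bit(RAMROD_RETRY, &params.ramrod_flags);
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return bnx2x_func_state_change(bp, &params);
}
#endif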