14ad79e13SYuval Mintz /* bnx2x_sp.c: Qlogic Everest network driver. 2adfc5217SJeff Kirsher * 34ad79e13SYuval Mintz * Copyright 2011-2013 Broadcom Corporation 44ad79e13SYuval Mintz * Copyright (c) 2014 QLogic Corporation 54ad79e13SYuval Mintz * All rights reserved 6adfc5217SJeff Kirsher * 74ad79e13SYuval Mintz * Unless you and Qlogic execute a separate written software license 8adfc5217SJeff Kirsher * agreement governing use of this software, this software is licensed to you 9adfc5217SJeff Kirsher * under the terms of the GNU General Public License version 2, available 104ad79e13SYuval Mintz * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL"). 11adfc5217SJeff Kirsher * 12adfc5217SJeff Kirsher * Notwithstanding the above, under no circumstances may you combine this 134ad79e13SYuval Mintz * software in any way with any other Qlogic software provided under a 144ad79e13SYuval Mintz * license other than the GPL, without Qlogic's express prior written 15adfc5217SJeff Kirsher * consent. 16adfc5217SJeff Kirsher * 1708f6dd89SAriel Elior * Maintained by: Ariel Elior <ariel.elior@qlogic.com> 18adfc5217SJeff Kirsher * Written by: Vladislav Zolotarov 19adfc5217SJeff Kirsher * 20adfc5217SJeff Kirsher */ 21f1deab50SJoe Perches 22f1deab50SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 23f1deab50SJoe Perches 24adfc5217SJeff Kirsher #include <linux/module.h> 25adfc5217SJeff Kirsher #include <linux/crc32.h> 26adfc5217SJeff Kirsher #include <linux/netdevice.h> 27adfc5217SJeff Kirsher #include <linux/etherdevice.h> 28adfc5217SJeff Kirsher #include <linux/crc32c.h> 29adfc5217SJeff Kirsher #include "bnx2x.h" 30adfc5217SJeff Kirsher #include "bnx2x_cmn.h" 31adfc5217SJeff Kirsher #include "bnx2x_sp.h" 32adfc5217SJeff Kirsher 33adfc5217SJeff Kirsher #define BNX2X_MAX_EMUL_MULTI 16 34adfc5217SJeff Kirsher 35adfc5217SJeff Kirsher /**** Exe Queue interfaces ****/ 36adfc5217SJeff Kirsher 37adfc5217SJeff Kirsher /** 38adfc5217SJeff Kirsher * bnx2x_exe_queue_init - init the Exe 
Queue object 39adfc5217SJeff Kirsher * 4016a5fd92SYuval Mintz * @o: pointer to the object 41adfc5217SJeff Kirsher * @exe_len: length 4216a5fd92SYuval Mintz * @owner: pointer to the owner 43adfc5217SJeff Kirsher * @validate: validate function pointer 44adfc5217SJeff Kirsher * @optimize: optimize function pointer 45adfc5217SJeff Kirsher * @exec: execute function pointer 46adfc5217SJeff Kirsher * @get: get function pointer 47adfc5217SJeff Kirsher */ 48adfc5217SJeff Kirsher static inline void bnx2x_exe_queue_init(struct bnx2x *bp, 49adfc5217SJeff Kirsher struct bnx2x_exe_queue_obj *o, 50adfc5217SJeff Kirsher int exe_len, 51adfc5217SJeff Kirsher union bnx2x_qable_obj *owner, 52adfc5217SJeff Kirsher exe_q_validate validate, 53460a25cdSYuval Mintz exe_q_remove remove, 54adfc5217SJeff Kirsher exe_q_optimize optimize, 55adfc5217SJeff Kirsher exe_q_execute exec, 56adfc5217SJeff Kirsher exe_q_get get) 57adfc5217SJeff Kirsher { 58adfc5217SJeff Kirsher memset(o, 0, sizeof(*o)); 59adfc5217SJeff Kirsher 60adfc5217SJeff Kirsher INIT_LIST_HEAD(&o->exe_queue); 61adfc5217SJeff Kirsher INIT_LIST_HEAD(&o->pending_comp); 62adfc5217SJeff Kirsher 63adfc5217SJeff Kirsher spin_lock_init(&o->lock); 64adfc5217SJeff Kirsher 65adfc5217SJeff Kirsher o->exe_chunk_len = exe_len; 66adfc5217SJeff Kirsher o->owner = owner; 67adfc5217SJeff Kirsher 68adfc5217SJeff Kirsher /* Owner specific callbacks */ 69adfc5217SJeff Kirsher o->validate = validate; 70460a25cdSYuval Mintz o->remove = remove; 71adfc5217SJeff Kirsher o->optimize = optimize; 72adfc5217SJeff Kirsher o->execute = exec; 73adfc5217SJeff Kirsher o->get = get; 74adfc5217SJeff Kirsher 7551c1a580SMerav Sicron DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n", 7651c1a580SMerav Sicron exe_len); 77adfc5217SJeff Kirsher } 78adfc5217SJeff Kirsher 79adfc5217SJeff Kirsher static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp, 80adfc5217SJeff Kirsher struct bnx2x_exeq_elem *elem) 81adfc5217SJeff Kirsher { 82adfc5217SJeff 
Kirsher DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n"); 83adfc5217SJeff Kirsher kfree(elem); 84adfc5217SJeff Kirsher } 85adfc5217SJeff Kirsher 86adfc5217SJeff Kirsher static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o) 87adfc5217SJeff Kirsher { 88adfc5217SJeff Kirsher struct bnx2x_exeq_elem *elem; 89adfc5217SJeff Kirsher int cnt = 0; 90adfc5217SJeff Kirsher 91adfc5217SJeff Kirsher spin_lock_bh(&o->lock); 92adfc5217SJeff Kirsher 93adfc5217SJeff Kirsher list_for_each_entry(elem, &o->exe_queue, link) 94adfc5217SJeff Kirsher cnt++; 95adfc5217SJeff Kirsher 96adfc5217SJeff Kirsher spin_unlock_bh(&o->lock); 97adfc5217SJeff Kirsher 98adfc5217SJeff Kirsher return cnt; 99adfc5217SJeff Kirsher } 100adfc5217SJeff Kirsher 101adfc5217SJeff Kirsher /** 102adfc5217SJeff Kirsher * bnx2x_exe_queue_add - add a new element to the execution queue 103adfc5217SJeff Kirsher * 104adfc5217SJeff Kirsher * @bp: driver handle 105adfc5217SJeff Kirsher * @o: queue 106adfc5217SJeff Kirsher * @cmd: new command to add 107adfc5217SJeff Kirsher * @restore: true - do not optimize the command 108adfc5217SJeff Kirsher * 109adfc5217SJeff Kirsher * If the element is optimized or is illegal, frees it. 
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
				      struct bnx2x_exe_queue_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      bool restore)
{
	int rc;

	spin_lock_bh(&o->lock);

	if (!restore) {
		/* Try to cancel this element queue */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock_bh(&o->lock);

	return 0;

free_and_exit:
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock_bh(&o->lock);

	return rc;
}

/* Free every element on the pending_comp list.
 * Caller is expected to hold the exe_queue lock - see bnx2x_exe_queue_step().
 */
static inline void __bnx2x_exe_queue_reset_pending(
	struct bnx2x *bp,
	struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;

	while (!list_empty(&o->pending_comp)) {
		elem = list_first_entry(&o->pending_comp,
					struct bnx2x_exeq_elem, link);

		list_del(&elem->link);
		bnx2x_exe_queue_free_elem(bp, elem);
	}
}

/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:			driver handle
 * @o:			queue
 * @ramrod_flags:	flags
 *
 * Returns 1 if a previous chunk is still pending completion, 0 if there was
 * nothing (more) to do, or the owner's execute() result otherwise.
 *
 * (Should be called while holding the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	/* Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the FW
	 * which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			return 1;
		}
	}

	/* Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/* Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_move_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check - nothing was moved, nothing to execute */
	if (!cur_len)
		return 0;

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/* In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/* If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	return rc;
}

/* Lockless emptiness check. Relies on the spacer element trick in
 * bnx2x_exe_queue_step() which keeps at least one of the two lists
 * non-empty while an element is moved between them.
 */
static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && list_empty(&o->pending_comp);
}

/* Allocate a zeroed execution queue element (atomic context safe). */
static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
	struct bnx2x *bp)
{
	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}

/************************ raw_obj functions ***********************************/
/* Test whether the raw object's state bit is still set (command pending). */
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
	return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_atomic();
	clear_bit(o->state, o->pstate);
	smp_mb__after_atomic();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_atomic();
	set_bit(o->state, o->pstate);
	smp_mb__after_atomic();
}

/**
 * bnx2x_state_wait - wait until the given bit(state) is cleared
 *
 * @bp:		device handle
 * @state:	state which is to be cleared
 * @pstate:	state buffer
 *
 * Polls in ~1-2ms steps; 5000 iterations (x20 on emulation), so roughly a
 * 5-10 second timeout. Returns 0 when cleared, -EIO on panic, -EBUSY on
 * timeout.
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 2000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get(vp, 1);
}

/* Take one credit from both the MAC and the VLAN pools; if the VLAN pool
 * fails, roll back the MAC credit so nothing leaks.
 */
static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->get(mp, 1))
		return false;

	if (!vp->get(vp, 1)) {
		mp->put(mp, 1);
		return false;
	}

	return true;
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o) 399adfc5217SJeff Kirsher { 400adfc5217SJeff Kirsher struct bnx2x_credit_pool_obj *vp = o->vlans_pool; 401adfc5217SJeff Kirsher 402adfc5217SJeff Kirsher return vp->put(vp, 1); 403adfc5217SJeff Kirsher } 404adfc5217SJeff Kirsher 40505cc5a39SYuval Mintz static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) 40605cc5a39SYuval Mintz { 40705cc5a39SYuval Mintz struct bnx2x_credit_pool_obj *mp = o->macs_pool; 40805cc5a39SYuval Mintz struct bnx2x_credit_pool_obj *vp = o->vlans_pool; 40905cc5a39SYuval Mintz 41005cc5a39SYuval Mintz if (!mp->put(mp, 1)) 41105cc5a39SYuval Mintz return false; 41205cc5a39SYuval Mintz 41305cc5a39SYuval Mintz if (!vp->put(vp, 1)) { 41405cc5a39SYuval Mintz mp->get(mp, 1); 41505cc5a39SYuval Mintz return false; 41605cc5a39SYuval Mintz } 41705cc5a39SYuval Mintz 41805cc5a39SYuval Mintz return true; 41905cc5a39SYuval Mintz } 42005cc5a39SYuval Mintz 4218b09be5fSYuval Mintz /** 4228b09be5fSYuval Mintz * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock 4238b09be5fSYuval Mintz * 4248b09be5fSYuval Mintz * @bp: device handle 4258b09be5fSYuval Mintz * @o: vlan_mac object 4268b09be5fSYuval Mintz * 4278b09be5fSYuval Mintz * @details: Non-blocking implementation; should be called under execution 4288b09be5fSYuval Mintz * queue lock. 
4298b09be5fSYuval Mintz */ 4308b09be5fSYuval Mintz static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp, 4318b09be5fSYuval Mintz struct bnx2x_vlan_mac_obj *o) 4328b09be5fSYuval Mintz { 4338b09be5fSYuval Mintz if (o->head_reader) { 4348b09be5fSYuval Mintz DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n"); 4358b09be5fSYuval Mintz return -EBUSY; 4368b09be5fSYuval Mintz } 4378b09be5fSYuval Mintz 4388b09be5fSYuval Mintz DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n"); 4398b09be5fSYuval Mintz return 0; 4408b09be5fSYuval Mintz } 4418b09be5fSYuval Mintz 4428b09be5fSYuval Mintz /** 4438b09be5fSYuval Mintz * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step 4448b09be5fSYuval Mintz * 4458b09be5fSYuval Mintz * @bp: device handle 4468b09be5fSYuval Mintz * @o: vlan_mac object 4478b09be5fSYuval Mintz * 4488b09be5fSYuval Mintz * @details Should be called under execution queue lock; notice it might release 4498b09be5fSYuval Mintz * and reclaim it during its run. 
4508b09be5fSYuval Mintz */ 4518b09be5fSYuval Mintz static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp, 4528b09be5fSYuval Mintz struct bnx2x_vlan_mac_obj *o) 4538b09be5fSYuval Mintz { 4548b09be5fSYuval Mintz int rc; 4558b09be5fSYuval Mintz unsigned long ramrod_flags = o->saved_ramrod_flags; 4568b09be5fSYuval Mintz 4578b09be5fSYuval Mintz DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n", 4588b09be5fSYuval Mintz ramrod_flags); 4598b09be5fSYuval Mintz o->head_exe_request = false; 4608b09be5fSYuval Mintz o->saved_ramrod_flags = 0; 4618b09be5fSYuval Mintz rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags); 4629d18d270SYuval Mintz if ((rc != 0) && (rc != 1)) { 4638b09be5fSYuval Mintz BNX2X_ERR("execution of pending commands failed with rc %d\n", 4648b09be5fSYuval Mintz rc); 4658b09be5fSYuval Mintz #ifdef BNX2X_STOP_ON_ERROR 4668b09be5fSYuval Mintz bnx2x_panic(); 4678b09be5fSYuval Mintz #endif 4688b09be5fSYuval Mintz } 4698b09be5fSYuval Mintz } 4708b09be5fSYuval Mintz 4718b09be5fSYuval Mintz /** 4728b09be5fSYuval Mintz * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run 4738b09be5fSYuval Mintz * 4748b09be5fSYuval Mintz * @bp: device handle 4758b09be5fSYuval Mintz * @o: vlan_mac object 4768b09be5fSYuval Mintz * @ramrod_flags: ramrod flags of missed execution 4778b09be5fSYuval Mintz * 4788b09be5fSYuval Mintz * @details Should be called under execution queue lock. 
 */
static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
				    struct bnx2x_vlan_mac_obj *o,
				    unsigned long ramrod_flags)
{
	/* Record the request; a later writer/reader release replays it. */
	o->head_exe_request = true;
	o->saved_ramrod_flags = ramrod_flags;
	DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
	   ramrod_flags);
}

/**
 * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
					    struct bnx2x_vlan_mac_obj *o)
{
	/* It's possible a new pending execution was added since this writer
	 * executed. If so, execute again. [Ad infinitum]
	 */
	while (o->head_exe_request) {
		DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
		__bnx2x_vlan_mac_h_exec_pending(bp, o);
	}
}

/**
 * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
					struct bnx2x_vlan_mac_obj *o)
{
	/* If we got here, we're holding lock --> no WRITER exists */
	o->head_reader++;
	DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
	   o->head_reader);

	return 0;
}

/**
 * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 */
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int rc;

	spin_lock_bh(&o->exe_queue.lock);
	rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
	spin_unlock_bh(&o->exe_queue.lock);

	return rc;
}

/**
 * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader. possibly releasing and reclaiming the execution queue lock.
 */
static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
					   struct bnx2x_vlan_mac_obj *o)
{
	if (!o->head_reader) {
		BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	} else {
		o->head_reader--;
		DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
		   o->head_reader);
	}

	/* It's possible a new pending execution was added, and that this reader
	 * was last - if so we need to execute the command.
	 */
	if (!o->head_reader && o->head_exe_request) {
		DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");

		/* Writer release will do the trick */
		__bnx2x_vlan_mac_h_write_unlock(bp, o);
	}
}

/**
 * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @bp:		device handle
 * @o:		vlan_mac object
 *
 * @details Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run.
 */
void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o)
{
	spin_lock_bh(&o->exe_queue.lock);
	__bnx2x_vlan_mac_h_read_unlock(bp, o);
	spin_unlock_bh(&o->exe_queue.lock);
}

/* Copy up to @n registry entries of @size bytes each into @base, leaving
 * @stride extra bytes between consecutive destinations. Holds the vlan_mac
 * reader lock for the traversal (best-effort: logs and proceeds unlocked on
 * failure). Returns the number of copied entries times ETH_ALEN.
 */
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
				int n, u8 *base, u8 stride, u8 size)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	u8 *next = base;
	int counter = 0;
	int read_lock;

	DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
	read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
	if (read_lock != 0)
		BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");

	/* traverse list */
	list_for_each_entry(pos, &o->head, link) {
		if (counter < n) {
			memcpy(next, &pos->u, size);
			counter++;
			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
			   counter, next);
			next += stride + size;
		}
	}

	if (read_lock == 0) {
		DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
		bnx2x_vlan_mac_h_read_unlock(bp, o);
	}

	return counter * ETH_ALEN;
}

/* check_add() callbacks */
/* Returns 0 if the MAC may be added, -EINVAL for an invalid address,
 * -EEXIST if an identical entry is already registered.
 */
static int bnx2x_check_mac_add(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o,
			       union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);

	if (!is_valid_ether_addr(data->mac.mac))
		return -EINVAL;

	/* Check if a requested MAC already exists */
	list_for_each_entry(pos, &o->head, link)
		if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return -EEXIST;

	return 0;
}

/* Returns 0 if the VLAN may be added, -EEXIST if already registered. */
static int bnx2x_check_vlan_add(struct bnx2x *bp,
				struct bnx2x_vlan_mac_obj *o,
				union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return -EEXIST;

	return 0;
}

/* Returns 0 if the VLAN-MAC pair may be added, -EEXIST if already present. */
static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
				    struct bnx2x_vlan_mac_obj *o,
				    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
	   data->vlan_mac.mac, data->vlan_mac.vlan);

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)) &&
		    (data->vlan_mac.is_inner_mac ==
		     pos->u.vlan_mac.is_inner_mac))
			return -EEXIST;

	return 0;
}

/* check_del() callbacks */
/* Returns the matching registry element, or NULL if the MAC is not set. */
static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_mac_del(struct bnx2x *bp,
			    struct bnx2x_vlan_mac_obj *o,
			    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);

	list_for_each_entry(pos, &o->head, link)
		if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return pos;

	return NULL;
}

/* Returns the matching registry element, or NULL if the VLAN is not set. */
static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_del(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return pos;

	return NULL;
}

/* Returns the matching registry element, or NULL if the pair is not set. */
static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_mac_del(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
	   data->vlan_mac.mac, data->vlan_mac.vlan);

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)) &&
		    (data->vlan_mac.is_inner_mac ==
		     pos->u.vlan_mac.is_inner_mac))
			return pos;

	return NULL;
}

/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *src_o,
			     struct bnx2x_vlan_mac_obj *dst_o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(bp, src_o, data);

	/* check if configuration can be added */
	rc = dst_o->check_add(bp, dst_o, data);

	/* If this classification can not be added (is already set)
	 * or can't be deleted - return an error.
768adfc5217SJeff Kirsher */ 769adfc5217SJeff Kirsher if (rc || !pos) 770adfc5217SJeff Kirsher return false; 771adfc5217SJeff Kirsher 772adfc5217SJeff Kirsher return true; 773adfc5217SJeff Kirsher } 774adfc5217SJeff Kirsher 775adfc5217SJeff Kirsher static bool bnx2x_check_move_always_err( 77651c1a580SMerav Sicron struct bnx2x *bp, 777adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *src_o, 778adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *dst_o, 779adfc5217SJeff Kirsher union bnx2x_classification_ramrod_data *data) 780adfc5217SJeff Kirsher { 781adfc5217SJeff Kirsher return false; 782adfc5217SJeff Kirsher } 783adfc5217SJeff Kirsher 784adfc5217SJeff Kirsher static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) 785adfc5217SJeff Kirsher { 786adfc5217SJeff Kirsher struct bnx2x_raw_obj *raw = &o->raw; 787adfc5217SJeff Kirsher u8 rx_tx_flag = 0; 788adfc5217SJeff Kirsher 789adfc5217SJeff Kirsher if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || 790adfc5217SJeff Kirsher (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) 791adfc5217SJeff Kirsher rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD; 792adfc5217SJeff Kirsher 793adfc5217SJeff Kirsher if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || 794adfc5217SJeff Kirsher (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) 795adfc5217SJeff Kirsher rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD; 796adfc5217SJeff Kirsher 797adfc5217SJeff Kirsher return rx_tx_flag; 798adfc5217SJeff Kirsher } 799adfc5217SJeff Kirsher 800a8f47eb7Sstephen hemminger static void bnx2x_set_mac_in_nig(struct bnx2x *bp, 801adfc5217SJeff Kirsher bool add, unsigned char *dev_addr, int index) 802adfc5217SJeff Kirsher { 803adfc5217SJeff Kirsher u32 wb_data[2]; 804adfc5217SJeff Kirsher u32 reg_offset = BP_PORT(bp) ? 
NIG_REG_LLH1_FUNC_MEM : 805adfc5217SJeff Kirsher NIG_REG_LLH0_FUNC_MEM; 806adfc5217SJeff Kirsher 807a3348722SBarak Witkowski if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp)) 808a3348722SBarak Witkowski return; 809a3348722SBarak Witkowski 810a3348722SBarak Witkowski if (index > BNX2X_LLH_CAM_MAX_PF_LINE) 811adfc5217SJeff Kirsher return; 812adfc5217SJeff Kirsher 813adfc5217SJeff Kirsher DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n", 814adfc5217SJeff Kirsher (add ? "ADD" : "DELETE"), index); 815adfc5217SJeff Kirsher 816adfc5217SJeff Kirsher if (add) { 817adfc5217SJeff Kirsher /* LLH_FUNC_MEM is a u64 WB register */ 818adfc5217SJeff Kirsher reg_offset += 8*index; 819adfc5217SJeff Kirsher 820adfc5217SJeff Kirsher wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | 821adfc5217SJeff Kirsher (dev_addr[4] << 8) | dev_addr[5]); 822adfc5217SJeff Kirsher wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); 823adfc5217SJeff Kirsher 824adfc5217SJeff Kirsher REG_WR_DMAE(bp, reg_offset, wb_data, 2); 825adfc5217SJeff Kirsher } 826adfc5217SJeff Kirsher 827adfc5217SJeff Kirsher REG_WR(bp, (BP_PORT(bp) ? 
NIG_REG_LLH1_FUNC_MEM_ENABLE : 828adfc5217SJeff Kirsher NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add); 829adfc5217SJeff Kirsher } 830adfc5217SJeff Kirsher 831adfc5217SJeff Kirsher /** 832adfc5217SJeff Kirsher * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod 833adfc5217SJeff Kirsher * 834adfc5217SJeff Kirsher * @bp: device handle 835adfc5217SJeff Kirsher * @o: queue for which we want to configure this rule 836adfc5217SJeff Kirsher * @add: if true the command is an ADD command, DEL otherwise 837adfc5217SJeff Kirsher * @opcode: CLASSIFY_RULE_OPCODE_XXX 838adfc5217SJeff Kirsher * @hdr: pointer to a header to setup 839adfc5217SJeff Kirsher * 840adfc5217SJeff Kirsher */ 841adfc5217SJeff Kirsher static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, 842adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *o, bool add, int opcode, 843adfc5217SJeff Kirsher struct eth_classify_cmd_header *hdr) 844adfc5217SJeff Kirsher { 845adfc5217SJeff Kirsher struct bnx2x_raw_obj *raw = &o->raw; 846adfc5217SJeff Kirsher 847adfc5217SJeff Kirsher hdr->client_id = raw->cl_id; 848adfc5217SJeff Kirsher hdr->func_id = raw->func_id; 849adfc5217SJeff Kirsher 850adfc5217SJeff Kirsher /* Rx or/and Tx (internal switching) configuration ? 
*/ 851adfc5217SJeff Kirsher hdr->cmd_general_data |= 852adfc5217SJeff Kirsher bnx2x_vlan_mac_get_rx_tx_flag(o); 853adfc5217SJeff Kirsher 854adfc5217SJeff Kirsher if (add) 855adfc5217SJeff Kirsher hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD; 856adfc5217SJeff Kirsher 857adfc5217SJeff Kirsher hdr->cmd_general_data |= 858adfc5217SJeff Kirsher (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT); 859adfc5217SJeff Kirsher } 860adfc5217SJeff Kirsher 861adfc5217SJeff Kirsher /** 862adfc5217SJeff Kirsher * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header 863adfc5217SJeff Kirsher * 864adfc5217SJeff Kirsher * @cid: connection id 865adfc5217SJeff Kirsher * @type: BNX2X_FILTER_XXX_PENDING 86616a5fd92SYuval Mintz * @hdr: pointer to header to setup 867adfc5217SJeff Kirsher * @rule_cnt: 868adfc5217SJeff Kirsher * 869adfc5217SJeff Kirsher * currently we always configure one rule and echo field to contain a CID and an 870adfc5217SJeff Kirsher * opcode type. 871adfc5217SJeff Kirsher */ 872adfc5217SJeff Kirsher static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type, 873adfc5217SJeff Kirsher struct eth_classify_header *hdr, int rule_cnt) 874adfc5217SJeff Kirsher { 87586564c3fSYuval Mintz hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) | 87686564c3fSYuval Mintz (type << BNX2X_SWCID_SHIFT)); 877adfc5217SJeff Kirsher hdr->rule_cnt = (u8)rule_cnt; 878adfc5217SJeff Kirsher } 879adfc5217SJeff Kirsher 880adfc5217SJeff Kirsher /* hw_config() callbacks */ 881adfc5217SJeff Kirsher static void bnx2x_set_one_mac_e2(struct bnx2x *bp, 882adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *o, 883adfc5217SJeff Kirsher struct bnx2x_exeq_elem *elem, int rule_idx, 884adfc5217SJeff Kirsher int cam_offset) 885adfc5217SJeff Kirsher { 886adfc5217SJeff Kirsher struct bnx2x_raw_obj *raw = &o->raw; 887adfc5217SJeff Kirsher struct eth_classify_rules_ramrod_data *data = 888adfc5217SJeff Kirsher (struct eth_classify_rules_ramrod_data *)(raw->rdata); 889adfc5217SJeff Kirsher 
int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; 890adfc5217SJeff Kirsher union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; 891adfc5217SJeff Kirsher bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; 892adfc5217SJeff Kirsher unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; 893adfc5217SJeff Kirsher u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; 894adfc5217SJeff Kirsher 89516a5fd92SYuval Mintz /* Set LLH CAM entry: currently only iSCSI and ETH macs are 896adfc5217SJeff Kirsher * relevant. In addition, current implementation is tuned for a 897adfc5217SJeff Kirsher * single ETH MAC. 898adfc5217SJeff Kirsher * 899adfc5217SJeff Kirsher * When multiple unicast ETH MACs PF configuration in switch 900adfc5217SJeff Kirsher * independent mode is required (NetQ, multiple netdev MACs, 901adfc5217SJeff Kirsher * etc.), consider better utilisation of 8 per function MAC 902adfc5217SJeff Kirsher * entries in the LLH register. There is also 903adfc5217SJeff Kirsher * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the 904adfc5217SJeff Kirsher * total number of CAM entries to 16. 905adfc5217SJeff Kirsher * 906adfc5217SJeff Kirsher * Currently we won't configure NIG for MACs other than a primary ETH 907adfc5217SJeff Kirsher * MAC and iSCSI L2 MAC. 908adfc5217SJeff Kirsher * 909adfc5217SJeff Kirsher * If this MAC is moving from one Queue to another, no need to change 910adfc5217SJeff Kirsher * NIG configuration. 
911adfc5217SJeff Kirsher */ 912adfc5217SJeff Kirsher if (cmd != BNX2X_VLAN_MAC_MOVE) { 913adfc5217SJeff Kirsher if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags)) 914adfc5217SJeff Kirsher bnx2x_set_mac_in_nig(bp, add, mac, 9150a52fd01SYuval Mintz BNX2X_LLH_CAM_ISCSI_ETH_LINE); 916adfc5217SJeff Kirsher else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags)) 9170a52fd01SYuval Mintz bnx2x_set_mac_in_nig(bp, add, mac, 9180a52fd01SYuval Mintz BNX2X_LLH_CAM_ETH_LINE); 919adfc5217SJeff Kirsher } 920adfc5217SJeff Kirsher 921adfc5217SJeff Kirsher /* Reset the ramrod data buffer for the first rule */ 922adfc5217SJeff Kirsher if (rule_idx == 0) 923adfc5217SJeff Kirsher memset(data, 0, sizeof(*data)); 924adfc5217SJeff Kirsher 925adfc5217SJeff Kirsher /* Setup a command header */ 926adfc5217SJeff Kirsher bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC, 927adfc5217SJeff Kirsher &rule_entry->mac.header); 928adfc5217SJeff Kirsher 9290f9dad10SJoe Perches DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n", 93051c1a580SMerav Sicron (add ? 
"add" : "delete"), mac, raw->cl_id); 931adfc5217SJeff Kirsher 932adfc5217SJeff Kirsher /* Set a MAC itself */ 933adfc5217SJeff Kirsher bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, 934adfc5217SJeff Kirsher &rule_entry->mac.mac_mid, 935adfc5217SJeff Kirsher &rule_entry->mac.mac_lsb, mac); 93691226790SDmitry Kravkov rule_entry->mac.inner_mac = 93791226790SDmitry Kravkov cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac); 938adfc5217SJeff Kirsher 939adfc5217SJeff Kirsher /* MOVE: Add a rule that will add this MAC to the target Queue */ 940adfc5217SJeff Kirsher if (cmd == BNX2X_VLAN_MAC_MOVE) { 941adfc5217SJeff Kirsher rule_entry++; 942adfc5217SJeff Kirsher rule_cnt++; 943adfc5217SJeff Kirsher 944adfc5217SJeff Kirsher /* Setup ramrod data */ 945adfc5217SJeff Kirsher bnx2x_vlan_mac_set_cmd_hdr_e2(bp, 946adfc5217SJeff Kirsher elem->cmd_data.vlan_mac.target_obj, 947adfc5217SJeff Kirsher true, CLASSIFY_RULE_OPCODE_MAC, 948adfc5217SJeff Kirsher &rule_entry->mac.header); 949adfc5217SJeff Kirsher 950adfc5217SJeff Kirsher /* Set a MAC itself */ 951adfc5217SJeff Kirsher bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, 952adfc5217SJeff Kirsher &rule_entry->mac.mac_mid, 953adfc5217SJeff Kirsher &rule_entry->mac.mac_lsb, mac); 95491226790SDmitry Kravkov rule_entry->mac.inner_mac = 95591226790SDmitry Kravkov cpu_to_le16(elem->cmd_data.vlan_mac. 
95691226790SDmitry Kravkov u.mac.is_inner_mac); 957adfc5217SJeff Kirsher } 958adfc5217SJeff Kirsher 959adfc5217SJeff Kirsher /* Set the ramrod data header */ 960adfc5217SJeff Kirsher /* TODO: take this to the higher level in order to prevent multiple 961adfc5217SJeff Kirsher writing */ 962adfc5217SJeff Kirsher bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, 963adfc5217SJeff Kirsher rule_cnt); 964adfc5217SJeff Kirsher } 965adfc5217SJeff Kirsher 966adfc5217SJeff Kirsher /** 967adfc5217SJeff Kirsher * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod 968adfc5217SJeff Kirsher * 969adfc5217SJeff Kirsher * @bp: device handle 970adfc5217SJeff Kirsher * @o: queue 971adfc5217SJeff Kirsher * @type: 972adfc5217SJeff Kirsher * @cam_offset: offset in cam memory 973adfc5217SJeff Kirsher * @hdr: pointer to a header to setup 974adfc5217SJeff Kirsher * 975adfc5217SJeff Kirsher * E1/E1H 976adfc5217SJeff Kirsher */ 977adfc5217SJeff Kirsher static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp, 978adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, 979adfc5217SJeff Kirsher struct mac_configuration_hdr *hdr) 980adfc5217SJeff Kirsher { 981adfc5217SJeff Kirsher struct bnx2x_raw_obj *r = &o->raw; 982adfc5217SJeff Kirsher 983adfc5217SJeff Kirsher hdr->length = 1; 984adfc5217SJeff Kirsher hdr->offset = (u8)cam_offset; 98586564c3fSYuval Mintz hdr->client_id = cpu_to_le16(0xff); 98686564c3fSYuval Mintz hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | 98786564c3fSYuval Mintz (type << BNX2X_SWCID_SHIFT)); 988adfc5217SJeff Kirsher } 989adfc5217SJeff Kirsher 990adfc5217SJeff Kirsher static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp, 991adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac, 992adfc5217SJeff Kirsher u16 vlan_id, struct mac_configuration_entry *cfg_entry) 993adfc5217SJeff Kirsher { 994adfc5217SJeff Kirsher struct bnx2x_raw_obj *r = &o->raw; 
995adfc5217SJeff Kirsher u32 cl_bit_vec = (1 << r->cl_id); 996adfc5217SJeff Kirsher 997adfc5217SJeff Kirsher cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec); 998adfc5217SJeff Kirsher cfg_entry->pf_id = r->func_id; 999adfc5217SJeff Kirsher cfg_entry->vlan_id = cpu_to_le16(vlan_id); 1000adfc5217SJeff Kirsher 1001adfc5217SJeff Kirsher if (add) { 1002adfc5217SJeff Kirsher SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 1003adfc5217SJeff Kirsher T_ETH_MAC_COMMAND_SET); 1004adfc5217SJeff Kirsher SET_FLAG(cfg_entry->flags, 1005adfc5217SJeff Kirsher MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode); 1006adfc5217SJeff Kirsher 1007adfc5217SJeff Kirsher /* Set a MAC in a ramrod data */ 1008adfc5217SJeff Kirsher bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr, 1009adfc5217SJeff Kirsher &cfg_entry->middle_mac_addr, 1010adfc5217SJeff Kirsher &cfg_entry->lsb_mac_addr, mac); 1011adfc5217SJeff Kirsher } else 1012adfc5217SJeff Kirsher SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 1013adfc5217SJeff Kirsher T_ETH_MAC_COMMAND_INVALIDATE); 1014adfc5217SJeff Kirsher } 1015adfc5217SJeff Kirsher 1016adfc5217SJeff Kirsher static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp, 1017adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add, 1018adfc5217SJeff Kirsher u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config) 1019adfc5217SJeff Kirsher { 1020adfc5217SJeff Kirsher struct mac_configuration_entry *cfg_entry = &config->config_table[0]; 1021adfc5217SJeff Kirsher struct bnx2x_raw_obj *raw = &o->raw; 1022adfc5217SJeff Kirsher 1023adfc5217SJeff Kirsher bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset, 1024adfc5217SJeff Kirsher &config->hdr); 1025adfc5217SJeff Kirsher bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id, 1026adfc5217SJeff Kirsher cfg_entry); 1027adfc5217SJeff Kirsher 10280f9dad10SJoe Perches DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n", 
102951c1a580SMerav Sicron (add ? "setting" : "clearing"), 10300f9dad10SJoe Perches mac, raw->cl_id, cam_offset); 1031adfc5217SJeff Kirsher } 1032adfc5217SJeff Kirsher 1033adfc5217SJeff Kirsher /** 1034adfc5217SJeff Kirsher * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data 1035adfc5217SJeff Kirsher * 1036adfc5217SJeff Kirsher * @bp: device handle 1037adfc5217SJeff Kirsher * @o: bnx2x_vlan_mac_obj 1038adfc5217SJeff Kirsher * @elem: bnx2x_exeq_elem 1039adfc5217SJeff Kirsher * @rule_idx: rule_idx 1040adfc5217SJeff Kirsher * @cam_offset: cam_offset 1041adfc5217SJeff Kirsher */ 1042adfc5217SJeff Kirsher static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, 1043adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *o, 1044adfc5217SJeff Kirsher struct bnx2x_exeq_elem *elem, int rule_idx, 1045adfc5217SJeff Kirsher int cam_offset) 1046adfc5217SJeff Kirsher { 1047adfc5217SJeff Kirsher struct bnx2x_raw_obj *raw = &o->raw; 1048adfc5217SJeff Kirsher struct mac_configuration_cmd *config = 1049adfc5217SJeff Kirsher (struct mac_configuration_cmd *)(raw->rdata); 105016a5fd92SYuval Mintz /* 57710 and 57711 do not support MOVE command, 1051adfc5217SJeff Kirsher * so it's either ADD or DEL 1052adfc5217SJeff Kirsher */ 1053adfc5217SJeff Kirsher bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 
1054adfc5217SJeff Kirsher true : false; 1055adfc5217SJeff Kirsher 1056adfc5217SJeff Kirsher /* Reset the ramrod data buffer */ 1057adfc5217SJeff Kirsher memset(config, 0, sizeof(*config)); 1058adfc5217SJeff Kirsher 105933ac338cSYuval Mintz bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state, 1060adfc5217SJeff Kirsher cam_offset, add, 1061adfc5217SJeff Kirsher elem->cmd_data.vlan_mac.u.mac.mac, 0, 1062adfc5217SJeff Kirsher ETH_VLAN_FILTER_ANY_VLAN, config); 1063adfc5217SJeff Kirsher } 1064adfc5217SJeff Kirsher 1065adfc5217SJeff Kirsher static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, 1066adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *o, 1067adfc5217SJeff Kirsher struct bnx2x_exeq_elem *elem, int rule_idx, 1068adfc5217SJeff Kirsher int cam_offset) 1069adfc5217SJeff Kirsher { 1070adfc5217SJeff Kirsher struct bnx2x_raw_obj *raw = &o->raw; 1071adfc5217SJeff Kirsher struct eth_classify_rules_ramrod_data *data = 1072adfc5217SJeff Kirsher (struct eth_classify_rules_ramrod_data *)(raw->rdata); 1073adfc5217SJeff Kirsher int rule_cnt = rule_idx + 1; 1074adfc5217SJeff Kirsher union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; 107586564c3fSYuval Mintz enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; 1076adfc5217SJeff Kirsher bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; 1077adfc5217SJeff Kirsher u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; 1078adfc5217SJeff Kirsher 1079adfc5217SJeff Kirsher /* Reset the ramrod data buffer for the first rule */ 1080adfc5217SJeff Kirsher if (rule_idx == 0) 1081adfc5217SJeff Kirsher memset(data, 0, sizeof(*data)); 1082adfc5217SJeff Kirsher 1083adfc5217SJeff Kirsher /* Set a rule header */ 1084adfc5217SJeff Kirsher bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN, 1085adfc5217SJeff Kirsher &rule_entry->vlan.header); 1086adfc5217SJeff Kirsher 1087adfc5217SJeff Kirsher DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? 
"add" : "delete"), 1088adfc5217SJeff Kirsher vlan); 1089adfc5217SJeff Kirsher 1090adfc5217SJeff Kirsher /* Set a VLAN itself */ 1091adfc5217SJeff Kirsher rule_entry->vlan.vlan = cpu_to_le16(vlan); 1092adfc5217SJeff Kirsher 1093adfc5217SJeff Kirsher /* MOVE: Add a rule that will add this MAC to the target Queue */ 1094adfc5217SJeff Kirsher if (cmd == BNX2X_VLAN_MAC_MOVE) { 1095adfc5217SJeff Kirsher rule_entry++; 1096adfc5217SJeff Kirsher rule_cnt++; 1097adfc5217SJeff Kirsher 1098adfc5217SJeff Kirsher /* Setup ramrod data */ 1099adfc5217SJeff Kirsher bnx2x_vlan_mac_set_cmd_hdr_e2(bp, 1100adfc5217SJeff Kirsher elem->cmd_data.vlan_mac.target_obj, 1101adfc5217SJeff Kirsher true, CLASSIFY_RULE_OPCODE_VLAN, 1102adfc5217SJeff Kirsher &rule_entry->vlan.header); 1103adfc5217SJeff Kirsher 1104adfc5217SJeff Kirsher /* Set a VLAN itself */ 1105adfc5217SJeff Kirsher rule_entry->vlan.vlan = cpu_to_le16(vlan); 1106adfc5217SJeff Kirsher } 1107adfc5217SJeff Kirsher 1108adfc5217SJeff Kirsher /* Set the ramrod data header */ 1109adfc5217SJeff Kirsher /* TODO: take this to the higher level in order to prevent multiple 1110adfc5217SJeff Kirsher writing */ 1111adfc5217SJeff Kirsher bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, 1112adfc5217SJeff Kirsher rule_cnt); 1113adfc5217SJeff Kirsher } 1114adfc5217SJeff Kirsher 111505cc5a39SYuval Mintz static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, 111605cc5a39SYuval Mintz struct bnx2x_vlan_mac_obj *o, 111705cc5a39SYuval Mintz struct bnx2x_exeq_elem *elem, 111805cc5a39SYuval Mintz int rule_idx, int cam_offset) 111905cc5a39SYuval Mintz { 112005cc5a39SYuval Mintz struct bnx2x_raw_obj *raw = &o->raw; 112105cc5a39SYuval Mintz struct eth_classify_rules_ramrod_data *data = 112205cc5a39SYuval Mintz (struct eth_classify_rules_ramrod_data *)(raw->rdata); 112305cc5a39SYuval Mintz int rule_cnt = rule_idx + 1; 112405cc5a39SYuval Mintz union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; 112505cc5a39SYuval Mintz enum 
bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; 112605cc5a39SYuval Mintz bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; 112705cc5a39SYuval Mintz u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; 112805cc5a39SYuval Mintz u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; 112905cc5a39SYuval Mintz u16 inner_mac; 113005cc5a39SYuval Mintz 113105cc5a39SYuval Mintz /* Reset the ramrod data buffer for the first rule */ 113205cc5a39SYuval Mintz if (rule_idx == 0) 113305cc5a39SYuval Mintz memset(data, 0, sizeof(*data)); 113405cc5a39SYuval Mintz 113505cc5a39SYuval Mintz /* Set a rule header */ 113605cc5a39SYuval Mintz bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, 113705cc5a39SYuval Mintz &rule_entry->pair.header); 113805cc5a39SYuval Mintz 113905cc5a39SYuval Mintz /* Set VLAN and MAC themselves */ 114005cc5a39SYuval Mintz rule_entry->pair.vlan = cpu_to_le16(vlan); 114105cc5a39SYuval Mintz bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, 114205cc5a39SYuval Mintz &rule_entry->pair.mac_mid, 114305cc5a39SYuval Mintz &rule_entry->pair.mac_lsb, mac); 114405cc5a39SYuval Mintz inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac; 114505cc5a39SYuval Mintz rule_entry->pair.inner_mac = cpu_to_le16(inner_mac); 114605cc5a39SYuval Mintz /* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */ 114705cc5a39SYuval Mintz if (cmd == BNX2X_VLAN_MAC_MOVE) { 114805cc5a39SYuval Mintz struct bnx2x_vlan_mac_obj *target_obj; 114905cc5a39SYuval Mintz 115005cc5a39SYuval Mintz rule_entry++; 115105cc5a39SYuval Mintz rule_cnt++; 115205cc5a39SYuval Mintz 115305cc5a39SYuval Mintz /* Setup ramrod data */ 115405cc5a39SYuval Mintz target_obj = elem->cmd_data.vlan_mac.target_obj; 115505cc5a39SYuval Mintz bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj, 115605cc5a39SYuval Mintz true, CLASSIFY_RULE_OPCODE_PAIR, 115705cc5a39SYuval Mintz &rule_entry->pair.header); 115805cc5a39SYuval Mintz 115905cc5a39SYuval Mintz /* Set a VLAN itself */ 116005cc5a39SYuval Mintz 
rule_entry->pair.vlan = cpu_to_le16(vlan); 116105cc5a39SYuval Mintz bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, 116205cc5a39SYuval Mintz &rule_entry->pair.mac_mid, 116305cc5a39SYuval Mintz &rule_entry->pair.mac_lsb, mac); 116405cc5a39SYuval Mintz rule_entry->pair.inner_mac = cpu_to_le16(inner_mac); 116505cc5a39SYuval Mintz } 116605cc5a39SYuval Mintz 116705cc5a39SYuval Mintz /* Set the ramrod data header */ 116805cc5a39SYuval Mintz bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, 116905cc5a39SYuval Mintz rule_cnt); 117005cc5a39SYuval Mintz } 117105cc5a39SYuval Mintz 117205cc5a39SYuval Mintz /** 117305cc5a39SYuval Mintz * bnx2x_set_one_vlan_mac_e1h - 117405cc5a39SYuval Mintz * 117505cc5a39SYuval Mintz * @bp: device handle 117605cc5a39SYuval Mintz * @o: bnx2x_vlan_mac_obj 117705cc5a39SYuval Mintz * @elem: bnx2x_exeq_elem 117805cc5a39SYuval Mintz * @rule_idx: rule_idx 117905cc5a39SYuval Mintz * @cam_offset: cam_offset 118005cc5a39SYuval Mintz */ 118105cc5a39SYuval Mintz static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, 118205cc5a39SYuval Mintz struct bnx2x_vlan_mac_obj *o, 118305cc5a39SYuval Mintz struct bnx2x_exeq_elem *elem, 118405cc5a39SYuval Mintz int rule_idx, int cam_offset) 118505cc5a39SYuval Mintz { 118605cc5a39SYuval Mintz struct bnx2x_raw_obj *raw = &o->raw; 118705cc5a39SYuval Mintz struct mac_configuration_cmd *config = 118805cc5a39SYuval Mintz (struct mac_configuration_cmd *)(raw->rdata); 118905cc5a39SYuval Mintz /* 57710 and 57711 do not support MOVE command, 119005cc5a39SYuval Mintz * so it's either ADD or DEL 119105cc5a39SYuval Mintz */ 119205cc5a39SYuval Mintz bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 
119305cc5a39SYuval Mintz true : false; 119405cc5a39SYuval Mintz 119505cc5a39SYuval Mintz /* Reset the ramrod data buffer */ 119605cc5a39SYuval Mintz memset(config, 0, sizeof(*config)); 119705cc5a39SYuval Mintz 119805cc5a39SYuval Mintz bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING, 119905cc5a39SYuval Mintz cam_offset, add, 120005cc5a39SYuval Mintz elem->cmd_data.vlan_mac.u.vlan_mac.mac, 120105cc5a39SYuval Mintz elem->cmd_data.vlan_mac.u.vlan_mac.vlan, 120205cc5a39SYuval Mintz ETH_VLAN_FILTER_CLASSIFY, config); 120305cc5a39SYuval Mintz } 120405cc5a39SYuval Mintz 1205adfc5217SJeff Kirsher /** 1206adfc5217SJeff Kirsher * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element 1207adfc5217SJeff Kirsher * 1208adfc5217SJeff Kirsher * @bp: device handle 1209adfc5217SJeff Kirsher * @p: command parameters 121016a5fd92SYuval Mintz * @ppos: pointer to the cookie 1211adfc5217SJeff Kirsher * 1212adfc5217SJeff Kirsher * reconfigure next MAC/VLAN/VLAN-MAC element from the 1213adfc5217SJeff Kirsher * previously configured elements list. 1214adfc5217SJeff Kirsher * 1215adfc5217SJeff Kirsher * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken 1216adfc5217SJeff Kirsher * into an account 1217adfc5217SJeff Kirsher * 121816a5fd92SYuval Mintz * pointer to the cookie - that should be given back in the next call to make 1219adfc5217SJeff Kirsher * function handle the next element. If *ppos is set to NULL it will restart the 1220adfc5217SJeff Kirsher * iterator. If returned *ppos == NULL this means that the last element has been 1221adfc5217SJeff Kirsher * handled. 
1222adfc5217SJeff Kirsher * 1223adfc5217SJeff Kirsher */ 1224adfc5217SJeff Kirsher static int bnx2x_vlan_mac_restore(struct bnx2x *bp, 1225adfc5217SJeff Kirsher struct bnx2x_vlan_mac_ramrod_params *p, 1226adfc5217SJeff Kirsher struct bnx2x_vlan_mac_registry_elem **ppos) 1227adfc5217SJeff Kirsher { 1228adfc5217SJeff Kirsher struct bnx2x_vlan_mac_registry_elem *pos; 1229adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; 1230adfc5217SJeff Kirsher 1231adfc5217SJeff Kirsher /* If list is empty - there is nothing to do here */ 1232adfc5217SJeff Kirsher if (list_empty(&o->head)) { 1233adfc5217SJeff Kirsher *ppos = NULL; 1234adfc5217SJeff Kirsher return 0; 1235adfc5217SJeff Kirsher } 1236adfc5217SJeff Kirsher 1237adfc5217SJeff Kirsher /* make a step... */ 1238adfc5217SJeff Kirsher if (*ppos == NULL) 1239adfc5217SJeff Kirsher *ppos = list_first_entry(&o->head, 1240adfc5217SJeff Kirsher struct bnx2x_vlan_mac_registry_elem, 1241adfc5217SJeff Kirsher link); 1242adfc5217SJeff Kirsher else 1243adfc5217SJeff Kirsher *ppos = list_next_entry(*ppos, link); 1244adfc5217SJeff Kirsher 1245adfc5217SJeff Kirsher pos = *ppos; 1246adfc5217SJeff Kirsher 1247adfc5217SJeff Kirsher /* If it's the last step - return NULL */ 1248adfc5217SJeff Kirsher if (list_is_last(&pos->link, &o->head)) 1249adfc5217SJeff Kirsher *ppos = NULL; 1250adfc5217SJeff Kirsher 1251adfc5217SJeff Kirsher /* Prepare a 'user_req' */ 1252adfc5217SJeff Kirsher memcpy(&p->user_req.u, &pos->u, sizeof(pos->u)); 1253adfc5217SJeff Kirsher 1254adfc5217SJeff Kirsher /* Set the command */ 1255adfc5217SJeff Kirsher p->user_req.cmd = BNX2X_VLAN_MAC_ADD; 1256adfc5217SJeff Kirsher 1257adfc5217SJeff Kirsher /* Set vlan_mac_flags */ 1258adfc5217SJeff Kirsher p->user_req.vlan_mac_flags = pos->vlan_mac_flags; 1259adfc5217SJeff Kirsher 1260adfc5217SJeff Kirsher /* Set a restore bit */ 1261adfc5217SJeff Kirsher __set_bit(RAMROD_RESTORE, &p->ramrod_flags); 1262adfc5217SJeff Kirsher 1263adfc5217SJeff Kirsher return 
bnx2x_config_vlan_mac(bp, p); 1264adfc5217SJeff Kirsher } 1265adfc5217SJeff Kirsher 126616a5fd92SYuval Mintz /* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a 1267adfc5217SJeff Kirsher * pointer to an element with a specific criteria and NULL if such an element 1268adfc5217SJeff Kirsher * hasn't been found. 1269adfc5217SJeff Kirsher */ 1270adfc5217SJeff Kirsher static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac( 1271adfc5217SJeff Kirsher struct bnx2x_exe_queue_obj *o, 1272adfc5217SJeff Kirsher struct bnx2x_exeq_elem *elem) 1273adfc5217SJeff Kirsher { 1274adfc5217SJeff Kirsher struct bnx2x_exeq_elem *pos; 1275adfc5217SJeff Kirsher struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac; 1276adfc5217SJeff Kirsher 1277adfc5217SJeff Kirsher /* Check pending for execution commands */ 1278adfc5217SJeff Kirsher list_for_each_entry(pos, &o->exe_queue, link) 1279adfc5217SJeff Kirsher if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data, 1280adfc5217SJeff Kirsher sizeof(*data)) && 1281adfc5217SJeff Kirsher (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) 1282adfc5217SJeff Kirsher return pos; 1283adfc5217SJeff Kirsher 1284adfc5217SJeff Kirsher return NULL; 1285adfc5217SJeff Kirsher } 1286adfc5217SJeff Kirsher 1287adfc5217SJeff Kirsher static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan( 1288adfc5217SJeff Kirsher struct bnx2x_exe_queue_obj *o, 1289adfc5217SJeff Kirsher struct bnx2x_exeq_elem *elem) 1290adfc5217SJeff Kirsher { 1291adfc5217SJeff Kirsher struct bnx2x_exeq_elem *pos; 1292adfc5217SJeff Kirsher struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan; 1293adfc5217SJeff Kirsher 1294adfc5217SJeff Kirsher /* Check pending for execution commands */ 1295adfc5217SJeff Kirsher list_for_each_entry(pos, &o->exe_queue, link) 1296adfc5217SJeff Kirsher if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data, 1297adfc5217SJeff Kirsher sizeof(*data)) && 1298adfc5217SJeff Kirsher (pos->cmd_data.vlan_mac.cmd == 
elem->cmd_data.vlan_mac.cmd)) 1299adfc5217SJeff Kirsher return pos; 1300adfc5217SJeff Kirsher 1301adfc5217SJeff Kirsher return NULL; 1302adfc5217SJeff Kirsher } 1303adfc5217SJeff Kirsher 130405cc5a39SYuval Mintz static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac( 130505cc5a39SYuval Mintz struct bnx2x_exe_queue_obj *o, 130605cc5a39SYuval Mintz struct bnx2x_exeq_elem *elem) 130705cc5a39SYuval Mintz { 130805cc5a39SYuval Mintz struct bnx2x_exeq_elem *pos; 130905cc5a39SYuval Mintz struct bnx2x_vlan_mac_ramrod_data *data = 131005cc5a39SYuval Mintz &elem->cmd_data.vlan_mac.u.vlan_mac; 131105cc5a39SYuval Mintz 131205cc5a39SYuval Mintz /* Check pending for execution commands */ 131305cc5a39SYuval Mintz list_for_each_entry(pos, &o->exe_queue, link) 131405cc5a39SYuval Mintz if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data, 131505cc5a39SYuval Mintz sizeof(*data)) && 131605cc5a39SYuval Mintz (pos->cmd_data.vlan_mac.cmd == 131705cc5a39SYuval Mintz elem->cmd_data.vlan_mac.cmd)) 131805cc5a39SYuval Mintz return pos; 131905cc5a39SYuval Mintz 132005cc5a39SYuval Mintz return NULL; 132105cc5a39SYuval Mintz } 132205cc5a39SYuval Mintz 1323adfc5217SJeff Kirsher /** 1324adfc5217SJeff Kirsher * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed 1325adfc5217SJeff Kirsher * 1326adfc5217SJeff Kirsher * @bp: device handle 1327adfc5217SJeff Kirsher * @qo: bnx2x_qable_obj 1328adfc5217SJeff Kirsher * @elem: bnx2x_exeq_elem 1329adfc5217SJeff Kirsher * 1330adfc5217SJeff Kirsher * Checks that the requested configuration can be added. If yes and if 1331adfc5217SJeff Kirsher * requested, consume CAM credit. 1332adfc5217SJeff Kirsher * 1333adfc5217SJeff Kirsher * The 'validate' is run after the 'optimize'. 
1334adfc5217SJeff Kirsher * 1335adfc5217SJeff Kirsher */ 1336adfc5217SJeff Kirsher static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, 1337adfc5217SJeff Kirsher union bnx2x_qable_obj *qo, 1338adfc5217SJeff Kirsher struct bnx2x_exeq_elem *elem) 1339adfc5217SJeff Kirsher { 1340adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; 1341adfc5217SJeff Kirsher struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; 1342adfc5217SJeff Kirsher int rc; 1343adfc5217SJeff Kirsher 1344adfc5217SJeff Kirsher /* Check the registry */ 134551c1a580SMerav Sicron rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u); 1346adfc5217SJeff Kirsher if (rc) { 134751c1a580SMerav Sicron DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n"); 1348adfc5217SJeff Kirsher return rc; 1349adfc5217SJeff Kirsher } 1350adfc5217SJeff Kirsher 135116a5fd92SYuval Mintz /* Check if there is a pending ADD command for this 1352adfc5217SJeff Kirsher * MAC/VLAN/VLAN-MAC. Return an error if there is. 1353adfc5217SJeff Kirsher */ 1354adfc5217SJeff Kirsher if (exeq->get(exeq, elem)) { 1355adfc5217SJeff Kirsher DP(BNX2X_MSG_SP, "There is a pending ADD command already\n"); 1356adfc5217SJeff Kirsher return -EEXIST; 1357adfc5217SJeff Kirsher } 1358adfc5217SJeff Kirsher 135916a5fd92SYuval Mintz /* TODO: Check the pending MOVE from other objects where this 1360adfc5217SJeff Kirsher * object is a destination object. 
1361adfc5217SJeff Kirsher */ 1362adfc5217SJeff Kirsher 1363adfc5217SJeff Kirsher /* Consume the credit if not requested not to */ 1364adfc5217SJeff Kirsher if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 1365adfc5217SJeff Kirsher &elem->cmd_data.vlan_mac.vlan_mac_flags) || 1366adfc5217SJeff Kirsher o->get_credit(o))) 1367adfc5217SJeff Kirsher return -EINVAL; 1368adfc5217SJeff Kirsher 1369adfc5217SJeff Kirsher return 0; 1370adfc5217SJeff Kirsher } 1371adfc5217SJeff Kirsher 1372adfc5217SJeff Kirsher /** 1373adfc5217SJeff Kirsher * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed 1374adfc5217SJeff Kirsher * 1375adfc5217SJeff Kirsher * @bp: device handle 1376adfc5217SJeff Kirsher * @qo: quable object to check 1377adfc5217SJeff Kirsher * @elem: element that needs to be deleted 1378adfc5217SJeff Kirsher * 1379adfc5217SJeff Kirsher * Checks that the requested configuration can be deleted. If yes and if 1380adfc5217SJeff Kirsher * requested, returns a CAM credit. 1381adfc5217SJeff Kirsher * 1382adfc5217SJeff Kirsher * The 'validate' is run after the 'optimize'. 1383adfc5217SJeff Kirsher */ 1384adfc5217SJeff Kirsher static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp, 1385adfc5217SJeff Kirsher union bnx2x_qable_obj *qo, 1386adfc5217SJeff Kirsher struct bnx2x_exeq_elem *elem) 1387adfc5217SJeff Kirsher { 1388adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; 1389adfc5217SJeff Kirsher struct bnx2x_vlan_mac_registry_elem *pos; 1390adfc5217SJeff Kirsher struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; 1391adfc5217SJeff Kirsher struct bnx2x_exeq_elem query_elem; 1392adfc5217SJeff Kirsher 1393adfc5217SJeff Kirsher /* If this classification can not be deleted (doesn't exist) 1394adfc5217SJeff Kirsher * - return a BNX2X_EXIST. 
1395adfc5217SJeff Kirsher */ 139651c1a580SMerav Sicron pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); 1397adfc5217SJeff Kirsher if (!pos) { 139851c1a580SMerav Sicron DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n"); 1399adfc5217SJeff Kirsher return -EEXIST; 1400adfc5217SJeff Kirsher } 1401adfc5217SJeff Kirsher 140216a5fd92SYuval Mintz /* Check if there are pending DEL or MOVE commands for this 1403adfc5217SJeff Kirsher * MAC/VLAN/VLAN-MAC. Return an error if so. 1404adfc5217SJeff Kirsher */ 1405adfc5217SJeff Kirsher memcpy(&query_elem, elem, sizeof(query_elem)); 1406adfc5217SJeff Kirsher 1407adfc5217SJeff Kirsher /* Check for MOVE commands */ 1408adfc5217SJeff Kirsher query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE; 1409adfc5217SJeff Kirsher if (exeq->get(exeq, &query_elem)) { 1410adfc5217SJeff Kirsher BNX2X_ERR("There is a pending MOVE command already\n"); 1411adfc5217SJeff Kirsher return -EINVAL; 1412adfc5217SJeff Kirsher } 1413adfc5217SJeff Kirsher 1414adfc5217SJeff Kirsher /* Check for DEL commands */ 1415adfc5217SJeff Kirsher if (exeq->get(exeq, elem)) { 1416adfc5217SJeff Kirsher DP(BNX2X_MSG_SP, "There is a pending DEL command already\n"); 1417adfc5217SJeff Kirsher return -EEXIST; 1418adfc5217SJeff Kirsher } 1419adfc5217SJeff Kirsher 1420adfc5217SJeff Kirsher /* Return the credit to the credit pool if not requested not to */ 1421adfc5217SJeff Kirsher if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 1422adfc5217SJeff Kirsher &elem->cmd_data.vlan_mac.vlan_mac_flags) || 1423adfc5217SJeff Kirsher o->put_credit(o))) { 1424adfc5217SJeff Kirsher BNX2X_ERR("Failed to return a credit\n"); 1425adfc5217SJeff Kirsher return -EINVAL; 1426adfc5217SJeff Kirsher } 1427adfc5217SJeff Kirsher 1428adfc5217SJeff Kirsher return 0; 1429adfc5217SJeff Kirsher } 1430adfc5217SJeff Kirsher 1431adfc5217SJeff Kirsher /** 1432adfc5217SJeff Kirsher * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed 1433adfc5217SJeff 
Kirsher * 1434adfc5217SJeff Kirsher * @bp: device handle 1435adfc5217SJeff Kirsher * @qo: quable object to check (source) 1436adfc5217SJeff Kirsher * @elem: element that needs to be moved 1437adfc5217SJeff Kirsher * 1438adfc5217SJeff Kirsher * Checks that the requested configuration can be moved. If yes and if 1439adfc5217SJeff Kirsher * requested, returns a CAM credit. 1440adfc5217SJeff Kirsher * 1441adfc5217SJeff Kirsher * The 'validate' is run after the 'optimize'. 1442adfc5217SJeff Kirsher */ 1443adfc5217SJeff Kirsher static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, 1444adfc5217SJeff Kirsher union bnx2x_qable_obj *qo, 1445adfc5217SJeff Kirsher struct bnx2x_exeq_elem *elem) 1446adfc5217SJeff Kirsher { 1447adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac; 1448adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj; 1449adfc5217SJeff Kirsher struct bnx2x_exeq_elem query_elem; 1450adfc5217SJeff Kirsher struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; 1451adfc5217SJeff Kirsher struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue; 1452adfc5217SJeff Kirsher 145316a5fd92SYuval Mintz /* Check if we can perform this operation based on the current registry 1454adfc5217SJeff Kirsher * state. 1455adfc5217SJeff Kirsher */ 145651c1a580SMerav Sicron if (!src_o->check_move(bp, src_o, dest_o, 145751c1a580SMerav Sicron &elem->cmd_data.vlan_mac.u)) { 145851c1a580SMerav Sicron DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n"); 1459adfc5217SJeff Kirsher return -EINVAL; 1460adfc5217SJeff Kirsher } 1461adfc5217SJeff Kirsher 146216a5fd92SYuval Mintz /* Check if there is an already pending DEL or MOVE command for the 1463adfc5217SJeff Kirsher * source object or ADD command for a destination object. Return an 1464adfc5217SJeff Kirsher * error if so. 
1465adfc5217SJeff Kirsher */ 1466adfc5217SJeff Kirsher memcpy(&query_elem, elem, sizeof(query_elem)); 1467adfc5217SJeff Kirsher 1468adfc5217SJeff Kirsher /* Check DEL on source */ 1469adfc5217SJeff Kirsher query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; 1470adfc5217SJeff Kirsher if (src_exeq->get(src_exeq, &query_elem)) { 147151c1a580SMerav Sicron BNX2X_ERR("There is a pending DEL command on the source queue already\n"); 1472adfc5217SJeff Kirsher return -EINVAL; 1473adfc5217SJeff Kirsher } 1474adfc5217SJeff Kirsher 1475adfc5217SJeff Kirsher /* Check MOVE on source */ 1476adfc5217SJeff Kirsher if (src_exeq->get(src_exeq, elem)) { 1477adfc5217SJeff Kirsher DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n"); 1478adfc5217SJeff Kirsher return -EEXIST; 1479adfc5217SJeff Kirsher } 1480adfc5217SJeff Kirsher 1481adfc5217SJeff Kirsher /* Check ADD on destination */ 1482adfc5217SJeff Kirsher query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; 1483adfc5217SJeff Kirsher if (dest_exeq->get(dest_exeq, &query_elem)) { 148451c1a580SMerav Sicron BNX2X_ERR("There is a pending ADD command on the destination queue already\n"); 1485adfc5217SJeff Kirsher return -EINVAL; 1486adfc5217SJeff Kirsher } 1487adfc5217SJeff Kirsher 1488adfc5217SJeff Kirsher /* Consume the credit if not requested not to */ 1489adfc5217SJeff Kirsher if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, 1490adfc5217SJeff Kirsher &elem->cmd_data.vlan_mac.vlan_mac_flags) || 1491adfc5217SJeff Kirsher dest_o->get_credit(dest_o))) 1492adfc5217SJeff Kirsher return -EINVAL; 1493adfc5217SJeff Kirsher 1494adfc5217SJeff Kirsher if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 1495adfc5217SJeff Kirsher &elem->cmd_data.vlan_mac.vlan_mac_flags) || 1496adfc5217SJeff Kirsher src_o->put_credit(src_o))) { 1497adfc5217SJeff Kirsher /* return the credit taken from dest... 
*/ 1498adfc5217SJeff Kirsher dest_o->put_credit(dest_o); 1499adfc5217SJeff Kirsher return -EINVAL; 1500adfc5217SJeff Kirsher } 1501adfc5217SJeff Kirsher 1502adfc5217SJeff Kirsher return 0; 1503adfc5217SJeff Kirsher } 1504adfc5217SJeff Kirsher 1505adfc5217SJeff Kirsher static int bnx2x_validate_vlan_mac(struct bnx2x *bp, 1506adfc5217SJeff Kirsher union bnx2x_qable_obj *qo, 1507adfc5217SJeff Kirsher struct bnx2x_exeq_elem *elem) 1508adfc5217SJeff Kirsher { 1509adfc5217SJeff Kirsher switch (elem->cmd_data.vlan_mac.cmd) { 1510adfc5217SJeff Kirsher case BNX2X_VLAN_MAC_ADD: 1511adfc5217SJeff Kirsher return bnx2x_validate_vlan_mac_add(bp, qo, elem); 1512adfc5217SJeff Kirsher case BNX2X_VLAN_MAC_DEL: 1513adfc5217SJeff Kirsher return bnx2x_validate_vlan_mac_del(bp, qo, elem); 1514adfc5217SJeff Kirsher case BNX2X_VLAN_MAC_MOVE: 1515adfc5217SJeff Kirsher return bnx2x_validate_vlan_mac_move(bp, qo, elem); 1516adfc5217SJeff Kirsher default: 1517adfc5217SJeff Kirsher return -EINVAL; 1518adfc5217SJeff Kirsher } 1519adfc5217SJeff Kirsher } 1520adfc5217SJeff Kirsher 1521460a25cdSYuval Mintz static int bnx2x_remove_vlan_mac(struct bnx2x *bp, 1522460a25cdSYuval Mintz union bnx2x_qable_obj *qo, 1523460a25cdSYuval Mintz struct bnx2x_exeq_elem *elem) 1524460a25cdSYuval Mintz { 1525460a25cdSYuval Mintz int rc = 0; 1526460a25cdSYuval Mintz 1527460a25cdSYuval Mintz /* If consumption wasn't required, nothing to do */ 1528460a25cdSYuval Mintz if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 1529460a25cdSYuval Mintz &elem->cmd_data.vlan_mac.vlan_mac_flags)) 1530460a25cdSYuval Mintz return 0; 1531460a25cdSYuval Mintz 1532460a25cdSYuval Mintz switch (elem->cmd_data.vlan_mac.cmd) { 1533460a25cdSYuval Mintz case BNX2X_VLAN_MAC_ADD: 1534460a25cdSYuval Mintz case BNX2X_VLAN_MAC_MOVE: 1535460a25cdSYuval Mintz rc = qo->vlan_mac.put_credit(&qo->vlan_mac); 1536460a25cdSYuval Mintz break; 1537460a25cdSYuval Mintz case BNX2X_VLAN_MAC_DEL: 1538460a25cdSYuval Mintz rc = qo->vlan_mac.get_credit(&qo->vlan_mac); 
1539460a25cdSYuval Mintz break; 1540460a25cdSYuval Mintz default: 1541460a25cdSYuval Mintz return -EINVAL; 1542460a25cdSYuval Mintz } 1543460a25cdSYuval Mintz 1544460a25cdSYuval Mintz if (rc != true) 1545460a25cdSYuval Mintz return -EINVAL; 1546460a25cdSYuval Mintz 1547460a25cdSYuval Mintz return 0; 1548460a25cdSYuval Mintz } 1549460a25cdSYuval Mintz 1550adfc5217SJeff Kirsher /** 155116a5fd92SYuval Mintz * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes. 1552adfc5217SJeff Kirsher * 1553adfc5217SJeff Kirsher * @bp: device handle 1554adfc5217SJeff Kirsher * @o: bnx2x_vlan_mac_obj 1555adfc5217SJeff Kirsher * 1556adfc5217SJeff Kirsher */ 1557adfc5217SJeff Kirsher static int bnx2x_wait_vlan_mac(struct bnx2x *bp, 1558adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *o) 1559adfc5217SJeff Kirsher { 1560adfc5217SJeff Kirsher int cnt = 5000, rc; 1561adfc5217SJeff Kirsher struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; 1562adfc5217SJeff Kirsher struct bnx2x_raw_obj *raw = &o->raw; 1563adfc5217SJeff Kirsher 1564adfc5217SJeff Kirsher while (cnt--) { 1565adfc5217SJeff Kirsher /* Wait for the current command to complete */ 1566adfc5217SJeff Kirsher rc = raw->wait_comp(bp, raw); 1567adfc5217SJeff Kirsher if (rc) 1568adfc5217SJeff Kirsher return rc; 1569adfc5217SJeff Kirsher 1570adfc5217SJeff Kirsher /* Wait until there are no pending commands */ 1571adfc5217SJeff Kirsher if (!bnx2x_exe_queue_empty(exeq)) 15720926d499SYuval Mintz usleep_range(1000, 2000); 1573adfc5217SJeff Kirsher else 1574adfc5217SJeff Kirsher return 0; 1575adfc5217SJeff Kirsher } 1576adfc5217SJeff Kirsher 1577adfc5217SJeff Kirsher return -EBUSY; 1578adfc5217SJeff Kirsher } 1579adfc5217SJeff Kirsher 15808b09be5fSYuval Mintz static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp, 15818b09be5fSYuval Mintz struct bnx2x_vlan_mac_obj *o, 15828b09be5fSYuval Mintz unsigned long *ramrod_flags) 15838b09be5fSYuval Mintz { 15848b09be5fSYuval Mintz int rc = 0; 15858b09be5fSYuval Mintz 
15868b09be5fSYuval Mintz spin_lock_bh(&o->exe_queue.lock); 15878b09be5fSYuval Mintz 15888b09be5fSYuval Mintz DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n"); 15898b09be5fSYuval Mintz rc = __bnx2x_vlan_mac_h_write_trylock(bp, o); 15908b09be5fSYuval Mintz 15918b09be5fSYuval Mintz if (rc != 0) { 15928b09be5fSYuval Mintz __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags); 15938b09be5fSYuval Mintz 15948ac1ed79SJoe Perches /* Calling function should not differentiate between this case 15958b09be5fSYuval Mintz * and the case in which there is already a pending ramrod 15968b09be5fSYuval Mintz */ 15978b09be5fSYuval Mintz rc = 1; 15988b09be5fSYuval Mintz } else { 15998b09be5fSYuval Mintz rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); 16008b09be5fSYuval Mintz } 16018b09be5fSYuval Mintz spin_unlock_bh(&o->exe_queue.lock); 16028b09be5fSYuval Mintz 16038b09be5fSYuval Mintz return rc; 16048b09be5fSYuval Mintz } 16058b09be5fSYuval Mintz 1606adfc5217SJeff Kirsher /** 1607adfc5217SJeff Kirsher * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod 1608adfc5217SJeff Kirsher * 1609adfc5217SJeff Kirsher * @bp: device handle 1610adfc5217SJeff Kirsher * @o: bnx2x_vlan_mac_obj 1611adfc5217SJeff Kirsher * @cqe: 1612adfc5217SJeff Kirsher * @cont: if true schedule next execution chunk 1613adfc5217SJeff Kirsher * 1614adfc5217SJeff Kirsher */ 1615adfc5217SJeff Kirsher static int bnx2x_complete_vlan_mac(struct bnx2x *bp, 1616adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *o, 1617adfc5217SJeff Kirsher union event_ring_elem *cqe, 1618adfc5217SJeff Kirsher unsigned long *ramrod_flags) 1619adfc5217SJeff Kirsher { 1620adfc5217SJeff Kirsher struct bnx2x_raw_obj *r = &o->raw; 1621adfc5217SJeff Kirsher int rc; 1622adfc5217SJeff Kirsher 16238b09be5fSYuval Mintz /* Clearing the pending list & raw state should be made 16248b09be5fSYuval Mintz * atomically (as execution flow assumes they represent the same). 
16258b09be5fSYuval Mintz */ 16268b09be5fSYuval Mintz spin_lock_bh(&o->exe_queue.lock); 16278b09be5fSYuval Mintz 1628adfc5217SJeff Kirsher /* Reset pending list */ 16298b09be5fSYuval Mintz __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); 1630adfc5217SJeff Kirsher 1631adfc5217SJeff Kirsher /* Clear pending */ 1632adfc5217SJeff Kirsher r->clear_pending(r); 1633adfc5217SJeff Kirsher 16348b09be5fSYuval Mintz spin_unlock_bh(&o->exe_queue.lock); 16358b09be5fSYuval Mintz 1636adfc5217SJeff Kirsher /* If ramrod failed this is most likely a SW bug */ 1637adfc5217SJeff Kirsher if (cqe->message.error) 1638adfc5217SJeff Kirsher return -EINVAL; 1639adfc5217SJeff Kirsher 16402de67439SYuval Mintz /* Run the next bulk of pending commands if requested */ 1641adfc5217SJeff Kirsher if (test_bit(RAMROD_CONT, ramrod_flags)) { 16428b09be5fSYuval Mintz rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags); 16438b09be5fSYuval Mintz 1644adfc5217SJeff Kirsher if (rc < 0) 1645adfc5217SJeff Kirsher return rc; 1646adfc5217SJeff Kirsher } 1647adfc5217SJeff Kirsher 1648adfc5217SJeff Kirsher /* If there is more work to do return PENDING */ 1649adfc5217SJeff Kirsher if (!bnx2x_exe_queue_empty(&o->exe_queue)) 1650adfc5217SJeff Kirsher return 1; 1651adfc5217SJeff Kirsher 1652adfc5217SJeff Kirsher return 0; 1653adfc5217SJeff Kirsher } 1654adfc5217SJeff Kirsher 1655adfc5217SJeff Kirsher /** 1656adfc5217SJeff Kirsher * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands. 
1657adfc5217SJeff Kirsher * 1658adfc5217SJeff Kirsher * @bp: device handle 1659adfc5217SJeff Kirsher * @o: bnx2x_qable_obj 1660adfc5217SJeff Kirsher * @elem: bnx2x_exeq_elem 1661adfc5217SJeff Kirsher */ 1662adfc5217SJeff Kirsher static int bnx2x_optimize_vlan_mac(struct bnx2x *bp, 1663adfc5217SJeff Kirsher union bnx2x_qable_obj *qo, 1664adfc5217SJeff Kirsher struct bnx2x_exeq_elem *elem) 1665adfc5217SJeff Kirsher { 1666adfc5217SJeff Kirsher struct bnx2x_exeq_elem query, *pos; 1667adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; 1668adfc5217SJeff Kirsher struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; 1669adfc5217SJeff Kirsher 1670adfc5217SJeff Kirsher memcpy(&query, elem, sizeof(query)); 1671adfc5217SJeff Kirsher 1672adfc5217SJeff Kirsher switch (elem->cmd_data.vlan_mac.cmd) { 1673adfc5217SJeff Kirsher case BNX2X_VLAN_MAC_ADD: 1674adfc5217SJeff Kirsher query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; 1675adfc5217SJeff Kirsher break; 1676adfc5217SJeff Kirsher case BNX2X_VLAN_MAC_DEL: 1677adfc5217SJeff Kirsher query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; 1678adfc5217SJeff Kirsher break; 1679adfc5217SJeff Kirsher default: 1680adfc5217SJeff Kirsher /* Don't handle anything other than ADD or DEL */ 1681adfc5217SJeff Kirsher return 0; 1682adfc5217SJeff Kirsher } 1683adfc5217SJeff Kirsher 1684adfc5217SJeff Kirsher /* If we found the appropriate element - delete it */ 1685adfc5217SJeff Kirsher pos = exeq->get(exeq, &query); 1686adfc5217SJeff Kirsher if (pos) { 1687adfc5217SJeff Kirsher 1688adfc5217SJeff Kirsher /* Return the credit of the optimized command */ 1689adfc5217SJeff Kirsher if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 1690adfc5217SJeff Kirsher &pos->cmd_data.vlan_mac.vlan_mac_flags)) { 1691adfc5217SJeff Kirsher if ((query.cmd_data.vlan_mac.cmd == 1692adfc5217SJeff Kirsher BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) { 169351c1a580SMerav Sicron BNX2X_ERR("Failed to return the credit for the optimized ADD command\n"); 1694adfc5217SJeff 
Kirsher return -EINVAL; 1695adfc5217SJeff Kirsher } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */ 169651c1a580SMerav Sicron BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n"); 1697adfc5217SJeff Kirsher return -EINVAL; 1698adfc5217SJeff Kirsher } 1699adfc5217SJeff Kirsher } 1700adfc5217SJeff Kirsher 1701adfc5217SJeff Kirsher DP(BNX2X_MSG_SP, "Optimizing %s command\n", 1702adfc5217SJeff Kirsher (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 1703adfc5217SJeff Kirsher "ADD" : "DEL"); 1704adfc5217SJeff Kirsher 1705adfc5217SJeff Kirsher list_del(&pos->link); 1706adfc5217SJeff Kirsher bnx2x_exe_queue_free_elem(bp, pos); 1707adfc5217SJeff Kirsher return 1; 1708adfc5217SJeff Kirsher } 1709adfc5217SJeff Kirsher 1710adfc5217SJeff Kirsher return 0; 1711adfc5217SJeff Kirsher } 1712adfc5217SJeff Kirsher 1713adfc5217SJeff Kirsher /** 1714adfc5217SJeff Kirsher * bnx2x_vlan_mac_get_registry_elem - prepare a registry element 1715adfc5217SJeff Kirsher * 1716adfc5217SJeff Kirsher * @bp: device handle 1717adfc5217SJeff Kirsher * @o: 1718adfc5217SJeff Kirsher * @elem: 1719adfc5217SJeff Kirsher * @restore: 1720adfc5217SJeff Kirsher * @re: 1721adfc5217SJeff Kirsher * 1722adfc5217SJeff Kirsher * prepare a registry element according to the current command request. 1723adfc5217SJeff Kirsher */ 1724adfc5217SJeff Kirsher static inline int bnx2x_vlan_mac_get_registry_elem( 1725adfc5217SJeff Kirsher struct bnx2x *bp, 1726adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *o, 1727adfc5217SJeff Kirsher struct bnx2x_exeq_elem *elem, 1728adfc5217SJeff Kirsher bool restore, 1729adfc5217SJeff Kirsher struct bnx2x_vlan_mac_registry_elem **re) 1730adfc5217SJeff Kirsher { 173186564c3fSYuval Mintz enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; 1732adfc5217SJeff Kirsher struct bnx2x_vlan_mac_registry_elem *reg_elem; 1733adfc5217SJeff Kirsher 1734adfc5217SJeff Kirsher /* Allocate a new registry element if needed. 
*/ 1735adfc5217SJeff Kirsher if (!restore && 1736adfc5217SJeff Kirsher ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) { 1737adfc5217SJeff Kirsher reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC); 1738adfc5217SJeff Kirsher if (!reg_elem) 1739adfc5217SJeff Kirsher return -ENOMEM; 1740adfc5217SJeff Kirsher 1741adfc5217SJeff Kirsher /* Get a new CAM offset */ 1742adfc5217SJeff Kirsher if (!o->get_cam_offset(o, ®_elem->cam_offset)) { 174316a5fd92SYuval Mintz /* This shall never happen, because we have checked the 174416a5fd92SYuval Mintz * CAM availability in the 'validate'. 1745adfc5217SJeff Kirsher */ 1746adfc5217SJeff Kirsher WARN_ON(1); 1747adfc5217SJeff Kirsher kfree(reg_elem); 1748adfc5217SJeff Kirsher return -EINVAL; 1749adfc5217SJeff Kirsher } 1750adfc5217SJeff Kirsher 1751adfc5217SJeff Kirsher DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset); 1752adfc5217SJeff Kirsher 1753adfc5217SJeff Kirsher /* Set a VLAN-MAC data */ 1754adfc5217SJeff Kirsher memcpy(®_elem->u, &elem->cmd_data.vlan_mac.u, 1755adfc5217SJeff Kirsher sizeof(reg_elem->u)); 1756adfc5217SJeff Kirsher 1757adfc5217SJeff Kirsher /* Copy the flags (needed for DEL and RESTORE flows) */ 1758adfc5217SJeff Kirsher reg_elem->vlan_mac_flags = 1759adfc5217SJeff Kirsher elem->cmd_data.vlan_mac.vlan_mac_flags; 1760adfc5217SJeff Kirsher } else /* DEL, RESTORE */ 176151c1a580SMerav Sicron reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); 1762adfc5217SJeff Kirsher 1763adfc5217SJeff Kirsher *re = reg_elem; 1764adfc5217SJeff Kirsher return 0; 1765adfc5217SJeff Kirsher } 1766adfc5217SJeff Kirsher 1767adfc5217SJeff Kirsher /** 1768adfc5217SJeff Kirsher * bnx2x_execute_vlan_mac - execute vlan mac command 1769adfc5217SJeff Kirsher * 1770adfc5217SJeff Kirsher * @bp: device handle 1771adfc5217SJeff Kirsher * @qo: 1772adfc5217SJeff Kirsher * @exe_chunk: 1773adfc5217SJeff Kirsher * @ramrod_flags: 1774adfc5217SJeff Kirsher * 1775adfc5217SJeff Kirsher * go and send a ramrod! 
1776adfc5217SJeff Kirsher */ 1777adfc5217SJeff Kirsher static int bnx2x_execute_vlan_mac(struct bnx2x *bp, 1778adfc5217SJeff Kirsher union bnx2x_qable_obj *qo, 1779adfc5217SJeff Kirsher struct list_head *exe_chunk, 1780adfc5217SJeff Kirsher unsigned long *ramrod_flags) 1781adfc5217SJeff Kirsher { 1782adfc5217SJeff Kirsher struct bnx2x_exeq_elem *elem; 1783adfc5217SJeff Kirsher struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; 1784adfc5217SJeff Kirsher struct bnx2x_raw_obj *r = &o->raw; 1785adfc5217SJeff Kirsher int rc, idx = 0; 1786adfc5217SJeff Kirsher bool restore = test_bit(RAMROD_RESTORE, ramrod_flags); 1787adfc5217SJeff Kirsher bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags); 1788adfc5217SJeff Kirsher struct bnx2x_vlan_mac_registry_elem *reg_elem; 178986564c3fSYuval Mintz enum bnx2x_vlan_mac_cmd cmd; 1790adfc5217SJeff Kirsher 179116a5fd92SYuval Mintz /* If DRIVER_ONLY execution is requested, cleanup a registry 1792adfc5217SJeff Kirsher * and exit. Otherwise send a ramrod to FW. 1793adfc5217SJeff Kirsher */ 1794adfc5217SJeff Kirsher if (!drv_only) { 1795adfc5217SJeff Kirsher WARN_ON(r->check_pending(r)); 1796adfc5217SJeff Kirsher 1797adfc5217SJeff Kirsher /* Set pending */ 1798adfc5217SJeff Kirsher r->set_pending(r); 1799adfc5217SJeff Kirsher 180016a5fd92SYuval Mintz /* Fill the ramrod data */ 1801adfc5217SJeff Kirsher list_for_each_entry(elem, exe_chunk, link) { 1802adfc5217SJeff Kirsher cmd = elem->cmd_data.vlan_mac.cmd; 180316a5fd92SYuval Mintz /* We will add to the target object in MOVE command, so 1804adfc5217SJeff Kirsher * change the object for a CAM search. 
1805adfc5217SJeff Kirsher */ 1806adfc5217SJeff Kirsher if (cmd == BNX2X_VLAN_MAC_MOVE) 1807adfc5217SJeff Kirsher cam_obj = elem->cmd_data.vlan_mac.target_obj; 1808adfc5217SJeff Kirsher else 1809adfc5217SJeff Kirsher cam_obj = o; 1810adfc5217SJeff Kirsher 1811adfc5217SJeff Kirsher rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj, 1812adfc5217SJeff Kirsher elem, restore, 1813adfc5217SJeff Kirsher ®_elem); 1814adfc5217SJeff Kirsher if (rc) 1815adfc5217SJeff Kirsher goto error_exit; 1816adfc5217SJeff Kirsher 1817adfc5217SJeff Kirsher WARN_ON(!reg_elem); 1818adfc5217SJeff Kirsher 1819adfc5217SJeff Kirsher /* Push a new entry into the registry */ 1820adfc5217SJeff Kirsher if (!restore && 1821adfc5217SJeff Kirsher ((cmd == BNX2X_VLAN_MAC_ADD) || 1822adfc5217SJeff Kirsher (cmd == BNX2X_VLAN_MAC_MOVE))) 1823adfc5217SJeff Kirsher list_add(®_elem->link, &cam_obj->head); 1824adfc5217SJeff Kirsher 1825adfc5217SJeff Kirsher /* Configure a single command in a ramrod data buffer */ 1826adfc5217SJeff Kirsher o->set_one_rule(bp, o, elem, idx, 1827adfc5217SJeff Kirsher reg_elem->cam_offset); 1828adfc5217SJeff Kirsher 1829adfc5217SJeff Kirsher /* MOVE command consumes 2 entries in the ramrod data */ 1830adfc5217SJeff Kirsher if (cmd == BNX2X_VLAN_MAC_MOVE) 1831adfc5217SJeff Kirsher idx += 2; 1832adfc5217SJeff Kirsher else 1833adfc5217SJeff Kirsher idx++; 1834adfc5217SJeff Kirsher } 1835adfc5217SJeff Kirsher 183616a5fd92SYuval Mintz /* No need for an explicit memory barrier here as long we would 1837adfc5217SJeff Kirsher * need to ensure the ordering of writing to the SPQ element 1838adfc5217SJeff Kirsher * and updating of the SPQ producer which involves a memory 1839adfc5217SJeff Kirsher * read and we will have to put a full memory barrier there 1840adfc5217SJeff Kirsher * (inside bnx2x_sp_post()). 
1841adfc5217SJeff Kirsher */ 1842adfc5217SJeff Kirsher 1843adfc5217SJeff Kirsher rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, 1844adfc5217SJeff Kirsher U64_HI(r->rdata_mapping), 1845adfc5217SJeff Kirsher U64_LO(r->rdata_mapping), 1846adfc5217SJeff Kirsher ETH_CONNECTION_TYPE); 1847adfc5217SJeff Kirsher if (rc) 1848adfc5217SJeff Kirsher goto error_exit; 1849adfc5217SJeff Kirsher } 1850adfc5217SJeff Kirsher 1851adfc5217SJeff Kirsher /* Now, when we are done with the ramrod - clean up the registry */ 1852adfc5217SJeff Kirsher list_for_each_entry(elem, exe_chunk, link) { 1853adfc5217SJeff Kirsher cmd = elem->cmd_data.vlan_mac.cmd; 1854adfc5217SJeff Kirsher if ((cmd == BNX2X_VLAN_MAC_DEL) || 1855adfc5217SJeff Kirsher (cmd == BNX2X_VLAN_MAC_MOVE)) { 185651c1a580SMerav Sicron reg_elem = o->check_del(bp, o, 185751c1a580SMerav Sicron &elem->cmd_data.vlan_mac.u); 1858adfc5217SJeff Kirsher 1859adfc5217SJeff Kirsher WARN_ON(!reg_elem); 1860adfc5217SJeff Kirsher 1861adfc5217SJeff Kirsher o->put_cam_offset(o, reg_elem->cam_offset); 1862adfc5217SJeff Kirsher list_del(®_elem->link); 1863adfc5217SJeff Kirsher kfree(reg_elem); 1864adfc5217SJeff Kirsher } 1865adfc5217SJeff Kirsher } 1866adfc5217SJeff Kirsher 1867adfc5217SJeff Kirsher if (!drv_only) 1868adfc5217SJeff Kirsher return 1; 1869adfc5217SJeff Kirsher else 1870adfc5217SJeff Kirsher return 0; 1871adfc5217SJeff Kirsher 1872adfc5217SJeff Kirsher error_exit: 1873adfc5217SJeff Kirsher r->clear_pending(r); 1874adfc5217SJeff Kirsher 1875adfc5217SJeff Kirsher /* Cleanup a registry in case of a failure */ 1876adfc5217SJeff Kirsher list_for_each_entry(elem, exe_chunk, link) { 1877adfc5217SJeff Kirsher cmd = elem->cmd_data.vlan_mac.cmd; 1878adfc5217SJeff Kirsher 1879adfc5217SJeff Kirsher if (cmd == BNX2X_VLAN_MAC_MOVE) 1880adfc5217SJeff Kirsher cam_obj = elem->cmd_data.vlan_mac.target_obj; 1881adfc5217SJeff Kirsher else 1882adfc5217SJeff Kirsher cam_obj = o; 1883adfc5217SJeff Kirsher 1884adfc5217SJeff Kirsher /* Delete all newly added 
		 * above entries */
		if (!restore &&
		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
		     (cmd == BNX2X_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(bp, cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				list_del(&reg_elem->link);
				kfree(reg_elem);
			}
		}
	}

	return rc;
}

/* Wrap the user request in an exe-queue element and queue it (pending list).
 * MOVE consumes two rule slots (DEL on source + ADD on destination), hence
 * cmd_len = 2; all other commands consume one.
 */
static inline int bnx2x_vlan_mac_push_new_cmd(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);

	/* Allocate the execution queue element */
	elem = bnx2x_exe_queue_alloc_elem(bp);
	if (!elem)
		return -ENOMEM;

	/* Set the command 'length' */
	switch (p->user_req.cmd) {
	case BNX2X_VLAN_MAC_MOVE:
		elem->cmd_len = 2;
		break;
	default:
		elem->cmd_len = 1;
	}

	/* Fill the object specific info */
	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));

	/* Try to add a new command to the pending list */
	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
}

/**
 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @bp:	device handle
 * @p:	VLAN/MAC ramrod parameters (command, flags and the target object)
 *
 * Queues the requested command (unless RAMROD_CONT is set) and, depending on
 * the ramrod flags, executes pending commands and/or waits for completions.
 * Returns 0 when done, 1 if commands are still pending, negative on error.
 */
int bnx2x_config_vlan_mac(struct bnx2x *bp,
			  struct bnx2x_vlan_mac_ramrod_params *p)
{
	int rc = 0;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
	struct bnx2x_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
		if (rc)
			return rc;
	}

	/* If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		rc = 1;

	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
						   &p->ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then the user wants to wait until the last command is done.
	 */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/* Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(bp, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = __bnx2x_vlan_mac_execute_step(bp,
							   p->vlan_mac_obj,
							   &p->ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return 0;
	}

	return rc;
}

/**
 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @bp:			device handle
 * @o:			vlan/mac object whose registry is scanned
 * @vlan_mac_flags:	flags spec selecting which elements to delete
 * @ramrod_flags:	execution flags to be used for this deletion
 *
 * Returns zero if the last operation has completed successfully and there are
 * no more elements left, positive value if the last operation has completed
 * successfully and there are more previously configured elements, negative
 * value if the current operation has failed.
 */
static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
	struct bnx2x_vlan_mac_ramrod_params p;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
	unsigned long flags;
	int read_lock;
	int rc = 0;

	/* Clear pending commands first */

	spin_lock_bh(&exeq->lock);

	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
		flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
		if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
		    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
			rc = exeq->remove(bp, exeq->owner, exeq_pos);
			if (rc) {
				BNX2X_ERR("Failed to remove command\n");
				spin_unlock_bh(&exeq->lock);
				return rc;
			}
			list_del(&exeq_pos->link);
			bnx2x_exe_queue_free_elem(bp, exeq_pos);
		}
	}

	spin_unlock_bh(&exeq->lock);

	/* Prepare a command request */
	memset(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	/* Add all but the last VLAN-MAC to the execution queue without actually
	 * executing anything.
	 */
	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
	__clear_bit(RAMROD_CONT, &p.ramrod_flags);

	DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
	read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
	if (read_lock != 0)
		return read_lock;

	/* Queue a DEL command for every registry entry matching the spec */
	list_for_each_entry(pos, &o->head, link) {
		flags = pos->vlan_mac_flags;
		if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
		    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
			rc = bnx2x_config_vlan_mac(bp, &p);
			if (rc < 0) {
				BNX2X_ERR("Failed to add a new DEL command\n");
				bnx2x_vlan_mac_h_read_unlock(bp, o);
				return rc;
			}
		}
	}

	DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
	bnx2x_vlan_mac_h_read_unlock(bp, o);

	/* Now drive the queued DELs to execution with the caller's flags */
	p.ramrod_flags = *ramrod_flags;
	__set_bit(RAMROD_CONT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}

/* Fill in a raw object: ids, ramrod data buffer and the generic
 * pending-state callbacks shared by all classification objects.
 */
static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
	unsigned long *pstate, bnx2x_obj_type type)
{
	raw->func_id = func_id;
	raw->cid = cid;
	raw->cl_id = cl_id;
	raw->rdata = rdata;
	raw->rdata_mapping = rdata_mapping;
	raw->state = state;
	raw->pstate = pstate;
	raw->obj_type = type;
	raw->check_pending = bnx2x_raw_check_pending;
	raw->clear_pending = bnx2x_raw_clear_pending;
	raw->set_pending = bnx2x_raw_set_pending;
	raw->wait_comp = bnx2x_raw_wait;
}

/* Common initialization for MAC, VLAN and VLAN-MAC objects: registry head,
 * reader-lock bookkeeping, credit pools and the shared operation callbacks.
 */
static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
	int state, unsigned long *pstate, bnx2x_obj_type type,
	struct bnx2x_credit_pool_obj *macs_pool,
	struct bnx2x_credit_pool_obj *vlans_pool)
{
	INIT_LIST_HEAD(&o->head);
	o->head_reader = 0;
	o->head_exe_request = false;
	o->saved_ramrod_flags = 0;

	o->macs_pool = macs_pool;
	o->vlans_pool = vlans_pool;

	o->delete_all = bnx2x_vlan_mac_del_all;
	o->restore = bnx2x_vlan_mac_restore;
o->complete = bnx2x_complete_vlan_mac;
	o->wait = bnx2x_wait_vlan_mac;

	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
			   state, pstate, type);
}

/* Initialize a MAC-only classification object; callback set depends on the
 * chip family (E1x uses the legacy SET_MAC ramrod, newer chips use the
 * classification-rules ramrod).
 */
void bnx2x_init_mac_obj(struct bnx2x *bp,
			struct bnx2x_vlan_mac_obj *mac_obj,
			u8 cl_id, u32 cid, u8 func_id, void *rdata,
			dma_addr_t rdata_mapping, int state,
			unsigned long *pstate, bnx2x_obj_type type,
			struct bnx2x_credit_pool_obj *macs_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;

	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, NULL);

	/* CAM credit pool handling */
	mac_obj->get_credit = bnx2x_get_credit_mac;
	mac_obj->put_credit = bnx2x_put_credit_mac;
	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;

	if (CHIP_IS_E1x(bp)) {
		mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
		mac_obj->check_del = bnx2x_check_mac_del;
		mac_obj->check_add = bnx2x_check_mac_add;
		mac_obj->check_move = bnx2x_check_move_always_err;
		mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, 1, qable_obj,
				     bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
	} else {
		mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
		mac_obj->check_del = bnx2x_check_mac_del;
		mac_obj->check_add = bnx2x_check_mac_add;
		mac_obj->check_move = bnx2x_check_move;
		mac_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
		mac_obj->get_n_elements = bnx2x_get_n_elements;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
	}
}

/* Initialize a VLAN-only classification object (E2 and newer chips only). */
void bnx2x_init_vlan_obj(struct bnx2x *bp,
			 struct bnx2x_vlan_mac_obj *vlan_obj,
			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
			 dma_addr_t rdata_mapping, int state,
			 unsigned long *pstate, bnx2x_obj_type type,
			 struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;

	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type, NULL,
				   vlans_pool);

	vlan_obj->get_credit = bnx2x_get_credit_vlan;
	vlan_obj->put_credit = bnx2x_put_credit_vlan;
	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Do not support chips others than E2 and newer\n");
		BUG();
	} else {
		vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
		vlan_obj->check_del = bnx2x_check_vlan_del;
		vlan_obj->check_add = bnx2x_check_vlan_add;
		vlan_obj->check_move = bnx2x_check_move;
		vlan_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
		vlan_obj->get_n_elements = bnx2x_get_n_elements;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan);
	}
}

/* Initialize a paired VLAN-MAC classification object. */
void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
			     dma_addr_t rdata_mapping, int state,
			     unsigned long
*pstate, bnx2x_obj_type type,
			     struct bnx2x_credit_pool_obj *macs_pool,
			     struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj =
		(union bnx2x_qable_obj *)vlan_mac_obj;

	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, vlans_pool);

	/* CAM pool handling */
	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
	/* CAM offset is relevant for 57710 and 57711 chips only which have a
	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
	 * will be taken from MACs' pool object only.
	 */
	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;

	if (CHIP_IS_E1(bp)) {
		BNX2X_ERR("Do not support chips others than E2\n");
		BUG();
	} else if (CHIP_IS_E1H(bp)) {
		vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
		vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
		vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
		vlan_mac_obj->check_move = bnx2x_check_move_always_err;
		vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_mac_obj->exe_queue, 1, qable_obj,
				     bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan_mac);
	} else {
		vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
		vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
		vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
		vlan_mac_obj->check_move = bnx2x_check_move;
		vlan_mac_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_mac_obj->exe_queue,
				     CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan_mac);
	}
}
/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */

/* Copy a MAC filter config structure into TSTORM internal memory for pf_id. */
static inline void __storm_memset_mac_filters(struct bnx2x *bp,
			struct tstorm_eth_mac_filter_config *mac_filters,
			u16 pf_id)
{
	size_t size = sizeof(struct tstorm_eth_mac_filter_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);

	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
}

static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
				 struct bnx2x_rx_mode_ramrod_params *p)
{
	/* update the bp MAC filter structure */
	u32 mask = (1 << p->cl_id);

	struct tstorm_eth_mac_filter_config *mac_filters =
		(struct tstorm_eth_mac_filter_config *)p->rdata;

	/* initial setting is drop-all */
	u8 drop_all_ucast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	/* In e1x we only take into account the rx accept flag since tx switching
	 * isn't enabled.
	 */
	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
		/* accept matched ucast */
		drop_all_ucast = 0;

	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
		/* accept (all) bcast */
		accp_all_bcast = 1;
	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
		/* accept unmatched unicasts */
		unmatched_unicast = 1;

	/* Set or clear this client's bit in each per-PF filter mask */
	mac_filters->ucast_drop_all = drop_all_ucast ?
		mac_filters->ucast_drop_all | mask :
		mac_filters->ucast_drop_all & ~mask;

	mac_filters->mcast_drop_all = drop_all_mcast ?
		mac_filters->mcast_drop_all | mask :
		mac_filters->mcast_drop_all & ~mask;

	mac_filters->ucast_accept_all = accp_all_ucast ?
		mac_filters->ucast_accept_all | mask :
		mac_filters->ucast_accept_all & ~mask;

	mac_filters->mcast_accept_all = accp_all_mcast ?
		mac_filters->mcast_accept_all | mask :
		mac_filters->mcast_accept_all & ~mask;

	mac_filters->bcast_accept_all = accp_all_bcast ?
		mac_filters->bcast_accept_all | mask :
		mac_filters->bcast_accept_all & ~mask;

	mac_filters->unmatched_unicast = unmatched_unicast ?
		mac_filters->unmatched_unicast | mask :
		mac_filters->unmatched_unicast & ~mask;

	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
	   mac_filters->bcast_accept_all);

	/* write the MAC filter structure */
	__storm_memset_mac_filters(bp, mac_filters, p->func_id);

	/* The operation is completed */
	clear_bit(p->state, p->pstate);
	smp_mb__after_atomic();

	return 0;
}

/* Setup ramrod data */
static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
				struct eth_classify_header *hdr,
				u8 rule_cnt)
{
	hdr->echo = cpu_to_le32(cid);
	hdr->rule_cnt = rule_cnt;
}

/* Translate the driver's accept_flags into an eth_filter_rules_cmd state. */
static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
				unsigned long *accept_flags,
				struct eth_filter_rules_cmd *cmd,
				bool clear_accept_all)
{
	u16 state;

	/* start with 'drop-all' */
	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

	if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;

	if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
	}

	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
	}

	if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;

	if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
		state
|= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
	}

	if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;

	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
	if (clear_accept_all) {
		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
	}

	cmd->state = cpu_to_le16(state);
}

/* Build and post a FILTER_RULES ramrod configuring Rx and/or Tx (internal
 * switching) rx-mode rules, plus optional separate rules for the FCoE queue.
 * Returns 1 (completion pending) on success, negative on error.
 */
static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
				struct bnx2x_rx_mode_ramrod_params *p)
{
	struct eth_filter_rules_ramrod_data *data = p->rdata;
	int rc;
	u8 rule_idx = 0;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* Setup ramrod data */

	/* Tx (internal switching) */
	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_TX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
					       &(data->rules[rule_idx++]),
					       false);
	}

	/* Rx */
	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_RX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
					       &(data->rules[rule_idx++]),
					       false);
	}

	/* If FCoE Queue configuration has been requested configure the Rx and
	 * internal switching modes for this queue in separate rules.
	 *
	 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
	 */
	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
		/* Tx (internal switching) */
		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
				ETH_FILTER_RULES_CMD_TX_CMD;

			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
						       &(data->rules[rule_idx]),
						       true);
			rule_idx++;
		}

		/* Rx */
		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
				ETH_FILTER_RULES_CMD_RX_CMD;

			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
						       &(data->rules[rule_idx]),
						       true);
			rule_idx++;
		}
	}

	/* Set the ramrod header (most importantly - number of rules to
	 * configure).
	 */
	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);

	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
	   data->header.rule_cnt, p->rx_accept_flags,
	   p->tx_accept_flags);

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */

	/* Send a ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
			   U64_HI(p->rdata_mapping),
			   U64_LO(p->rdata_mapping),
			   ETH_CONNECTION_TYPE);
	if (rc)
		return rc;

	/* Ramrod completion is pending */
	return 1;
}

static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
				      struct bnx2x_rx_mode_ramrod_params *p)
{
	return bnx2x_state_wait(bp, p->state, p->pstate);
}

static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
				    struct bnx2x_rx_mode_ramrod_params *p)
{
	/* Do nothing */
	return 0;
2557adfc5217SJeff Kirsher } 2558adfc5217SJeff Kirsher 2559adfc5217SJeff Kirsher int bnx2x_config_rx_mode(struct bnx2x *bp, 2560adfc5217SJeff Kirsher struct bnx2x_rx_mode_ramrod_params *p) 2561adfc5217SJeff Kirsher { 2562adfc5217SJeff Kirsher int rc; 2563adfc5217SJeff Kirsher 2564adfc5217SJeff Kirsher /* Configure the new classification in the chip */ 2565adfc5217SJeff Kirsher rc = p->rx_mode_obj->config_rx_mode(bp, p); 2566adfc5217SJeff Kirsher if (rc < 0) 2567adfc5217SJeff Kirsher return rc; 2568adfc5217SJeff Kirsher 2569adfc5217SJeff Kirsher /* Wait for a ramrod completion if was requested */ 2570adfc5217SJeff Kirsher if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { 2571adfc5217SJeff Kirsher rc = p->rx_mode_obj->wait_comp(bp, p); 2572adfc5217SJeff Kirsher if (rc) 2573adfc5217SJeff Kirsher return rc; 2574adfc5217SJeff Kirsher } 2575adfc5217SJeff Kirsher 2576adfc5217SJeff Kirsher return rc; 2577adfc5217SJeff Kirsher } 2578adfc5217SJeff Kirsher 2579adfc5217SJeff Kirsher void bnx2x_init_rx_mode_obj(struct bnx2x *bp, 2580adfc5217SJeff Kirsher struct bnx2x_rx_mode_obj *o) 2581adfc5217SJeff Kirsher { 2582adfc5217SJeff Kirsher if (CHIP_IS_E1x(bp)) { 2583adfc5217SJeff Kirsher o->wait_comp = bnx2x_empty_rx_mode_wait; 2584adfc5217SJeff Kirsher o->config_rx_mode = bnx2x_set_rx_mode_e1x; 2585adfc5217SJeff Kirsher } else { 2586adfc5217SJeff Kirsher o->wait_comp = bnx2x_wait_rx_mode_comp_e2; 2587adfc5217SJeff Kirsher o->config_rx_mode = bnx2x_set_rx_mode_e2; 2588adfc5217SJeff Kirsher } 2589adfc5217SJeff Kirsher } 2590adfc5217SJeff Kirsher 2591adfc5217SJeff Kirsher /********************* Multicast verbs: SET, CLEAR ****************************/ 2592adfc5217SJeff Kirsher static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac) 2593adfc5217SJeff Kirsher { 2594adfc5217SJeff Kirsher return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff; 2595adfc5217SJeff Kirsher } 2596adfc5217SJeff Kirsher 2597adfc5217SJeff Kirsher struct bnx2x_mcast_mac_elem { 2598adfc5217SJeff Kirsher struct list_head 
link; 2599adfc5217SJeff Kirsher u8 mac[ETH_ALEN]; 2600adfc5217SJeff Kirsher u8 pad[2]; /* For a natural alignment of the following buffer */ 2601adfc5217SJeff Kirsher }; 2602adfc5217SJeff Kirsher 2603c7b7b483SYuval Mintz struct bnx2x_mcast_bin_elem { 2604c7b7b483SYuval Mintz struct list_head link; 2605c7b7b483SYuval Mintz int bin; 2606c7b7b483SYuval Mintz int type; /* BNX2X_MCAST_CMD_SET_{ADD, DEL} */ 2607c7b7b483SYuval Mintz }; 2608c7b7b483SYuval Mintz 26093129e159SJason Baron union bnx2x_mcast_elem { 26103129e159SJason Baron struct bnx2x_mcast_bin_elem bin_elem; 26113129e159SJason Baron struct bnx2x_mcast_mac_elem mac_elem; 26123129e159SJason Baron }; 26133129e159SJason Baron 26143129e159SJason Baron struct bnx2x_mcast_elem_group { 26153129e159SJason Baron struct list_head mcast_group_link; 26163129e159SJason Baron union bnx2x_mcast_elem mcast_elems[]; 26173129e159SJason Baron }; 26183129e159SJason Baron 26193129e159SJason Baron #define MCAST_MAC_ELEMS_PER_PG \ 26203129e159SJason Baron ((PAGE_SIZE - sizeof(struct bnx2x_mcast_elem_group)) / \ 26213129e159SJason Baron sizeof(union bnx2x_mcast_elem)) 26223129e159SJason Baron 2623adfc5217SJeff Kirsher struct bnx2x_pending_mcast_cmd { 2624adfc5217SJeff Kirsher struct list_head link; 26253129e159SJason Baron struct list_head group_head; 2626adfc5217SJeff Kirsher int type; /* BNX2X_MCAST_CMD_X */ 2627adfc5217SJeff Kirsher union { 2628adfc5217SJeff Kirsher struct list_head macs_head; 2629adfc5217SJeff Kirsher u32 macs_num; /* Needed for DEL command */ 2630adfc5217SJeff Kirsher int next_bin; /* Needed for RESTORE flow with aprox match */ 2631adfc5217SJeff Kirsher } data; 2632adfc5217SJeff Kirsher 2633c7b7b483SYuval Mintz bool set_convert; /* in case type == BNX2X_MCAST_CMD_SET, this is set 2634c7b7b483SYuval Mintz * when macs_head had been converted to a list of 2635c7b7b483SYuval Mintz * bnx2x_mcast_bin_elem. 
2636c7b7b483SYuval Mintz */ 2637c7b7b483SYuval Mintz 2638adfc5217SJeff Kirsher bool done; /* set to true, when the command has been handled, 2639adfc5217SJeff Kirsher * practically used in 57712 handling only, where one pending 2640adfc5217SJeff Kirsher * command may be handled in a few operations. As long as for 2641adfc5217SJeff Kirsher * other chips every operation handling is completed in a 2642adfc5217SJeff Kirsher * single ramrod, there is no need to utilize this field. 2643adfc5217SJeff Kirsher */ 2644adfc5217SJeff Kirsher }; 2645adfc5217SJeff Kirsher 2646adfc5217SJeff Kirsher static int bnx2x_mcast_wait(struct bnx2x *bp, 2647adfc5217SJeff Kirsher struct bnx2x_mcast_obj *o) 2648adfc5217SJeff Kirsher { 2649adfc5217SJeff Kirsher if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) || 2650adfc5217SJeff Kirsher o->raw.wait_comp(bp, &o->raw)) 2651adfc5217SJeff Kirsher return -EBUSY; 2652adfc5217SJeff Kirsher 2653adfc5217SJeff Kirsher return 0; 2654adfc5217SJeff Kirsher } 2655adfc5217SJeff Kirsher 26563129e159SJason Baron static void bnx2x_free_groups(struct list_head *mcast_group_list) 26573129e159SJason Baron { 26583129e159SJason Baron struct bnx2x_mcast_elem_group *current_mcast_group; 26593129e159SJason Baron 26603129e159SJason Baron while (!list_empty(mcast_group_list)) { 26613129e159SJason Baron current_mcast_group = list_first_entry(mcast_group_list, 26623129e159SJason Baron struct bnx2x_mcast_elem_group, 26633129e159SJason Baron mcast_group_link); 26643129e159SJason Baron list_del(¤t_mcast_group->mcast_group_link); 26653129e159SJason Baron free_page((unsigned long)current_mcast_group); 26663129e159SJason Baron } 26673129e159SJason Baron } 26683129e159SJason Baron 2669adfc5217SJeff Kirsher static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, 2670adfc5217SJeff Kirsher struct bnx2x_mcast_obj *o, 2671adfc5217SJeff Kirsher struct bnx2x_mcast_ramrod_params *p, 267286564c3fSYuval Mintz enum bnx2x_mcast_cmd cmd) 2673adfc5217SJeff Kirsher { 2674adfc5217SJeff 
Kirsher struct bnx2x_pending_mcast_cmd *new_cmd; 2675adfc5217SJeff Kirsher struct bnx2x_mcast_list_elem *pos; 26763129e159SJason Baron struct bnx2x_mcast_elem_group *elem_group; 26773129e159SJason Baron struct bnx2x_mcast_mac_elem *mac_elem; 26783129e159SJason Baron int total_elems = 0, macs_list_len = 0, offset = 0; 2679c7b7b483SYuval Mintz 2680c7b7b483SYuval Mintz /* When adding MACs we'll need to store their values */ 2681c7b7b483SYuval Mintz if (cmd == BNX2X_MCAST_CMD_ADD || cmd == BNX2X_MCAST_CMD_SET) 2682c7b7b483SYuval Mintz macs_list_len = p->mcast_list_len; 2683adfc5217SJeff Kirsher 2684adfc5217SJeff Kirsher /* If the command is empty ("handle pending commands only"), break */ 2685adfc5217SJeff Kirsher if (!p->mcast_list_len) 2686adfc5217SJeff Kirsher return 0; 2687adfc5217SJeff Kirsher 2688adfc5217SJeff Kirsher /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ 26893129e159SJason Baron new_cmd = kzalloc(sizeof(*new_cmd), GFP_ATOMIC); 2690adfc5217SJeff Kirsher if (!new_cmd) 2691adfc5217SJeff Kirsher return -ENOMEM; 2692adfc5217SJeff Kirsher 26933129e159SJason Baron INIT_LIST_HEAD(&new_cmd->data.macs_head); 26943129e159SJason Baron INIT_LIST_HEAD(&new_cmd->group_head); 26953129e159SJason Baron new_cmd->type = cmd; 26963129e159SJason Baron new_cmd->done = false; 26973129e159SJason Baron 269851c1a580SMerav Sicron DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n", 269951c1a580SMerav Sicron cmd, macs_list_len); 2700adfc5217SJeff Kirsher 2701adfc5217SJeff Kirsher switch (cmd) { 2702adfc5217SJeff Kirsher case BNX2X_MCAST_CMD_ADD: 2703c7b7b483SYuval Mintz case BNX2X_MCAST_CMD_SET: 27043129e159SJason Baron /* For a set command, we need to allocate sufficient memory for 27053129e159SJason Baron * all the bins, since we can't analyze at this point how much 27063129e159SJason Baron * memory would be required. 
2707adfc5217SJeff Kirsher */ 27083129e159SJason Baron total_elems = macs_list_len; 27093129e159SJason Baron if (cmd == BNX2X_MCAST_CMD_SET) { 27103129e159SJason Baron if (total_elems < BNX2X_MCAST_BINS_NUM) 27113129e159SJason Baron total_elems = BNX2X_MCAST_BINS_NUM; 2712adfc5217SJeff Kirsher } 27133129e159SJason Baron while (total_elems > 0) { 27143129e159SJason Baron elem_group = (struct bnx2x_mcast_elem_group *) 27153129e159SJason Baron __get_free_page(GFP_ATOMIC | __GFP_ZERO); 27163129e159SJason Baron if (!elem_group) { 27173129e159SJason Baron bnx2x_free_groups(&new_cmd->group_head); 2718e96e0edeSjbaron@akamai.com kfree(new_cmd); 27193129e159SJason Baron return -ENOMEM; 27203129e159SJason Baron } 27213129e159SJason Baron total_elems -= MCAST_MAC_ELEMS_PER_PG; 27223129e159SJason Baron list_add_tail(&elem_group->mcast_group_link, 27233129e159SJason Baron &new_cmd->group_head); 27243129e159SJason Baron } 27253129e159SJason Baron elem_group = list_first_entry(&new_cmd->group_head, 27263129e159SJason Baron struct bnx2x_mcast_elem_group, 27273129e159SJason Baron mcast_group_link); 27283129e159SJason Baron list_for_each_entry(pos, &p->mcast_list, link) { 27293129e159SJason Baron mac_elem = &elem_group->mcast_elems[offset].mac_elem; 27303129e159SJason Baron memcpy(mac_elem->mac, pos->mac, ETH_ALEN); 27313129e159SJason Baron /* Push the MACs of the current command into the pending 27323129e159SJason Baron * command MACs list: FIFO 27333129e159SJason Baron */ 27343129e159SJason Baron list_add_tail(&mac_elem->link, 27353129e159SJason Baron &new_cmd->data.macs_head); 27363129e159SJason Baron offset++; 27373129e159SJason Baron if (offset == MCAST_MAC_ELEMS_PER_PG) { 27383129e159SJason Baron offset = 0; 27393129e159SJason Baron elem_group = list_next_entry(elem_group, 27403129e159SJason Baron mcast_group_link); 27413129e159SJason Baron } 27423129e159SJason Baron } 2743adfc5217SJeff Kirsher break; 2744adfc5217SJeff Kirsher 2745adfc5217SJeff Kirsher case BNX2X_MCAST_CMD_DEL: 
2746adfc5217SJeff Kirsher new_cmd->data.macs_num = p->mcast_list_len; 2747adfc5217SJeff Kirsher break; 2748adfc5217SJeff Kirsher 2749adfc5217SJeff Kirsher case BNX2X_MCAST_CMD_RESTORE: 2750adfc5217SJeff Kirsher new_cmd->data.next_bin = 0; 2751adfc5217SJeff Kirsher break; 2752adfc5217SJeff Kirsher 2753adfc5217SJeff Kirsher default: 27548b6d5c09SJesper Juhl kfree(new_cmd); 2755adfc5217SJeff Kirsher BNX2X_ERR("Unknown command: %d\n", cmd); 2756adfc5217SJeff Kirsher return -EINVAL; 2757adfc5217SJeff Kirsher } 2758adfc5217SJeff Kirsher 2759adfc5217SJeff Kirsher /* Push the new pending command to the tail of the pending list: FIFO */ 2760adfc5217SJeff Kirsher list_add_tail(&new_cmd->link, &o->pending_cmds_head); 2761adfc5217SJeff Kirsher 2762adfc5217SJeff Kirsher o->set_sched(o); 2763adfc5217SJeff Kirsher 2764adfc5217SJeff Kirsher return 1; 2765adfc5217SJeff Kirsher } 2766adfc5217SJeff Kirsher 2767adfc5217SJeff Kirsher /** 2768adfc5217SJeff Kirsher * bnx2x_mcast_get_next_bin - get the next set bin (index) 2769adfc5217SJeff Kirsher * 2770adfc5217SJeff Kirsher * @o: 2771adfc5217SJeff Kirsher * @last: index to start looking from (including) 2772adfc5217SJeff Kirsher * 2773adfc5217SJeff Kirsher * returns the next found (set) bin or a negative value if none is found. 2774adfc5217SJeff Kirsher */ 2775adfc5217SJeff Kirsher static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last) 2776adfc5217SJeff Kirsher { 2777adfc5217SJeff Kirsher int i, j, inner_start = last % BIT_VEC64_ELEM_SZ; 2778adfc5217SJeff Kirsher 2779adfc5217SJeff Kirsher for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) { 2780adfc5217SJeff Kirsher if (o->registry.aprox_match.vec[i]) 2781adfc5217SJeff Kirsher for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) { 2782adfc5217SJeff Kirsher int cur_bit = j + BIT_VEC64_ELEM_SZ * i; 2783adfc5217SJeff Kirsher if (BIT_VEC64_TEST_BIT(o->registry.aprox_match. 
2784adfc5217SJeff Kirsher vec, cur_bit)) { 2785adfc5217SJeff Kirsher return cur_bit; 2786adfc5217SJeff Kirsher } 2787adfc5217SJeff Kirsher } 2788adfc5217SJeff Kirsher inner_start = 0; 2789adfc5217SJeff Kirsher } 2790adfc5217SJeff Kirsher 2791adfc5217SJeff Kirsher /* None found */ 2792adfc5217SJeff Kirsher return -1; 2793adfc5217SJeff Kirsher } 2794adfc5217SJeff Kirsher 2795adfc5217SJeff Kirsher /** 2796adfc5217SJeff Kirsher * bnx2x_mcast_clear_first_bin - find the first set bin and clear it 2797adfc5217SJeff Kirsher * 2798adfc5217SJeff Kirsher * @o: 2799adfc5217SJeff Kirsher * 2800adfc5217SJeff Kirsher * returns the index of the found bin or -1 if none is found 2801adfc5217SJeff Kirsher */ 2802adfc5217SJeff Kirsher static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o) 2803adfc5217SJeff Kirsher { 2804adfc5217SJeff Kirsher int cur_bit = bnx2x_mcast_get_next_bin(o, 0); 2805adfc5217SJeff Kirsher 2806adfc5217SJeff Kirsher if (cur_bit >= 0) 2807adfc5217SJeff Kirsher BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit); 2808adfc5217SJeff Kirsher 2809adfc5217SJeff Kirsher return cur_bit; 2810adfc5217SJeff Kirsher } 2811adfc5217SJeff Kirsher 2812adfc5217SJeff Kirsher static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o) 2813adfc5217SJeff Kirsher { 2814adfc5217SJeff Kirsher struct bnx2x_raw_obj *raw = &o->raw; 2815adfc5217SJeff Kirsher u8 rx_tx_flag = 0; 2816adfc5217SJeff Kirsher 2817adfc5217SJeff Kirsher if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || 2818adfc5217SJeff Kirsher (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) 2819adfc5217SJeff Kirsher rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD; 2820adfc5217SJeff Kirsher 2821adfc5217SJeff Kirsher if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || 2822adfc5217SJeff Kirsher (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) 2823adfc5217SJeff Kirsher rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD; 2824adfc5217SJeff Kirsher 2825adfc5217SJeff Kirsher return rx_tx_flag; 2826adfc5217SJeff Kirsher } 2827adfc5217SJeff 
Kirsher 2828adfc5217SJeff Kirsher static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, 2829adfc5217SJeff Kirsher struct bnx2x_mcast_obj *o, int idx, 2830adfc5217SJeff Kirsher union bnx2x_mcast_config_data *cfg_data, 283186564c3fSYuval Mintz enum bnx2x_mcast_cmd cmd) 2832adfc5217SJeff Kirsher { 2833adfc5217SJeff Kirsher struct bnx2x_raw_obj *r = &o->raw; 2834adfc5217SJeff Kirsher struct eth_multicast_rules_ramrod_data *data = 2835adfc5217SJeff Kirsher (struct eth_multicast_rules_ramrod_data *)(r->rdata); 2836adfc5217SJeff Kirsher u8 func_id = r->func_id; 2837adfc5217SJeff Kirsher u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o); 2838adfc5217SJeff Kirsher int bin; 2839adfc5217SJeff Kirsher 2840c7b7b483SYuval Mintz if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE) || 2841c7b7b483SYuval Mintz (cmd == BNX2X_MCAST_CMD_SET_ADD)) 2842adfc5217SJeff Kirsher rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; 2843adfc5217SJeff Kirsher 2844adfc5217SJeff Kirsher data->rules[idx].cmd_general_data |= rx_tx_add_flag; 2845adfc5217SJeff Kirsher 2846adfc5217SJeff Kirsher /* Get a bin and update a bins' vector */ 2847adfc5217SJeff Kirsher switch (cmd) { 2848adfc5217SJeff Kirsher case BNX2X_MCAST_CMD_ADD: 2849adfc5217SJeff Kirsher bin = bnx2x_mcast_bin_from_mac(cfg_data->mac); 2850adfc5217SJeff Kirsher BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); 2851adfc5217SJeff Kirsher break; 2852adfc5217SJeff Kirsher 2853adfc5217SJeff Kirsher case BNX2X_MCAST_CMD_DEL: 2854adfc5217SJeff Kirsher /* If there were no more bins to clear 2855adfc5217SJeff Kirsher * (bnx2x_mcast_clear_first_bin() returns -1) then we would 2856adfc5217SJeff Kirsher * clear any (0xff) bin. 2857adfc5217SJeff Kirsher * See bnx2x_mcast_validate_e2() for explanation when it may 2858adfc5217SJeff Kirsher * happen. 
2859adfc5217SJeff Kirsher */ 2860adfc5217SJeff Kirsher bin = bnx2x_mcast_clear_first_bin(o); 2861adfc5217SJeff Kirsher break; 2862adfc5217SJeff Kirsher 2863adfc5217SJeff Kirsher case BNX2X_MCAST_CMD_RESTORE: 2864adfc5217SJeff Kirsher bin = cfg_data->bin; 2865adfc5217SJeff Kirsher break; 2866adfc5217SJeff Kirsher 2867c7b7b483SYuval Mintz case BNX2X_MCAST_CMD_SET_ADD: 2868c7b7b483SYuval Mintz bin = cfg_data->bin; 2869c7b7b483SYuval Mintz BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); 2870c7b7b483SYuval Mintz break; 2871c7b7b483SYuval Mintz 2872c7b7b483SYuval Mintz case BNX2X_MCAST_CMD_SET_DEL: 2873c7b7b483SYuval Mintz bin = cfg_data->bin; 2874c7b7b483SYuval Mintz BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, bin); 2875c7b7b483SYuval Mintz break; 2876c7b7b483SYuval Mintz 2877adfc5217SJeff Kirsher default: 2878adfc5217SJeff Kirsher BNX2X_ERR("Unknown command: %d\n", cmd); 2879adfc5217SJeff Kirsher return; 2880adfc5217SJeff Kirsher } 2881adfc5217SJeff Kirsher 2882adfc5217SJeff Kirsher DP(BNX2X_MSG_SP, "%s bin %d\n", 2883adfc5217SJeff Kirsher ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ? 
2884adfc5217SJeff Kirsher "Setting" : "Clearing"), bin); 2885adfc5217SJeff Kirsher 2886adfc5217SJeff Kirsher data->rules[idx].bin_id = (u8)bin; 2887adfc5217SJeff Kirsher data->rules[idx].func_id = func_id; 2888adfc5217SJeff Kirsher data->rules[idx].engine_id = o->engine_id; 2889adfc5217SJeff Kirsher } 2890adfc5217SJeff Kirsher 2891adfc5217SJeff Kirsher /** 2892adfc5217SJeff Kirsher * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry 2893adfc5217SJeff Kirsher * 2894adfc5217SJeff Kirsher * @bp: device handle 2895adfc5217SJeff Kirsher * @o: 2896adfc5217SJeff Kirsher * @start_bin: index in the registry to start from (including) 2897adfc5217SJeff Kirsher * @rdata_idx: index in the ramrod data to start from 2898adfc5217SJeff Kirsher * 2899adfc5217SJeff Kirsher * returns last handled bin index or -1 if all bins have been handled 2900adfc5217SJeff Kirsher */ 2901adfc5217SJeff Kirsher static inline int bnx2x_mcast_handle_restore_cmd_e2( 2902adfc5217SJeff Kirsher struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin, 2903adfc5217SJeff Kirsher int *rdata_idx) 2904adfc5217SJeff Kirsher { 2905adfc5217SJeff Kirsher int cur_bin, cnt = *rdata_idx; 290686564c3fSYuval Mintz union bnx2x_mcast_config_data cfg_data = {NULL}; 2907adfc5217SJeff Kirsher 2908adfc5217SJeff Kirsher /* go through the registry and configure the bins from it */ 2909adfc5217SJeff Kirsher for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0; 2910adfc5217SJeff Kirsher cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) { 2911adfc5217SJeff Kirsher 2912adfc5217SJeff Kirsher cfg_data.bin = (u8)cur_bin; 2913adfc5217SJeff Kirsher o->set_one_rule(bp, o, cnt, &cfg_data, 2914adfc5217SJeff Kirsher BNX2X_MCAST_CMD_RESTORE); 2915adfc5217SJeff Kirsher 2916adfc5217SJeff Kirsher cnt++; 2917adfc5217SJeff Kirsher 2918adfc5217SJeff Kirsher DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin); 2919adfc5217SJeff Kirsher 2920adfc5217SJeff Kirsher /* Break if we reached the maximum 
number 2921adfc5217SJeff Kirsher * of rules. 2922adfc5217SJeff Kirsher */ 2923adfc5217SJeff Kirsher if (cnt >= o->max_cmd_len) 2924adfc5217SJeff Kirsher break; 2925adfc5217SJeff Kirsher } 2926adfc5217SJeff Kirsher 2927adfc5217SJeff Kirsher *rdata_idx = cnt; 2928adfc5217SJeff Kirsher 2929adfc5217SJeff Kirsher return cur_bin; 2930adfc5217SJeff Kirsher } 2931adfc5217SJeff Kirsher 2932adfc5217SJeff Kirsher static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp, 2933adfc5217SJeff Kirsher struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, 2934adfc5217SJeff Kirsher int *line_idx) 2935adfc5217SJeff Kirsher { 2936adfc5217SJeff Kirsher struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n; 2937adfc5217SJeff Kirsher int cnt = *line_idx; 293886564c3fSYuval Mintz union bnx2x_mcast_config_data cfg_data = {NULL}; 2939adfc5217SJeff Kirsher 2940adfc5217SJeff Kirsher list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head, 2941adfc5217SJeff Kirsher link) { 2942adfc5217SJeff Kirsher 2943adfc5217SJeff Kirsher cfg_data.mac = &pmac_pos->mac[0]; 2944adfc5217SJeff Kirsher o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); 2945adfc5217SJeff Kirsher 2946adfc5217SJeff Kirsher cnt++; 2947adfc5217SJeff Kirsher 29480f9dad10SJoe Perches DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", 29490f9dad10SJoe Perches pmac_pos->mac); 2950adfc5217SJeff Kirsher 2951adfc5217SJeff Kirsher list_del(&pmac_pos->link); 2952adfc5217SJeff Kirsher 2953adfc5217SJeff Kirsher /* Break if we reached the maximum number 2954adfc5217SJeff Kirsher * of rules. 
2955adfc5217SJeff Kirsher */ 2956adfc5217SJeff Kirsher if (cnt >= o->max_cmd_len) 2957adfc5217SJeff Kirsher break; 2958adfc5217SJeff Kirsher } 2959adfc5217SJeff Kirsher 2960adfc5217SJeff Kirsher *line_idx = cnt; 2961adfc5217SJeff Kirsher 2962adfc5217SJeff Kirsher /* if no more MACs to configure - we are done */ 2963adfc5217SJeff Kirsher if (list_empty(&cmd_pos->data.macs_head)) 2964adfc5217SJeff Kirsher cmd_pos->done = true; 2965adfc5217SJeff Kirsher } 2966adfc5217SJeff Kirsher 2967adfc5217SJeff Kirsher static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp, 2968adfc5217SJeff Kirsher struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, 2969adfc5217SJeff Kirsher int *line_idx) 2970adfc5217SJeff Kirsher { 2971adfc5217SJeff Kirsher int cnt = *line_idx; 2972adfc5217SJeff Kirsher 2973adfc5217SJeff Kirsher while (cmd_pos->data.macs_num) { 2974adfc5217SJeff Kirsher o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type); 2975adfc5217SJeff Kirsher 2976adfc5217SJeff Kirsher cnt++; 2977adfc5217SJeff Kirsher 2978adfc5217SJeff Kirsher cmd_pos->data.macs_num--; 2979adfc5217SJeff Kirsher 2980adfc5217SJeff Kirsher DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n", 2981adfc5217SJeff Kirsher cmd_pos->data.macs_num, cnt); 2982adfc5217SJeff Kirsher 2983adfc5217SJeff Kirsher /* Break if we reached the maximum 2984adfc5217SJeff Kirsher * number of rules. 
2985adfc5217SJeff Kirsher */ 2986adfc5217SJeff Kirsher if (cnt >= o->max_cmd_len) 2987adfc5217SJeff Kirsher break; 2988adfc5217SJeff Kirsher } 2989adfc5217SJeff Kirsher 2990adfc5217SJeff Kirsher *line_idx = cnt; 2991adfc5217SJeff Kirsher 2992adfc5217SJeff Kirsher /* If we cleared all bins - we are done */ 2993adfc5217SJeff Kirsher if (!cmd_pos->data.macs_num) 2994adfc5217SJeff Kirsher cmd_pos->done = true; 2995adfc5217SJeff Kirsher } 2996adfc5217SJeff Kirsher 2997adfc5217SJeff Kirsher static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp, 2998adfc5217SJeff Kirsher struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, 2999adfc5217SJeff Kirsher int *line_idx) 3000adfc5217SJeff Kirsher { 3001adfc5217SJeff Kirsher cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin, 3002adfc5217SJeff Kirsher line_idx); 3003adfc5217SJeff Kirsher 3004adfc5217SJeff Kirsher if (cmd_pos->data.next_bin < 0) 3005adfc5217SJeff Kirsher /* If o->set_restore returned -1 we are done */ 3006adfc5217SJeff Kirsher cmd_pos->done = true; 3007adfc5217SJeff Kirsher else 3008adfc5217SJeff Kirsher /* Start from the next bin next time */ 3009adfc5217SJeff Kirsher cmd_pos->data.next_bin++; 3010adfc5217SJeff Kirsher } 3011adfc5217SJeff Kirsher 3012c7b7b483SYuval Mintz static void 3013c7b7b483SYuval Mintz bnx2x_mcast_hdl_pending_set_e2_convert(struct bnx2x *bp, 3014c7b7b483SYuval Mintz struct bnx2x_mcast_obj *o, 3015c7b7b483SYuval Mintz struct bnx2x_pending_mcast_cmd *cmd_pos) 3016c7b7b483SYuval Mintz { 3017c7b7b483SYuval Mintz u64 cur[BNX2X_MCAST_VEC_SZ], req[BNX2X_MCAST_VEC_SZ]; 3018c7b7b483SYuval Mintz struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n; 3019c7b7b483SYuval Mintz struct bnx2x_mcast_bin_elem *p_item; 30203129e159SJason Baron struct bnx2x_mcast_elem_group *elem_group; 30213129e159SJason Baron int cnt = 0, mac_cnt = 0, offset = 0, i; 3022c7b7b483SYuval Mintz 3023c7b7b483SYuval Mintz memset(req, 0, sizeof(u64) * BNX2X_MCAST_VEC_SZ); 
3024c7b7b483SYuval Mintz memcpy(cur, o->registry.aprox_match.vec, 3025c7b7b483SYuval Mintz sizeof(u64) * BNX2X_MCAST_VEC_SZ); 3026c7b7b483SYuval Mintz 3027c7b7b483SYuval Mintz /* Fill `current' with the required set of bins to configure */ 3028c7b7b483SYuval Mintz list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head, 3029c7b7b483SYuval Mintz link) { 3030c7b7b483SYuval Mintz int bin = bnx2x_mcast_bin_from_mac(pmac_pos->mac); 3031c7b7b483SYuval Mintz 3032c7b7b483SYuval Mintz DP(BNX2X_MSG_SP, "Set contains %pM mcast MAC\n", 3033c7b7b483SYuval Mintz pmac_pos->mac); 3034c7b7b483SYuval Mintz 3035c7b7b483SYuval Mintz BIT_VEC64_SET_BIT(req, bin); 3036c7b7b483SYuval Mintz list_del(&pmac_pos->link); 3037c7b7b483SYuval Mintz mac_cnt++; 3038c7b7b483SYuval Mintz } 3039c7b7b483SYuval Mintz 3040c7b7b483SYuval Mintz /* We no longer have use for the MACs; Need to re-use memory for 3041c7b7b483SYuval Mintz * a list that will be used to configure bins. 3042c7b7b483SYuval Mintz */ 3043c7b7b483SYuval Mintz cmd_pos->set_convert = true; 3044c7b7b483SYuval Mintz INIT_LIST_HEAD(&cmd_pos->data.macs_head); 30453129e159SJason Baron elem_group = list_first_entry(&cmd_pos->group_head, 30463129e159SJason Baron struct bnx2x_mcast_elem_group, 30473129e159SJason Baron mcast_group_link); 3048c7b7b483SYuval Mintz for (i = 0; i < BNX2X_MCAST_BINS_NUM; i++) { 3049c7b7b483SYuval Mintz bool b_current = !!BIT_VEC64_TEST_BIT(cur, i); 3050c7b7b483SYuval Mintz bool b_required = !!BIT_VEC64_TEST_BIT(req, i); 3051c7b7b483SYuval Mintz 3052c7b7b483SYuval Mintz if (b_current == b_required) 3053c7b7b483SYuval Mintz continue; 3054c7b7b483SYuval Mintz 30553129e159SJason Baron p_item = &elem_group->mcast_elems[offset].bin_elem; 3056c7b7b483SYuval Mintz p_item->bin = i; 3057c7b7b483SYuval Mintz p_item->type = b_required ? 
BNX2X_MCAST_CMD_SET_ADD 3058c7b7b483SYuval Mintz : BNX2X_MCAST_CMD_SET_DEL; 3059c7b7b483SYuval Mintz list_add_tail(&p_item->link , &cmd_pos->data.macs_head); 3060c7b7b483SYuval Mintz cnt++; 30613129e159SJason Baron offset++; 30623129e159SJason Baron if (offset == MCAST_MAC_ELEMS_PER_PG) { 30633129e159SJason Baron offset = 0; 30643129e159SJason Baron elem_group = list_next_entry(elem_group, 30653129e159SJason Baron mcast_group_link); 30663129e159SJason Baron } 3067c7b7b483SYuval Mintz } 3068c7b7b483SYuval Mintz 3069c7b7b483SYuval Mintz /* We now definitely know how many commands are hiding here. 3070c7b7b483SYuval Mintz * Also need to correct the disruption we've added to guarantee this 3071c7b7b483SYuval Mintz * would be enqueued. 3072c7b7b483SYuval Mintz */ 3073c7b7b483SYuval Mintz o->total_pending_num -= (o->max_cmd_len + mac_cnt); 3074c7b7b483SYuval Mintz o->total_pending_num += cnt; 3075c7b7b483SYuval Mintz 3076c7b7b483SYuval Mintz DP(BNX2X_MSG_SP, "o->total_pending_num=%d\n", o->total_pending_num); 3077c7b7b483SYuval Mintz } 3078c7b7b483SYuval Mintz 3079c7b7b483SYuval Mintz static void 3080c7b7b483SYuval Mintz bnx2x_mcast_hdl_pending_set_e2(struct bnx2x *bp, 3081c7b7b483SYuval Mintz struct bnx2x_mcast_obj *o, 3082c7b7b483SYuval Mintz struct bnx2x_pending_mcast_cmd *cmd_pos, 3083c7b7b483SYuval Mintz int *cnt) 3084c7b7b483SYuval Mintz { 3085c7b7b483SYuval Mintz union bnx2x_mcast_config_data cfg_data = {NULL}; 3086c7b7b483SYuval Mintz struct bnx2x_mcast_bin_elem *p_item, *p_item_n; 3087c7b7b483SYuval Mintz 3088c7b7b483SYuval Mintz /* This is actually a 2-part scheme - it starts by converting the MACs 3089c7b7b483SYuval Mintz * into a list of bins to be added/removed, and correcting the numbers 3090c7b7b483SYuval Mintz * on the object. this is now allowed, as we're now sure that all 3091c7b7b483SYuval Mintz * previous configured requests have already applied. 
3092c7b7b483SYuval Mintz * The second part is actually adding rules for the newly introduced 3093c7b7b483SYuval Mintz * entries [like all the rest of the hdl_pending functions]. 3094c7b7b483SYuval Mintz */ 3095c7b7b483SYuval Mintz if (!cmd_pos->set_convert) 3096c7b7b483SYuval Mintz bnx2x_mcast_hdl_pending_set_e2_convert(bp, o, cmd_pos); 3097c7b7b483SYuval Mintz 3098c7b7b483SYuval Mintz list_for_each_entry_safe(p_item, p_item_n, &cmd_pos->data.macs_head, 3099c7b7b483SYuval Mintz link) { 3100c7b7b483SYuval Mintz cfg_data.bin = (u8)p_item->bin; 3101c7b7b483SYuval Mintz o->set_one_rule(bp, o, *cnt, &cfg_data, p_item->type); 3102c7b7b483SYuval Mintz (*cnt)++; 3103c7b7b483SYuval Mintz 3104c7b7b483SYuval Mintz list_del(&p_item->link); 3105c7b7b483SYuval Mintz 3106c7b7b483SYuval Mintz /* Break if we reached the maximum number of rules. */ 3107c7b7b483SYuval Mintz if (*cnt >= o->max_cmd_len) 3108c7b7b483SYuval Mintz break; 3109c7b7b483SYuval Mintz } 3110c7b7b483SYuval Mintz 3111c7b7b483SYuval Mintz /* if no more MACs to configure - we are done */ 3112c7b7b483SYuval Mintz if (list_empty(&cmd_pos->data.macs_head)) 3113c7b7b483SYuval Mintz cmd_pos->done = true; 3114c7b7b483SYuval Mintz } 3115c7b7b483SYuval Mintz 3116adfc5217SJeff Kirsher static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp, 3117adfc5217SJeff Kirsher struct bnx2x_mcast_ramrod_params *p) 3118adfc5217SJeff Kirsher { 3119adfc5217SJeff Kirsher struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n; 3120adfc5217SJeff Kirsher int cnt = 0; 3121adfc5217SJeff Kirsher struct bnx2x_mcast_obj *o = p->mcast_obj; 3122adfc5217SJeff Kirsher 3123adfc5217SJeff Kirsher list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head, 3124adfc5217SJeff Kirsher link) { 3125adfc5217SJeff Kirsher switch (cmd_pos->type) { 3126adfc5217SJeff Kirsher case BNX2X_MCAST_CMD_ADD: 3127adfc5217SJeff Kirsher bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt); 3128adfc5217SJeff Kirsher break; 3129adfc5217SJeff Kirsher 
		case BNX2X_MCAST_CMD_DEL:
			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
							   &cnt);
			break;

		case BNX2X_MCAST_CMD_SET:
			bnx2x_mcast_hdl_pending_set_e2(bp, o, cmd_pos, &cnt);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
			return -EINVAL;
		}

		/* If the command has been completed - remove it from the list
		 * and free the memory
		 */
		if (cmd_pos->done) {
			list_del(&cmd_pos->link);
			bnx2x_free_groups(&cmd_pos->group_head);
			kfree(cmd_pos);
		}

		/* Break if we reached the maximum number of rules */
		if (cnt >= o->max_cmd_len)
			break;
	}

	return cnt;
}

/**
 * bnx2x_mcast_hdl_add - fill ADD rules for every MAC in the ramrod params
 *
 * @bp:		device handle
 * @o:		multicast object
 * @p:		multicast ramrod parameters (source of the MAC list)
 * @line_idx:	in/out index of the next free line in the ramrod data
 *
 * Emits one set_one_rule() ADD per list element and advances *line_idx
 * by the number of rules written.
 */
static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
	int *line_idx)
{
	struct bnx2x_mcast_list_elem *mlist_pos;
	union bnx2x_mcast_config_data cfg_data = {NULL};
	int cnt = *line_idx;

	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
		cfg_data.mac = mlist_pos->mac;
		o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   mlist_pos->mac);
	}

	*line_idx = cnt;
}

/**
 * bnx2x_mcast_hdl_del - fill DEL rules for p->mcast_list_len entries
 *
 * @bp:		device handle
 * @o:		multicast object
 * @p:		multicast ramrod parameters (only mcast_list_len is used)
 * @line_idx:	in/out index of the next free line in the ramrod data
 *
 * DEL rules carry no MAC data (cfg_data is NULL).
 */
static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
	int *line_idx)
{
	int cnt = *line_idx, i;

	for (i = 0; i < p->mcast_list_len; i++) {
		o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);

		cnt++;

		DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
		   p->mcast_list_len - i - 1);
	}

	*line_idx = cnt;
}

/**
 * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command
 *
 * @bp:		device handle
 * @p:		multicast ramrod parameters
 * @cmd:	command to handle (ADD/DEL/RESTORE)
 * @start_cnt:	first line in the ramrod data that may be used
 *
 * This function is called iff there is enough place for the current command in
 * the ramrod data.
 * Returns number of lines filled in the ramrod data in total.
 */
static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
					struct bnx2x_mcast_ramrod_params *p,
					enum bnx2x_mcast_cmd cmd,
					int start_cnt)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int cnt = start_cnt;

	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);

	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
		break;

	case BNX2X_MCAST_CMD_DEL:
		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		o->hdl_restore(bp, o, 0, &cnt);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* The current command has been handled */
	p->mcast_list_len = 0;

	return cnt;
}

/**
 * bnx2x_mcast_validate_e2 - check a command and pre-account registry changes
 *
 * @bp:		device handle
 * @p:		multicast ramrod parameters
 * @cmd:	command to validate
 *
 * Updates the (approximate) registry size and the pending-work counters
 * before the command is actually executed; bnx2x_mcast_revert_e2() undoes
 * this bookkeeping on failure.
 */
static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break */

	/* RESTORE command will restore the entire multicast configuration */
	case BNX2X_MCAST_CMD_RESTORE:
		/* Here we set the approximate amount of work to do, which in
		 * fact may be only less as some MACs in postponed ADD
		 * command(s) scheduled before this command may fall into
		 * the same bin and the actual number of bins set in the
		 * registry would be less than we estimated here. See
		 * bnx2x_mcast_set_one_rule_e2() for further details.
		 */
		p->mcast_list_len = reg_sz;
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* Here we assume that all new MACs will fall into new bins.
		 * However we will correct the real registry size after we
		 * handle all pending commands.
		 */
		o->set_registry_size(o, reg_sz + p->mcast_list_len);
		break;

	case BNX2X_MCAST_CMD_SET:
		/* We can only learn how many commands would actually be used
		 * when this is being configured. So for now, simply guarantee
		 * the command will be enqueued [to refrain from adding logic
		 * that handles this and THEN learns it needs several ramrods].
		 * Just like for ADD/Cont, the mcast_list_len might be an over
		 * estimation; or even more so, since we don't take into
		 * account the possibility of removal of existing bins.
		 */
		o->set_registry_size(o, reg_sz + p->mcast_list_len);
		o->total_pending_num += o->max_cmd_len;
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* Increase the total number of MACs pending to be configured */
	o->total_pending_num += p->mcast_list_len;

	return 0;
}

/**
 * bnx2x_mcast_revert_e2 - undo the bookkeeping done by validate_e2
 *
 * @bp:		device handle
 * @p:		multicast ramrod parameters
 * @old_num_bins: registry size to restore
 * @cmd:	the command that was validated
 */
static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
				  struct bnx2x_mcast_ramrod_params *p,
				  int old_num_bins,
				  enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	o->set_registry_size(o, old_num_bins);
	o->total_pending_num -= p->mcast_list_len;

	/* SET also pre-charged a whole chunk in validate - undo that too */
	if (cmd == BNX2X_MCAST_CMD_SET)
		o->total_pending_num -= o->max_cmd_len;
}

/**
 * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
 *
 * @bp:		device handle
 * @p:		multicast ramrod parameters
 * @len:	number of rules to handle
 */
static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
					struct bnx2x_mcast_ramrod_params *p,
					u8 len)
{
	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);

	/* echo encodes the SW CID plus the pending-filter state so the
	 * completion handler can identify this ramrod.
	 */
	data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
					(BNX2X_FILTER_MCAST_PENDING <<
					 BNX2X_SWCID_SHIFT));
	data->header.rule_cnt = len;
}

/**
 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
 *
 * @bp:		device handle
 * @o:		multicast object
 *
 * Recalculate the actual number of set bins in the registry using Brian
 * Kernighan's algorithm: it's execution complexity is as a number of set bins.
 *
 * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
 */
static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
						  struct bnx2x_mcast_obj *o)
{
	int i, cnt = 0;
	u64 elem;

	for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
		elem = o->registry.aprox_match.vec[i];
		/* each iteration clears the lowest set bit of elem */
		for (; elem; cnt++)
			elem &= elem - 1;
	}

	o->set_registry_size(o, cnt);

	return 0;
}

/**
 * bnx2x_mcast_setup_e2 - build and post a multicast-rules ramrod (57712+)
 *
 * @bp:		device handle
 * @p:		multicast ramrod parameters
 * @cmd:	the current (non-pending) command, if any
 *
 * Drains pending commands into the ramrod data, then the current command
 * if room remains, and posts the ramrod unless CLEAR_ONLY was requested
 * or no rules were produced.
 * Returns 1 when a ramrod completion is pending, 0 when done, negative
 * on error.
 */
static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
	int cnt = 0, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there was enough room in ramrod
	 * data for all pending commands and for the current
	 * command. Otherwise the current command would have been added
	 * to the pending commands and p->mcast_list_len would have been
	 * zeroed.
	 */
	if (p->mcast_list_len > 0)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);

	/* We've pulled out some MACs - update the total number of
	 * outstanding.
	 */
	o->total_pending_num -= cnt;

	/* send a ramrod */
	WARN_ON(o->total_pending_num < 0);
	WARN_ON(cnt > o->max_cmd_len);

	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);

	/* Update a registry size if there are no more pending operations.
	 *
	 * We don't want to change the value of the registry size if there are
	 * pending operations because we want it to always be equal to the
	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
	 * set bins after the last requested operation in order to properly
	 * evaluate the size of the next DEL/RESTORE operation.
	 *
	 * Note that we update the registry itself during command(s) handling
	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
	 * with a limited amount of update commands (per MAC/bin) and we don't
	 * know in this scope what the actual state of bins configuration is
	 * going to be after this ramrod.
	 */
	if (!o->total_pending_num)
		bnx2x_mcast_refresh_registry_e2(bp, o);

	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately. due to the SET option, it's also
	 * possible that after evaluating the differences there's no need for
	 * a ramrod. In that case, we can skip it as well.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags) || !cnt) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/* No need for an explicit memory barrier here as long as we
		 * ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read. If the memory read is removed we will have to put a
		 * full memory barrier there (inside bnx2x_sp_post()).
		 */

		/* Send a ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
				   raw->cid, U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}

/**
 * bnx2x_mcast_validate_e1h - validate a command for 57711
 *
 * @bp:		device handle
 * @p:		multicast ramrod parameters
 * @cmd:	command to validate
 *
 * SET is not supported on this chip; DEL/RESTORE are flagged as a single
 * unit of work.
 */
static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
				    struct bnx2x_mcast_ramrod_params *p,
				    enum bnx2x_mcast_cmd cmd)
{
	if (cmd == BNX2X_MCAST_CMD_SET) {
		BNX2X_ERR("Can't use `set' command on e1h!\n");
		return -EINVAL;
	}

	/* Mark, that there is a work to do */
	if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
		p->mcast_list_len = 1;

	return 0;
}

/**
 * bnx2x_mcast_revert_e1h - no-op revert for 57711
 *
 * validate_e1h keeps no bookkeeping state, so there is nothing to undo.
 */
static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   int old_num_bins,
				   enum bnx2x_mcast_cmd cmd)
{
	/* Do nothing */
}

/* Set bit @bit in the u32-array approximate-match filter @filter */
#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
	do { \
		(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
	} while (0)
/**
 * bnx2x_mcast_hdl_add_e1h - set filter bits for every MAC in the list (57711)
 *
 * @bp:		device handle
 * @o:		multicast object (its registry is updated as well)
 * @p:		multicast ramrod parameters
 * @mc_filter:	the MC_HASH_SIZE-word approximate-match filter to fill
 */
static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
					   struct bnx2x_mcast_obj *o,
					   struct bnx2x_mcast_ramrod_params *p,
					   u32 *mc_filter)
{
	struct bnx2x_mcast_list_elem *mlist_pos;
	int bit;

	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
		bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
		   mlist_pos->mac, bit);

		/* bookkeeping... */
		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
				  bit);
	}
}

/**
 * bnx2x_mcast_hdl_restore_e1h - rebuild the filter from the registry (57711)
 *
 * @bp:		device handle
 * @o:		multicast object (registry is the source of bins)
 * @p:		multicast ramrod parameters
 * @mc_filter:	the filter words to fill
 */
static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
	u32 *mc_filter)
{
	int bit;

	for (bit = bnx2x_mcast_get_next_bin(o, 0);
	     bit >= 0;
	     bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
		DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
	}
}

/* On 57711 we write the multicast MACs' approximate match
 * table by directly into the TSTORM's internal RAM. So we don't
 * really need to handle any tricks to make it work.
 */
static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
				 struct bnx2x_mcast_ramrod_params *p,
				 enum bnx2x_mcast_cmd cmd)
{
	int i;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;

	/* If CLEAR_ONLY has been requested - clear the registry
	 * and clear a pending bit.
	 */
	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		u32 mc_filter[MC_HASH_SIZE] = {0};

		/* Set the multicast filter bits before writing it into
		 * the internal memory.
		 */
		switch (cmd) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
			break;

		case BNX2X_MCAST_CMD_DEL:
			DP(BNX2X_MSG_SP,
			   "Invalidating multicast MACs configuration\n");

			/* clear the registry */
			memset(o->registry.aprox_match.vec, 0,
			       sizeof(o->registry.aprox_match.vec));
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd);
			return -EINVAL;
		}

		/* Set the mcast filter in the internal memory */
		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
	} else
		/* clear the registry */
		memset(o->registry.aprox_match.vec, 0,
		       sizeof(o->registry.aprox_match.vec));

	/* We are done */
	r->clear_pending(r);

	return 0;
}

/**
 * bnx2x_mcast_validate_e1 - validate a command for 57710
 *
 * @bp:		device handle
 * @p:		multicast ramrod parameters
 * @cmd:	command to validate
 *
 * SET is not supported; ADD is bounded by the number of CAM entries.
 */
static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	if (cmd == BNX2X_MCAST_CMD_SET) {
		BNX2X_ERR("Can't use `set' command on e1!\n");
		return -EINVAL;
	}

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break */

	/* RESTORE command will restore the entire multicast configuration */
	case BNX2X_MCAST_CMD_RESTORE:
		p->mcast_list_len = reg_sz;
		DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
		   cmd, p->mcast_list_len);
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* Multicast MACs on 57710 are configured as unicast MACs and
		 * there is only a limited number of CAM entries for that
		 * matter.
		 */
		if (p->mcast_list_len > o->max_cmd_len) {
			BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
				  o->max_cmd_len);
			return -EINVAL;
		}
		/* Every configured MAC should be cleared if DEL command is
		 * called. Only the last ADD command is relevant as long as
		 * every ADD commands overrides the previous configuration.
		 */
		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
		if (p->mcast_list_len > 0)
			o->set_registry_size(o, p->mcast_list_len);

		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* We want to ensure that commands are executed one by one for 57710.
	 * Therefore each none-empty command will consume o->max_cmd_len.
	 */
	if (p->mcast_list_len)
		o->total_pending_num += o->max_cmd_len;

	return 0;
}

/**
 * bnx2x_mcast_revert_e1 - undo the bookkeeping done by validate_e1
 *
 * @bp:		device handle
 * @p:		multicast ramrod parameters
 * @old_num_macs: registry size to restore
 * @cmd:	the command that was validated
 */
static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
				  struct bnx2x_mcast_ramrod_params *p,
				  int old_num_macs,
				  enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	o->set_registry_size(o, old_num_macs);

	/* If current command hasn't been handled yet and we are
	 * here means that it's meant to be dropped and we have to
	 * update the number of outstanding MACs accordingly.
	 */
	if (p->mcast_list_len)
		o->total_pending_num -= o->max_cmd_len;
}

/**
 * bnx2x_mcast_set_one_rule_e1 - write one CAM entry into the ramrod data
 *
 * @bp:		device handle
 * @o:		multicast object
 * @idx:	index of the config_table entry to fill
 * @cfg_data:	holds the MAC to configure (ADD/RESTORE only)
 * @cmd:	the command being built
 *
 * DEL entries are left as previously invalidated by the caller.
 */
static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
					struct bnx2x_mcast_obj *o, int idx,
					union bnx2x_mcast_config_data *cfg_data,
					enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *r = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(r->rdata);

	/* copy mac */
	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
				      &data->config_table[idx].middle_mac_addr,
				      &data->config_table[idx].lsb_mac_addr,
				      cfg_data->mac);

		data->config_table[idx].vlan_id = 0;
		data->config_table[idx].pf_id = r->func_id;
		data->config_table[idx].clients_bit_vector =
			cpu_to_le32(1 << r->cl_id);

		SET_FLAG(data->config_table[idx].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
	}
}

/**
 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
 *
 * @bp:		device handle
 * @p:		multicast ramrod parameters
 * @len:	number of rules to handle
 */
static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
					struct bnx2x_mcast_ramrod_params *p,
					u8 len)
{
	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(r->rdata);

	/* per-function slice in the CAM; emulation chips use a smaller one */
	u8 offset = (CHIP_REV_IS_SLOW(bp) ?
		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
		     BNX2X_MAX_MULTICAST*(1 + r->func_id));

	data->hdr.offset = offset;
	data->hdr.client_id = cpu_to_le16(0xff);
	data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				     (BNX2X_FILTER_MCAST_PENDING <<
				      BNX2X_SWCID_SHIFT));
	data->hdr.length = len;
}

/**
 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
 *
 * @bp:		device handle
 * @o:		multicast object
 * @start_idx:	index in the registry to start from
 * @rdata_idx:	index in the ramrod data to start from
 *
 * restore command for 57710 is like all other commands - always a stand alone
 * command - start_idx and rdata_idx will always be 0. This function will always
 * succeed.
 * returns -1 to comply with 57712 variant.
 */
static inline int bnx2x_mcast_handle_restore_cmd_e1(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
	int *rdata_idx)
{
	struct bnx2x_mcast_mac_elem *elem;
	int i = 0;
	union bnx2x_mcast_config_data cfg_data = {NULL};

	/* go through the registry and configure the MACs from it. */
	list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
		cfg_data.mac = &elem->mac[0];
		o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);

		i++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   cfg_data.mac);
	}

	*rdata_idx = i;

	return -1;
}

/**
 * bnx2x_mcast_handle_pending_cmds_e1 - execute the first pending command
 *
 * @bp:		device handle
 * @p:		multicast ramrod parameters
 *
 * 57710 executes exactly one command per ramrod, so only the head of the
 * pending list is consumed and freed here.
 * Returns the number of ramrod-data lines used, or -EINVAL.
 */
static inline int bnx2x_mcast_handle_pending_cmds_e1(
	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos;
	struct bnx2x_mcast_mac_elem *pmac_pos;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	union bnx2x_mcast_config_data cfg_data = {NULL};
	int cnt = 0;

	/* If nothing to be done - return */
	if (list_empty(&o->pending_cmds_head))
		return 0;

	/* Handle the first command */
	cmd_pos = list_first_entry(&o->pending_cmds_head,
				   struct bnx2x_pending_mcast_cmd, link);

	switch (cmd_pos->type) {
	case BNX2X_MCAST_CMD_ADD:
		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
			cfg_data.mac = &pmac_pos->mac[0];
			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

			cnt++;

			DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
			   pmac_pos->mac);
		}
		break;

	case BNX2X_MCAST_CMD_DEL:
		cnt = cmd_pos->data.macs_num;
		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		o->hdl_restore(bp, o, 0, &cnt);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
		return -EINVAL;
	}

	list_del(&cmd_pos->link);
	bnx2x_free_groups(&cmd_pos->group_head);
	kfree(cmd_pos);

	return cnt;
}

/**
 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3800adfc5217SJeff Kirsher * 3801adfc5217SJeff Kirsher * @fw_hi: 3802adfc5217SJeff Kirsher * @fw_mid: 3803adfc5217SJeff Kirsher * @fw_lo: 3804adfc5217SJeff Kirsher * @mac: 3805adfc5217SJeff Kirsher */ 3806adfc5217SJeff Kirsher static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, 3807adfc5217SJeff Kirsher __le16 *fw_lo, u8 *mac) 3808adfc5217SJeff Kirsher { 3809adfc5217SJeff Kirsher mac[1] = ((u8 *)fw_hi)[0]; 3810adfc5217SJeff Kirsher mac[0] = ((u8 *)fw_hi)[1]; 3811adfc5217SJeff Kirsher mac[3] = ((u8 *)fw_mid)[0]; 3812adfc5217SJeff Kirsher mac[2] = ((u8 *)fw_mid)[1]; 3813adfc5217SJeff Kirsher mac[5] = ((u8 *)fw_lo)[0]; 3814adfc5217SJeff Kirsher mac[4] = ((u8 *)fw_lo)[1]; 3815adfc5217SJeff Kirsher } 3816adfc5217SJeff Kirsher 3817adfc5217SJeff Kirsher /** 3818adfc5217SJeff Kirsher * bnx2x_mcast_refresh_registry_e1 - 3819adfc5217SJeff Kirsher * 3820adfc5217SJeff Kirsher * @bp: device handle 3821adfc5217SJeff Kirsher * @cnt: 3822adfc5217SJeff Kirsher * 3823adfc5217SJeff Kirsher * Check the ramrod data first entry flag to see if it's a DELETE or ADD command 3824adfc5217SJeff Kirsher * and update the registry correspondingly: if ADD - allocate a memory and add 3825adfc5217SJeff Kirsher * the entries to the registry (list), if DELETE - clear the registry and free 3826adfc5217SJeff Kirsher * the memory. 
 */
static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
						  struct bnx2x_mcast_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct bnx2x_mcast_mac_elem *elem;
	struct mac_configuration_cmd *data =
			(struct mac_configuration_cmd *)(raw->rdata);

	/* If first entry contains a SET bit - the command was ADD,
	 * otherwise - DEL_ALL
	 */
	if (GET_FLAG(data->config_table[0].flags,
		     MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
		/* 'len' comes from the ramrod buffer just sent to FW */
		int i, len = data->hdr.length;

		/* Break if it was a RESTORE command */
		if (!list_empty(&o->registry.exact_match.macs))
			return 0;

		/* All 'len' registry entries live in one contiguous chunk */
		elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
		if (!elem) {
			BNX2X_ERR("Failed to allocate registry memory\n");
			return -ENOMEM;
		}

		for (i = 0; i < len; i++, elem++) {
			bnx2x_get_fw_mac_addr(
				&data->config_table[i].msb_mac_addr,
				&data->config_table[i].middle_mac_addr,
				&data->config_table[i].lsb_mac_addr,
				elem->mac);
			DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
			   elem->mac);
			list_add_tail(&elem->link,
				      &o->registry.exact_match.macs);
		}
	} else {
		elem = list_first_entry(&o->registry.exact_match.macs,
					struct bnx2x_mcast_mac_elem, link);
		DP(BNX2X_MSG_SP, "Deleting a registry\n");
		/* The ADD branch above allocates the whole registry as a
		 * single kcalloc() chunk and list_add_tail()s the entries in
		 * order, so the first list element's address is the chunk
		 * address and this one kfree() releases every entry.
		 */
		kfree(elem);
		INIT_LIST_HEAD(&o->registry.exact_match.macs);
	}

	return 0;
}

/**
 * bnx2x_mcast_setup_e1 - send a 57710 multicast configuration ramrod
 *
 * @bp:  device handle
 * @p:   multicast ramrod parameters
 * @cmd: command to execute if nothing was pending
 *
 * Fills the exact-match MAC configuration ramrod data from pending commands
 * (or from @cmd if none were pending), refreshes the driver registry and
 * posts the ramrod unless RAMROD_DRV_CLR_ONLY was requested.
 *
 * Returns 1 when a ramrod completion is pending, 0 on a CLR_ONLY run,
 * negative errno on failure.
 */
static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(raw->rdata);
	int cnt = 0, i, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* First set all entries as invalid */
	for (i = 0; i < o->max_cmd_len ; i++)
		SET_FLAG(data->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	/* Handle pending commands first */
	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there were no pending commands */
	if (!cnt)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);

	/* For 57710 every command has o->max_cmd_len length to ensure that
	 * commands are done one at a time.
	 */
	o->total_pending_num -= o->max_cmd_len;

	/* send a ramrod */

	WARN_ON(cnt > o->max_cmd_len);

	/* Set ramrod header (in particular, a number of entries to update) */
	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);

	/* update a registry: we need the registry contents to be always up
	 * to date in order to be able to execute a RESTORE opcode. Here
	 * we use the fact that for 57710 we sent one command at a time
	 * hence we may take the registry update out of the command handling
	 * and do it in a simpler way here.
	 */
	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
	if (rc)
		return rc;

	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/* No need for an explicit memory barrier here as long as we
		 * ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read. If the memory read is removed we will have to put a
		 * full memory barrier there (inside bnx2x_sp_post()).
		 */

		/* Send a ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
				   U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}

/* Registry size accessor for the exact-match (57710) registry flavour */
static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
{
	return o->registry.exact_match.num_macs_set;
}

/* Registry size accessor for the approximate-match (bins) flavour */
static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
{
	return o->registry.aprox_match.num_bins_set;
}

static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
						int n)
{
	o->registry.exact_match.num_macs_set = n;
}

static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
						int n)
{
	o->registry.aprox_match.num_bins_set = n;
}

/**
 * bnx2x_config_mcast - run a multicast configuration command
 *
 * @bp:  device handle
 * @p:   multicast ramrod parameters
 * @cmd: command to execute (ADD/DEL/RESTORE/...)
 *
 * Validates the command, enqueues it when it cannot be completed in this
 * iteration, and triggers the chip-specific config_mcast handler when no
 * ramrod is already pending.  On failure the registry size is reverted to
 * its value from before the call.
 */
int bnx2x_config_mcast(struct bnx2x *bp,
		       struct bnx2x_mcast_ramrod_params *p,
		       enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc = 0, old_reg_size;

	/* This is needed to recover number of currently configured mcast macs
	 * in case of failure.
	 */
	old_reg_size = o->get_registry_size(o);

	/* Do some calculations and checks */
	rc = o->validate(bp, p, cmd);
	if (rc)
		return rc;

	/* Return if there is no work to do */
	if ((!p->mcast_list_len) && (!o->check_sched(o)))
		return 0;

	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
	   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);

	/* Enqueue the current command to the pending list if we can't complete
	 * it in the current iteration
	 */
	if (r->check_pending(r) ||
	    ((o->max_cmd_len > 0) &&
	     (o->total_pending_num > o->max_cmd_len))) {
		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
		if (rc < 0)
			goto error_exit1;

		/* As long as the current command is in a command list we
		 * don't need to handle it separately.
		 */
		p->mcast_list_len = 0;
	}

	if (!r->check_pending(r)) {

		/* Set 'pending' state */
		r->set_pending(r);

		/* Configure the new classification in the chip */
		rc = o->config_mcast(bp, p, cmd);
		if (rc < 0)
			goto error_exit2;

		/* Wait for a ramrod completion if was requested */
		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
			rc = o->wait_comp(bp, o);
	}

	return rc;

error_exit2:
	r->clear_pending(r);

error_exit1:
	o->revert(bp, p, old_reg_size, cmd);

	return rc;
}

/* Clear the SCHEDULED bit; barriers order it against surrounding accesses */
static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
{
	smp_mb__before_atomic();
	clear_bit(o->sched_state, o->raw.pstate);
	smp_mb__after_atomic();
}

/* Set the SCHEDULED bit; barriers order it against surrounding accesses */
static void
bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
{
	smp_mb__before_atomic();
	set_bit(o->sched_state, o->raw.pstate);
	smp_mb__after_atomic();
}

static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
{
	return !!test_bit(o->sched_state, o->raw.pstate);
}

/* True when either a ramrod is in flight or a command is scheduled */
static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
{
	return o->raw.check_pending(&o->raw) || o->check_sched(o);
}

/**
 * bnx2x_init_mcast_obj - initialize a multicast object's callbacks
 *
 * @bp:            device handle
 * @mcast_obj:     object to initialize
 * @mcast_cl_id:   multicast client id
 * @mcast_cid:     multicast connection id
 * @func_id:       PCI function id
 * @engine_id:     HW engine id
 * @rdata:         ramrod data buffer
 * @rdata_mapping: DMA address of @rdata
 * @state:         pending-state bit to use
 * @pstate:        pointer to the state bitmap
 * @type:          object type (per-function credit handling, etc.)
 *
 * Wires up the chip-revision specific implementation: exact-match CAM for
 * 57710 (E1), port-split approximate match for 57711 (E1H), and the E2+
 * flavour otherwise.
 */
void bnx2x_init_mcast_obj(struct bnx2x *bp,
			  struct bnx2x_mcast_obj *mcast_obj,
			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
			  int state, unsigned long *pstate, bnx2x_obj_type type)
{
	memset(mcast_obj, 0, sizeof(*mcast_obj));

	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
			   rdata, rdata_mapping, state, pstate, type);

	mcast_obj->engine_id = engine_id;

	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);

	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
	mcast_obj->check_sched = bnx2x_mcast_check_sched;
	mcast_obj->set_sched = bnx2x_mcast_set_sched;
	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;

	if (CHIP_IS_E1(bp)) {
		mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore       =
			bnx2x_mcast_handle_restore_cmd_e1;
		mcast_obj->check_pending     = bnx2x_mcast_check_pending;

		if (CHIP_REV_IS_SLOW(bp))
			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
		else
			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;

		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
		mcast_obj->validate          = bnx2x_mcast_validate_e1;
		mcast_obj->revert            = bnx2x_mcast_revert_e1;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_exact;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_exact;

		/* 57710 is the only chip that uses the exact match for mcast
		 * at the moment.
		 */
		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);

	} else if (CHIP_IS_E1H(bp)) {
		mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
		mcast_obj->enqueue_cmd   = NULL;
		mcast_obj->hdl_restore   = NULL;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;

		/* 57711 doesn't send a ramrod, so it has unlimited credit
		 * for one command.
		 */
		mcast_obj->max_cmd_len       = -1;
		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = NULL;
		mcast_obj->validate          = bnx2x_mcast_validate_e1h;
		mcast_obj->revert            = bnx2x_mcast_revert_e1h;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	} else {
		mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore       =
			bnx2x_mcast_handle_restore_cmd_e2;
		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
		/* TODO: There should be a proper HSI define for this number!!!
		 */
		mcast_obj->max_cmd_len       = 16;
		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
		mcast_obj->validate          = bnx2x_mcast_validate_e2;
		mcast_obj->revert            = bnx2x_mcast_revert_e2;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	}
}

/*************************** Credit handling **********************************/

/**
 * atomic_add_ifless - add if the result is less than a given value.
 *
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...if (v + a) is less than u.
 *
 * returns true if (v + a) was less than u, and false otherwise.
4160adfc5217SJeff Kirsher * 4161adfc5217SJeff Kirsher */ 4162adfc5217SJeff Kirsher static inline bool __atomic_add_ifless(atomic_t *v, int a, int u) 4163adfc5217SJeff Kirsher { 4164adfc5217SJeff Kirsher int c, old; 4165adfc5217SJeff Kirsher 4166adfc5217SJeff Kirsher c = atomic_read(v); 4167adfc5217SJeff Kirsher for (;;) { 4168adfc5217SJeff Kirsher if (unlikely(c + a >= u)) 4169adfc5217SJeff Kirsher return false; 4170adfc5217SJeff Kirsher 4171adfc5217SJeff Kirsher old = atomic_cmpxchg((v), c, c + a); 4172adfc5217SJeff Kirsher if (likely(old == c)) 4173adfc5217SJeff Kirsher break; 4174adfc5217SJeff Kirsher c = old; 4175adfc5217SJeff Kirsher } 4176adfc5217SJeff Kirsher 4177adfc5217SJeff Kirsher return true; 4178adfc5217SJeff Kirsher } 4179adfc5217SJeff Kirsher 4180adfc5217SJeff Kirsher /** 4181adfc5217SJeff Kirsher * atomic_dec_ifmoe - dec if the result is more or equal than a given value. 4182adfc5217SJeff Kirsher * 4183adfc5217SJeff Kirsher * @v: pointer of type atomic_t 4184adfc5217SJeff Kirsher * @a: the amount to dec from v... 4185adfc5217SJeff Kirsher * @u: ...if (v - a) is more or equal than u. 4186adfc5217SJeff Kirsher * 4187adfc5217SJeff Kirsher * returns true if (v - a) was more or equal than u, and false 4188adfc5217SJeff Kirsher * otherwise. 
 */
static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
{
	int c, old;

	/* cmpxchg retry loop: only decrement while the result stays >= @u */
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c - a < u))
			return false;

		old = atomic_cmpxchg((v), c, c - a);
		if (likely(old == c))
			break;
		/* Raced with another updater - retry with the new value */
		c = old;
	}

	return true;
}

/* Take @cnt credits from the pool; fails when not enough credit is left.
 * smp_mb() pairs the credit update with the callers' state accesses.
 */
static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
{
	bool rc;

	smp_mb();
	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
	smp_mb();

	return rc;
}

/* Return @cnt credits to the pool; fails on over-fill beyond pool_sz */
static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
{
	bool rc;

	smp_mb();

	/* Don't let to refill if credit + cnt > pool_sz */
	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);

	smp_mb();

	return rc;
}

/* Read the current credit count (with a barrier to get a fresh value) */
static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
{
	int cur_credit;

	smp_mb();
	cur_credit = atomic_read(&o->credit);

	return cur_credit;
}

/* get/put stub used when the pool is unlimited (negative credit) */
static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
					  int cnt)
{
	return true;
}

/* Allocate a free CAM entry from the pool mirror bitmap.
 * On success *offset holds the absolute CAM offset (base + bit index).
 */
static bool bnx2x_credit_pool_get_entry(
	struct bnx2x_credit_pool_obj *o,
	int *offset)
{
	int idx, vec, i;

	*offset = -1;

	/* Find "internal cam-offset" then add to base for this object... */
	for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {

		/* Skip the current vector if there are no free entries in it */
		if (!o->pool_mirror[vec])
			continue;

		/* If we've got here we are going to find a free entry */
		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
		      i < BIT_VEC64_ELEM_SZ; idx++, i++)

			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
				/* Got one!! */
				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
				*offset = o->base_pool_offset + idx;
				return true;
			}
	}

	return false;
}

/* Release a CAM entry back to the pool; rejects offsets outside the pool */
static bool bnx2x_credit_pool_put_entry(
	struct bnx2x_credit_pool_obj *o,
	int offset)
{
	if (offset < o->base_pool_offset)
		return false;

	offset -= o->base_pool_offset;

	if (offset >= o->pool_sz)
		return false;

	/* Return the entry to the pool */
	BIT_VEC64_SET_BIT(o->pool_mirror, offset);

	return true;
}

/* Entry stubs used when CAM entry handling is disabled (base < 0) */
static bool bnx2x_credit_pool_put_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int offset)
{
	return true;
}

static bool bnx2x_credit_pool_get_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int *offset)
{
	*offset = -1;
	return true;
}
/**
 * bnx2x_init_credit_pool - initialize credit pool internals.
 *
 * @p:      pool object to initialize
 * @base: Base entry in the CAM to use.
 * @credit: pool size.
 *
 * If base is negative no CAM entries handling will be performed.
 * If credit is negative pool operations will always succeed (unlimited pool).
 *
 */
void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
			    int base, int credit)
{
	/* Zero the object first */
	memset(p, 0, sizeof(*p));

	/* Set the table to all 1s */
	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));

	/* Init a pool as full */
	atomic_set(&p->credit, credit);

	/* The total poll size */
	p->pool_sz = credit;

	p->base_pool_offset = base;

	/* Commit the change */
	smp_mb();

	p->check = bnx2x_credit_pool_check;

	/* if pool credit is negative - disable the checks */
	if (credit >= 0) {
		p->put      = bnx2x_credit_pool_put;
		p->get      = bnx2x_credit_pool_get;
		p->put_entry = bnx2x_credit_pool_put_entry;
		p->get_entry = bnx2x_credit_pool_get_entry;
	} else {
		p->put      = bnx2x_credit_pool_always_true;
		p->get      = bnx2x_credit_pool_always_true;
		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
	}

	/* If base is negative - disable entries handling */
	if (base < 0) {
		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
	}
}

/**
 * bnx2x_init_mac_credit_pool - initialize the MAC CAM credit pool
 *
 * @bp:       device handle
 * @p:        pool object to initialize
 * @func_id:  PCI function id (used to place this function's CAM window)
 * @func_num: number of active functions sharing the CAM
 *
 * Splits the MAC CAM between functions according to chip revision:
 * E1 reserves part of the per-function CAM half for multicast, E1H splits
 * the port CAM between active functions, E2+ uses per-PF credit with no
 * CAM entry handling (base -1).
 */
void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
				struct bnx2x_credit_pool_obj *p, u8 func_id,
				u8 func_num)
{
/* TODO: this will be defined in consts as well... */
#define BNX2X_CAM_SIZE_EMUL 5

	int cam_sz;

	if (CHIP_IS_E1(bp)) {
		/* In E1, Multicast is saved in cam... */
		if (!CHIP_REV_IS_SLOW(bp))
			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
		else
			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;

		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);

	} else if (CHIP_IS_E1H(bp)) {
		/* CAM credit is equaly divided between all active functions
		 * on the PORT!.
		 */
		if ((func_num > 0)) {
			if (!CHIP_REV_IS_SLOW(bp))
				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
			else
				cam_sz = BNX2X_CAM_SIZE_EMUL;
			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
		} else {
			/* this should never happen! Block MAC operations. */
			bnx2x_init_credit_pool(p, 0, 0);
		}
	} else {

		/* CAM credit is equaly divided between all active functions
		 * on the PATH.
		 */
		if (func_num > 0) {
			if (!CHIP_REV_IS_SLOW(bp))
				cam_sz = PF_MAC_CREDIT_E2(bp, func_num);
			else
				cam_sz = BNX2X_CAM_SIZE_EMUL;

			/* No need for CAM entries handling for 57712 and
			 * newer.
			 */
			bnx2x_init_credit_pool(p, -1, cam_sz);
		} else {
			/* this should never happen! Block MAC operations. */
			bnx2x_init_credit_pool(p, 0, 0);
		}
	}
}

/**
 * bnx2x_init_vlan_credit_pool - initialize the VLAN credit pool
 *
 * @bp:       device handle
 * @p:        pool object to initialize
 * @func_id:  PCI function id
 * @func_num: number of active functions sharing the credit
 *
 * 57710/57711 have no VLAN credit in HW (unlimited pool); E2+ divides the
 * VLAN credit between active functions with no CAM entry handling.
 */
void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
				 struct bnx2x_credit_pool_obj *p,
				 u8 func_id,
				 u8 func_num)
{
	if (CHIP_IS_E1x(bp)) {
		/* There is no VLAN credit in HW on 57710 and 57711 only
		 * MAC / MAC-VLAN can be set
		 */
		bnx2x_init_credit_pool(p, 0, -1);
	} else {
		/* CAM credit is equally divided between all active functions
		 * on the PATH.
		 */
		if (func_num > 0) {
			int credit = PF_VLAN_CREDIT_E2(bp, func_num);

			bnx2x_init_credit_pool(p, -1/*unused for E2*/, credit);
		} else
			/* this should never happen! Block VLAN operations. */
			bnx2x_init_credit_pool(p, 0, 0);
	}
}

/****************** RSS Configuration ******************/
/**
 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
 *
 * @bp:	driver handle
 * @p:  pointer to rss configuration
 *
 * Prints it when NETIF_MSG_IFUP debug level is configured.
4451adfc5217SJeff Kirsher */ 4452adfc5217SJeff Kirsher static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp, 4453adfc5217SJeff Kirsher struct bnx2x_config_rss_params *p) 4454adfc5217SJeff Kirsher { 4455adfc5217SJeff Kirsher int i; 4456adfc5217SJeff Kirsher 4457adfc5217SJeff Kirsher DP(BNX2X_MSG_SP, "Setting indirection table to:\n"); 4458adfc5217SJeff Kirsher DP(BNX2X_MSG_SP, "0x0000: "); 4459adfc5217SJeff Kirsher for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { 4460adfc5217SJeff Kirsher DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]); 4461adfc5217SJeff Kirsher 4462adfc5217SJeff Kirsher /* Print 4 bytes in a line */ 4463adfc5217SJeff Kirsher if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) && 4464adfc5217SJeff Kirsher (((i + 1) & 0x3) == 0)) { 4465adfc5217SJeff Kirsher DP_CONT(BNX2X_MSG_SP, "\n"); 4466adfc5217SJeff Kirsher DP(BNX2X_MSG_SP, "0x%04x: ", i + 1); 4467adfc5217SJeff Kirsher } 4468adfc5217SJeff Kirsher } 4469adfc5217SJeff Kirsher 4470adfc5217SJeff Kirsher DP_CONT(BNX2X_MSG_SP, "\n"); 4471adfc5217SJeff Kirsher } 4472adfc5217SJeff Kirsher 4473adfc5217SJeff Kirsher /** 4474adfc5217SJeff Kirsher * bnx2x_setup_rss - configure RSS 4475adfc5217SJeff Kirsher * 4476adfc5217SJeff Kirsher * @bp: device handle 4477adfc5217SJeff Kirsher * @p: rss configuration 4478adfc5217SJeff Kirsher * 4479adfc5217SJeff Kirsher * sends on UPDATE ramrod for that matter. 
4480adfc5217SJeff Kirsher */ 4481adfc5217SJeff Kirsher static int bnx2x_setup_rss(struct bnx2x *bp, 4482adfc5217SJeff Kirsher struct bnx2x_config_rss_params *p) 4483adfc5217SJeff Kirsher { 4484adfc5217SJeff Kirsher struct bnx2x_rss_config_obj *o = p->rss_obj; 4485adfc5217SJeff Kirsher struct bnx2x_raw_obj *r = &o->raw; 4486adfc5217SJeff Kirsher struct eth_rss_update_ramrod_data *data = 4487adfc5217SJeff Kirsher (struct eth_rss_update_ramrod_data *)(r->rdata); 4488e42780b6SDmitry Kravkov u16 caps = 0; 4489adfc5217SJeff Kirsher u8 rss_mode = 0; 4490adfc5217SJeff Kirsher int rc; 4491adfc5217SJeff Kirsher 4492adfc5217SJeff Kirsher memset(data, 0, sizeof(*data)); 4493adfc5217SJeff Kirsher 4494adfc5217SJeff Kirsher DP(BNX2X_MSG_SP, "Configuring RSS\n"); 4495adfc5217SJeff Kirsher 4496adfc5217SJeff Kirsher /* Set an echo field */ 449786564c3fSYuval Mintz data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | 449886564c3fSYuval Mintz (r->state << BNX2X_SWCID_SHIFT)); 4499adfc5217SJeff Kirsher 4500adfc5217SJeff Kirsher /* RSS mode */ 4501adfc5217SJeff Kirsher if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags)) 4502adfc5217SJeff Kirsher rss_mode = ETH_RSS_MODE_DISABLED; 4503adfc5217SJeff Kirsher else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags)) 4504adfc5217SJeff Kirsher rss_mode = ETH_RSS_MODE_REGULAR; 4505adfc5217SJeff Kirsher 4506adfc5217SJeff Kirsher data->rss_mode = rss_mode; 4507adfc5217SJeff Kirsher 4508adfc5217SJeff Kirsher DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode); 4509adfc5217SJeff Kirsher 4510adfc5217SJeff Kirsher /* RSS capabilities */ 4511adfc5217SJeff Kirsher if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags)) 4512e42780b6SDmitry Kravkov caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; 4513adfc5217SJeff Kirsher 4514adfc5217SJeff Kirsher if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags)) 4515e42780b6SDmitry Kravkov caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; 4516adfc5217SJeff Kirsher 45175d317c6aSMerav Sicron if (test_bit(BNX2X_RSS_IPV4_UDP, 
&p->rss_flags)) 4518e42780b6SDmitry Kravkov caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; 45195d317c6aSMerav Sicron 4520adfc5217SJeff Kirsher if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) 4521e42780b6SDmitry Kravkov caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; 4522adfc5217SJeff Kirsher 4523adfc5217SJeff Kirsher if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags)) 4524e42780b6SDmitry Kravkov caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; 4525adfc5217SJeff Kirsher 45265d317c6aSMerav Sicron if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags)) 4527e42780b6SDmitry Kravkov caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; 4528e42780b6SDmitry Kravkov 452928311f8eSYuval Mintz if (test_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags)) 453028311f8eSYuval Mintz caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY; 453128311f8eSYuval Mintz 453228311f8eSYuval Mintz if (test_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags)) 453328311f8eSYuval Mintz caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY; 453428311f8eSYuval Mintz 453528311f8eSYuval Mintz if (test_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags)) 453628311f8eSYuval Mintz caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY; 4537e42780b6SDmitry Kravkov 453856daf66dSYuval Mintz /* RSS keys */ 453956daf66dSYuval Mintz if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { 4540d682d2bdSEric Dumazet u8 *dst = (u8 *)(data->rss_key) + sizeof(data->rss_key); 4541d682d2bdSEric Dumazet const u8 *src = (const u8 *)p->rss_key; 4542d682d2bdSEric Dumazet int i; 4543d682d2bdSEric Dumazet 4544d682d2bdSEric Dumazet /* Apparently, bnx2x reads this array in reverse order 4545d682d2bdSEric Dumazet * We need to byte swap rss_key to comply with Toeplitz specs. 
4546d682d2bdSEric Dumazet */ 4547d682d2bdSEric Dumazet for (i = 0; i < sizeof(data->rss_key); i++) 4548d682d2bdSEric Dumazet *--dst = *src++; 4549d682d2bdSEric Dumazet 455056daf66dSYuval Mintz caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; 455156daf66dSYuval Mintz } 455256daf66dSYuval Mintz 4553e42780b6SDmitry Kravkov data->capabilities = cpu_to_le16(caps); 45545d317c6aSMerav Sicron 4555adfc5217SJeff Kirsher /* Hashing mask */ 4556adfc5217SJeff Kirsher data->rss_result_mask = p->rss_result_mask; 4557adfc5217SJeff Kirsher 4558adfc5217SJeff Kirsher /* RSS engine ID */ 4559adfc5217SJeff Kirsher data->rss_engine_id = o->engine_id; 4560adfc5217SJeff Kirsher 4561adfc5217SJeff Kirsher DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id); 4562adfc5217SJeff Kirsher 4563adfc5217SJeff Kirsher /* Indirection table */ 4564adfc5217SJeff Kirsher memcpy(data->indirection_table, p->ind_table, 4565adfc5217SJeff Kirsher T_ETH_INDIRECTION_TABLE_SIZE); 4566adfc5217SJeff Kirsher 4567adfc5217SJeff Kirsher /* Remember the last configuration */ 4568adfc5217SJeff Kirsher memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); 4569adfc5217SJeff Kirsher 4570adfc5217SJeff Kirsher /* Print the indirection table */ 4571adfc5217SJeff Kirsher if (netif_msg_ifup(bp)) 4572adfc5217SJeff Kirsher bnx2x_debug_print_ind_table(bp, p); 4573adfc5217SJeff Kirsher 457414a94ebdSMichal Kalderon /* No need for an explicit memory barrier here as long as we 457514a94ebdSMichal Kalderon * ensure the ordering of writing to the SPQ element 4576adfc5217SJeff Kirsher * and updating of the SPQ producer which involves a memory 457714a94ebdSMichal Kalderon * read. If the memory read is removed we will have to put a 457814a94ebdSMichal Kalderon * full memory barrier there (inside bnx2x_sp_post()). 
4579adfc5217SJeff Kirsher */ 4580adfc5217SJeff Kirsher 4581adfc5217SJeff Kirsher /* Send a ramrod */ 4582adfc5217SJeff Kirsher rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid, 4583adfc5217SJeff Kirsher U64_HI(r->rdata_mapping), 4584adfc5217SJeff Kirsher U64_LO(r->rdata_mapping), 4585adfc5217SJeff Kirsher ETH_CONNECTION_TYPE); 4586adfc5217SJeff Kirsher 4587adfc5217SJeff Kirsher if (rc < 0) 4588adfc5217SJeff Kirsher return rc; 4589adfc5217SJeff Kirsher 4590adfc5217SJeff Kirsher return 1; 4591adfc5217SJeff Kirsher } 4592adfc5217SJeff Kirsher 4593adfc5217SJeff Kirsher void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, 4594adfc5217SJeff Kirsher u8 *ind_table) 4595adfc5217SJeff Kirsher { 4596adfc5217SJeff Kirsher memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table)); 4597adfc5217SJeff Kirsher } 4598adfc5217SJeff Kirsher 4599adfc5217SJeff Kirsher int bnx2x_config_rss(struct bnx2x *bp, 4600adfc5217SJeff Kirsher struct bnx2x_config_rss_params *p) 4601adfc5217SJeff Kirsher { 4602adfc5217SJeff Kirsher int rc; 4603adfc5217SJeff Kirsher struct bnx2x_rss_config_obj *o = p->rss_obj; 4604adfc5217SJeff Kirsher struct bnx2x_raw_obj *r = &o->raw; 4605adfc5217SJeff Kirsher 4606adfc5217SJeff Kirsher /* Do nothing if only driver cleanup was requested */ 46075b622918SMichal Kalderon if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { 46085b622918SMichal Kalderon DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n", 46095b622918SMichal Kalderon p->ramrod_flags); 4610adfc5217SJeff Kirsher return 0; 46115b622918SMichal Kalderon } 4612adfc5217SJeff Kirsher 4613adfc5217SJeff Kirsher r->set_pending(r); 4614adfc5217SJeff Kirsher 4615adfc5217SJeff Kirsher rc = o->config_rss(bp, p); 4616adfc5217SJeff Kirsher if (rc < 0) { 4617adfc5217SJeff Kirsher r->clear_pending(r); 4618adfc5217SJeff Kirsher return rc; 4619adfc5217SJeff Kirsher } 4620adfc5217SJeff Kirsher 4621adfc5217SJeff Kirsher if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) 4622adfc5217SJeff 
Kirsher rc = r->wait_comp(bp, r); 4623adfc5217SJeff Kirsher 4624adfc5217SJeff Kirsher return rc; 4625adfc5217SJeff Kirsher } 4626adfc5217SJeff Kirsher 4627adfc5217SJeff Kirsher void bnx2x_init_rss_config_obj(struct bnx2x *bp, 4628adfc5217SJeff Kirsher struct bnx2x_rss_config_obj *rss_obj, 4629adfc5217SJeff Kirsher u8 cl_id, u32 cid, u8 func_id, u8 engine_id, 4630adfc5217SJeff Kirsher void *rdata, dma_addr_t rdata_mapping, 4631adfc5217SJeff Kirsher int state, unsigned long *pstate, 4632adfc5217SJeff Kirsher bnx2x_obj_type type) 4633adfc5217SJeff Kirsher { 4634adfc5217SJeff Kirsher bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata, 4635adfc5217SJeff Kirsher rdata_mapping, state, pstate, type); 4636adfc5217SJeff Kirsher 4637adfc5217SJeff Kirsher rss_obj->engine_id = engine_id; 4638adfc5217SJeff Kirsher rss_obj->config_rss = bnx2x_setup_rss; 4639adfc5217SJeff Kirsher } 4640adfc5217SJeff Kirsher 4641adfc5217SJeff Kirsher /********************** Queue state object ***********************************/ 4642adfc5217SJeff Kirsher 4643adfc5217SJeff Kirsher /** 4644adfc5217SJeff Kirsher * bnx2x_queue_state_change - perform Queue state change transition 4645adfc5217SJeff Kirsher * 4646adfc5217SJeff Kirsher * @bp: device handle 4647adfc5217SJeff Kirsher * @params: parameters to perform the transition 4648adfc5217SJeff Kirsher * 4649adfc5217SJeff Kirsher * returns 0 in case of successfully completed transition, negative error 4650adfc5217SJeff Kirsher * code in case of failure, positive (EBUSY) value if there is a completion 4651adfc5217SJeff Kirsher * to that is still pending (possible only if RAMROD_COMP_WAIT is 4652adfc5217SJeff Kirsher * not set in params->ramrod_flags for asynchronous commands). 
4653adfc5217SJeff Kirsher * 4654adfc5217SJeff Kirsher */ 4655adfc5217SJeff Kirsher int bnx2x_queue_state_change(struct bnx2x *bp, 4656adfc5217SJeff Kirsher struct bnx2x_queue_state_params *params) 4657adfc5217SJeff Kirsher { 4658adfc5217SJeff Kirsher struct bnx2x_queue_sp_obj *o = params->q_obj; 4659adfc5217SJeff Kirsher int rc, pending_bit; 4660adfc5217SJeff Kirsher unsigned long *pending = &o->pending; 4661adfc5217SJeff Kirsher 4662adfc5217SJeff Kirsher /* Check that the requested transition is legal */ 466304c46736SYuval Mintz rc = o->check_transition(bp, o, params); 466404c46736SYuval Mintz if (rc) { 466504c46736SYuval Mintz BNX2X_ERR("check transition returned an error. rc %d\n", rc); 4666adfc5217SJeff Kirsher return -EINVAL; 466704c46736SYuval Mintz } 4668adfc5217SJeff Kirsher 4669adfc5217SJeff Kirsher /* Set "pending" bit */ 467004c46736SYuval Mintz DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending); 4671adfc5217SJeff Kirsher pending_bit = o->set_pending(o, params); 467204c46736SYuval Mintz DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending); 4673adfc5217SJeff Kirsher 4674adfc5217SJeff Kirsher /* Don't send a command if only driver cleanup was requested */ 4675adfc5217SJeff Kirsher if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) 4676adfc5217SJeff Kirsher o->complete_cmd(bp, o, pending_bit); 4677adfc5217SJeff Kirsher else { 4678adfc5217SJeff Kirsher /* Send a ramrod */ 4679adfc5217SJeff Kirsher rc = o->send_cmd(bp, params); 4680adfc5217SJeff Kirsher if (rc) { 4681adfc5217SJeff Kirsher o->next_state = BNX2X_Q_STATE_MAX; 4682adfc5217SJeff Kirsher clear_bit(pending_bit, pending); 46834e857c58SPeter Zijlstra smp_mb__after_atomic(); 4684adfc5217SJeff Kirsher return rc; 4685adfc5217SJeff Kirsher } 4686adfc5217SJeff Kirsher 4687adfc5217SJeff Kirsher if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { 4688adfc5217SJeff Kirsher rc = o->wait_comp(bp, o, pending_bit); 4689adfc5217SJeff Kirsher if (rc) 4690adfc5217SJeff Kirsher return rc; 4691adfc5217SJeff 
Kirsher 4692adfc5217SJeff Kirsher return 0; 4693adfc5217SJeff Kirsher } 4694adfc5217SJeff Kirsher } 4695adfc5217SJeff Kirsher 4696adfc5217SJeff Kirsher return !!test_bit(pending_bit, pending); 4697adfc5217SJeff Kirsher } 4698adfc5217SJeff Kirsher 4699adfc5217SJeff Kirsher static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj, 4700adfc5217SJeff Kirsher struct bnx2x_queue_state_params *params) 4701adfc5217SJeff Kirsher { 4702adfc5217SJeff Kirsher enum bnx2x_queue_cmd cmd = params->cmd, bit; 4703adfc5217SJeff Kirsher 4704adfc5217SJeff Kirsher /* ACTIVATE and DEACTIVATE commands are implemented on top of 4705adfc5217SJeff Kirsher * UPDATE command. 4706adfc5217SJeff Kirsher */ 4707adfc5217SJeff Kirsher if ((cmd == BNX2X_Q_CMD_ACTIVATE) || 4708adfc5217SJeff Kirsher (cmd == BNX2X_Q_CMD_DEACTIVATE)) 4709adfc5217SJeff Kirsher bit = BNX2X_Q_CMD_UPDATE; 4710adfc5217SJeff Kirsher else 4711adfc5217SJeff Kirsher bit = cmd; 4712adfc5217SJeff Kirsher 4713adfc5217SJeff Kirsher set_bit(bit, &obj->pending); 4714adfc5217SJeff Kirsher return bit; 4715adfc5217SJeff Kirsher } 4716adfc5217SJeff Kirsher 4717adfc5217SJeff Kirsher static int bnx2x_queue_wait_comp(struct bnx2x *bp, 4718adfc5217SJeff Kirsher struct bnx2x_queue_sp_obj *o, 4719adfc5217SJeff Kirsher enum bnx2x_queue_cmd cmd) 4720adfc5217SJeff Kirsher { 4721adfc5217SJeff Kirsher return bnx2x_state_wait(bp, cmd, &o->pending); 4722adfc5217SJeff Kirsher } 4723adfc5217SJeff Kirsher 4724adfc5217SJeff Kirsher /** 4725adfc5217SJeff Kirsher * bnx2x_queue_comp_cmd - complete the state change command. 4726adfc5217SJeff Kirsher * 4727adfc5217SJeff Kirsher * @bp: device handle 4728adfc5217SJeff Kirsher * @o: 4729adfc5217SJeff Kirsher * @cmd: 4730adfc5217SJeff Kirsher * 4731adfc5217SJeff Kirsher * Checks that the arrived completion is expected. 
4732adfc5217SJeff Kirsher */ 4733adfc5217SJeff Kirsher static int bnx2x_queue_comp_cmd(struct bnx2x *bp, 4734adfc5217SJeff Kirsher struct bnx2x_queue_sp_obj *o, 4735adfc5217SJeff Kirsher enum bnx2x_queue_cmd cmd) 4736adfc5217SJeff Kirsher { 4737adfc5217SJeff Kirsher unsigned long cur_pending = o->pending; 4738adfc5217SJeff Kirsher 4739adfc5217SJeff Kirsher if (!test_and_clear_bit(cmd, &cur_pending)) { 474051c1a580SMerav Sicron BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n", 474151c1a580SMerav Sicron cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], 4742adfc5217SJeff Kirsher o->state, cur_pending, o->next_state); 4743adfc5217SJeff Kirsher return -EINVAL; 4744adfc5217SJeff Kirsher } 4745adfc5217SJeff Kirsher 4746adfc5217SJeff Kirsher if (o->next_tx_only >= o->max_cos) 474716a5fd92SYuval Mintz /* >= because tx only must always be smaller than cos since the 474802582e9bSMasanari Iida * primary connection supports COS 0 4749adfc5217SJeff Kirsher */ 4750adfc5217SJeff Kirsher BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d", 4751adfc5217SJeff Kirsher o->next_tx_only, o->max_cos); 4752adfc5217SJeff Kirsher 475351c1a580SMerav Sicron DP(BNX2X_MSG_SP, 475451c1a580SMerav Sicron "Completing command %d for queue %d, setting state to %d\n", 475551c1a580SMerav Sicron cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state); 4756adfc5217SJeff Kirsher 4757adfc5217SJeff Kirsher if (o->next_tx_only) /* print num tx-only if any exist */ 475894f05b0fSJoe Perches DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n", 4759adfc5217SJeff Kirsher o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only); 4760adfc5217SJeff Kirsher 4761adfc5217SJeff Kirsher o->state = o->next_state; 4762adfc5217SJeff Kirsher o->num_tx_only = o->next_tx_only; 4763adfc5217SJeff Kirsher o->next_state = BNX2X_Q_STATE_MAX; 4764adfc5217SJeff Kirsher 4765adfc5217SJeff Kirsher /* It's important that o->state and o->next_state are 4766adfc5217SJeff Kirsher * updated before o->pending. 
4767adfc5217SJeff Kirsher */ 4768adfc5217SJeff Kirsher wmb(); 4769adfc5217SJeff Kirsher 4770adfc5217SJeff Kirsher clear_bit(cmd, &o->pending); 47714e857c58SPeter Zijlstra smp_mb__after_atomic(); 4772adfc5217SJeff Kirsher 4773adfc5217SJeff Kirsher return 0; 4774adfc5217SJeff Kirsher } 4775adfc5217SJeff Kirsher 4776adfc5217SJeff Kirsher static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp, 4777adfc5217SJeff Kirsher struct bnx2x_queue_state_params *cmd_params, 4778adfc5217SJeff Kirsher struct client_init_ramrod_data *data) 4779adfc5217SJeff Kirsher { 4780adfc5217SJeff Kirsher struct bnx2x_queue_setup_params *params = &cmd_params->params.setup; 4781adfc5217SJeff Kirsher 4782adfc5217SJeff Kirsher /* Rx data */ 4783adfc5217SJeff Kirsher 4784adfc5217SJeff Kirsher /* IPv6 TPA supported for E2 and above only */ 4785adfc5217SJeff Kirsher data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, ¶ms->flags) * 4786adfc5217SJeff Kirsher CLIENT_INIT_RX_DATA_TPA_EN_IPV6; 4787adfc5217SJeff Kirsher } 4788adfc5217SJeff Kirsher 4789adfc5217SJeff Kirsher static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, 4790adfc5217SJeff Kirsher struct bnx2x_queue_sp_obj *o, 4791adfc5217SJeff Kirsher struct bnx2x_general_setup_params *params, 4792adfc5217SJeff Kirsher struct client_init_general_data *gen_data, 4793adfc5217SJeff Kirsher unsigned long *flags) 4794adfc5217SJeff Kirsher { 4795adfc5217SJeff Kirsher gen_data->client_id = o->cl_id; 4796adfc5217SJeff Kirsher 4797adfc5217SJeff Kirsher if (test_bit(BNX2X_Q_FLG_STATS, flags)) { 4798adfc5217SJeff Kirsher gen_data->statistics_counter_id = 4799adfc5217SJeff Kirsher params->stat_id; 4800adfc5217SJeff Kirsher gen_data->statistics_en_flg = 1; 4801adfc5217SJeff Kirsher gen_data->statistics_zero_flg = 4802adfc5217SJeff Kirsher test_bit(BNX2X_Q_FLG_ZERO_STATS, flags); 4803adfc5217SJeff Kirsher } else 4804adfc5217SJeff Kirsher gen_data->statistics_counter_id = 4805adfc5217SJeff Kirsher DISABLE_STATISTIC_COUNTER_ID_VALUE; 4806adfc5217SJeff Kirsher 
4807adfc5217SJeff Kirsher gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags); 4808adfc5217SJeff Kirsher gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags); 4809adfc5217SJeff Kirsher gen_data->sp_client_id = params->spcl_id; 4810adfc5217SJeff Kirsher gen_data->mtu = cpu_to_le16(params->mtu); 4811adfc5217SJeff Kirsher gen_data->func_id = o->func_id; 4812adfc5217SJeff Kirsher 4813adfc5217SJeff Kirsher gen_data->cos = params->cos; 4814adfc5217SJeff Kirsher 4815adfc5217SJeff Kirsher gen_data->traffic_type = 4816adfc5217SJeff Kirsher test_bit(BNX2X_Q_FLG_FCOE, flags) ? 4817adfc5217SJeff Kirsher LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; 4818adfc5217SJeff Kirsher 481902dc4025SYuval Mintz gen_data->fp_hsi_ver = params->fp_hsi; 4820e42780b6SDmitry Kravkov 482194f05b0fSJoe Perches DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n", 4822adfc5217SJeff Kirsher gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg); 4823adfc5217SJeff Kirsher } 4824adfc5217SJeff Kirsher 4825adfc5217SJeff Kirsher static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o, 4826adfc5217SJeff Kirsher struct bnx2x_txq_setup_params *params, 4827adfc5217SJeff Kirsher struct client_init_tx_data *tx_data, 4828adfc5217SJeff Kirsher unsigned long *flags) 4829adfc5217SJeff Kirsher { 4830adfc5217SJeff Kirsher tx_data->enforce_security_flg = 4831adfc5217SJeff Kirsher test_bit(BNX2X_Q_FLG_TX_SEC, flags); 4832adfc5217SJeff Kirsher tx_data->default_vlan = 4833adfc5217SJeff Kirsher cpu_to_le16(params->default_vlan); 4834adfc5217SJeff Kirsher tx_data->default_vlan_flg = 4835adfc5217SJeff Kirsher test_bit(BNX2X_Q_FLG_DEF_VLAN, flags); 4836adfc5217SJeff Kirsher tx_data->tx_switching_flg = 4837adfc5217SJeff Kirsher test_bit(BNX2X_Q_FLG_TX_SWITCH, flags); 4838adfc5217SJeff Kirsher tx_data->anti_spoofing_flg = 4839adfc5217SJeff Kirsher test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); 4840a3348722SBarak Witkowski tx_data->force_default_pri_flg = 4841a3348722SBarak Witkowski 
test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags); 4842e42780b6SDmitry Kravkov tx_data->refuse_outband_vlan_flg = 4843e42780b6SDmitry Kravkov test_bit(BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, flags); 4844e287a75cSDmitry Kravkov tx_data->tunnel_lso_inc_ip_id = 4845e287a75cSDmitry Kravkov test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags); 484691226790SDmitry Kravkov tx_data->tunnel_non_lso_pcsum_location = 4847e42780b6SDmitry Kravkov test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT : 4848e42780b6SDmitry Kravkov CSUM_ON_BD; 484991226790SDmitry Kravkov 4850adfc5217SJeff Kirsher tx_data->tx_status_block_id = params->fw_sb_id; 4851adfc5217SJeff Kirsher tx_data->tx_sb_index_number = params->sb_cq_index; 4852adfc5217SJeff Kirsher tx_data->tss_leading_client_id = params->tss_leading_cl_id; 4853adfc5217SJeff Kirsher 4854adfc5217SJeff Kirsher tx_data->tx_bd_page_base.lo = 4855adfc5217SJeff Kirsher cpu_to_le32(U64_LO(params->dscr_map)); 4856adfc5217SJeff Kirsher tx_data->tx_bd_page_base.hi = 4857adfc5217SJeff Kirsher cpu_to_le32(U64_HI(params->dscr_map)); 4858adfc5217SJeff Kirsher 4859adfc5217SJeff Kirsher /* Don't configure any Tx switching mode during queue SETUP */ 4860adfc5217SJeff Kirsher tx_data->state = 0; 4861adfc5217SJeff Kirsher } 4862adfc5217SJeff Kirsher 4863adfc5217SJeff Kirsher static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o, 4864adfc5217SJeff Kirsher struct rxq_pause_params *params, 4865adfc5217SJeff Kirsher struct client_init_rx_data *rx_data) 4866adfc5217SJeff Kirsher { 4867adfc5217SJeff Kirsher /* flow control data */ 4868adfc5217SJeff Kirsher rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo); 4869adfc5217SJeff Kirsher rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi); 4870adfc5217SJeff Kirsher rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo); 4871adfc5217SJeff Kirsher rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi); 4872adfc5217SJeff Kirsher rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo); 
4873adfc5217SJeff Kirsher rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi); 4874adfc5217SJeff Kirsher rx_data->rx_cos_mask = cpu_to_le16(params->pri_map); 4875adfc5217SJeff Kirsher } 4876adfc5217SJeff Kirsher 4877adfc5217SJeff Kirsher static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o, 4878adfc5217SJeff Kirsher struct bnx2x_rxq_setup_params *params, 4879adfc5217SJeff Kirsher struct client_init_rx_data *rx_data, 4880adfc5217SJeff Kirsher unsigned long *flags) 4881adfc5217SJeff Kirsher { 4882adfc5217SJeff Kirsher rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) * 4883adfc5217SJeff Kirsher CLIENT_INIT_RX_DATA_TPA_EN_IPV4; 4884621b4d66SDmitry Kravkov rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) * 4885621b4d66SDmitry Kravkov CLIENT_INIT_RX_DATA_TPA_MODE; 4886adfc5217SJeff Kirsher rx_data->vmqueue_mode_en_flg = 0; 4887adfc5217SJeff Kirsher 4888adfc5217SJeff Kirsher rx_data->cache_line_alignment_log_size = 4889adfc5217SJeff Kirsher params->cache_line_log; 4890adfc5217SJeff Kirsher rx_data->enable_dynamic_hc = 4891adfc5217SJeff Kirsher test_bit(BNX2X_Q_FLG_DHC, flags); 4892adfc5217SJeff Kirsher rx_data->max_sges_for_packet = params->max_sges_pkt; 4893adfc5217SJeff Kirsher rx_data->client_qzone_id = params->cl_qzone_id; 4894adfc5217SJeff Kirsher rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz); 4895adfc5217SJeff Kirsher 4896adfc5217SJeff Kirsher /* Always start in DROP_ALL mode */ 4897adfc5217SJeff Kirsher rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | 4898adfc5217SJeff Kirsher CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); 4899adfc5217SJeff Kirsher 4900adfc5217SJeff Kirsher /* We don't set drop flags */ 4901adfc5217SJeff Kirsher rx_data->drop_ip_cs_err_flg = 0; 4902adfc5217SJeff Kirsher rx_data->drop_tcp_cs_err_flg = 0; 4903adfc5217SJeff Kirsher rx_data->drop_ttl0_flg = 0; 4904adfc5217SJeff Kirsher rx_data->drop_udp_cs_err_flg = 0; 4905adfc5217SJeff Kirsher rx_data->inner_vlan_removal_enable_flg = 4906adfc5217SJeff 
Kirsher test_bit(BNX2X_Q_FLG_VLAN, flags); 4907adfc5217SJeff Kirsher rx_data->outer_vlan_removal_enable_flg = 4908adfc5217SJeff Kirsher test_bit(BNX2X_Q_FLG_OV, flags); 4909adfc5217SJeff Kirsher rx_data->status_block_id = params->fw_sb_id; 4910adfc5217SJeff Kirsher rx_data->rx_sb_index_number = params->sb_cq_index; 4911adfc5217SJeff Kirsher rx_data->max_tpa_queues = params->max_tpa_queues; 4912adfc5217SJeff Kirsher rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz); 4913adfc5217SJeff Kirsher rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz); 4914adfc5217SJeff Kirsher rx_data->bd_page_base.lo = 4915adfc5217SJeff Kirsher cpu_to_le32(U64_LO(params->dscr_map)); 4916adfc5217SJeff Kirsher rx_data->bd_page_base.hi = 4917adfc5217SJeff Kirsher cpu_to_le32(U64_HI(params->dscr_map)); 4918adfc5217SJeff Kirsher rx_data->sge_page_base.lo = 4919adfc5217SJeff Kirsher cpu_to_le32(U64_LO(params->sge_map)); 4920adfc5217SJeff Kirsher rx_data->sge_page_base.hi = 4921adfc5217SJeff Kirsher cpu_to_le32(U64_HI(params->sge_map)); 4922adfc5217SJeff Kirsher rx_data->cqe_page_base.lo = 4923adfc5217SJeff Kirsher cpu_to_le32(U64_LO(params->rcq_map)); 4924adfc5217SJeff Kirsher rx_data->cqe_page_base.hi = 4925adfc5217SJeff Kirsher cpu_to_le32(U64_HI(params->rcq_map)); 4926adfc5217SJeff Kirsher rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags); 4927adfc5217SJeff Kirsher 4928adfc5217SJeff Kirsher if (test_bit(BNX2X_Q_FLG_MCAST, flags)) { 4929259afa1fSYuval Mintz rx_data->approx_mcast_engine_id = params->mcast_engine_id; 4930adfc5217SJeff Kirsher rx_data->is_approx_mcast = 1; 4931adfc5217SJeff Kirsher } 4932adfc5217SJeff Kirsher 4933adfc5217SJeff Kirsher rx_data->rss_engine_id = params->rss_engine_id; 4934adfc5217SJeff Kirsher 4935adfc5217SJeff Kirsher /* silent vlan removal */ 4936adfc5217SJeff Kirsher rx_data->silent_vlan_removal_flg = 4937adfc5217SJeff Kirsher test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags); 4938adfc5217SJeff Kirsher rx_data->silent_vlan_value = 
4939adfc5217SJeff Kirsher cpu_to_le16(params->silent_removal_value); 4940adfc5217SJeff Kirsher rx_data->silent_vlan_mask = 4941adfc5217SJeff Kirsher cpu_to_le16(params->silent_removal_mask); 4942adfc5217SJeff Kirsher } 4943adfc5217SJeff Kirsher 4944adfc5217SJeff Kirsher /* initialize the general, tx and rx parts of a queue object */ 4945adfc5217SJeff Kirsher static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp, 4946adfc5217SJeff Kirsher struct bnx2x_queue_state_params *cmd_params, 4947adfc5217SJeff Kirsher struct client_init_ramrod_data *data) 4948adfc5217SJeff Kirsher { 4949adfc5217SJeff Kirsher bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, 4950adfc5217SJeff Kirsher &cmd_params->params.setup.gen_params, 4951adfc5217SJeff Kirsher &data->general, 4952adfc5217SJeff Kirsher &cmd_params->params.setup.flags); 4953adfc5217SJeff Kirsher 4954adfc5217SJeff Kirsher bnx2x_q_fill_init_tx_data(cmd_params->q_obj, 4955adfc5217SJeff Kirsher &cmd_params->params.setup.txq_params, 4956adfc5217SJeff Kirsher &data->tx, 4957adfc5217SJeff Kirsher &cmd_params->params.setup.flags); 4958adfc5217SJeff Kirsher 4959adfc5217SJeff Kirsher bnx2x_q_fill_init_rx_data(cmd_params->q_obj, 4960adfc5217SJeff Kirsher &cmd_params->params.setup.rxq_params, 4961adfc5217SJeff Kirsher &data->rx, 4962adfc5217SJeff Kirsher &cmd_params->params.setup.flags); 4963adfc5217SJeff Kirsher 4964adfc5217SJeff Kirsher bnx2x_q_fill_init_pause_data(cmd_params->q_obj, 4965adfc5217SJeff Kirsher &cmd_params->params.setup.pause_params, 4966adfc5217SJeff Kirsher &data->rx); 4967adfc5217SJeff Kirsher } 4968adfc5217SJeff Kirsher 4969adfc5217SJeff Kirsher /* initialize the general and tx parts of a tx-only queue object */ 4970adfc5217SJeff Kirsher static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp, 4971adfc5217SJeff Kirsher struct bnx2x_queue_state_params *cmd_params, 4972adfc5217SJeff Kirsher struct tx_queue_init_ramrod_data *data) 4973adfc5217SJeff Kirsher { 4974adfc5217SJeff Kirsher 
bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, 4975adfc5217SJeff Kirsher &cmd_params->params.tx_only.gen_params, 4976adfc5217SJeff Kirsher &data->general, 4977adfc5217SJeff Kirsher &cmd_params->params.tx_only.flags); 4978adfc5217SJeff Kirsher 4979adfc5217SJeff Kirsher bnx2x_q_fill_init_tx_data(cmd_params->q_obj, 4980adfc5217SJeff Kirsher &cmd_params->params.tx_only.txq_params, 4981adfc5217SJeff Kirsher &data->tx, 4982adfc5217SJeff Kirsher &cmd_params->params.tx_only.flags); 4983adfc5217SJeff Kirsher 498451c1a580SMerav Sicron DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x", 498551c1a580SMerav Sicron cmd_params->q_obj->cids[0], 498651c1a580SMerav Sicron data->tx.tx_bd_page_base.lo, 498751c1a580SMerav Sicron data->tx.tx_bd_page_base.hi); 4988adfc5217SJeff Kirsher } 4989adfc5217SJeff Kirsher 4990adfc5217SJeff Kirsher /** 4991adfc5217SJeff Kirsher * bnx2x_q_init - init HW/FW queue 4992adfc5217SJeff Kirsher * 4993adfc5217SJeff Kirsher * @bp: device handle 4994adfc5217SJeff Kirsher * @params: 4995adfc5217SJeff Kirsher * 4996adfc5217SJeff Kirsher * HW/FW initial Queue configuration: 4997adfc5217SJeff Kirsher * - HC: Rx and Tx 4998adfc5217SJeff Kirsher * - CDU context validation 4999adfc5217SJeff Kirsher * 5000adfc5217SJeff Kirsher */ 5001adfc5217SJeff Kirsher static inline int bnx2x_q_init(struct bnx2x *bp, 5002adfc5217SJeff Kirsher struct bnx2x_queue_state_params *params) 5003adfc5217SJeff Kirsher { 5004adfc5217SJeff Kirsher struct bnx2x_queue_sp_obj *o = params->q_obj; 5005adfc5217SJeff Kirsher struct bnx2x_queue_init_params *init = ¶ms->params.init; 5006adfc5217SJeff Kirsher u16 hc_usec; 5007adfc5217SJeff Kirsher u8 cos; 5008adfc5217SJeff Kirsher 5009adfc5217SJeff Kirsher /* Tx HC configuration */ 5010adfc5217SJeff Kirsher if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) && 5011adfc5217SJeff Kirsher test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) { 5012adfc5217SJeff Kirsher hc_usec = init->tx.hc_rate ? 
1000000 / init->tx.hc_rate : 0;

		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
			init->tx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
			hc_usec);
	}

	/* Rx HC configuration */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
		/* Convert an interrupt rate (ints/sec) into a usec
		 * interval; a zero rate disables coalescing.
		 */
		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;

		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
			init->rx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
			hc_usec);
	}

	/* Set CDU context validation values */
	for (cos = 0; cos < o->max_cos; cos++) {
		DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
		   o->cids[cos], cos);
		DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
		bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
	}

	/* As no ramrod is sent, complete the command immediately */
	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);

	mmiowb();
	smp_mb();

	return 0;
}

/**
 * bnx2x_q_send_setup_e1x - post a CLIENT_SETUP ramrod (E1x chips)
 *
 * @bp:		device handle
 * @params:	queue state parameters; setup data is taken from
 *		params->params.setup by the fill helper
 *
 * Fills only the chip-common part of the client init data and posts
 * the ramrod on the queue's primary CID.
 */
static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
					 struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_init_ramrod_data *rdata =
		(struct client_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */
	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}

/**
 * bnx2x_q_send_setup_e2 - post a CLIENT_SETUP ramrod (E2 and newer chips)
 *
 * @bp:		device handle
 * @params:	queue state parameters
 *
 * Same as the E1x variant, but additionally fills the E2-specific part
 * of the client init data before posting the ramrod.
 */
static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_init_ramrod_data *rdata =
		(struct client_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
	bnx2x_q_fill_setup_data_e2(bp, params, rdata);

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */
	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}

/**
 * bnx2x_q_send_setup_tx_only - post a TX_QUEUE_SETUP ramrod
 *
 * @bp:		device handle
 * @params:	queue state parameters; the CoS/CID to set up comes from
 *		params->params.tx_only
 *
 * Sets up an additional Tx-only connection (secondary CoS) on the CID
 * selected by the caller. Returns -EINVAL if cid_index exceeds the
 * number of CoS supported by this queue object.
 */
static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
					     struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct tx_queue_init_ramrod_data *rdata =
		(struct tx_queue_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
		&params->params.tx_only;
	u8 cid_index = tx_only_params->cid_index;

	/* Validate the caller-selected CID index before indexing o->cids[] */
	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
	   tx_only_params->gen_params.cos,
	   tx_only_params->gen_params.spcl_id);

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_tx_only(bp, params, rdata);

	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
	   o->cids[cid_index], rdata->general.client_id,
	   rdata->general.sp_client_id, rdata->general.cos);

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */
	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}

/* Translate generic queue-update parameters into the FW client_update
 * ramrod layout. For each updatable attribute the FW expects a value
 * flag plus a "change" flag that tells it whether to apply the value.
 */
static void bnx2x_q_fill_update_data(struct bnx2x *bp,
				     struct bnx2x_queue_sp_obj *obj,
				     struct bnx2x_queue_update_params *params,
				     struct client_update_ramrod_data *data)
{
	/* Client ID of the client to update */
	data->client_id = obj->cl_id;

	/* Function ID of the client to update */
	data->func_id = obj->func_id;

	/* Default VLAN value */
	data->default_vlan = cpu_to_le16(params->def_vlan);

	/* Inner VLAN stripping */
	data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
	data->inner_vlan_removal_change_flg =
		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
			 &params->update_flags);

	/* Outer VLAN stripping */
	data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
	data->outer_vlan_removal_change_flg =
		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
			 &params->update_flags);

	/* Drop packets that have source MAC that doesn't belong to this
	 * Queue.
	 */
	data->anti_spoofing_enable_flg =
		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
	data->anti_spoofing_change_flg =
		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);

	/* Activate/Deactivate */
	data->activate_flg =
		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
	data->activate_change_flg =
		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);

	/* Enable default VLAN */
	data->default_vlan_enable_flg =
		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
	data->default_vlan_change_flg =
		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			 &params->update_flags);

	/* silent vlan removal */
	data->silent_vlan_change_flg =
		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			 &params->update_flags);
	data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);

	/* tx switching */
	data->tx_switching_flg =
		test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, &params->update_flags);
	data->tx_switching_change_flg =
		test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
			 &params->update_flags);

	/* PTP */
	data->handle_ptp_pkts_flg =
		test_bit(BNX2X_Q_UPDATE_PTP_PKTS, &params->update_flags);
	data->handle_ptp_pkts_change_flg =
		test_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, &params->update_flags);
}

/**
 * bnx2x_q_send_update - post a CLIENT_UPDATE ramrod
 *
 * @bp:		device handle
 * @params:	queue state parameters; the update data and target CID
 *		come from params->params.update
 *
 * Returns -EINVAL if cid_index exceeds the number of CoS supported by
 * this queue object, otherwise the bnx2x_sp_post() result.
 */
static inline int bnx2x_q_send_update(struct bnx2x *bp,
				      struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_update_ramrod_data *rdata =
		(struct client_update_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_queue_update_params *update_params =
		&params->params.update;
	u8 cid_index = update_params->cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_update_data(bp, o, update_params, rdata);

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
			     o->cids[cid_index], U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}

/**
 * bnx2x_q_send_deactivate - send DEACTIVATE command
 *
 * @bp:		device handle
 * @params:	queue state parameters
 *
 * implemented using the UPDATE command.
 */
static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
					  struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_update_params *update = &params->params.update;

	memset(update, 0, sizeof(*update));

	/* Only the "change" bit is set; ACTIVATE stays clear, so the FW
	 * transitions the client to inactive.
	 */
	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);

	return bnx2x_q_send_update(bp, params);
}

/**
 * bnx2x_q_send_activate - send ACTIVATE command
 *
 * @bp:		device handle
 * @params:	queue state parameters
 *
 * implemented using the UPDATE command.
 */
static inline int bnx2x_q_send_activate(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_update_params *update = &params->params.update;

	memset(update, 0, sizeof(*update));

	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);

	return bnx2x_q_send_update(bp, params);
}

/* Translate generic TPA update parameters into the FW tpa_update ramrod
 * layout.
 */
static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *obj,
				struct bnx2x_queue_update_tpa_params *params,
				struct tpa_update_ramrod_data *data)
{
	data->client_id = obj->cl_id;
	data->complete_on_both_clients = params->complete_on_both_clients;
	data->dont_verify_rings_pause_thr_flg =
		params->dont_verify_thr;
	data->max_agg_size = cpu_to_le16(params->max_agg_sz);
	data->max_sges_for_packet = params->max_sges_pkt;
	data->max_tpa_queues = params->max_tpa_queues;
	data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
	data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
	data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
	data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
	data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
	data->tpa_mode = params->tpa_mode;
	data->update_ipv4 = params->update_ipv4;
	data->update_ipv6 = params->update_ipv6;
}

/**
 * bnx2x_q_send_update_tpa - post a TPA_UPDATE ramrod
 *
 * @bp:		device handle
 * @params:	queue state parameters; TPA data is taken from
 *		params->params.update_tpa
 *
 * Posted on the primary CID. The queue object's func_id is encoded
 * into the SPE header type so the ramrod may target a VF's client.
 */
static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
					  struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct tpa_update_ramrod_data *rdata =
		(struct tpa_update_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_queue_update_tpa_params *update_tpa_params =
		&params->params.update_tpa;
	u16 type;

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);

	/* Add the function id inside the type, so that sp post function
	 * doesn't automatically add the PF func-id, this is required
	 * for operations done by PFs on behalf of their VFs
	 */
	type = ETH_CONNECTION_TYPE |
	       ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
			     o->cids[BNX2X_PRIMARY_CID_INDEX],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), type);
}

/* Post a HALT ramrod on the queue's primary CID. */
static inline int bnx2x_q_send_halt(struct bnx2x *bp,
				    struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
			     ETH_CONNECTION_TYPE);
}

/* Post a CFC delete ramrod for the caller-selected CID. */
static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
				       struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	u8 cid_idx = params->params.cfc_del.cid_index;

	if (cid_idx >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_idx);
		return -EINVAL;
	}

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
			     o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
}

/* Post a TERMINATE ramrod for the caller-selected CID. */
static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
					 struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	u8 cid_index = params->params.terminate.cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
			     o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
}

/* Post an EMPTY ramrod on the queue's primary CID. */
static inline int bnx2x_q_send_empty(struct bnx2x *bp,
				     struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
			     ETH_CONNECTION_TYPE);
}

/* Dispatch a queue command that is handled identically on all chips. */
static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
					   struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_INIT:
		return bnx2x_q_init(bp, params);
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
		return bnx2x_q_send_setup_tx_only(bp, params);
	case BNX2X_Q_CMD_DEACTIVATE:
		return bnx2x_q_send_deactivate(bp, params);
	case BNX2X_Q_CMD_ACTIVATE:
		return bnx2x_q_send_activate(bp, params);
	case BNX2X_Q_CMD_UPDATE:
		return bnx2x_q_send_update(bp, params);
	case BNX2X_Q_CMD_UPDATE_TPA:
		return bnx2x_q_send_update_tpa(bp, params);
	case BNX2X_Q_CMD_HALT:
		return bnx2x_q_send_halt(bp, params);
	case BNX2X_Q_CMD_CFC_DEL:
		return bnx2x_q_send_cfc_del(bp, params);
	case BNX2X_Q_CMD_TERMINATE:
		return bnx2x_q_send_terminate(bp, params);
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_q_send_empty(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

/* E1x-specific command dispatch: only SETUP differs, the rest is common. */
static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
				    struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e1x(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

/* E2-and-newer command dispatch: only SETUP differs, the rest is common. */
static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
				   struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e2(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

/**
 * bnx2x_queue_chk_transition - check state machine of a regular Queue
 *
 * @bp:		device handle
 * @o:		queue state object
 * @params:	queue state parameters (command + per-command data)
 *
 * (not Forwarding)
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 * -EINVAL otherwise.
 */
static int bnx2x_queue_chk_transition(struct bnx2x *bp,
				      struct bnx2x_queue_sp_obj *o,
				      struct bnx2x_queue_state_params *params)
{
	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
	enum bnx2x_queue_cmd cmd = params->cmd;
	struct bnx2x_queue_update_params *update_params =
		 &params->params.update;
	/* next_tx_only tracks the number of Tx-only connections after this
	 * command; it only changes on SETUP_TX_ONLY / CFC_DEL paths below.
	 */
	u8 next_tx_only = o->num_tx_only;

	/* Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = BNX2X_Q_STATE_MAX;
	}

	/* Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending) {
		BNX2X_ERR("Blocking transition since pending was %lx\n",
			  o->pending);
		return -EBUSY;
	}

	switch (state) {
	case BNX2X_Q_STATE_RESET:
		if (cmd == BNX2X_Q_CMD_INIT)
			next_state = BNX2X_Q_STATE_INITIALIZED;

		break;
	case BNX2X_Q_STATE_INITIALIZED:
		if (cmd == BNX2X_Q_CMD_SETUP) {
			if (test_bit(BNX2X_Q_FLG_ACTIVE,
				     &params->params.setup.flags))
				next_state = BNX2X_Q_STATE_ACTIVE;
			else
				next_state = BNX2X_Q_STATE_INACTIVE;
		}

		break;
	case BNX2X_Q_STATE_ACTIVE:
		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
			next_state = BNX2X_Q_STATE_INACTIVE;

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_ACTIVE;

		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
			next_state = BNX2X_Q_STATE_MULTI_COS;
			next_tx_only = 1;
		}

		else if (cmd == BNX2X_Q_CMD_HALT)
			next_state = BNX2X_Q_STATE_STOPPED;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				      &update_params->update_flags))
				next_state = BNX2X_Q_STATE_INACTIVE;
			else
				next_state = BNX2X_Q_STATE_ACTIVE;
		}

		break;
	case BNX2X_Q_STATE_MULTI_COS:
		if (cmd == BNX2X_Q_CMD_TERMINATE)
			next_state = BNX2X_Q_STATE_MCOS_TERMINATED;

		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
			next_state = BNX2X_Q_STATE_MULTI_COS;
			next_tx_only = o->num_tx_only + 1;
		}

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_MULTI_COS;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				      &update_params->update_flags))
				next_state = BNX2X_Q_STATE_INACTIVE;
			else
				next_state = BNX2X_Q_STATE_MULTI_COS;
		}

		break;
	case BNX2X_Q_STATE_MCOS_TERMINATED:
		if (cmd == BNX2X_Q_CMD_CFC_DEL) {
			next_tx_only = o->num_tx_only - 1;
			if (next_tx_only == 0)
				next_state = BNX2X_Q_STATE_ACTIVE;
			else
				next_state = BNX2X_Q_STATE_MULTI_COS;
		}

		break;
	case BNX2X_Q_STATE_INACTIVE:
		if (cmd == BNX2X_Q_CMD_ACTIVATE)
			next_state = BNX2X_Q_STATE_ACTIVE;

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_INACTIVE;

		else if (cmd == BNX2X_Q_CMD_HALT)
			next_state = BNX2X_Q_STATE_STOPPED;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				     &update_params->update_flags)){
				if (o->num_tx_only == 0)
					next_state = BNX2X_Q_STATE_ACTIVE;
				else /* tx only queues exist for this queue */
					next_state = BNX2X_Q_STATE_MULTI_COS;
			} else
				next_state = BNX2X_Q_STATE_INACTIVE;
		}

		break;
	case BNX2X_Q_STATE_STOPPED:
		if (cmd == BNX2X_Q_CMD_TERMINATE)
			next_state = BNX2X_Q_STATE_TERMINATED;

		break;
	case BNX2X_Q_STATE_TERMINATED:
		if (cmd == BNX2X_Q_CMD_CFC_DEL)
			next_state = BNX2X_Q_STATE_RESET;

		break;
	default:
		BNX2X_ERR("Illegal state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != BNX2X_Q_STATE_MAX) {
		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
		   state, cmd, next_state);
		o->next_state = next_state;
		o->next_tx_only = next_tx_only;
		return 0;
	}

	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);

	return -EINVAL;
}
/**
 * bnx2x_init_queue_obj - initialize a queue state-machine object
 *
 * @bp:		device handle
 * @obj:	queue object to initialize
 * @cl_id:	client ID
 * @cids:	array of CIDs, one per traffic class
 * @cid_cnt:	number of valid entries in @cids (number of CoS)
 * @func_id:	function ID
 * @rdata:	ramrod data buffer (virtual address)
 * @rdata_mapping: DMA address of @rdata
 * @type:	queue type flags
 *
 * Zeroes the object, copies in the CID list and wires up the
 * chip-specific send_cmd implementation together with the generic
 * state-machine callbacks.
 */
void bnx2x_init_queue_obj(struct bnx2x *bp,
			  struct bnx2x_queue_sp_obj *obj,
			  u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
			  void *rdata,
			  dma_addr_t rdata_mapping, unsigned long type)
{
	memset(obj, 0, sizeof(*obj));

	/* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
	BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);

	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
	obj->max_cos = cid_cnt;
	obj->cl_id = cl_id;
	obj->func_id = func_id;
	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;
	obj->type = type;
	obj->next_state = BNX2X_Q_STATE_MAX;

	/* E1x chips use a different ramrod flavor than E2 and newer */
	if (CHIP_IS_E1x(bp))
		obj->send_cmd = bnx2x_queue_send_cmd_e1x;
	else
		obj->send_cmd = bnx2x_queue_send_cmd_e2;

	obj->check_transition = bnx2x_queue_chk_transition;

	obj->complete_cmd = bnx2x_queue_comp_cmd;
	obj->wait_comp = bnx2x_queue_wait_comp;
	obj->set_pending = bnx2x_queue_set_pending;
}

/* Map a queue's detailed HW state onto the coarse logical
 * ACTIVE/STOPPED view used by callers; -EINVAL on unknown state.
 */
int bnx2x_get_q_logical_state(struct bnx2x *bp,
			      struct bnx2x_queue_sp_obj *obj)
{
	switch (obj->state) {
	case BNX2X_Q_STATE_ACTIVE:
	case BNX2X_Q_STATE_MULTI_COS:
		return BNX2X_Q_LOGICAL_STATE_ACTIVE;
	case BNX2X_Q_STATE_RESET:
	case BNX2X_Q_STATE_INITIALIZED:
	case BNX2X_Q_STATE_MCOS_TERMINATED:
	case BNX2X_Q_STATE_INACTIVE:
	case BNX2X_Q_STATE_STOPPED:
	case BNX2X_Q_STATE_TERMINATED:
	case BNX2X_Q_STATE_FLRED:
		return BNX2X_Q_LOGICAL_STATE_STOPPED;
	default:
		return -EINVAL;
	}
}

/********************** Function state object *********************************/
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
					   struct bnx2x_func_sp_obj *o)
{
	/* in the middle of transaction - return INVALID state */
	if (o->pending)
		return BNX2X_F_STATE_MAX;

	/* Ensure the order of reading of o->pending and o->state:
	 * o->pending should be read first
	 */
	rmb();

	return o->state;
}

/* Busy-wait until the @cmd bit clears in o->pending (i.e. the
 * previously issued command has completed).
 */
static int bnx2x_func_wait_comp(struct bnx2x *bp,
				struct bnx2x_func_sp_obj *o,
				enum bnx2x_func_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}
/**
 * bnx2x_func_state_change_comp - complete the state machine transition
 *
 * @bp:		device handle
 * @o:		function state-machine object
 * @cmd:	command whose completion has arrived
 *
 * Called on state change transition. Completes the state
 * machine transition only - no HW interaction.
 *
 * Returns 0 on success, -EINVAL if @cmd was not actually pending.
 */
static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
					       struct bnx2x_func_sp_obj *o,
					       enum bnx2x_func_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	/* The arrived completion must match a command we issued */
	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, BP_FUNC(bp), o->state,
			  cur_pending, o->next_state);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP,
	   "Completing command %d for func %d, setting state to %d\n",
	   cmd, BP_FUNC(bp), o->next_state);

	o->state = o->next_state;
	o->next_state = BNX2X_F_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_atomic();

	return 0;
}

/**
 * bnx2x_func_comp_cmd - complete the state change command
 *
 * @bp:		device handle
 * @o:		function state-machine object
 * @cmd:	completed command
 *
 * Checks that the arrived completion is expected.
 */
static int bnx2x_func_comp_cmd(struct bnx2x *bp,
			       struct bnx2x_func_sp_obj *o,
			       enum bnx2x_func_cmd cmd)
{
	/* Complete the state machine part first, check if it's a
	 * legal completion.
	 */
	int rc = bnx2x_func_state_change_comp(bp, o, cmd);
	return rc;
}
/**
 * bnx2x_func_chk_transition - perform function state machine transition
 *
 * @bp:		device handle
 * @o:		function state-machine object
 * @params:	state parameters carrying the requested command
 *
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 * -EINVAL otherwise.
 */
static int bnx2x_func_chk_transition(struct bnx2x *bp,
				     struct bnx2x_func_sp_obj *o,
				     struct bnx2x_func_state_params *params)
{
	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
	enum bnx2x_func_cmd cmd = params->cmd;

	/* Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = BNX2X_F_STATE_MAX;
	}

	/* Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending)
		return -EBUSY;

	switch (state) {
	case BNX2X_F_STATE_RESET:
		if (cmd == BNX2X_F_CMD_HW_INIT)
			next_state = BNX2X_F_STATE_INITIALIZED;

		break;
	case BNX2X_F_STATE_INITIALIZED:
		if (cmd == BNX2X_F_CMD_START)
			next_state = BNX2X_F_STATE_STARTED;

		else if (cmd == BNX2X_F_CMD_HW_RESET)
			next_state = BNX2X_F_STATE_RESET;

		break;
	case BNX2X_F_STATE_STARTED:
		if (cmd == BNX2X_F_CMD_STOP)
			next_state = BNX2X_F_STATE_INITIALIZED;
		/* afex ramrods can be sent only in started mode, and only
		 * if not pending for function_stop ramrod completion
		 * for these events - next state remained STARTED.
		 */
		else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		/* Switch_update ramrod can be sent in either started or
		 * tx_stopped state, and it doesn't change the state.
		 */
		else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_STARTED;

		else if (cmd == BNX2X_F_CMD_TX_STOP)
			next_state = BNX2X_F_STATE_TX_STOPPED;

		break;
	case BNX2X_F_STATE_TX_STOPPED:
		if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
		    (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_TX_STOPPED;

		else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
			next_state = BNX2X_F_STATE_TX_STOPPED;

		else if (cmd == BNX2X_F_CMD_TX_START)
			next_state = BNX2X_F_STATE_STARTED;

		break;
	default:
		BNX2X_ERR("Unknown state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != BNX2X_F_STATE_MAX) {
		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
		   state, cmd, next_state);
		o->next_state = next_state;
		return 0;
	}

	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
	   state, cmd);

	return -EINVAL;
}

/**
 * bnx2x_func_init_func - performs HW init at function stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW init callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
 * HW blocks.
 */
static inline int bnx2x_func_init_func(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	return drv->init_hw_func(bp);
}
/**
 * bnx2x_func_init_port - performs HW init at port stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW init callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
 * FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_port(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_port(bp);
	if (rc)
		return rc;

	/* Port init always implies function init as well */
	return bnx2x_func_init_func(bp, drv);
}

/**
 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW init callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
					   const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_cmn_chip(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_port(bp, drv);
}

/**
 * bnx2x_func_init_cmn - performs HW init at common stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW init callbacks
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
				      const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_cmn(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_port(bp, drv);
}

/* Run the HW-init stage selected by the MCP-provided load phase.
 * Unzips and loads the FW first; each COMMON/PORT phase cascades
 * down to the narrower stages (see the helpers above).
 * On success the command is completed immediately since no ramrods
 * were sent.
 */
static int bnx2x_func_hw_init(struct bnx2x *bp,
			      struct bnx2x_func_state_params *params)
{
	u32 load_code = params->params.hw_init.load_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
	int rc = 0;

	DP(BNX2X_MSG_SP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	/* Prepare buffers for unzipping the FW */
	rc = drv->gunzip_init(bp);
	if (rc)
		return rc;

	/* Prepare FW */
	rc = drv->init_fw(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		goto init_err;
	}

	/* Handle the beginning of COMMON_XXX phases separately... */
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_func_init_cmn_chip(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_func_init_cmn(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_func_init_port(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_func_init_func(bp, drv);
		if (rc)
			goto init_err;

		break;
	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		rc = -EINVAL;
	}

init_err:
	/* Release the unzip buffers on both success and error paths */
	drv->gunzip_end(bp);

	/* In case of success, complete the command immediately: no ramrods
	 * have been sent.
	 */
	if (!rc)
		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);

	return rc;
}

/**
 * bnx2x_func_reset_func - reset HW at function stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
 * FUNCTION-only HW blocks.
 */
static inline void bnx2x_func_reset_func(struct bnx2x *bp,
					 const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_func(bp);
}
/**
 * bnx2x_func_reset_port - reset HW at port stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
 * FUNCTION-only and PORT-only HW blocks.
 *
 * !!!IMPORTANT!!!
 *
 * It's important to call reset_port before reset_func() as the last thing
 * reset_func does is pf_disable() thus disabling PGLUE_B, which
 * makes impossible any DMAE transactions.
 */
static inline void bnx2x_func_reset_port(struct bnx2x *bp,
					 const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_port(bp);
	bnx2x_func_reset_func(bp, drv);
}

/**
 * bnx2x_func_reset_cmn - reset HW at common stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
 */
static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	/* Port (and function) first - see the IMPORTANT note above */
	bnx2x_func_reset_port(bp, drv);
	drv->reset_hw_cmn(bp);
}

/* Run the HW-reset stage selected by the MCP-provided reset phase.
 * Purely driver-side: completes the command immediately since no
 * ramrods are sent.  Always returns 0 (unknown phases are only logged).
 */
static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
				      struct bnx2x_func_state_params *params)
{
	u32 reset_phase = params->params.hw_reset.reset_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;

	DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
	   reset_phase);

	switch (reset_phase) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_func_reset_cmn(bp, drv);
		break;
	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_func_reset_port(bp, drv);
		break;
	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_func_reset_func(bp, drv);
		break;
	default:
		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
			  reset_phase);
		break;
	}

	/* Complete the command immediately: no ramrods have been sent. */
	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);

	return 0;
}

/* Build and post the FUNCTION_START ramrod from the parameters in
 * @params->params.start (MF mode, S-tag config, tunnel classification,
 * CoS-to-priority translation table, ...).
 */
static inline int bnx2x_func_send_start(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_start_data *rdata =
		(struct function_start_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_start_params *start_params = &params->params.start;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->function_mode = (u8)start_params->mf_mode;
	rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
	rdata->path_id = BP_PATH(bp);
	rdata->network_cos_mode = start_params->network_cos_mode;

	/* Tunnel (VXLAN/GRE/GENEVE) classification configuration */
	rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port);
	rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port);
	rdata->inner_clss_l2gre = start_params->inner_clss_l2gre;
	rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve;
	rdata->inner_clss_vxlan = start_params->inner_clss_vxlan;
	rdata->inner_rss = start_params->inner_rss;

	rdata->sd_accept_mf_clss_fail = start_params->class_fail;
	if (start_params->class_fail_ethtype) {
		rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
		rdata->sd_accept_mf_clss_fail_ethtype =
			cpu_to_le16(start_params->class_fail_ethtype);
	}

	rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri;
	rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val;
	/* Default the outer-VLAN ethertype to standard 802.1Q if unset */
	if (start_params->sd_vlan_eth_type)
		rdata->sd_vlan_eth_type =
			cpu_to_le16(start_params->sd_vlan_eth_type);
	else
		rdata->sd_vlan_eth_type =
			cpu_to_le16(0x8100);

	rdata->no_added_tags = start_params->no_added_tags;

	rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid;
	if (rdata->c2s_pri_tt_valid) {
		memcpy(rdata->c2s_pri_trans_table.val,
		       start_params->c2s_pri,
		       MAX_VLAN_PRIORITIES);
		rdata->c2s_pri_default = start_params->c2s_pri_default;
	}
	/* No need for an explicit memory barrier here as long we would
	 * need to ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read and we will have to put a full memory barrier there
	 * (inside bnx2x_sp_post()).
	 */

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}

/* Build and post the FUNCTION_UPDATE ramrod carrying only the fields
 * whose change bits are set in @params->params.switch_update.changes.
 */
static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_update_data *rdata =
		(struct function_update_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_switch_update_params *switch_update_params =
		&params->params.switch_update;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	if (test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
		     &switch_update_params->changes)) {
		rdata->tx_switch_suspend_change_flg = 1;
		rdata->tx_switch_suspend =
			test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
				 &switch_update_params->changes);
	}

	if (test_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
		     &switch_update_params->changes)) {
		rdata->sd_vlan_tag_change_flg = 1;
		rdata->sd_vlan_tag =
			cpu_to_le16(switch_update_params->vlan);
	}

	if (test_bit(BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
		     &switch_update_params->changes)) {
		rdata->sd_vlan_eth_type_change_flg = 1;
		rdata->sd_vlan_eth_type =
			cpu_to_le16(switch_update_params->vlan_eth_type);
	}

	if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
		     &switch_update_params->changes)) {
		rdata->sd_vlan_force_pri_change_flg = 1;
		if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
			     &switch_update_params->changes))
			rdata->sd_vlan_force_pri_flg = 1;
		/* NOTE(review): this assignment overwrites the flag set just
		 * above; bnx2x_func_send_start() stores the priority value in
		 * sd_vlan_force_pri_val - looks like this was meant to target
		 * ..._val. Confirm against the FW interface before changing.
		 */
		rdata->sd_vlan_force_pri_flg =
			switch_update_params->vlan_force_prio;
	}

	if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
		     &switch_update_params->changes)) {
		rdata->update_tunn_cfg_flg = 1;
		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
			     &switch_update_params->changes))
			rdata->inner_clss_l2gre = 1;
		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
			     &switch_update_params->changes))
			rdata->inner_clss_vxlan = 1;
		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
			     &switch_update_params->changes))
			rdata->inner_clss_l2geneve = 1;
		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
			     &switch_update_params->changes))
			rdata->inner_rss = 1;
		rdata->vxlan_dst_port =
			cpu_to_le16(switch_update_params->vxlan_dst_port);
		rdata->geneve_dst_port =
			cpu_to_le16(switch_update_params->geneve_dst_port);
	}

	/* Tag the completion so it can be told apart from AFEX updates */
	rdata->echo = SWITCH_UPDATE;

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}

/* Build and post the FUNCTION_UPDATE ramrod for an AFEX update:
 * always refreshes VIF id, default VLAN and allowed priorities.
 * Uses the dedicated afex_rdata buffer rather than o->rdata.
 */
static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
					 struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_update_data *rdata =
		(struct function_update_data *)o->afex_rdata;
	dma_addr_t data_mapping = o->afex_rdata_mapping;
	struct bnx2x_func_afex_update_params *afex_update_params =
		&params->params.afex_update;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_id_change_flg = 1;
	rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
	rdata->afex_default_vlan_change_flg = 1;
	rdata->afex_default_vlan =
		cpu_to_le16(afex_update_params->afex_default_vlan);
	rdata->allowed_priorities_change_flg = 1;
	rdata->allowed_priorities = afex_update_params->allowed_priorities;
	rdata->echo = AFEX_UPDATE;

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */
	DP(BNX2X_MSG_SP,
	   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
	   rdata->vif_id,
	   rdata->afex_default_vlan, rdata->allowed_priorities);

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
Witkowski 6336a3348722SBarak Witkowski /* No need for an explicit memory barrier here as long we would 6337a3348722SBarak Witkowski * need to ensure the ordering of writing to the SPQ element 6338a3348722SBarak Witkowski * and updating of the SPQ producer which involves a memory 6339a3348722SBarak Witkowski * read and we will have to put a full memory barrier there 6340a3348722SBarak Witkowski * (inside bnx2x_sp_post()). 6341a3348722SBarak Witkowski */ 6342a3348722SBarak Witkowski 6343a3348722SBarak Witkowski DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n", 6344a3348722SBarak Witkowski rdata->afex_vif_list_command, rdata->vif_list_index, 6345a3348722SBarak Witkowski rdata->func_bit_map, rdata->func_to_clear); 6346a3348722SBarak Witkowski 6347a3348722SBarak Witkowski /* this ramrod sends data directly and not through DMA mapping */ 6348a3348722SBarak Witkowski return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0, 6349a3348722SBarak Witkowski U64_HI(*p_rdata), U64_LO(*p_rdata), 6350a3348722SBarak Witkowski NONE_CONNECTION_TYPE); 6351a3348722SBarak Witkowski } 6352a3348722SBarak Witkowski 6353adfc5217SJeff Kirsher static inline int bnx2x_func_send_stop(struct bnx2x *bp, 6354adfc5217SJeff Kirsher struct bnx2x_func_state_params *params) 6355adfc5217SJeff Kirsher { 6356adfc5217SJeff Kirsher return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 6357adfc5217SJeff Kirsher NONE_CONNECTION_TYPE); 6358adfc5217SJeff Kirsher } 6359adfc5217SJeff Kirsher 6360adfc5217SJeff Kirsher static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp, 6361adfc5217SJeff Kirsher struct bnx2x_func_state_params *params) 6362adfc5217SJeff Kirsher { 6363adfc5217SJeff Kirsher return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0, 6364adfc5217SJeff Kirsher NONE_CONNECTION_TYPE); 6365adfc5217SJeff Kirsher } 6366adfc5217SJeff Kirsher static inline int bnx2x_func_send_tx_start(struct bnx2x *bp, 6367adfc5217SJeff 
Kirsher struct bnx2x_func_state_params *params) 6368adfc5217SJeff Kirsher { 6369adfc5217SJeff Kirsher struct bnx2x_func_sp_obj *o = params->f_obj; 6370adfc5217SJeff Kirsher struct flow_control_configuration *rdata = 6371adfc5217SJeff Kirsher (struct flow_control_configuration *)o->rdata; 6372adfc5217SJeff Kirsher dma_addr_t data_mapping = o->rdata_mapping; 6373adfc5217SJeff Kirsher struct bnx2x_func_tx_start_params *tx_start_params = 6374adfc5217SJeff Kirsher ¶ms->params.tx_start; 6375adfc5217SJeff Kirsher int i; 6376adfc5217SJeff Kirsher 6377adfc5217SJeff Kirsher memset(rdata, 0, sizeof(*rdata)); 6378adfc5217SJeff Kirsher 6379adfc5217SJeff Kirsher rdata->dcb_enabled = tx_start_params->dcb_enabled; 6380adfc5217SJeff Kirsher rdata->dcb_version = tx_start_params->dcb_version; 6381adfc5217SJeff Kirsher rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en; 6382adfc5217SJeff Kirsher 6383adfc5217SJeff Kirsher for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++) 6384adfc5217SJeff Kirsher rdata->traffic_type_to_priority_cos[i] = 6385adfc5217SJeff Kirsher tx_start_params->traffic_type_to_priority_cos[i]; 6386adfc5217SJeff Kirsher 638728311f8eSYuval Mintz for (i = 0; i < MAX_TRAFFIC_TYPES; i++) 638828311f8eSYuval Mintz rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i]; 638914a94ebdSMichal Kalderon /* No need for an explicit memory barrier here as long as we 639014a94ebdSMichal Kalderon * ensure the ordering of writing to the SPQ element 639114a94ebdSMichal Kalderon * and updating of the SPQ producer which involves a memory 639214a94ebdSMichal Kalderon * read. If the memory read is removed we will have to put a 639314a94ebdSMichal Kalderon * full memory barrier there (inside bnx2x_sp_post()). 
639414a94ebdSMichal Kalderon */ 6395adfc5217SJeff Kirsher return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, 6396adfc5217SJeff Kirsher U64_HI(data_mapping), 6397adfc5217SJeff Kirsher U64_LO(data_mapping), NONE_CONNECTION_TYPE); 6398adfc5217SJeff Kirsher } 6399adfc5217SJeff Kirsher 6400eeed018cSMichal Kalderon static inline 6401eeed018cSMichal Kalderon int bnx2x_func_send_set_timesync(struct bnx2x *bp, 6402eeed018cSMichal Kalderon struct bnx2x_func_state_params *params) 6403eeed018cSMichal Kalderon { 6404eeed018cSMichal Kalderon struct bnx2x_func_sp_obj *o = params->f_obj; 6405eeed018cSMichal Kalderon struct set_timesync_ramrod_data *rdata = 6406eeed018cSMichal Kalderon (struct set_timesync_ramrod_data *)o->rdata; 6407eeed018cSMichal Kalderon dma_addr_t data_mapping = o->rdata_mapping; 6408eeed018cSMichal Kalderon struct bnx2x_func_set_timesync_params *set_timesync_params = 6409eeed018cSMichal Kalderon ¶ms->params.set_timesync; 6410eeed018cSMichal Kalderon 6411eeed018cSMichal Kalderon memset(rdata, 0, sizeof(*rdata)); 6412eeed018cSMichal Kalderon 6413eeed018cSMichal Kalderon /* Fill the ramrod data with provided parameters */ 6414eeed018cSMichal Kalderon rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd; 6415eeed018cSMichal Kalderon rdata->offset_cmd = set_timesync_params->offset_cmd; 6416eeed018cSMichal Kalderon rdata->add_sub_drift_adjust_value = 6417eeed018cSMichal Kalderon set_timesync_params->add_sub_drift_adjust_value; 6418eeed018cSMichal Kalderon rdata->drift_adjust_value = set_timesync_params->drift_adjust_value; 6419eeed018cSMichal Kalderon rdata->drift_adjust_period = set_timesync_params->drift_adjust_period; 64208f15c613SMichal Kalderon rdata->offset_delta.lo = 64218f15c613SMichal Kalderon cpu_to_le32(U64_LO(set_timesync_params->offset_delta)); 64228f15c613SMichal Kalderon rdata->offset_delta.hi = 64238f15c613SMichal Kalderon cpu_to_le32(U64_HI(set_timesync_params->offset_delta)); 6424eeed018cSMichal Kalderon 6425eeed018cSMichal 
Kalderon DP(BNX2X_MSG_SP, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n", 6426eeed018cSMichal Kalderon rdata->drift_adjust_cmd, rdata->offset_cmd, 6427eeed018cSMichal Kalderon rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value, 6428eeed018cSMichal Kalderon rdata->drift_adjust_period, rdata->offset_delta.lo, 6429eeed018cSMichal Kalderon rdata->offset_delta.hi); 6430eeed018cSMichal Kalderon 6431eeed018cSMichal Kalderon return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0, 6432eeed018cSMichal Kalderon U64_HI(data_mapping), 6433eeed018cSMichal Kalderon U64_LO(data_mapping), NONE_CONNECTION_TYPE); 6434eeed018cSMichal Kalderon } 6435eeed018cSMichal Kalderon 6436adfc5217SJeff Kirsher static int bnx2x_func_send_cmd(struct bnx2x *bp, 6437adfc5217SJeff Kirsher struct bnx2x_func_state_params *params) 6438adfc5217SJeff Kirsher { 6439adfc5217SJeff Kirsher switch (params->cmd) { 6440adfc5217SJeff Kirsher case BNX2X_F_CMD_HW_INIT: 6441adfc5217SJeff Kirsher return bnx2x_func_hw_init(bp, params); 6442adfc5217SJeff Kirsher case BNX2X_F_CMD_START: 6443adfc5217SJeff Kirsher return bnx2x_func_send_start(bp, params); 6444adfc5217SJeff Kirsher case BNX2X_F_CMD_STOP: 6445adfc5217SJeff Kirsher return bnx2x_func_send_stop(bp, params); 6446adfc5217SJeff Kirsher case BNX2X_F_CMD_HW_RESET: 6447adfc5217SJeff Kirsher return bnx2x_func_hw_reset(bp, params); 6448a3348722SBarak Witkowski case BNX2X_F_CMD_AFEX_UPDATE: 6449a3348722SBarak Witkowski return bnx2x_func_send_afex_update(bp, params); 6450a3348722SBarak Witkowski case BNX2X_F_CMD_AFEX_VIFLISTS: 6451a3348722SBarak Witkowski return bnx2x_func_send_afex_viflists(bp, params); 6452adfc5217SJeff Kirsher case BNX2X_F_CMD_TX_STOP: 6453adfc5217SJeff Kirsher return bnx2x_func_send_tx_stop(bp, params); 6454adfc5217SJeff Kirsher case BNX2X_F_CMD_TX_START: 6455adfc5217SJeff Kirsher return bnx2x_func_send_tx_start(bp, params); 
645655c11941SMerav Sicron case BNX2X_F_CMD_SWITCH_UPDATE: 645755c11941SMerav Sicron return bnx2x_func_send_switch_update(bp, params); 6458eeed018cSMichal Kalderon case BNX2X_F_CMD_SET_TIMESYNC: 6459eeed018cSMichal Kalderon return bnx2x_func_send_set_timesync(bp, params); 6460adfc5217SJeff Kirsher default: 6461adfc5217SJeff Kirsher BNX2X_ERR("Unknown command: %d\n", params->cmd); 6462adfc5217SJeff Kirsher return -EINVAL; 6463adfc5217SJeff Kirsher } 6464adfc5217SJeff Kirsher } 6465adfc5217SJeff Kirsher 6466adfc5217SJeff Kirsher void bnx2x_init_func_obj(struct bnx2x *bp, 6467adfc5217SJeff Kirsher struct bnx2x_func_sp_obj *obj, 6468adfc5217SJeff Kirsher void *rdata, dma_addr_t rdata_mapping, 6469a3348722SBarak Witkowski void *afex_rdata, dma_addr_t afex_rdata_mapping, 6470adfc5217SJeff Kirsher struct bnx2x_func_sp_drv_ops *drv_iface) 6471adfc5217SJeff Kirsher { 6472adfc5217SJeff Kirsher memset(obj, 0, sizeof(*obj)); 6473adfc5217SJeff Kirsher 6474adfc5217SJeff Kirsher mutex_init(&obj->one_pending_mutex); 6475adfc5217SJeff Kirsher 6476adfc5217SJeff Kirsher obj->rdata = rdata; 6477adfc5217SJeff Kirsher obj->rdata_mapping = rdata_mapping; 6478a3348722SBarak Witkowski obj->afex_rdata = afex_rdata; 6479a3348722SBarak Witkowski obj->afex_rdata_mapping = afex_rdata_mapping; 6480adfc5217SJeff Kirsher obj->send_cmd = bnx2x_func_send_cmd; 6481adfc5217SJeff Kirsher obj->check_transition = bnx2x_func_chk_transition; 6482adfc5217SJeff Kirsher obj->complete_cmd = bnx2x_func_comp_cmd; 6483adfc5217SJeff Kirsher obj->wait_comp = bnx2x_func_wait_comp; 6484adfc5217SJeff Kirsher 6485adfc5217SJeff Kirsher obj->drv = drv_iface; 6486adfc5217SJeff Kirsher } 6487adfc5217SJeff Kirsher 6488adfc5217SJeff Kirsher /** 6489adfc5217SJeff Kirsher * bnx2x_func_state_change - perform Function state change transition 6490adfc5217SJeff Kirsher * 6491adfc5217SJeff Kirsher * @bp: device handle 6492adfc5217SJeff Kirsher * @params: parameters to perform the transaction 6493adfc5217SJeff Kirsher * 
6494adfc5217SJeff Kirsher * returns 0 in case of successfully completed transition, 6495adfc5217SJeff Kirsher * negative error code in case of failure, positive 6496adfc5217SJeff Kirsher * (EBUSY) value if there is a completion to that is 6497adfc5217SJeff Kirsher * still pending (possible only if RAMROD_COMP_WAIT is 6498adfc5217SJeff Kirsher * not set in params->ramrod_flags for asynchronous 6499adfc5217SJeff Kirsher * commands). 6500adfc5217SJeff Kirsher */ 6501adfc5217SJeff Kirsher int bnx2x_func_state_change(struct bnx2x *bp, 6502adfc5217SJeff Kirsher struct bnx2x_func_state_params *params) 6503adfc5217SJeff Kirsher { 6504adfc5217SJeff Kirsher struct bnx2x_func_sp_obj *o = params->f_obj; 650555c11941SMerav Sicron int rc, cnt = 300; 6506adfc5217SJeff Kirsher enum bnx2x_func_cmd cmd = params->cmd; 6507adfc5217SJeff Kirsher unsigned long *pending = &o->pending; 6508adfc5217SJeff Kirsher 6509adfc5217SJeff Kirsher mutex_lock(&o->one_pending_mutex); 6510adfc5217SJeff Kirsher 6511adfc5217SJeff Kirsher /* Check that the requested transition is legal */ 651255c11941SMerav Sicron rc = o->check_transition(bp, o, params); 651355c11941SMerav Sicron if ((rc == -EBUSY) && 651455c11941SMerav Sicron (test_bit(RAMROD_RETRY, ¶ms->ramrod_flags))) { 651555c11941SMerav Sicron while ((rc == -EBUSY) && (--cnt > 0)) { 6516adfc5217SJeff Kirsher mutex_unlock(&o->one_pending_mutex); 651755c11941SMerav Sicron msleep(10); 651855c11941SMerav Sicron mutex_lock(&o->one_pending_mutex); 651955c11941SMerav Sicron rc = o->check_transition(bp, o, params); 652055c11941SMerav Sicron } 652155c11941SMerav Sicron if (rc == -EBUSY) { 652255c11941SMerav Sicron mutex_unlock(&o->one_pending_mutex); 652355c11941SMerav Sicron BNX2X_ERR("timeout waiting for previous ramrod completion\n"); 652455c11941SMerav Sicron return rc; 652555c11941SMerav Sicron } 652655c11941SMerav Sicron } else if (rc) { 652755c11941SMerav Sicron mutex_unlock(&o->one_pending_mutex); 652855c11941SMerav Sicron return rc; 6529adfc5217SJeff 
Kirsher } 6530adfc5217SJeff Kirsher 6531adfc5217SJeff Kirsher /* Set "pending" bit */ 6532adfc5217SJeff Kirsher set_bit(cmd, pending); 6533adfc5217SJeff Kirsher 6534adfc5217SJeff Kirsher /* Don't send a command if only driver cleanup was requested */ 6535adfc5217SJeff Kirsher if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { 6536adfc5217SJeff Kirsher bnx2x_func_state_change_comp(bp, o, cmd); 6537adfc5217SJeff Kirsher mutex_unlock(&o->one_pending_mutex); 6538adfc5217SJeff Kirsher } else { 6539adfc5217SJeff Kirsher /* Send a ramrod */ 6540adfc5217SJeff Kirsher rc = o->send_cmd(bp, params); 6541adfc5217SJeff Kirsher 6542adfc5217SJeff Kirsher mutex_unlock(&o->one_pending_mutex); 6543adfc5217SJeff Kirsher 6544adfc5217SJeff Kirsher if (rc) { 6545adfc5217SJeff Kirsher o->next_state = BNX2X_F_STATE_MAX; 6546adfc5217SJeff Kirsher clear_bit(cmd, pending); 65474e857c58SPeter Zijlstra smp_mb__after_atomic(); 6548adfc5217SJeff Kirsher return rc; 6549adfc5217SJeff Kirsher } 6550adfc5217SJeff Kirsher 6551adfc5217SJeff Kirsher if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { 6552adfc5217SJeff Kirsher rc = o->wait_comp(bp, o, cmd); 6553adfc5217SJeff Kirsher if (rc) 6554adfc5217SJeff Kirsher return rc; 6555adfc5217SJeff Kirsher 6556adfc5217SJeff Kirsher return 0; 6557adfc5217SJeff Kirsher } 6558adfc5217SJeff Kirsher } 6559adfc5217SJeff Kirsher 6560adfc5217SJeff Kirsher return !!test_bit(cmd, pending); 6561adfc5217SJeff Kirsher } 6562